repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
gridpp/dirac-getting-started | perform_frame_query.py | 1 | 3626 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DIRAC and GridPP: perform a query on the CERN@school frames.
"""
#...for the operating system stuff.
import os
#...for parsing the arguments.
import argparse
#...for the logging.
import logging as lg
# Import the JSON library.
import json
# The DIRAC import statements.
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.Interfaces.API.Dirac import Dirac
#...for the DIRAC File Catalog client interface.
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
if __name__ == "__main__":
print("")
print("########################################################")
print("* GridPP and DIRAC: CERN@school frame metadata queries *")
print("########################################################")
print("")
# Get the datafile path from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("queryJson", help="Path to the query JSON.")
parser.add_argument("outputPath", help="The path for the output files.")
parser.add_argument("dfcBaseDir", help="The name of the base directory on the DFC.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
args = parser.parse_args()
## The path to the data file.
datapath = args.queryJson
## The output path.
outputpath = args.outputPath
# Check if the output directory exists. If it doesn't, quit.
if not os.path.isdir(outputpath):
raise IOError("* ERROR: '%s' output directory does not exist!" % (outputpath))
## Base directory for the file uploads.
dfc_base = args.dfcBaseDir
# Set the logging level.
if args.verbose:
level=lg.DEBUG
else:
level=lg.INFO
# Configure the logging.
lg.basicConfig(filename='log_perform_frame_query.log', filemode='w', level=level)
print("*")
print("* Input JSON : '%s'" % (datapath))
print("* Output path : '%s'" % (outputpath))
print("* DFC base directory : '%s'" % (dfc_base))
## The DFC client.
fc = FileCatalogClient()
## The frame query JSON file.
if not os.path.isfile(datapath):
raise IOError("* ERROR: '%s' query JSON does not exist!" % (datapath))
qf = open(datapath, "r")
#
qd = json.load(qf)
qf.close()
meta_dict = {\
"start_time" : { ">=" : int(qd[0]["start_time"]) },
"end_time" : { "<=" : int(qd[0]["end_time" ]) }
# #"lat" : { ">" : 60.0 }\
# #"n_pixel" : { ">" : 700 }\
# #"n_kluster" : { ">" : 40}\
}
## The query result.
result = fc.findFilesByMetadata(meta_dict, path=dfc_base)
print("*")
print "* Metadata query:", meta_dict
print("*")
print("* Number of frames found : %d" % (len(result["Value"])))
print("*")
# Get the cluster file names from the metadata query.
# ## Kluster file names.
# kluster_file_names = []
for fn in sorted(result["Value"]):
#print("* Found: '%s'." % (fn))
filemetadata = fc.getFileUserMetadata(fn)
frameid = str(filemetadata['Value']['frameid'])
n_kluster = int(filemetadata['Value']['n_kluster'])
print("*--> Frame ID : '%s'" % (frameid))
print("*--> Number of clusters = %d" % (n_kluster))
#print("*")
# for i in range(n_kluster):
# kn = "%s_k%05d.png" % (frameid, i)
# kluster_file_names.append(kn)
# print("*")
#
# #lg.info(" * Clusters to be downloaded:")
# #for kn in kluster_names:
# # lg.info(" *--> '%s'" % (kn))
#
# print("* Number of clusters found : %d" % (len(kluster_file_names)))
| mit | -6,275,122,451,724,172,000 | 28.479675 | 97 | 0.565913 | false |
QISKit/qiskit-sdk-py | test/python/transpiler/test_consolidate_blocks.py | 1 | 11319 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tests for the ConsolidateBlocks transpiler pass.
"""
import unittest
import numpy as np
from qiskit import transpile
from qiskit.circuit import QuantumCircuit, QuantumRegister
from qiskit.extensions import UnitaryGate
from qiskit.converters import circuit_to_dag
from qiskit.execute import execute
from qiskit.transpiler.passes import ConsolidateBlocks
from qiskit.providers.basicaer import UnitarySimulatorPy
from qiskit.quantum_info.operators.measures import process_fidelity
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import Collect2qBlocks
class TestConsolidateBlocks(QiskitTestCase):
"""
Tests to verify that consolidating blocks of gates into unitaries
works correctly.
"""
def test_consolidate_small_block(self):
"""test a small block of gates can be turned into a unitary on same wires"""
qr = QuantumRegister(2, "qr")
qc = QuantumCircuit(qr)
qc.u1(0.5, qr[0])
qc.u2(0.2, 0.6, qr[1])
qc.cx(qr[0], qr[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
sim = UnitarySimulatorPy()
result = execute(qc, sim).result()
unitary = UnitaryGate(result.get_unitary())
self.assertEqual(len(new_dag.op_nodes()), 1)
fidelity = process_fidelity(new_dag.op_nodes()[0].op.to_matrix(), unitary.to_matrix())
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_wire_order(self):
"""order of qubits and the corresponding unitary is correct"""
qr = QuantumRegister(2, "qr")
qc = QuantumCircuit(qr)
qc.cx(qr[1], qr[0])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [dag.op_nodes()]
new_dag = pass_.run(dag)
new_node = new_dag.op_nodes()[0]
self.assertEqual(new_node.qargs, [qr[0], qr[1]])
# the canonical CNOT matrix occurs when the control is more
# significant than target, which is the case here
fidelity = process_fidelity(new_node.op.to_matrix(), np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0]]))
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_topological_order_preserved(self):
"""the original topological order of nodes is preserved
______
q0:--[u1]-------.---- q0:-------------| |--
| ______ | U2 |
q1:--[u2]--(+)-(+)--- = q1:---| |--|______|--
| | U1 |
q2:---------.-------- q2:---|______|------------
"""
qr = QuantumRegister(3, "qr")
qc = QuantumCircuit(qr)
qc.u1(0.5, qr[0])
qc.u2(0.2, 0.6, qr[1])
qc.cx(qr[2], qr[1])
qc.cx(qr[0], qr[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
topo_ops = list(dag.topological_op_nodes())
block_1 = [topo_ops[1], topo_ops[2]]
block_2 = [topo_ops[0], topo_ops[3]]
pass_.property_set['block_list'] = [block_1, block_2]
new_dag = pass_.run(dag)
new_topo_ops = [i for i in new_dag.topological_op_nodes() if i.type == 'op']
self.assertEqual(len(new_topo_ops), 2)
self.assertEqual(new_topo_ops[0].qargs, [qr[1], qr[2]])
self.assertEqual(new_topo_ops[1].qargs, [qr[0], qr[1]])
def test_3q_blocks(self):
"""blocks of more than 2 qubits work."""
qr = QuantumRegister(3, "qr")
qc = QuantumCircuit(qr)
qc.u1(0.5, qr[0])
qc.u2(0.2, 0.6, qr[1])
qc.cx(qr[2], qr[1])
qc.cx(qr[0], qr[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
sim = UnitarySimulatorPy()
result = execute(qc, sim).result()
unitary = UnitaryGate(result.get_unitary())
self.assertEqual(len(new_dag.op_nodes()), 1)
fidelity = process_fidelity(new_dag.op_nodes()[0].op.to_matrix(), unitary.to_matrix())
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_block_spanning_two_regs(self):
"""blocks spanning wires on different quantum registers work."""
qr0 = QuantumRegister(1, "qr0")
qr1 = QuantumRegister(1, "qr1")
qc = QuantumCircuit(qr0, qr1)
qc.u1(0.5, qr0[0])
qc.u2(0.2, 0.6, qr1[0])
qc.cx(qr0[0], qr1[0])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
sim = UnitarySimulatorPy()
result = execute(qc, sim).result()
unitary = UnitaryGate(result.get_unitary())
self.assertEqual(len(new_dag.op_nodes()), 1)
fidelity = process_fidelity(new_dag.op_nodes()[0].op.to_matrix(), unitary.to_matrix())
self.assertAlmostEqual(fidelity, 1.0, places=7)
def test_block_spanning_two_regs_different_index(self):
"""blocks spanning wires on different quantum registers work when the wires
could have conflicting indices. This was raised in #2806 when a CX was applied
across multiple registers and their indices collided, raising an error."""
qr0 = QuantumRegister(1, "qr0")
qr1 = QuantumRegister(2, "qr1")
qc = QuantumCircuit(qr0, qr1)
qc.cx(qr0[0], qr1[1])
dag = circuit_to_dag(qc)
pass_ = ConsolidateBlocks(force_consolidate=True)
pass_.property_set['block_list'] = [list(dag.topological_op_nodes())]
new_dag = pass_.run(dag)
sim = UnitarySimulatorPy()
original_result = execute(qc, sim).result()
original_unitary = UnitaryGate(original_result.get_unitary())
from qiskit.converters import dag_to_circuit
new_result = execute(dag_to_circuit(new_dag), sim).result()
new_unitary = UnitaryGate(new_result.get_unitary())
self.assertEqual(original_unitary, new_unitary)
def test_node_added_before_block(self):
"""Test that a node before a block remains before the block
This issue was raised in #2737 where the measure was moved
to be after the 2nd ID gate, as the block was added when the
first node in the block was seen.
blocks = [['id', 'cx', 'id']]
┌────┐┌───┐
q_0: |0>┤ Id ├┤ X ├──────
└┬─┬─┘└─┬─┘┌────┐
q_1: |0>─┤M├────■──┤ Id ├
└╥┘ └────┘
c_0: 0 ══╩══════════════
"""
qc = QuantumCircuit(2, 1)
qc.iden(0)
qc.measure(1, 0)
qc.cx(1, 0)
qc.iden(1)
# can't just add all the nodes to one block as in other tests
# as we are trying to test the block gets added in the correct place
# so use a pass to collect the blocks instead
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = transpile(qc, pass_manager=pass_manager)
self.assertEqual(qc, qc1)
def test_node_added_after_block(self):
"""Test that a node after the block remains after the block
This example was raised in #2764, and checks that the final CX
stays after the main block, even though one of the nodes in the
block was declared after it. This occurred when the block was
added when the last node in the block was seen.
blocks = [['cx', 'id', 'id']]
q_0: |0>─────────────■──
┌────┐┌─┴─┐
q_1: |0>──■──┤ Id ├┤ X ├
┌─┴─┐├────┤└───┘
q_2: |0>┤ X ├┤ Id ├─────
└───┘└────┘
"""
qc = QuantumCircuit(3)
qc.cx(1, 2)
qc.iden(1)
qc.cx(0, 1)
qc.iden(2)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = transpile(qc, pass_manager=pass_manager)
self.assertEqual(qc, qc1)
def test_node_middle_of_blocks(self):
"""Test that a node surrounded by blocks stays in the same place
This is a larger test to ensure multiple blocks can all be collected
and added back in the correct order.
blocks = [['cx', 'id'], ['cx', 'id'], ['id', 'cx'], ['id', 'cx']]
q_0: |0>──■───────────────────■──
┌─┴─┐┌────┐ ┌────┐┌─┴─┐
q_1: |0>┤ X ├┤ Id ├─X─┤ Id ├┤ X ├
├───┤├────┤ │ ├────┤├───┤
q_2: |0>┤ X ├┤ Id ├─X─┤ Id ├┤ X ├
└─┬─┘└────┘ └────┘└─┬─┘
q_3: |0>──■───────────────────■──
"""
qc = QuantumCircuit(4)
qc.cx(0, 1)
qc.cx(3, 2)
qc.iden(1)
qc.iden(2)
qc.swap(1, 2)
qc.iden(1)
qc.iden(2)
qc.cx(0, 1)
qc.cx(3, 2)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = transpile(qc, pass_manager=pass_manager)
self.assertEqual(qc, qc1)
def test_classical_conditions_maintained(self):
"""Test that consolidate blocks doesn't drop the classical conditions
This issue was raised in #2752
"""
qc = QuantumCircuit(1, 1)
qc.h(0).c_if(qc.cregs[0], 1)
qc.measure(0, 0)
pass_manager = PassManager()
pass_manager.append(Collect2qBlocks())
pass_manager.append(ConsolidateBlocks())
qc1 = transpile(qc, pass_manager=pass_manager)
self.assertEqual(qc, qc1)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 5,975,666,243,580,427,000 | 35.811644 | 94 | 0.553354 | false |
tmaciejewski/ipa | test/test_ipa_db.py | 1 | 5075 | import unittest
import datetime
import time
import ipa_db
import ipa_config
class TestDb(unittest.TestCase):
@classmethod
def setUpClass(self):
self.db = ipa_db.Db(ipa_config.db['test'])
def setUp(self):
self.db.remove_schema()
self.db.create_schema()
def test_adding_trains(self):
name = ['train', 'other train']
self.assertEqual(list(self.db.get_train_id(name[0])), [])
self.assertEqual(list(self.db.get_train_id(name[1])), [])
self.db.add_train(name[0])
self.db.add_train(name[1])
self.db.commit()
self.assertEqual(list(self.db.get_train_id(name[0])), [{'train_name': name[0], 'train_id': 1}])
self.assertEqual(list(self.db.get_train_id(name[1])), [{'train_name': name[1], 'train_id': 2}])
def test_train_name_uniqueness(self):
name = 'train'
self.db.add_train(name)
with self.assertRaises(ipa_db.DbError):
self.db.add_train(name)
self.db.commit()
self.assertEqual(list(self.db.get_train_id(name)), [{'train_name': name, 'train_id': 1}])
def test_adding_stations(self):
name = ['station', 'other station']
self.assertEqual(list(self.db.get_station_id(name[0])), [])
self.assertEqual(list(self.db.get_station_id(name[1])), [])
self.db.add_station(name[0])
self.db.add_station(name[1])
self.db.commit()
self.assertEqual(list(self.db.get_station_id(name[0])), [{'station_id': 1}])
self.assertEqual(list(self.db.get_station_id(name[1])), [{'station_id': 2}])
def test_station_name_uniqueness(self):
name = 'station'
self.db.add_station(name)
with self.assertRaises(ipa_db.DbError):
self.db.add_station(name)
self.db.commit()
self.assertEqual(list(self.db.get_station_id(name)), [{'station_id': 1}])
def test_updating_schedules(self):
schedule_id = 123
schedule_date = datetime.date(2010, 12, 21)
train_id = 1
train_name = "train name"
expected_schedule = {'schedule_id': schedule_id, 'schedule_date': schedule_date}
self.assertEqual(list(self.db.get_schedules(train_id)), [])
self.db.add_train(train_name)
self.db.update_schedule(schedule_id, str(schedule_date), train_id)
self.db.commit()
self.assertEqual(list(self.db.get_schedules(train_id)), [expected_schedule])
def test_active_schedules(self):
schedule_id = 123
schedule_date = datetime.date(2010, 12, 21)
train_id = 10
self.db.update_schedule(schedule_id, str(schedule_date), train_id)
self.db.commit()
self.assertEqual(list(self.db.get_active_schedules()), [{'schedule_id': schedule_id}])
self.db.set_active(schedule_id, False)
self.db.commit()
self.assertEqual(list(self.db.get_active_schedules()), [])
self.db.set_active(schedule_id, True)
self.db.commit()
self.assertEqual(list(self.db.get_active_schedules()), [{'schedule_id': schedule_id}])
def test_updating_schedule_infos(self):
schedule_id = 222
info = [
{'arrival_time': None, 'arrival_delay': None,
'departure_time': '2016-12-21 12:01:30', 'departure_delay': 3},
{'arrival_time': '2016-12-21 12:05:00', 'arrival_delay': -10,
'departure_time': '2016-12-21 12:08:00', 'departure_delay': 0},
{'arrival_time': '2016-12-21 12:10:10', 'arrival_delay': 0,
'departure_time': None, 'departure_delay': None},
]
self.assertEqual(list(self.db.get_schedule_infos(schedule_id)), [])
self.db.add_station('station 1')
self.db.add_station('station 2')
self.db.add_station('station 3')
self.db.update_schedule_info(schedule_id, 0, 1, info[0])
self.db.update_schedule_info(schedule_id, 2, 3, info[2])
self.db.update_schedule_info(schedule_id, 1, 2, info[1])
self.db.commit()
self.assertEqual(list(self.db.get_schedule_infos(schedule_id)),
[{'arrival_delay': None,
'arrival_time': None,
'departure_delay': 3,
'departure_time': datetime.datetime(2016, 12, 21, 12, 1, 30),
'station_name': 'station 1'},
{'arrival_delay': -10,
'arrival_time': datetime.datetime(2016, 12, 21, 12, 5, 0),
'departure_delay': 0,
'departure_time': datetime.datetime(2016, 12, 21, 12, 8, 0),
'station_name': 'station 2'},
{'arrival_delay': 0,
'arrival_time': datetime.datetime(2016, 12, 21, 12, 10, 10),
'departure_delay': None,
'departure_time': None,
'station_name': 'station 3'}])
if __name__ == '__main__':
unittest.main()
| mit | -3,475,179,416,301,964,000 | 37.157895 | 103 | 0.558424 | false |
songsense/Pregelix_Social_Graph | preprocessing/twitter_with_tags_parser.py | 1 | 4458 | import os
import sys
neighborDict = {} #dictionary containing neighbors of each node
weightDict = {} #dictionary containing weights of edges
featureDict = {} #dictionary containing features of each node
featureDictTotal = {} #dictionary containing all listed features of each node
totalFeatureDict = {} #dictionary containing features of all nodes
# the path of data files
currPath = "../twitter"
# list all files
fileArray = os.listdir(currPath)
######## get totalFeature #############
for fileGraphName in fileArray:
if fileGraphName.endswith('.featnames'): # if the file is the '*.featnames' file which lists all possible features of current node
nodeNum = fileGraphName[0:len(fileGraphName)-10]; #get current node
fileGraphName = os.path.join(currPath, fileGraphName);
fileGraph = open(fileGraphName, 'r');
line = fileGraph.readline();
featureArray = []
while line:
line = line.rstrip();
lineArray = line.split(' ');
# add each feature into dictionary
if(not totalFeatureDict.has_key(lineArray[1])):
length = len(totalFeatureDict);
totalFeatureDict[lineArray[1]] = length;
featureArray.append(lineArray[1]);
line = fileGraph.readline();
featureDictTotal[nodeNum]=featureArray;
######## get features ###############
for fileGraphName in fileArray:
if fileGraphName.endswith('.egofeat'): # if the file is the '*.egofeat' file which lists the actual features of each node
nodeNum = fileGraphName[0:len(fileGraphName)-8]; #get current node
fileGraphName = os.path.join(currPath, fileGraphName);
fileGraph = open(fileGraphName, 'r');
line = fileGraph.readline();
features = []
while line:
line = line.rstrip();
lineArray = line.split(' ');
for i in range(0, len(lineArray)):
if(lineArray[i]=='1'): #'1' indicates that the node has the feature to which '1' corresponds
features.append(totalFeatureDict[featureDictTotal[nodeNum][i]]);
line = fileGraph.readline();
featureDict[nodeNum] = features;
######### get neighbors and weights #############
for fileGraphName in fileArray:
if fileGraphName.endswith('.feat'): # if the file is the '*.feat' file which lists all the neighbors of each node and their features
nodeNum = fileGraphName[0:len(fileGraphName)-5]; #get current node
fileGraphName = os.path.join(currPath, fileGraphName)
fileGraph = open(fileGraphName, 'r');
line = fileGraph.readline();
neighbor = []; # array to contain neighbors
weights = []; #array to contain weights
## get node features ##
fileNodeFeature = open(os.path.join(currPath, nodeNum+'.egofeat'), 'r');
lineEgoFeature = fileNodeFeature.readline();
lineEgoFeature = lineEgoFeature.rstrip();
lineEgoFeatureArray = lineEgoFeature.split(' ');
while line:
line = line.rstrip();
lineArray = line.split(' ');
neighbor.append(lineArray[0]);
weight = 0;
for i in range(0, len(lineEgoFeatureArray)):
if(lineArray[i+1]=='1' and lineEgoFeatureArray[i]=='1'):# if both a neighbor and current node have a same feature, weight increases by 1
weight+=1;
weights.append(weight);
line = fileGraph.readline();
neighborDict[nodeNum] = neighbor;
weightDict[nodeNum] = weights;
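# Worked example (hedged illustration): if the ego node's '.egofeat' line is
# '1 0 1' and a neighbour's '.feat' row is '12345 1 1 1', the features at
# positions 0 and 2 are shared, so the edge to node 12345 gets weight 2.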
######### write to profile ################
### write feature and index num ####
fileName = 'featureIndex.txt'
fileOut = open(fileName, 'w');
for tag in totalFeatureDict.keys():
fileOut.writelines(tag+' '+str(totalFeatureDict[tag])+'\n')
fileOut.close()
### write neightbors and weights ####
fileName = 'graph.txt'
fileOut = open(fileName, 'w');
for nodeNum in neighborDict.keys():
line = nodeNum+' '+str(len(neighborDict[nodeNum]));
for i in range(0, len(neighborDict[nodeNum])):
line = line+' '+neighborDict[nodeNum][i];
line = line+' '+str(weightDict[nodeNum][i]);
line = line + ' ' + str(len(featureDict[nodeNum]));
for feature in featureDict[nodeNum]:
line = line + ' ' + str(feature);
line = line+'\n';
fileOut.writelines(line);
fileOut.close()
| apache-2.0 | 521,520,475,521,803,800 | 37.765217 | 152 | 0.615074 | false |
denadai2/A-Tale-of-Cities---code | converter.py | 1 | 4148 | __author__ = 'Marco De Nadai'
__license__ = "MIT"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import datetime
import csv
from collections import namedtuple
from collections import defaultdict
import fiona
from shapely.geometry import shape, Polygon
# Import the CDRs of MILANO
df = pd.read_csv('datasets/MILANO_CDRs.csv', sep=',', encoding="utf-8-sig", parse_dates=['datetime'])
# Considered technologies list
TECHNOLOGIES = ['GSM1800']
# Import the technologies' coverage areas
# Note: the input files cannot be shared.
# Each coverage cell keeps its shapely polygon, its CGI identifier and the
# grid-square intersections computed below. Reading the identifier from a
# 'CGI' shapefile property is an assumption about the input schema.
CoverageCell = namedtuple('CoverageCell', ['polygon', 'CGI', 'intersections'])
coverage_polygons = defaultdict(list)
for t in TECHNOLOGIES:
source = fiona.open('datasets/shapefiles/COVERAGE_'+t+'.shp', 'r')
for polygon in source:
coverage_polygons[t].append(CoverageCell(shape(polygon['geometry']), polygon['properties'].get('CGI'), []))
# Create squares of 1 km^2
# ref: http://stackoverflow.com/questions/4000886/gps-coordinates-1km-square-around-a-point
earth_circumference = math.cos(math.radians(df['lat'].mean()))*40075.16
# Let's create squares of 235x235 metres
kms = 0.235
gridWidth = float(kms * (360./earth_circumference))
gridHeight = float(kms/111.32)
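# Sanity check (hedged, using Milan's latitude of roughly 45.46 degrees):
# earth_circumference = cos(45.46 deg) * 40075.16 ~ 28110 km, so
# gridWidth ~ 0.235 * 360 / 28110 ~ 0.00301 degrees of longitude and
# gridHeight ~ 0.235 / 111.32 ~ 0.00211 degrees of latitude.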
# GRID bounds (coordinates)
XMIN = 9.011533669936474
YMIN = 45.356261753717845
XMAX = 9.312688264185276
YMAX = 45.56821407553667
# Get the number of rows and columns
rows = math.ceil((YMAX-YMIN)/gridHeight)
cols = math.ceil((XMAX-XMIN)/gridWidth)
Square = namedtuple('Square', ['x', 'y', 'cx', 'cy', 'polygon'])
square_grid = []
for i in range(int(rows)):
for j in range(int(cols)):
x = XMIN+j*gridWidth
y = YMIN+i*gridHeight
centerx = (x+x+gridWidth)/2.
centery = (y+y+gridHeight)/2.
p = Polygon([[x,y], [x, y+gridHeight], [x+gridWidth, y+gridHeight], [x+gridWidth, y]])
square_grid.append(Square(x, y, centerx, centery, p))
# Calculate the intersections of the coverage cells with the grids' square
intersections = []
for t in TECHNOLOGIES:
for i, v in enumerate(coverage_polygons[t]):
total_coverage_area = v.polygon.area
for j, s in enumerate(square_grid):
if v.polygon.intersects(s.polygon):
# To avoid Python floating point errors
if s.polygon.contains(v.polygon):
fraction = 1.0
else:
# Calculates the proportion between the intersection between the coverage and the grid
# square. This is useful to assign the right proportion of the the mobile usage to the
# grid square.
fraction = (v.polygon.intersection(s.polygon).area/total_coverage_area)
coverage_polygons[t][i].intersections.append([j, fraction])
coverage_intersections = defaultdict(dict)
for t in TECHNOLOGIES:
coverage_intersections[t] = defaultdict(dict)
for p in coverage_polygons[t]:
coverage_intersections[t][p.CGI] = p.intersections
# We build a hash table to search in a fast way all the CGI of a technology
hash_cgi_tech = {}
for index,row in df.groupby(['cgi','technology'], as_index=False).sum().iterrows():
hash_cgi_tech[row['cgi']] = row['technology']
# Select the data grouped by hour and countrycode
groups = df.groupby(['datetime', 'countrycode'])
#
# Example file with the format:
# datetime,CGI,countryCode,numRecords
#
with open('dati/MILANO_grid.csv', 'wb') as csvfile:
csvwriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerow(["datetime", "GridCell", "countryCode", "numRecords"])
for name, group in groups:
# iterate group's rows
data = []
d = defaultdict(int)
for index, row in enumerate(group.values):
CGI = row[1]
tech = hash_cgi_tech[CGI]
if CGI in coverage_intersections[tech]:
for (cell_number, cell_intersection_portion) in coverage_intersections[tech][CGI]:
d[str(cell_number) + "_" + str(row[3])] += float(row[2]*cell_intersection_portion)
datetime_rows = group.values[0, 0]
rows = [[datetime_rows] + k.split("_") + [v] for (k, v) in d.iteritems()]
csvwriter.writerows(rows)
| mit | -7,795,471,707,981,156,000 | 32.451613 | 106 | 0.657425 | false |
Edu-Glez/mesos-test | container/test_naive_bayes.py | 1 | 1645 | import pickle
import pandas as pd
#import numpy as np
import nltk
import time
start_time = time.time()
a=pd.read_table('tweets_pos_clean.txt')
b=pd.read_table('tweets_neg_clean.txt')
aux1=[]
aux2=[]
auxiliar1=[]
auxiliar2=[]
for element in a['Text']:
for w in element.split():
if (w==':)' or len(w)>3):
auxiliar1.append(w)
aux1.append((auxiliar1,'positive'))
auxiliar1=[]
for element in b['text']:
for w in element.split():
if (w==':(' or len(w)>3):
auxiliar2.append(w)
aux2.append((auxiliar2,'negative'))
auxiliar2=[]
aux1=aux1[:100]
aux2=aux2[:200]
pos_df=pd.DataFrame(aux1)
neg_df=pd.DataFrame(aux2)
pos_df.columns=['words','sentiment']
neg_df.columns=['words','sentiment']
#table_aux=[pos_df,neg_df]
#tweets1=pd.concat(table_aux)
#tweets1.columns('words','sentiment')
table_aux1=aux1+aux2
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
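# Example (hedged): with word_features = ['good', 'bad'],
# extract_features(['good', 'movie']) returns
# {'contains(good)': True, 'contains(bad)': False}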
word_features = get_word_features(get_words_in_tweets(table_aux1))
training_set = nltk.classify.apply_features(extract_features, table_aux1)
classifier = nltk.NaiveBayesClassifier.train(training_set)
word_features = list(word_features)
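# Example classification (hedged illustration):
# classifier.classify(extract_features('awesome day :)'.split()))
# returns either 'positive' or 'negative'.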
with open('objs2.pickle','wb') as f:
pickle.dump([classifier, word_features],f)
print("Tomo %s segundos ejecutarse" % (time.time() - start_time))
| mit | -3,334,451,982,492,560,400 | 21.847222 | 73 | 0.711246 | false |
ReactiveX/RxPY | examples/asyncio/toasyncgenerator.py | 1 | 1779 | import asyncio
from asyncio import Future
import rx
from rx import operators as ops
from rx.scheduler.eventloop import AsyncIOScheduler
from rx.core import Observable
def to_async_generator(sentinel=None):
loop = asyncio.get_event_loop()
future = Future()
notifications = []
def _to_async_generator(source: Observable):
def feeder():
nonlocal future
if not notifications or future.done():
return
notification = notifications.pop(0)
if notification.kind == "E":
future.set_exception(notification.exception)
elif notification.kind == "C":
future.set_result(sentinel)
else:
future.set_result(notification.value)
def on_next(value):
"""Takes on_next values and appends them to the notification queue"""
notifications.append(value)
loop.call_soon(feeder)
source.pipe(ops.materialize()).subscribe(on_next)
@asyncio.coroutine
def gen():
"""Generator producing futures"""
nonlocal future
loop.call_soon(feeder)
future = Future()
return future
return gen
return _to_async_generator
@asyncio.coroutine
def go(loop):
scheduler = AsyncIOScheduler(loop)
xs = rx.from_([x for x in range(10)], scheduler=scheduler)
gen = xs.pipe(to_async_generator())
# Wish we could write something like:
# ys = (x for x in yield from gen())
while True:
x = yield from gen()
if x is None:
break
print(x)
def main():
loop = asyncio.get_event_loop()
loop.run_until_complete(go(loop))
if __name__ == '__main__':
main()
| mit | 6,042,081,814,149,646,000 | 22.72 | 81 | 0.587409 | false |
gibil5/openhealth | models/emr/test_treatment.py | 1 | 16733 | # -*- coding: utf-8 -*-
"""
Ex Test treatment
Explore treatment
Renamed - To avoid conflict with nose2.
Used by: Treatment
Created: 14 aug 2018
Last up: 14 apr 2021
"""
from __future__ import print_function
import json
from . import test_funcs
# ----------------------------------------------------------- Exceptions -------
class OrderErrorException(Exception):
pass
class ProductErrorException(Exception):
pass
TEST_CASES = [
'product',
'laser',
'medical',
'cosmetology',
'new',
]
# ------------------------------------------------------- First Level - Buttons -------------------
# ----------------------------------------------- Test Integration -------------
def test_integration_treatment(self):
"""
End to end Tests
Integration Testing for the Treatment Class.
"""
print()
print('*** TESTING')
print('test_treatment.py - test_integration_treatment')
test_array = []
# Init
fname = '/Users/gibil/cellar/github/openhealth/config/data.txt'
with open(fname) as json_file:
data = json.load(json_file)
#print(data)
#print(data['test'])
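# Example of the expected config/data.txt contents (a hedged assumption
# based on the lookups below -- each test case name maps to a 0/1 flag):
# {"test": {"product": 1, "laser": 1, "medical": 0, "cosmetology": 0, "new": 1}}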
for tc in TEST_CASES:
go = int(data['test'][tc])
if go:
test_array.append(tc)
#for p in data['test']:
# print(p)
print(test_array)
# Create Consultation
verbose = False
create_consultation(self, verbose)
# Loop
# Create Recommendations and Sale
for test_case in test_array:
create_reco_and_procedure_sale(self, test_case)
# Create sessions
create_sessions(self, True, 1)
# Create controls
create_controls(self, True, 1)
# test_integration_treatment
# ----------------------------------------------- 2nd level ---------------------------------------
# -------------------------------------------------- Procedure -----------------
def create_reco_and_procedure_sale(self, test_case):
"""
Create Recommendations and Procedure Sale
"""
msg = 'Create Recommendations and Procedure Sale'
print()
print(msg)
# Create recommendation
create_recommendations(self, test_case)
# Create order
self.btn_create_order_pro()
# Pay
test_funcs.disablePrint()
for order in self.order_ids:
if order.state in ['draft']:
# Manage Exception
try:
order.pay_myself()
except OrderErrorException:
raise OrderErrorException('TEST TREATMENT - ERROR - pay_myself')
# Reset recos
self.service_ids.unlink()
test_funcs.enablePrint()
# ----------------------------------------------------------- Second Level ------------------------
# -------------------------------------------------- Create Recommendations ----
def create_recommendations(self, test_case='all'):
"""
Create Recommendations
Test Cases
Be sure to cover:
- All Families.
- All Sub Families.
- All Sub sub Families.
"""
print()
print('TEST - Create Recommendations')
print(test_case)
# Init
name_dic = {
# Products
'prod_0': 'ACNETOPIC 200ML', # Topic
'prod_1': 'KIT POST LASER CO2 COOPER', # Kit
'prod_2': 'TARJETA VIP', # Card
'prod_3': 'OTROS', # Other
'prod_4': 'COMISION DE ENVIO', # Comission
# Lasers
'co2': 'LASER CO2 FRACCIONAL - Cuello - Rejuvenecimiento - Grado 1 - 1 sesion', # Co2
'exc': 'LASER EXCILITE - Abdomen - Alopecias - 1 sesion - 15 min', # Excilite
'ipl': 'LASER M22 IPL - Abdomen - Depilacion - 1 sesion - 15 min', # Ipl
'ndy': 'LASER M22 ND YAG - Localizado Cuerpo - Hemangiomas - 1 sesion - 15 min', # Ndyag
'qui': 'QUICKLASER - Cuello - Rejuvenecimiento - Grado 1 - 1 sesion', # Quick
# Cosmetology
'cos_0': 'CARBOXITERAPIA - Cuerpo - Rejuvenecimiento - 1 sesion - 30 min', # Carboxitherapy
'cos_1': 'PUNTA DE DIAMANTES - Rostro - Limpieza profunda - 1 sesion - 30 min', # Diamond Tip
'cos_2': 'LASER TRIACTIVE + CARBOXITERAPIA - Rostro + Papada + Cuello - Reafirmacion - 10 sesiones - 30 min', # Laser Triactive + Carbo
# Medical
'med_0': 'BOTOX - 1 Zona - Rejuvenecimiento Zona - 1 sesion', # Botox
'med_1': 'CRIOCIRUGIA - Todo Rostro - Acne - 1 sesion', # Cryo
'med_2': 'ACIDO HIALURONICO - 1 Jeringa - Rejuvenecimiento Facial - 1 sesion - FILORGA UNIVERSAL', # Hialuronic
'med_3': 'INFILTRACIONES', # Infil
'med_4': 'MESOTERAPIA NCTF - Todo Rostro - Rejuvenecimiento Facial - 5 sesiones', # Meso
'med_5': 'PLASMA - Todo Rostro - Rejuvenecimiento Facial - 1 sesion', # Plasma
'med_6': 'REDUX - 1 Zona - Rejuvenecimiento Zona - 1 sesion', # Redux
'med_7': 'ESCLEROTERAPIA - Piernas - Varices - 1 sesion', # Sclero
'med_8': 'VITAMINA C ENDOVENOSA', # Vitamin
# New Services
'gyn': 'LASER CO2 FRACCIONAL - Monalisa Touch / Revitalizacion',
'echo': 'ECOGRAFIAS ESPECIALES - Cadera Pediatrica (Bilateral) - 1 sesion',
'prom': 'CARBOXITERAPIA - Localizado Cuerpo - Rejuvenecimiento Facial - 6 sesiones',
}
tst_list_all = [
'prod_0',
'prod_1',
'prod_2',
'prod_3',
'prod_4',
'co2',
'exc',
'ipl',
'ndy',
'qui',
'cos_0',
'cos_1',
'cos_2',
'med_0',
'med_1',
'med_2',
'med_3',
'med_4',
'med_5',
'med_6',
'med_7',
'med_8',
'gyn',
'echo',
'prom',
]
tst_list_prod = [
'prod_0',
'prod_1',
'prod_2',
'prod_3',
'prod_4',
]
tst_list_laser = [
'co2',
'exc',
'ipl',
'ndy',
'qui',
]
tst_list_cos = [
'cos_0',
'cos_1',
'cos_2',
]
tst_list_med = [
'med_0',
'med_1',
'med_2',
'med_3',
'med_4',
'med_5',
'med_6',
'med_7',
'med_8',
]
tst_list_new = [
'gyn',
'echo',
'prom',
]
tst_list_empty = []
# Test cases
if test_case in ['all']:
tst_list = tst_list_all
elif test_case in ['laser']:
tst_list = tst_list_laser
elif test_case in ['product']:
tst_list = tst_list_prod
elif test_case in ['medical']:
tst_list = tst_list_med
elif test_case in ['cosmetology']:
tst_list = tst_list_cos
elif test_case in ['new']:
tst_list = tst_list_new
elif test_case in [False]:
tst_list = tst_list_empty
else:
print('This should not happen !!!')
# Loop
for tst in tst_list:
# Init
name = name_dic[tst]
# Search
product = self.env['product.template'].search([
('name', '=', name),
('pl_price_list', 'in', ['2019']),
],
#order='date_order desc',
limit=1,
)
# Check Exceptions
try:
product.ensure_one()
except Exception:
# ensure_one() raises its own error type, so catch broadly before re-raising
msg_name = "ERROR: Record Must be One Only."
class_name = type(product).__name__
obj_name = name
msg = msg_name + '\n' + class_name + '\n' + obj_name
raise ProductErrorException(msg)
product_id = product.id
# Check if product complete
print()
print('Check product_template complete')
print(product)
print(product.name)
print(product.pl_price_list)
print(product.pl_family)
print(product.pl_subfamily)
print(product.pl_zone)
print(product.pl_pathology)
print(product.pl_sessions)
print(product.pl_level)
print(product.pl_time)
print(product.pl_zone)
print(product.pl_treatment)
print()
# *** Create recommendation
service = self.env['openhealth.service'].create({
'service': product_id,
'family': product.pl_family,
'subfamily': product.pl_subfamily,
'zone': product.pl_zone,
'pathology': product.pl_pathology,
'sessions': product.pl_sessions,
'level': product.pl_level,
'time': product.pl_time,
'price_applied': product.pl_price_list,
'sel_zone': product.pl_zone,
'pl_treatment': product.pl_treatment,
'treatment': self.id,
})
# Check if service complete
#print()
#print(service)
#print(service.name)
#print(service.pl_treatment)
#print(service.family)
#print(service.subfamily)
#print(service.zone)
#print(service.pathology)
#print(service.sessions)
#print(service.level)
#print(service.time)
#print(service.price_applied)
#print(service.sel_zone)
#print(service.treatment)
#print()
# create_recommendations
# ------------------------------------------------------ Reset Treatment -------
def test_reset_treatment(self):
"""
Test Reset - Used by Treatment
"""
print()
print('*** TEST TRE - test_reset_treatment')
# Consultation
self.consultation_ids.unlink()
# Recos
self.service_ids.unlink()
#self.service_all_ids.unlink() # Dep
# Procedures
self.procedure_ids.unlink()
self.session_ids.unlink()
self.control_ids.unlink()
# Alta
self.treatment_closed = False
# Orders - Do not keep them !
for order in self.order_ids:
order.remove_myself_force()
# reset
# ----------------------------------------------- Test Report Management ----------------------------------------------
def test_report_management(self):
"""
Test Report Management
"""
print()
print('Test Report Management')
# Print Disable
#test_funcs.disablePrint()
# Test
report = self.report_management
report.update_fast()
report.update_patients()
report.update_doctors()
report.update_productivity()
report.update_daily()
# Print Enable
#test_funcs.enablePrint()
# ----------------------------------------------- Test Report product -----------------------------------------------
def test_report_product(self):
"""
Test Report product
"""
print()
print('Test Report product')
# Test
report = self.report_product
report.validate()
# ----------------------------------------------- Test Report Marketing -----------------------------------------------
def test_report_marketing(self):
"""
Test Report Marketing
"""
print()
print('Test Report Marketing')
# Print Disable
test_funcs.disablePrint()
# Test
report = self.report_marketing
report.update_patients()
report.pl_update_sales()
# Print Enable
test_funcs.enablePrint()
# ----------------------------------------------- Test Report account -----------------------------------------------
def test_report_account(self):
"""
Test Report account
"""
print()
print('Test Report account')
# Print Disable
#test_funcs.disablePrint()
# Test
report = self.report_account
report.pl_create_electronic()
report.pl_export_txt()
# Print Enable
#test_funcs.enablePrint()
# ----------------------------------------------- Test Report account -----------------------------------------------
def test_report_contasis(self):
"""
Test Report account
"""
print()
print('Test Report Contasis')
# Print Disable
#test_funcs.disablePrint()
# Test
report = self.report_contasis
report.update()
# Print Enable
#test_funcs.enablePrint()
# ------------------------------------------------------- Level 0 - Creates ----
def test_create(self, value):
"""
Test create - Used by Treatment
"""
# Consultation
if value == 'test_create_budget_consultation':
test_create_budget_consultation(self)
elif value == 'test_create_sale_consultation':
test_create_sale_consultation(self)
elif value == 'test_create_consultation':
test_create_consultation(self)
# Reco
elif value == 'test_create_recommendations':
test_create_recommendations(self)
# Procedure
elif value == 'test_create_budget_procedure':
test_create_budget_procedure(self)
elif value == 'test_create_sale_procedure':
test_create_sale_procedure(self)
elif value == 'test_create_procedure':
self.btn_create_procedure_man()
elif value == 'test_create_sessions':
test_create_sessions(self)
elif value == 'test_create_controls':
test_create_controls(self)
def create_credit_note(self):
"""
Create Credit Note
"""
msg = 'Create Credit Note'
print()
print(msg)
for order in self.order_ids:
if order.state in ['sale']:
order.create_credit_note()
order.cancel_order()
def create_block_flow(self):
"""
Create Block Flow
"""
msg = 'Create Block Flow'
print()
print(msg)
print('Create Block Flow')
for order in self.order_ids:
if order.state in ['sale']:
order.block_flow()
# ----------------------------------------------- Sessions ---------------------
def create_sessions(self, verbose, nr_sessions):
"""
Create Sessions
"""
print()
print('test_treatment - create_sessions')
if verbose:
test_funcs.enablePrint()
else:
test_funcs.disablePrint()
for procedure in self.procedure_ids:
for _ in range(1):
procedure.create_sessions(nr_sessions)
# ----------------------------------------------- Controls ---------------------
def create_controls(self, verbose, nr_controls):
"""
Create Controls
"""
print()
print('TEST TREATMENT - create_controls')
if verbose:
test_funcs.enablePrint()
else:
test_funcs.disablePrint()
for procedure in self.procedure_ids:
for _ in range(6):
procedure.create_controls(nr_controls)
# ----------------------------------------------- 3nd level ---------------------------------------
# ----------------------------------------------- Test Cycle -------------------
def test_create_budget_consultation(self):
"""
Test Budget
"""
print()
print('Test Create Budget Consultation')
# Create Budget Consultation
self.btn_create_order_con() # Actual Button
def test_create_sale_consultation(self):
"""
Test Sale
"""
print()
print('Test Create Sale Consultation')
# Pay Budget Consultation
for order in self.order_ids:
if order.state in ['draft']:
order.pay_myself()
def test_create_consultation(self):
"""
Test Consultation
"""
print()
print('Test Create Consultation')
# Create Consultation
self.btn_create_consultation() # Actual Button
def test_create_recommendations(self):
"""
Test Service
"""
print()
print('Test Create Recommendations')
# Init
name_list = ['LASER CO2 FRACCIONAL - Cuello - Rejuvenecimiento - Grado 1 - 1 sesion']
# Loop
for name in name_list:
# Search
print()
print('Search Product')
product = self.env['product.template'].search([
('name', '=', name),
('pl_price_list', 'in', ['2019']),
],
#order='date_order desc',
limit=1,
)
product_id = product.id
print()
print(product)
print(product.name)
# Create service
print()
print('Create Service')
service = self.env['openhealth.service'].create({
'service': product_id,
'family': product.pl_family,
'subfamily': product.pl_subfamily,
'zone': product.pl_zone,
'pathology': product.pl_pathology,
'sessions': product.pl_sessions,
'level': product.pl_level,
'time': product.pl_time,
'price_applied': product.list_price,
'sel_zone': product.pl_zone,
'pl_treatment': product.pl_treatment,
'treatment': self.id,
})
print(service)
# test_create_recommendations
def test_create_budget_procedure(self):
"""
Test Budget Pro
"""
print()
print('Test Create Budget Procedure')
# Pay Budget Procedures
self.btn_create_order_pro() # Actual Button - 2019
def test_create_sale_procedure(self):
"""
Test Sale Pro
"""
print()
print('Test Create Sale Procedure')
# Pay Budget Procedures
for order in self.order_ids:
if order.state in ['draft']:
# Manage Exception
try:
order.pay_myself()
except OrderErrorException:
raise OrderErrorException('jx - Pay myself exception')
def test_create_sessions(self):
"""
Test Session
"""
print()
print('Test Create Sessions')
# Create Sessions
for procedure in self.procedure_ids:
print(procedure)
#for _ in range(2):
for _ in range(1):
print('create sesion')
procedure.btn_create_sessions_manual() # Button
def test_create_controls(self):
"""
Test Control
"""
print()
print('Test Create Controls')
# Create Controls
for procedure in self.procedure_ids:
print(procedure)
#for _ in range(1):
for _ in range(6):
print('create control')
procedure.btn_create_controls_manual() # Button
# ----------------------------------------------- Consultation -----------------
def create_consultation(self, verbose=False):
"""
Create Consultation
"""
msg = '** test_treatment - create_consultation'
print()
print(msg)
if verbose:
test_funcs.enablePrint()
else:
test_funcs.disablePrint()
# Create Consultation Order
self.btn_create_order_con() # Actual Button
for order in self.order_ids:
if order.state in ['draft']:
order.pay_myself()
# Create and fill Consultation object
self.btn_create_consultation() # Actual Button
for consultation in self.consultation_ids:
consultation.autofill()
| agpl-3.0 | 4,452,663,856,938,850,000 | 21.460403 | 141 | 0.582621 | false |
tkarna/cofs | examples/channel3d/channel3d_closed.py | 1 | 2541 | """
Idealised channel flow in 3D
============================
Solves shallow water equations in closed rectangular domain
with sloping bathymetry.
Initially water elevation is set to a piecewise linear function
with a slope in the deeper (left) end of the domain. This results
in a wave that develops a shock as it reaches shallower end of the domain.
This example tests the integrity of the coupled 2D-3D model and stability
of momentum advection.
This test is also useful for testing tracer conservation and consistency
by advecting a constant passive tracer.
"""
from thetis import *
n_layers = 6
outputdir = 'outputs_closed'
lx = 100e3
ly = 3000.
nx = 80
ny = 3
mesh2d = RectangleMesh(nx, ny, lx, ly)
print_output('Exporting to ' + outputdir)
t_end = 6 * 3600
t_export = 900.0
if os.getenv('THETIS_REGRESSION_TEST') is not None:
t_end = 5*t_export
# bathymetry
P1_2d = get_functionspace(mesh2d, 'CG', 1)
bathymetry_2d = Function(P1_2d, name='Bathymetry')
depth_max = 20.0
depth_min = 7.0
xy = SpatialCoordinate(mesh2d)
bathymetry_2d.interpolate(depth_max - (depth_max-depth_min)*xy[0]/lx)
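# Depth decreases linearly from depth_max (20 m) at x = 0 to depth_min (7 m)
# at x = lx, i.e. depth(x) = depth_max - (depth_max - depth_min) * x / lx.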
u_max = 4.5
w_max = 5e-3
# create solver
solver_obj = solver.FlowSolver(mesh2d, bathymetry_2d, n_layers)
options = solver_obj.options
options.element_family = 'dg-dg'
options.timestepper_type = 'SSPRK22'
options.solve_salinity = True
options.solve_temperature = False
options.use_implicit_vertical_diffusion = False
options.use_bottom_friction = False
options.use_ale_moving_mesh = True
options.use_limiter_for_tracers = True
options.use_lax_friedrichs_velocity = False
options.use_lax_friedrichs_tracer = False
options.simulation_export_time = t_export
options.simulation_end_time = t_end
options.output_directory = outputdir
options.horizontal_velocity_scale = Constant(u_max)
options.vertical_velocity_scale = Constant(w_max)
options.check_volume_conservation_2d = True
options.check_volume_conservation_3d = True
options.check_salinity_conservation = True
options.check_salinity_overshoot = True
options.fields_to_export = ['uv_2d', 'elev_2d', 'elev_3d', 'uv_3d',
'w_3d', 'w_mesh_3d', 'salt_3d',
'uv_dav_2d']
# initial elevation, piecewise linear function
elev_init_2d = Function(P1_2d, name='elev_2d_init')
max_elev = 6.0
elev_slope_x = 30e3
elev_init_2d.interpolate(conditional(xy[0] < elev_slope_x, -xy[0]*max_elev/elev_slope_x + max_elev, 0.0))
salt_init_3d = Constant(4.5)
solver_obj.assign_initial_conditions(elev=elev_init_2d, salt=salt_init_3d)
solver_obj.iterate()
| mit | -5,102,377,859,405,900,000 | 31.576923 | 105 | 0.72924 | false |
BD2KGenomics/slugflow | src/toil/test/batchSystems/test_lsf_helper.py | 1 | 4936 | """lsfHelper.py shouldn't need a batch system and so the unit tests here should aim to run on any system."""
from toil.batchSystems.lsfHelper import parse_mem_and_cmd_from_output
from toil.test import ToilTest
class LSFHelperTest(ToilTest):
def test_parse_mem_and_cmd_from_output(self):
# https://github.com/DataBiosphere/toil/pull/3475
output = ('\nJob <2924748>, Job Name <toil_job_64>, User <thiagogenez>, Project <default>, S'
'\n tatus <RUN>, Queue <research-rh74>, Job Priority <50>, Com'
'\n mand <_toil_worker CactusBarRecursion file:/hps/nobackup/p'
'\n roduction/ensembl/thiagogenez/pairwises/arabidopsis/run/jo'
'\n bstore/3 kind-CactusBarRecursion/instance-iu6wo56x --conte'
'\n xt gAShortenedh32xqlE51Yi4=>, Share group charged </thiago'
'\n genez>, Esub <esub>'
'\nThu Mar 18 02:06:32: Submitted from host <noah-login-01>, CWD </hps/nobackup/pr'
'\n oduction/ensembl/thiagogenez/pairwises/arabidopsis/run>, S'
'\n pecified CWD </hps/nobackup/production/ensembl/thiagogenez'
'\n /pairwises/arabidopsis/run/.>, Output File </tmp/toil_work'
'\n flow_10e83102-2e4b-4093-9128-2a52f4bbc65a_job_64_batch_lsf'
'\n _2924748_std_output.log>, Error File </tmp/toil_workflow_1'
'\n 0e83102-2e4b-4093-9128-2a52f4bbc65a_job_64_batch_lsf_29247'
'\n 48_std_error.log>, Requested Resources <select[(mem>4000)]'
'\n rusage[mem=4000:duration=480, numcpus=1:duration=480]>;'
'\nThu Mar 18 02:06:33: Started on <hx-noah-31-02>, Execution Home </homes/thiagog'
'\n enez>, Execution CWD </hps/nobackup/production/ensembl/thi'
'\n agogenez/pairwises/arabidopsis/run/.>;'
'\nThu Mar 18 17:07:47: Resource usage collected.'
'\n The CPU time used is 53936 seconds.'
'\n MEM: 344 Mbytes; SWAP: 1.3 Gbytes; NTHREAD: 5'
'\n PGID: 433168; PIDs: 433168 433177 433179 444026 '
'\n'
'\n RUNLIMIT '
'\n 10085.0 min'
'\n'
'\n CORELIMIT MEMLIMIT'
'\n 0 M 3.9 G '
'\n'
'\n MEMORY USAGE:'
'\n MAX MEM: 2.5 Gbytes; AVG MEM: 343 Mbytes'
'\n'
'\n SCHEDULING PARAMETERS:'
'\n r15s r1m r15m ut pg io ls it tmp swp mem'
'\n loadSched - - - - 10.0 - - - 500M - 1000M '
'\n loadStop - - - - - - - - - - - '
'\n'
'\n availcpus '
'\n loadSched 1.0 '
'\n loadStop - '
'\n'
'\n RESOURCE REQUIREMENT DETAILS:'
'\n Combined: select[((mem>4000)) && (type == local)] order[r15s:pg] rusage[mem=40'
'\n 00.00:duration=8h:decay=0,numcpus=1.00:duration=8h:decay=0'
'\n ] span[hosts=1]'
'\n Effective: select[(((mem>4000))) && (type == local)] order[r15s:pg] rusage[mem'
'\n =4000.00:duration=8h:decay=0,numcpus=1.00:duration=8h:deca'
'\n y=0] span[hosts=1] '
'\n'
'\n')
max_mem, command = parse_mem_and_cmd_from_output(output=output)
assert len(max_mem.groups()) == 1
expected_mem = '2.5 Gbytes'
assert max_mem.group(1) == expected_mem, f'Actual: {max_mem.group(1)}, Expected: "{expected_mem}"'
assert len(command.groups()) == 1
expected_command = ('_toil_worker CactusBarRecursion file:/hps/nobackup/production/ensembl/thiagogenez/'
'pairwises/arabidopsis/run/jobstore/3 kind-CactusBarRecursion/instance-iu6wo56x '
'--context gAShortenedh32xqlE51Yi4=')
assert command.group(1) == expected_command, f'Actual: {command.group(1)}, Expected: "{expected_command}"'
print(command)
max_mem, command = parse_mem_and_cmd_from_output(output='')
assert max_mem is None
assert command is None
| apache-2.0 | -1,546,710,280,469,730,000 | 65.702703 | 114 | 0.464344 | false |
mementum/tcmanager | src/mvcbase.py | 1 | 5930 | #!/usr/bin/env python
# -*- coding: latin-1; py-indent-offset:4 -*-
################################################################################
#
# Copyright (C) 2014 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import functools
import inspect
import threading
import weakref
from pubsub import pub
import wx
class MvcAttribute(object):
_mvccontainer = weakref.WeakValueDictionary()
def __init__(self):
self.icache = dict()
def __set__(self, instance, value):
self.icache[instance] = value
def __get__(self, instance, owner=None):
return self.icache.setdefault(instance)
def MvcContainer(cls):
cls._model = MvcAttribute()
cls._view = MvcAttribute()
cls._controller = MvcAttribute()
cls.__oldinit__ = cls.__init__
@functools.wraps(cls.__init__)
def newInit(self, *args, **kwargs):
curThId = threading.current_thread().ident
MvcAttribute._mvccontainer[curThId] = self
self.__oldinit__(*args, **kwargs)
cls.__init__ = newInit
return cls
def MvcRole(role):
def wrapper(cls):
pubsubmap = {'view': 'model', 'controller': 'view'}
oldInit = cls.__init__
@functools.wraps(cls.__init__)
def newInit(self, *args, **kwargs):
# Assign the role
self.role = role
# Assign the mvcontainer
curThId = threading.current_thread().ident
self._mvccontainer = _mvccontainer = MvcAttribute._mvccontainer[curThId]
# Pubsub some methods
methods = inspect.getmembers(self.__class__, predicate=inspect.ismethod)
mvcid = id(_mvccontainer)
for method in methods:
if hasattr(method[1], '_pubsub'):
boundmethod = method[1].__get__(self, self.__class__)
psmap = pubsubmap[role]
pstopic = '%d.%s.%s' % (mvcid, psmap, method[1]._pubsub)
pub.subscribe(boundmethod, pstopic)
elif hasattr(method[1], '_pubsubspec'):
boundmethod = method[1].__get__(self, self.__class__)
pstopic = '%d.%s' % (mvcid, method[1]._pubsubspec)
pub.subscribe(boundmethod, pstopic)
if role == 'view':
# Rebind some methods to controller
_controller = _mvccontainer._controller
methods = inspect.getmembers(_controller, predicate=inspect.ismethod)
for method in methods:
if hasattr(method[1], '_viewcontroller'):
setattr(self, method[0], method[1].__get__(self, self.__class__))
oldInit(self, *args, **kwargs)
cls.__init__ = newInit
oldGetAttribute = cls.__getattribute__
def newGetAttribute(self, name):
_mvcroles = ['_model', '_view', '_controller']
if name in _mvcroles:
_mvccontainer = oldGetAttribute(self, '_mvccontainer')
return getattr(_mvccontainer, name)
return oldGetAttribute(self, name)
cls.__getattribute__ = newGetAttribute
if False:
def PubSend(self, **kwargs):
pub.sendMessage(self.role, **kwargs)
cls.PubSend = PubSend
return cls
return wrapper
ModelRole = MvcRole('model')
ViewRole = MvcRole('view')
ControllerRole = MvcRole('controller')
def ViewManager(func):
@functools.wraps(func)
def wrapper(self, event, *args, **kwargs):
event.Skip()
return func(self, event, *args, **kwargs)
wrapper._viewcontroller = True
return wrapper
def PubSubscribe(subtopic):
def decorate(func):
func._pubsub = subtopic
return func
return decorate
def PubSubscribeSpecific(subtopic):
def decorate(func):
func._pubsubspec = subtopic
return func
return decorate
def PubSend(topic=None, queue=True):
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
mvcid = id(self._mvccontainer)
try:
msg = func(self, *args, **kwargs)
if not topic:
sendtopic = None
else:
sendtopic = '%d.%s.%s' % (mvcid, self.role, topic)
except Exception, e:
msg = str(e)
sendtopic = '%d.%s.%s' % (mvcid, self.role, 'error')
if sendtopic:
if queue:
wx.CallAfter(pub.sendMessage, sendtopic, msg=msg)
else:
pub.sendMessage(sendtopic, msg=msg)
return wrapper
return decorate
def PubSendSpecific(topic, queue=True):
def decorate(func):
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
msg = func(self, *args, **kwargs)
mvcid = id(self._mvccontainer)
sendtopic = '%d.%s' % (mvcid, topic)
# sendtopic = topic
if queue:
wx.CallAfter(pub.sendMessage, sendtopic, msg=msg)
else:
pub.sendMessage(sendtopic, msg=msg)
return wrapper
return decorate
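# Minimal wiring sketch (hedged illustration; the class names and the
# 'changed' topic below are hypothetical, not part of this module):
#
# @ViewRole
# class View(object):
#     @PubSubscribe('changed')
#     def on_model_changed(self, msg):
#         pass  # receives '<mvcid>.model.changed' messages
#
# @MvcContainer
# class App(object):
#     def __init__(self):
#         self._model = Model()            # a plain @ModelRole class
#         self._controller = Controller()  # created before the view so the
#         self._view = View()              # view can rebind @ViewManager methods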
| gpl-3.0 | -4,661,281,447,062,204,000 | 31.762431 | 89 | 0.558685 | false |
garnaat/bchelpers | bchelpers/exception.py | 1 | 1147 | # Copyright 2014 Mitch Garnaat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseOperationError(Exception):
def __init__(self, error_code, error_body, operation_name):
msg = 'Error(%d) when calling (%s): %s' % (error_code,
operation_name,
error_body)
super(BaseOperationError, self).__init__(msg)
self.error_code = error_code
self.error_body = error_body
self.operation_name = operation_name
class ClientError(BaseOperationError):
pass
class ServerError(BaseOperationError):
pass
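# Usage sketch (hedged): raise ClientError(404, 'Not Found', 'GetObject')
# renders as "Error(404) when calling (GetObject): Not Found".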
| apache-2.0 | -8,495,006,085,509,659,000 | 33.757576 | 74 | 0.655623 | false |
chrys87/orca-beep | test/keystrokes/firefox/line_nav_button_in_link_position_relative_on_focus.py | 1 | 1860 | #!/usr/bin/python
from macaroon.playback import *
import utils
sequence = MacroSequence()
#sequence.append(WaitForDocLoad())
sequence.append(PauseAction(5000))
# Work around some new quirk in Gecko that causes this test to fail if
# run via the test harness rather than manually.
sequence.append(KeyComboAction("<Control>r"))
sequence.append(KeyComboAction("<Control>Home"))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"1. Line Down",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"2. Line Down",
["BRAILLE LINE: ' Line 2 push button'",
" VISIBLE: ' Line 2 push button', cursor=1",
"SPEECH OUTPUT: ' Line 2 push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Down"))
sequence.append(utils.AssertPresentationAction(
"3. Line Down",
["BRAILLE LINE: 'Line 3'",
" VISIBLE: 'Line 3', cursor=1",
"SPEECH OUTPUT: 'Line 3'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"4. Line Up",
["BRAILLE LINE: ' Line 2 push button'",
" VISIBLE: ' Line 2 push button', cursor=1",
"SPEECH OUTPUT: ' Line 2 push button'"]))
sequence.append(utils.StartRecordingAction())
sequence.append(KeyComboAction("Up"))
sequence.append(utils.AssertPresentationAction(
"5. Line Up",
["BRAILLE LINE: 'Line 1'",
" VISIBLE: 'Line 1', cursor=1",
"SPEECH OUTPUT: 'Line 1'"]))
sequence.append(utils.AssertionSummaryAction())
sequence.start()
| lgpl-2.1 | -3,976,546,266,804,137,500 | 31.421053 | 70 | 0.690476 | false |
jbest/digitization_tools | productivity/productivity.py | 1 | 6643 | """
Imaging productivity stats
Jason Best - [email protected]
Generates a productivity report based on the creation timestamps of image files.
Details of the imaging session are extracted from the folder name containing the images.
Assumed folder name format is: YYYY-MM-DD_ImagerID_OtherInfo
Usage:
python productivity.py [session_folder_path]
Requirements:
See requirements.txt.
"""
from datetime import datetime
import sys
import time
import os
import csv
import re
# If you don't need the moving mean calculated,
# or don't want to install extra modules,
# you can remove the following import and
# comment out the moving mean calculations.
import pandas as pd
# parameters
# File extensions that are used to determine productivity. Others are ignored.
inputFileTypes = ('.jpg', '.jpeg', '.JPG', '.JPEG') # Variations of JPEG extensions
#inputFileTypes = ('.CR2', '.cr2') # Variations of Canon RAW extensions.
fieldDelimiter = ',' # delimiter used in output CSV
moving_mean_window = 21
def getImages(local_path=None):
"""
Generates a list of specimen files stored at a local path.
"""
imageFiles = []
dirList = os.listdir(local_path)
for fileName in dirList:
#TODO ignore case for extension evaluation
if fileName.endswith(inputFileTypes):
#imageFiles.append(local_path + fileName)
imageFiles.append(os.path.join(local_path, fileName))
return imageFiles
def getImageData(images):
stats_list = []
for imageFile in images:
        # mtime is used as a proxy for creation time (see module docstring).
        stats_list.append((imageFile, os.path.getmtime(imageFile)))
return stats_list
def get_session_data(session_folder=None):
if session_folder is not None:
print 'Analyzing: ', session_folder
session_re = re.match('(\d\d\d\d)[-_](\d\d)[-_](\d\d)[-_]([a-zA-Z]*)(.*)', session_folder)
if session_re:
if session_re.group(1):
year = int(session_re.group(1))
else:
year = 'NONE'
if session_re.group(2):
month = int(session_re.group(2))
else:
month = 'NONE'
if session_re.group(3):
day = int(session_re.group(3))
else:
day = 'NONE'
if session_re.group(4):
imager = session_re.group(4)
else:
imager = 'NONE'
if session_re.group(5):
other = session_re.group(5)
else:
other = 'NONE'
else:
return {'imager': 'NONE', 'year': 'NONE', 'month': 'NONE', 'day': 'NONE', 'other': 'NONE'}
return {'imager': imager, 'year': year, 'month': month, 'day': day, 'other': other}
else:
print 'No session folder provided.'
return None
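# Hedged example (illustrative, not executed): for a session folder named
# '2015-03-14_jdoe_stage2', get_session_data('2015-03-14_jdoe_stage2')
# would return:
#   {'imager': 'jdoe', 'year': 2015, 'month': 3, 'day': 14,
#    'other': '_stage2'}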
# Analyze the image files
startTime = datetime.now()
# Determine session folder containing images
try:
if os.path.exists(sys.argv[1]):
#session_path = sys.argv[1]
session_path = os.path.abspath(sys.argv[1])
else:
session_path = os.path.dirname(os.path.realpath(__file__))
print 'No valid directory path provided. Assuming:', session_path
except IndexError:
# No path provided, assuming script directory
session_path = os.path.dirname(os.path.realpath(__file__))
print 'No valid directory path provided. Assuming:', session_path
session_folder = os.path.basename(session_path)
print 'session_path', session_path, 'session_folder', session_folder
#dir_path = os.path.dirname(imageFile) # full path of parent directory
#basename = os.path.basename(imageFile) # filename with extension
#filename, file_extension = os.path.splitext(basename)
imagesToEvaluate = getImages(session_path)
session_data = get_session_data(session_folder)
print 'session_data:', session_data
# populate imageStats
image_stats = getImageData(imagesToEvaluate)
# Create data structure
creation_time = None
creation_series = []
series_data = []
cumulative_time = 0
cumulative_mean = 0
image_count = 0
for image_data in sorted(image_stats,key=lambda x: x[1]): # sorted ensures results are in order of creation
file_path = image_data[0]
file_basename = os.path.basename(file_path) # filename with extension
if creation_time is None:
time_diff = 0
else:
time_diff = image_data[1] - creation_time
cumulative_time = cumulative_time + time_diff
image_count += 1
cumulative_mean = cumulative_time/image_count
creation_time = image_data[1]
creation_series.append(time_diff)
try:
cumulative_images_per_min = 60/cumulative_mean
except ZeroDivisionError:
cumulative_images_per_min = 0
#TODO format floats
session_date = str(session_data['month']) + '/' + str(session_data['day']) + '/' + str(session_data['year'])
series_data.append([file_path, file_basename, session_data['imager'], session_data['year'], session_data['month'], session_data['day'], session_data['other'], session_date, time.ctime(creation_time),time_diff, cumulative_time, cumulative_mean, cumulative_images_per_min ])
print 'Analyzing:', file_basename
# calculate moving mean
#TODO test to see if any data are available
data = pd.Series(creation_series)
data_mean = pd.rolling_mean(data, window=moving_mean_window).shift(-(moving_mean_window/2))
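# Note (hedged): pd.rolling_mean() was removed in newer pandas releases.
# On pandas >= 0.18 an approximately equivalent centered moving mean is:
#   data_mean = data.rolling(window=moving_mean_window, center=True).mean()
# (center=True plays the role of the shift(-(window/2)) trick above.)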
# Create file for results
log_file_base_name = session_data['imager'] + '_' + str(session_data['year']) + '-' + str(session_data['month']) + '-' + str(session_data['day'])
log_file_ext = '.csv'
if os.path.exists(log_file_base_name + log_file_ext):
log_file_name = log_file_base_name + '_' + startTime.isoformat().replace(':', '--') + log_file_ext
else:
log_file_name = log_file_base_name + log_file_ext
reportFile = open(log_file_name, "wb")
reportWriter = csv.writer(reportFile, delimiter = fieldDelimiter, escapechar='#')
# header
reportWriter.writerow([
"ImagePath",
"FileName",
"ImagerUsername",
"SessionYear",
"SessionMonth",
"SessionDay",
"SessionOther",
"SessionDate",
"CreationTime",
"CreationDurationSecs",
"CumulativeTimeSecs",
"CumulativeMeanSecs",
"CumulativeImagesPerMinute",
"MovingMeanSecs"])
# Merge moving mean into original data and write to file
for index, item in enumerate(series_data):
if str(data_mean[index]) == 'nan':
running_mean = 0
else:
running_mean = data_mean[index]
#print type(data_mean[index])
item.append(running_mean)
reportWriter.writerow(item)
# close file
reportFile.close()
print 'Analysis complete.'
| mit | 7,707,624,852,203,826,000 | 32.550505 | 276 | 0.657986 | false |
masschallenge/django-accelerator | accelerator/migrations/0036_add_user_deferrable_modal.py | 1 | 1688 | # Generated by Django 2.2.10 on 2021-03-03 17:08
from django.conf import settings
from django.db import (
migrations,
models,
)
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('accelerator', '0035_add_deferrable_modal_model'),
]
operations = [
migrations.CreateModel(
name='UserDeferrableModal',
fields=[
('id', models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name='ID')),
('created_at', models.DateTimeField(
auto_now_add=True,
null=True)),
('updated_at', models.DateTimeField(
auto_now=True,
null=True)),
('is_deferred', models.BooleanField(default=False)),
('deferred_to', models.DateTimeField(
blank=True,
null=True)),
('deferrable_modal', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.ACCELERATOR_DEFERRABLEMODAL_MODEL)),
('user', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'User Deferrable Modal',
'abstract': False,
'managed': True,
'swappable': None,
},
),
]
| mit | -8,131,171,024,774,738,000 | 32.098039 | 68 | 0.495853 | false |
aixiwang/mqtt_datajs | upload_data_test.py | 1 | 1737 | #!/usr/bin/python
import sys,time
try:
import paho.mqtt.client as mqtt
except ImportError:
# This part is only required to run the example from within the examples
# directory when the module itself is not installed.
#
# If you have the module installed, just use "import paho.mqtt.client"
import os
import inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../src")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import paho.mqtt.client as mqtt
def on_connect(mqttc, obj, flags, rc):
print("rc: "+str(rc))
def on_message(mqttc, obj, msg):
print(msg.topic+" "+str(msg.qos)+" "+str(msg.payload))
def on_publish(mqttc, obj, mid):
print("mid: "+str(mid))
def on_subscribe(mqttc, obj, mid, granted_qos):
print("Subscribed: "+str(mid)+" "+str(granted_qos))
def on_log(mqttc, obj, level, string):
print(string)
# If you want to use a specific client id, use
# mqttc = mqtt.Client("client-id")
# but note that the client id must be unique on the broker. Leaving the client
# id parameter empty will generate a random id for you.
mqttc = mqtt.Client()
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# Uncomment to enable debug messages
#mqttc.on_log = on_log
#mqttc.username_pw_set('test1', 'test1')
mqttc.connect("test.mosquitto.org", 1883, 60)
#mqttc.connect("127.0.0.1", 1883, 60)
#mqttc.subscribe("room/#", 0)
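# Note (hedged): without a running network loop, paho may not reliably flush
# queued publishes or service keepalives. A common fix is to start the
# background network thread before entering the publish loop:
#   mqttc.loop_start()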
while True:
(rc, final_mid) = mqttc.publish('home/room1/t1', '2.1', 0)
    print 'sleep... rc=%s mid=%s' % (rc, final_mid)
time.sleep(1)
#mqttc.loop_forever()
| bsd-3-clause | 8,638,943,573,339,154,000 | 29.473684 | 137 | 0.679908 | false |
scottsilverlabs/raspberrystem | rstem/projects/demos/two_buttons/button_test_aux.py | 1 | 1652 | #!/usr/bin/env python
import curses, time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
def main(stdscr):
# Clear screen
curses.noecho()
curses.cbreak()
curses.curs_set(0)
stdscr.addstr("Button Tester", curses.A_REVERSE)
stdscr.chgat(-1, curses.A_REVERSE)
#stdscr.addstr(curses.LINES-1, 0, "Press 'Q' to quit")
stdscr.nodelay(1) # make getch() non-blocking
# set up window to bounce ball
ball_win = curses.newwin(curses.LINES-2, curses.COLS, 1, 0)
ball_win.box()
#ball_win.addch(curses.LINES-1,curses.COLS-1, ord('F'))
# Update the internal window data structures
stdscr.noutrefresh()
ball_win.noutrefresh()
# Redraw the screen
curses.doupdate()
box_LINES, box_COLS = ball_win.getmaxyx()
ball_x, ball_y = (int(box_COLS/2), int(box_LINES/2))
while True:
# Quit if 'Q' was pressed
c = stdscr.getch()
if c == ord('Q') or c == ord('q'):
break
# remove previous location of ball
ball_win.addch(ball_y, ball_x, ord(' '))
stdscr.addstr(curses.LINES-1, 0, "Press 'Q' to quit | Left: {0} Right: {1}".format(not GPIO.input(23), not GPIO.input(18)))
        if not GPIO.input(23) and ball_x > 1:
ball_x -= 1
if not GPIO.input(18) and ball_x < box_COLS-2:
ball_x += 1
# update ball location
ball_win.addch(ball_y, ball_x, ord('0'))
# Refresh the windows from the bottom up
stdscr.noutrefresh()
ball_win.noutrefresh()
curses.doupdate()
# Restore the terminal
curses.nocbreak()
curses.echo()
curses.curs_set(1)
curses.endwin()
#stdscr.refresh()
#stdscr.getkey()
curses.wrapper(main)
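# Note (hedged): curses.wrapper() already calls noecho()/cbreak() on entry
# and restores the terminal (via endwin()) on exit or on an exception, so
# the manual restore block at the end of main() is defensive rather than
# strictly required.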
| apache-2.0 | 8,123,518,483,904,408,000 | 21.630137 | 129 | 0.673123 | false |
gx1997/chrome-loongson | chrome/test/pyautolib/pyauto.py | 2 | 210748 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""PyAuto: Python Interface to Chromium's Automation Proxy.
PyAuto uses swig to expose Automation Proxy interfaces to Python.
For complete documentation on the functionality available,
run pydoc on this file.
Ref: http://dev.chromium.org/developers/testing/pyauto
Include the following in your PyAuto test script to make it run standalone.
from pyauto import Main
if __name__ == '__main__':
Main()
This script can be used as an executable to fire off other scripts, similar
to unittest.py
python pyauto.py test_script
"""
import cStringIO
import copy
import functools
import hashlib
import inspect
import logging
import optparse
import os
import pickle
import pprint
import re
import shutil
import signal
import socket
import stat
import string
import subprocess
import sys
import tempfile
import time
import types
import unittest
import urllib
import pyauto_paths
def _LocateBinDirs():
"""Setup a few dirs where we expect to find dependency libraries."""
deps_dirs = [
os.path.dirname(__file__),
pyauto_paths.GetThirdPartyDir(),
os.path.join(pyauto_paths.GetThirdPartyDir(), 'webdriver', 'pylib'),
]
sys.path += map(os.path.normpath, pyauto_paths.GetBuildDirs() + deps_dirs)
_LocateBinDirs()
_PYAUTO_DOC_URL = 'http://dev.chromium.org/developers/testing/pyauto'
try:
import pyautolib
# Needed so that all additional classes (like: FilePath, GURL) exposed by
# swig interface get available in this module.
from pyautolib import *
except ImportError:
print >>sys.stderr, 'Could not locate pyautolib shared libraries. ' \
'Did you build?\n Documentation: %s' % _PYAUTO_DOC_URL
# Mac requires python2.5 even when not the default 'python' (e.g. 10.6)
if 'darwin' == sys.platform and sys.version_info[:2] != (2,5):
print >>sys.stderr, '*\n* Perhaps use "python2.5", not "python" ?\n*'
raise
# Should go after sys.path is set appropriately
import bookmark_model
import download_info
import history_info
import omnibox_info
import plugins_info
import prefs_info
from pyauto_errors import JSONInterfaceError
from pyauto_errors import NTPThumbnailNotShownError
import pyauto_utils
import simplejson as json # found in third_party
_CHROME_DRIVER_FACTORY = None
_HTTP_SERVER = None
_REMOTE_PROXY = None
_OPTIONS = None
_BROWSER_PID = None
# TODO(bartfab): Remove when crosbug.com/20709 is fixed.
AUTO_CLEAR_LOCAL_STATE_MAGIC_FILE = '/root/.forget_usernames'
class PyUITest(pyautolib.PyUITestBase, unittest.TestCase):
"""Base class for UI Test Cases in Python.
A browser is created before executing each test, and is destroyed after
each test irrespective of whether the test passed or failed.
You should derive from this class and create methods with 'test' prefix,
and use methods inherited from PyUITestBase (the C++ side).
Example:
class MyTest(PyUITest):
def testNavigation(self):
self.NavigateToURL("http://www.google.com")
self.assertEqual("Google", self.GetActiveTabTitle())
"""
def __init__(self, methodName='runTest', **kwargs):
"""Initialize PyUITest.
When redefining __init__ in a derived class, make sure that:
      o you make a call to this __init__
o __init__ takes methodName as an arg. this is mandated by unittest module
Args:
methodName: the default method name. Internal use by unittest module
(The rest of the args can be in any order. They can even be skipped in
which case the defaults will be used.)
clear_profile: If True, clean the profile dir before use. Defaults to True
homepage: the home page. Defaults to "about:blank"
"""
# Fetch provided keyword args, or fill in defaults.
clear_profile = kwargs.get('clear_profile', True)
homepage = kwargs.get('homepage', 'about:blank')
pyautolib.PyUITestBase.__init__(self, clear_profile, homepage)
self.Initialize(pyautolib.FilePath(self.BrowserPath()))
unittest.TestCase.__init__(self, methodName)
# Give all pyauto tests easy access to pprint.PrettyPrinter functions.
self.pprint = pprint.pprint
self.pformat = pprint.pformat
# Set up remote proxies, if they were requested.
self.remotes = []
self.remote = None
global _REMOTE_PROXY
if _REMOTE_PROXY:
self.remotes = _REMOTE_PROXY
self.remote = _REMOTE_PROXY[0]
def __del__(self):
pyautolib.PyUITestBase.__del__(self)
def _SetExtraChromeFlags(self):
"""Prepares the browser to launch with the specified extra Chrome flags.
This function is called right before the browser is launched for the first
time.
"""
for flag in self.ExtraChromeFlags():
if flag.startswith('--'):
flag = flag[2:]
split_pos = flag.find('=')
if split_pos >= 0:
flag_name = flag[:split_pos]
flag_val = flag[split_pos + 1:]
self.AppendBrowserLaunchSwitch(flag_name, flag_val)
else:
self.AppendBrowserLaunchSwitch(flag)
def __SetUp(self):
named_channel_id = None
if _OPTIONS:
named_channel_id = _OPTIONS.channel_id
if self.IsChromeOS(): # Enable testing interface on ChromeOS.
if self.get_clear_profile():
self.CleanupBrowserProfileOnChromeOS()
self.EnableCrashReportingOnChromeOS()
if not named_channel_id:
named_channel_id = self.EnableChromeTestingOnChromeOS()
else:
self._SetExtraChromeFlags() # Flags already previously set for ChromeOS.
if named_channel_id:
self._named_channel_id = named_channel_id
self.UseNamedChannelID(named_channel_id)
# Initialize automation and fire the browser (does not fire the browser
# on ChromeOS).
self.SetUp()
# Forcibly trigger all plugins to get registered. crbug.com/94123
    # Sometimes a flash file loaded too quickly after firing the browser
    # ends up getting downloaded, which seems to indicate that the plugin
    # hasn't been registered yet.
if not self.IsChromeOS():
self.GetPluginsInfo()
# TODO(dtu): Remove this after crosbug.com/4558 is fixed.
if self.IsChromeOS():
self.WaitUntil(lambda: not self.GetNetworkInfo()['offline_mode'])
# If we are connected to any RemoteHosts, create PyAuto
# instances on the remote sides and set them up too.
for remote in self.remotes:
remote.CreateTarget(self)
remote.setUp()
global _BROWSER_PID
_BROWSER_PID = self.GetBrowserInfo()['browser_pid']
def setUp(self):
"""Override this method to launch browser differently.
Can be used to prevent launching the browser window by default in case a
test wants to do some additional setup before firing browser.
When using the named interface, it connects to an existing browser
instance.
On ChromeOS, a browser showing the login window is started. Tests can
initiate a user session by calling Login() or LoginAsGuest(). Cryptohome
vaults or flimflam profiles left over by previous tests can be cleared by
calling RemoveAllCryptohomeVaults() respectively CleanFlimflamDirs() before
logging in to improve isolation. Note that clearing flimflam profiles
requires a flimflam restart, briefly taking down network connectivity and
    slowing down the test. This should be done only for tests that use
    flimflam.
"""
self.__SetUp()
def tearDown(self):
for remote in self.remotes:
remote.tearDown()
self.TearDown() # Destroy browser
# Method required by the Python standard library unittest.TestCase.
def runTest(self):
pass
@staticmethod
def BrowserPath():
"""Returns the path to Chromium binaries.
Expects the browser binaries to be in the
same location as the pyautolib binaries.
"""
return os.path.normpath(os.path.dirname(pyautolib.__file__))
def ExtraChromeFlags(self):
"""Return a list of extra chrome flags to use with Chrome for testing.
These are flags needed to facilitate testing. Override this function to
use a custom set of Chrome flags.
"""
if self.IsChromeOS():
return [
'--homepage=about:blank',
'--allow-file-access',
'--allow-file-access-from-files',
'--enable-file-cookies',
'--dom-automation',
'--skip-oauth-login',
# Enables injection of test content script for webui login automation
'--auth-ext-path=/usr/share/chromeos-assets/gaia_auth',
# Enable automation provider and chromeos net logs
'--vmodule=*/browser/automation/*=2,*/chromeos/net/*=2',
]
else:
return []
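  # Hedged usage sketch: a test that needs extra switches can extend the
  # defaults by overriding this method in its subclass. The flag below is
  # only an illustrative example, not one this framework requires:
  #
  #   class MyTest(PyUITest):
  #     def ExtraChromeFlags(self):
  #       return PyUITest.ExtraChromeFlags(self) + ['--disable-gpu']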
def CloseChromeOnChromeOS(self):
"""Gracefully exit chrome on ChromeOS."""
def _GetListOfChromePids():
"""Retrieves the list of currently-running Chrome process IDs.
Returns:
A list of strings, where each string represents a currently-running
'chrome' process ID.
"""
proc = subprocess.Popen(['pgrep', '^chrome$'], stdout=subprocess.PIPE)
proc.wait()
return [x.strip() for x in proc.stdout.readlines()]
orig_pids = _GetListOfChromePids()
subprocess.call(['pkill', '^chrome$'])
def _AreOrigPidsDead(orig_pids):
"""Determines whether all originally-running 'chrome' processes are dead.
Args:
orig_pids: A list of strings, where each string represents the PID for
an originally-running 'chrome' process.
Returns:
True, if all originally-running 'chrome' processes have been killed, or
False otherwise.
"""
for new_pid in _GetListOfChromePids():
if new_pid in orig_pids:
return False
return True
self.WaitUntil(lambda: _AreOrigPidsDead(orig_pids))
@staticmethod
def _IsRootSuid(path):
"""Determine if |path| is a suid-root file."""
return os.path.isfile(path) and (os.stat(path).st_mode & stat.S_ISUID)
@staticmethod
def SuidPythonPath():
"""Path to suid_python binary on ChromeOS.
This is typically in the same directory as pyautolib.py
"""
return os.path.join(PyUITest.BrowserPath(), 'suid-python')
@staticmethod
def RunSuperuserActionOnChromeOS(action):
"""Run the given action with superuser privs (on ChromeOS).
Uses the suid_actions.py script.
Args:
action: An action to perform.
See suid_actions.py for available options.
Returns:
(stdout, stderr)
"""
assert PyUITest._IsRootSuid(PyUITest.SuidPythonPath()), \
'Did not find suid-root python at %s' % PyUITest.SuidPythonPath()
file_path = os.path.join(os.path.dirname(__file__), 'chromeos',
'suid_actions.py')
args = [PyUITest.SuidPythonPath(), file_path, '--action=%s' % action]
proc = subprocess.Popen(
args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return (stdout, stderr)
def EnableChromeTestingOnChromeOS(self):
"""Enables the named automation interface on chromeos.
Restarts chrome so that you get a fresh instance.
Also sets some testing-friendly flags for chrome.
Expects suid python to be present in the same dir as pyautolib.py
"""
assert PyUITest._IsRootSuid(self.SuidPythonPath()), \
'Did not find suid-root python at %s' % self.SuidPythonPath()
file_path = os.path.join(os.path.dirname(__file__), 'chromeos',
'enable_testing.py')
args = [self.SuidPythonPath(), file_path]
# Pass extra chrome flags for testing
for flag in self.ExtraChromeFlags():
args.append('--extra-chrome-flags=%s' % flag)
assert self.WaitUntil(lambda: self._IsSessionManagerReady(0))
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
automation_channel_path = proc.communicate()[0].strip()
assert len(automation_channel_path), 'Could not enable testing interface'
return automation_channel_path
@staticmethod
def EnableCrashReportingOnChromeOS():
"""Enables crash reporting on ChromeOS.
Writes the "/home/chronos/Consent To Send Stats" file with a 32-char
readable string. See comment in session_manager_setup.sh which does this
too.
Note that crash reporting will work only if breakpad is built in, ie in a
'Google Chrome' build (not Chromium).
"""
consent_file = '/home/chronos/Consent To Send Stats'
def _HasValidConsentFile():
if not os.path.isfile(consent_file):
return False
      # Use 'st', not 'stat', to avoid shadowing the stat module imported
      # at the top of this file.
      st = os.stat(consent_file)
      return (len(open(consent_file).read()) and
              (1000, 1000) == (st.st_uid, st.st_gid))
if not _HasValidConsentFile():
client_id = hashlib.md5('abcdefgh').hexdigest()
# Consent file creation and chown to chronos needs to be atomic
# to avoid races with the session_manager. crosbug.com/18413
# Therefore, create a temp file, chown, then rename it as consent file.
temp_file = consent_file + '.tmp'
open(temp_file, 'w').write(client_id)
# This file must be owned by chronos:chronos!
      os.chown(temp_file, 1000, 1000)
shutil.move(temp_file, consent_file)
assert _HasValidConsentFile(), 'Could not create %s' % consent_file
@staticmethod
def _IsSessionManagerReady(old_pid):
"""Is the ChromeOS session_manager running and ready to accept DBus calls?
Called after session_manager is killed to know when it has restarted.
Args:
old_pid: The pid that session_manager had before it was killed,
to ensure that we don't look at the DBus interface
of an old session_manager process.
"""
pgrep_process = subprocess.Popen(['pgrep', 'session_manager'],
stdout=subprocess.PIPE)
new_pid = pgrep_process.communicate()[0].strip()
if not new_pid or old_pid == new_pid:
return False
import dbus
try:
bus = dbus.SystemBus()
proxy = bus.get_object('org.chromium.SessionManager',
'/org/chromium/SessionManager')
dbus.Interface(proxy, 'org.chromium.SessionManagerInterface')
except dbus.DBusException:
return False
return True
@staticmethod
def CleanupBrowserProfileOnChromeOS():
"""Cleanup browser profile dir on ChromeOS.
Browser should not be running, or else there will be locked files.
"""
profile_dir = '/home/chronos/user'
for item in os.listdir(profile_dir):
# Deleting .pki causes stateful partition to get erased.
if item not in ['log', 'flimflam'] and not item.startswith('.'):
pyauto_utils.RemovePath(os.path.join(profile_dir, item))
chronos_dir = '/home/chronos'
for item in os.listdir(chronos_dir):
if item != 'user' and not item.startswith('.'):
pyauto_utils.RemovePath(os.path.join(chronos_dir, item))
@staticmethod
def CleanupFlimflamDirsOnChromeOS():
"""Clean the contents of flimflam profiles and restart flimflam."""
PyUITest.RunSuperuserActionOnChromeOS('CleanFlimflamDirs')
@staticmethod
def RemoveAllCryptohomeVaultsOnChromeOS():
"""Remove any existing cryptohome vaults."""
PyUITest.RunSuperuserActionOnChromeOS('RemoveAllCryptohomeVaults')
@staticmethod
def TryToDisableLocalStateAutoClearingOnChromeOS():
"""Disable clearing of the local state on session manager startup.
TODO(bartfab): Remove this method when crosbug.com/20709 is fixed.
"""
PyUITest.RunSuperuserActionOnChromeOS('TryToDisableLocalStateAutoClearing')
@staticmethod
def TryToEnableLocalStateAutoClearingOnChromeOS():
"""Enable clearing of the local state on session manager startup.
TODO(bartfab): Remove this method when crosbug.com/20709 is fixed.
"""
PyUITest.RunSuperuserActionOnChromeOS('TryToEnableLocalStateAutoClearing')
@staticmethod
def IsLocalStateAutoClearingEnabledOnChromeOS():
"""Check if the session manager is set to clear the local state on startup.
TODO(bartfab): Remove this method when crosbug.com/20709 is fixed.
"""
return os.path.exists(AUTO_CLEAR_LOCAL_STATE_MAGIC_FILE)
@staticmethod
def _IsInodeNew(path, old_inode):
"""Determine whether an inode has changed. POSIX only.
Args:
path: The file path to check for changes.
old_inode: The old inode number.
Returns:
True if the path exists and its inode number is different from old_inode.
False otherwise.
"""
try:
stat_result = os.stat(path)
except OSError:
return False
if not stat_result:
return False
return stat_result.st_ino != old_inode
def RestartBrowser(self, clear_profile=True, pre_launch_hook=None):
"""Restart the browser.
For use with tests that require to restart the browser.
Args:
clear_profile: If True, the browser profile is cleared before restart.
Defaults to True, that is restarts browser with a clean
profile.
pre_launch_hook: If specified, must be a callable that is invoked before
the browser is started again. Not supported in ChromeOS.
"""
if self.IsChromeOS():
assert pre_launch_hook is None, 'Not supported in ChromeOS'
self.TearDown()
if clear_profile:
self.CleanupBrowserProfileOnChromeOS()
self.CloseChromeOnChromeOS()
self.EnableChromeTestingOnChromeOS()
self.SetUp()
return
# Not chromeos
orig_clear_state = self.get_clear_profile()
self.CloseBrowserAndServer()
self.set_clear_profile(clear_profile)
if pre_launch_hook:
pre_launch_hook()
logging.debug('Restarting browser with clear_profile=%s',
self.get_clear_profile())
self.LaunchBrowserAndServer()
self.set_clear_profile(orig_clear_state) # Reset to original state.
@staticmethod
def DataDir():
"""Returns the path to the data dir chrome/test/data."""
return os.path.normpath(
os.path.join(os.path.dirname(__file__), os.pardir, "data"))
@staticmethod
def GetFileURLForPath(*path):
"""Get file:// url for the given path.
Also quotes the url using urllib.quote().
Args:
path: Variable number of strings that can be joined.
"""
path_str = os.path.join(*path)
abs_path = os.path.abspath(path_str)
if sys.platform == 'win32':
# Don't quote the ':' in drive letter ( say, C: ) on win.
# Also, replace '\' with '/' as expected in a file:/// url.
drive, rest = os.path.splitdrive(abs_path)
quoted_path = drive.upper() + urllib.quote((rest.replace('\\', '/')))
return 'file:///' + quoted_path
else:
quoted_path = urllib.quote(abs_path)
return 'file://' + quoted_path
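  # Hedged example: on posix, GetFileURLForPath('/tmp', 'a b.html') would
  # return 'file:///tmp/a%20b.html' (the space is percent-quoted).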
@staticmethod
def GetFileURLForDataPath(*relative_path):
"""Get file:// url for the given path relative to the chrome test data dir.
Also quotes the url using urllib.quote().
Args:
relative_path: Variable number of strings that can be joined.
"""
return PyUITest.GetFileURLForPath(PyUITest.DataDir(), *relative_path)
@staticmethod
def GetHttpURLForDataPath(*relative_path):
"""Get http:// url for the given path in the data dir.
The URL will be usable only after starting the http server.
"""
global _HTTP_SERVER
assert _HTTP_SERVER, 'HTTP Server not yet started'
return _HTTP_SERVER.GetURL(os.path.join('files', *relative_path)).spec()
@staticmethod
def GetFtpURLForDataPath(ftp_server, *relative_path):
"""Get ftp:// url for the given path in the data dir.
Args:
ftp_server: handle to ftp server, an instance of TestServer
relative_path: any number of path elements
The URL will be usable only after starting the ftp server.
"""
assert ftp_server, 'FTP Server not yet started'
return ftp_server.GetURL(os.path.join(*relative_path)).spec()
@staticmethod
def IsMac():
"""Are we on Mac?"""
return 'darwin' == sys.platform
@staticmethod
def IsLinux():
"""Are we on Linux? ChromeOS is linux too."""
return sys.platform.startswith('linux')
@staticmethod
def IsWin():
"""Are we on Win?"""
return 'win32' == sys.platform
@staticmethod
def IsWin7():
"""Are we on Windows 7?"""
if not PyUITest.IsWin():
return False
ver = sys.getwindowsversion()
return (ver[3], ver[0], ver[1]) == (2, 6, 1)
@staticmethod
def IsWinVista():
"""Are we on Windows Vista?"""
if not PyUITest.IsWin():
return False
ver = sys.getwindowsversion()
return (ver[3], ver[0], ver[1]) == (2, 6, 0)
@staticmethod
def IsWinXP():
"""Are we on Windows XP?"""
if not PyUITest.IsWin():
return False
ver = sys.getwindowsversion()
return (ver[3], ver[0], ver[1]) == (2, 5, 1)
@staticmethod
def IsChromeOS():
"""Are we on ChromeOS (or Chromium OS)?
Checks for "CHROMEOS_RELEASE_NAME=" in /etc/lsb-release.
"""
lsb_release = '/etc/lsb-release'
if not PyUITest.IsLinux() or not os.path.isfile(lsb_release):
return False
for line in open(lsb_release).readlines():
if line.startswith('CHROMEOS_RELEASE_NAME='):
return True
return False
@staticmethod
def IsPosix():
"""Are we on Mac/Linux?"""
return PyUITest.IsMac() or PyUITest.IsLinux()
@staticmethod
def IsEnUS():
"""Are we en-US?"""
    # TODO: figure out the machine's language.
return True
@staticmethod
def GetPlatform():
"""Return the platform name."""
# Since ChromeOS is also Linux, we check for it first.
if PyUITest.IsChromeOS():
return 'chromeos'
elif PyUITest.IsLinux():
return 'linux'
elif PyUITest.IsMac():
return 'mac'
elif PyUITest.IsWin():
return 'win'
else:
return 'unknown'
@staticmethod
def EvalDataFrom(filename):
"""Return eval of python code from given file.
The datastructure used in the file will be preserved.
"""
data_file = os.path.join(filename)
contents = open(data_file).read()
try:
ret = eval(contents)
except:
print >>sys.stderr, '%s is an invalid data file.' % data_file
raise
return ret
@staticmethod
def ChromeOSBoard():
"""What is the ChromeOS board name"""
if PyUITest.IsChromeOS():
for line in open('/etc/lsb-release'):
line = line.strip()
if line.startswith('CHROMEOS_RELEASE_BOARD='):
return line.split('=')[1]
return None
@staticmethod
def Kill(pid):
"""Terminate the given pid.
If the pid refers to a renderer, use KillRendererProcess instead.
"""
if PyUITest.IsWin():
subprocess.call(['taskkill.exe', '/T', '/F', '/PID', str(pid)])
else:
os.kill(pid, signal.SIGTERM)
@staticmethod
def ChromeFlagsForSyncTestServer(port, xmpp_port):
"""Creates the flags list for the browser to connect to the sync server.
Use the |ExtraBrowser| class to launch a new browser with these flags.
Args:
port: The HTTP port number.
xmpp_port: The XMPP port number.
Returns:
A list with the flags.
"""
return [
'--sync-url=http://127.0.0.1:%s/chromiumsync' % port,
'--sync-allow-insecure-xmpp-connection',
'--sync-notification-host=127.0.0.1:%s' % xmpp_port,
'--sync-notification-method=p2p',
]
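  # Hedged usage sketch, pairing this with StartSyncServer() defined below:
  #
  #   server = self.StartSyncServer()
  #   flags = self.ChromeFlagsForSyncTestServer(**server.ports)
  #
  # The resulting flags can then be passed to a freshly launched browser.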
def GetPrivateInfo(self):
"""Fetch info from private_tests_info.txt in private dir.
Returns:
a dictionary of items from private_tests_info.txt
"""
private_file = os.path.join(
self.DataDir(), 'pyauto_private', 'private_tests_info.txt')
assert os.path.exists(private_file), '%s missing' % private_file
return self.EvalDataFrom(private_file)
def WaitUntil(self, function, timeout=-1, retry_sleep=0.25, args=[],
expect_retval=None, debug=True):
"""Poll on a condition until timeout.
    Waits until the |function| evaluates to |expect_retval| or until |timeout|
secs, whichever occurs earlier.
This is better than using a sleep, since it waits (almost) only as much
as needed.
WARNING: This method call should be avoided as far as possible in favor
of a real wait from chromium (like wait-until-page-loaded).
    Only use it when there's really no better option.
EXAMPLES:-
Wait for "file.txt" to get created:
WaitUntil(os.path.exists, args=["file.txt"])
Same as above, but using lambda:
WaitUntil(lambda: os.path.exists("file.txt"))
Args:
function: the function whose truth value is to be evaluated
timeout: the max timeout (in secs) for which to wait. The default
action is to wait for kWaitForActionMaxMsec, as set in
ui_test.cc
Use None to wait indefinitely.
retry_sleep: the sleep interval (in secs) before retrying |function|.
Defaults to 0.25 secs.
args: the args to pass to |function|
expect_retval: the expected return value for |function|. This forms the
exit criteria. In case this is None (the default),
|function|'s return value is checked for truth,
so 'non-empty-string' should match with True
debug: if True, displays debug info at each retry.
Returns:
True, if returning when |function| evaluated to True
False, when returning due to timeout
"""
if timeout == -1: # Default
timeout = self.action_max_timeout_ms() / 1000.0
assert callable(function), "function should be a callable"
begin = time.time()
debug_begin = begin
while timeout is None or time.time() - begin <= timeout:
retval = function(*args)
if (expect_retval is None and retval) or expect_retval == retval:
return True
if debug and time.time() - debug_begin > 5:
debug_begin += 5
if function.func_name == (lambda: True).func_name:
function_info = inspect.getsource(function).strip()
else:
function_info = '%s()' % function.func_name
logging.debug('WaitUntil(%s:%d %s) still waiting. '
'Expecting %s. Last returned %s.',
os.path.basename(inspect.getsourcefile(function)),
inspect.getsourcelines(function)[1],
function_info,
True if expect_retval is None else expect_retval,
retval)
time.sleep(retry_sleep)
return False
def StartSyncServer(self):
"""Start a local sync server.
Adds a dictionary attribute 'ports' in returned object.
Returns:
A handle to Sync Server, an instance of TestServer
"""
sync_server = pyautolib.TestServer(pyautolib.TestServer.TYPE_SYNC,
'127.0.0.1',
pyautolib.FilePath(''))
assert sync_server.Start(), 'Could not start sync server'
sync_server.ports = dict(port=sync_server.GetPort(),
xmpp_port=sync_server.GetSyncXmppPort())
logging.debug('Started sync server at ports %s.', sync_server.ports)
return sync_server
def StopSyncServer(self, sync_server):
"""Stop the local sync server."""
assert sync_server, 'Sync Server not yet started'
assert sync_server.Stop(), 'Could not stop sync server'
logging.debug('Stopped sync server at ports %s.', sync_server.ports)
def StartFTPServer(self, data_dir):
"""Start a local file server hosting data files over ftp://
Args:
data_dir: path where ftp files should be served
Returns:
handle to FTP Server, an instance of TestServer
"""
ftp_server = pyautolib.TestServer(pyautolib.TestServer.TYPE_FTP,
'127.0.0.1',
pyautolib.FilePath(data_dir))
assert ftp_server.Start(), 'Could not start ftp server'
logging.debug('Started ftp server at "%s".', data_dir)
return ftp_server
def StopFTPServer(self, ftp_server):
"""Stop the local ftp server."""
assert ftp_server, 'FTP Server not yet started'
assert ftp_server.Stop(), 'Could not stop ftp server'
logging.debug('Stopped ftp server.')
def StartHTTPServer(self, data_dir):
"""Starts a local HTTP TestServer serving files from |data_dir|.
Args:
data_dir: path where the TestServer should serve files from. This will be
appended to the source dir to get the final document root.
Returns:
handle to the HTTP TestServer
"""
http_server = pyautolib.TestServer(pyautolib.TestServer.TYPE_HTTP,
'127.0.0.1',
pyautolib.FilePath(data_dir))
assert http_server.Start(), 'Could not start HTTP server'
logging.debug('Started HTTP server at "%s".', data_dir)
return http_server
def StopHTTPServer(self, http_server):
assert http_server, 'HTTP server not yet started'
    assert http_server.Stop(), 'Could not stop the HTTP server'
logging.debug('Stopped HTTP server.')
def StartHttpsServer(self, cert_type, data_dir):
"""Starts a local HTTPS TestServer serving files from |data_dir|.
Args:
cert_type: An instance of HTTPSOptions.ServerCertificate for three
certificate types: ok, expired, or mismatch.
data_dir: The path where TestServer should serve files from. This is
appended to the source dir to get the final document root.
Returns:
Handle to the HTTPS TestServer
"""
https_server = pyautolib.TestServer(
pyautolib.HTTPSOptions(cert_type), pyautolib.FilePath(data_dir))
assert https_server.Start(), 'Could not start HTTPS server.'
    logging.debug('Started HTTPS server at "%s".' % data_dir)
return https_server
def StopHttpsServer(self, https_server):
assert https_server, 'HTTPS server not yet started.'
assert https_server.Stop(), 'Could not stop the HTTPS server.'
logging.debug('Stopped HTTPS server.')
class ActionTimeoutChanger(object):
"""Facilitate temporary changes to action_timeout_ms.
Automatically resets to original timeout when object is destroyed.
"""
_saved_timeout = -1 # Saved value for action_timeout_ms
def __init__(self, ui_test, new_timeout):
"""Initialize.
Args:
ui_test: a PyUITest object
new_timeout: new timeout to use (in milli secs)
"""
self._saved_timeout = ui_test.action_timeout_ms()
if new_timeout != self._saved_timeout:
ui_test.set_action_timeout_ms(new_timeout)
self._ui_test = ui_test
def __del__(self):
"""Reset command_execution_timeout_ms to original value."""
if self._ui_test.action_timeout_ms() != self._saved_timeout:
self._ui_test.set_action_timeout_ms(self._saved_timeout)
class JavascriptExecutor(object):
"""Abstract base class for JavaScript injection.
Derived classes should override Execute method."""
def Execute(self, script):
pass
class JavascriptExecutorInTab(JavascriptExecutor):
"""Wrapper for injecting JavaScript in a tab."""
def __init__(self, ui_test, tab_index=0, windex=0, frame_xpath=''):
"""Initialize.
Refer to ExecuteJavascript() for the complete argument list
description.
Args:
ui_test: a PyUITest object
"""
self._ui_test = ui_test
self.windex = windex
self.tab_index = tab_index
self.frame_xpath = frame_xpath
def Execute(self, script):
"""Execute script in the tab."""
return self._ui_test.ExecuteJavascript(script,
self.tab_index,
self.windex,
self.frame_xpath)
class JavascriptExecutorInRenderView(JavascriptExecutor):
"""Wrapper for injecting JavaScript in an extension view."""
def __init__(self, ui_test, view, frame_xpath=''):
"""Initialize.
Refer to ExecuteJavascriptInRenderView() for the complete argument list
description.
Args:
ui_test: a PyUITest object
"""
self._ui_test = ui_test
self.view = view
self.frame_xpath = frame_xpath
def Execute(self, script):
"""Execute script in the render view."""
return self._ui_test.ExecuteJavascriptInRenderView(script,
self.view,
self.frame_xpath)
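  # Hedged usage sketch: the executors above let helper code inject
  # JavaScript without knowing whether the target is a tab or a render view,
  # following the usual pyauto pattern of returning values via
  # domAutomationController:
  #
  #   executor = self.JavascriptExecutorInTab(self, tab_index=0)
  #   title = executor.Execute(
  #       'window.domAutomationController.send(document.title);')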
def _GetResultFromJSONRequestDiagnostics(self):
"""Same as _GetResultFromJSONRequest without throwing a timeout exception.
This method is used to diagnose if a command returns without causing a
timout exception to be thrown. This should be used for debugging purposes
only.
Returns:
True if the request returned; False if it timed out.
"""
result = self._SendJSONRequest(-1,
json.dumps({'command': 'GetBrowserInfo',}),
self.action_max_timeout_ms())
if not result:
      # The diagnostic command did not complete; Chrome is probably in a
      # bad state.
return False
return True
def _GetResultFromJSONRequest(self, cmd_dict, windex=0, timeout=-1):
"""Issue call over the JSON automation channel and fetch output.
This method packages the given dictionary into a json string, sends it
over the JSON automation channel, loads the json output string returned,
and returns it back as a dictionary.
Args:
cmd_dict: the command dictionary. It must have a 'command' key
Sample:
{
'command': 'SetOmniboxText',
'text': text,
}
windex: 0-based window index on which to work. Default: 0 (first window)
Use -ve windex or None if the automation command does not apply
to a browser window. Example: for chromeos login
timeout: request timeout (in milliseconds)
Returns:
a dictionary for the output returned by the automation channel.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
if timeout == -1: # Default
timeout = self.action_max_timeout_ms()
if windex is None: # Do not target any window
windex = -1
result = self._SendJSONRequest(windex, json.dumps(cmd_dict), timeout)
if not result:
additional_info = 'No information available.'
# Windows does not support os.kill until Python 2.7.
if not self.IsWin() and _BROWSER_PID:
browser_pid_exists = True
# Does the browser PID exist?
try:
# Does not actually kill the process
os.kill(int(_BROWSER_PID), 0)
except OSError:
browser_pid_exists = False
if browser_pid_exists:
if self._GetResultFromJSONRequestDiagnostics():
# Browser info, worked, that means this hook had a problem
additional_info = ('The browser process ID %d still exists. '
'PyAuto was able to obtain browser info. It '
'is possible this hook is broken.'
% _BROWSER_PID)
else:
additional_info = ('The browser process ID %d still exists. '
'PyAuto was not able to obtain browser info. '
'It is possible the browser is hung.'
% _BROWSER_PID)
else:
additional_info = ('The browser process ID %d no longer exists. '
'Perhaps the browser crashed.' % _BROWSER_PID)
elif not _BROWSER_PID:
additional_info = ('The browser PID was not obtained. Does this test '
'have a unique startup configuration?')
# Mask private data if it is in the JSON dictionary
cmd_dict_copy = copy.copy(cmd_dict)
if 'password' in cmd_dict_copy.keys():
cmd_dict_copy['password'] = '**********'
if 'username' in cmd_dict_copy.keys():
cmd_dict_copy['username'] = 'removed_username'
raise JSONInterfaceError('Automation call %s received empty response. '
'Additional information:\n%s' % (cmd_dict_copy,
additional_info))
ret_dict = json.loads(result)
if ret_dict.has_key('error'):
raise JSONInterfaceError(ret_dict['error'])
return ret_dict
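  # Hedged usage sketch: a typical JSON hook call from a method of this
  # class looks like the following, where 'command' is mandatory and any
  # remaining keys are specific to that command:
  #
  #   cmd_dict = {'command': 'GetInstantInfo'}
  #   result = self._GetResultFromJSONRequest(cmd_dict, windex=0)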
def GetBookmarkModel(self):
"""Return the bookmark model as a BookmarkModel object.
This is a snapshot of the bookmark model; it is not a proxy and
does not get updated as the bookmark model changes.
"""
bookmarks_as_json = self._GetBookmarksAsJSON()
if bookmarks_as_json == None:
raise JSONInterfaceError('Could not resolve browser proxy.')
return bookmark_model.BookmarkModel(bookmarks_as_json)
def GetDownloadsInfo(self, windex=0):
"""Return info about downloads.
This includes all the downloads recognized by the history system.
Returns:
an instance of downloads_info.DownloadInfo
"""
return download_info.DownloadInfo(
self._SendJSONRequest(
windex, json.dumps({'command': 'GetDownloadsInfo'}),
self.action_max_timeout_ms()))
def GetOmniboxInfo(self, windex=0):
"""Return info about Omnibox.
This represents a snapshot of the omnibox. If you expect changes
you need to call this method again to get a fresh snapshot.
Note that this DOES NOT shift focus to the omnibox; you've to ensure that
the omnibox is in focus or else you won't get any interesting info.
It's OK to call this even when the omnibox popup is not showing. In this
    case, however, there won't be any matches, but other properties (like the
current text in the omnibox) will still be fetched.
Due to the nature of the omnibox, this function is sensitive to mouse
focus. DO NOT HOVER MOUSE OVER OMNIBOX OR CHANGE WINDOW FOCUS WHEN USING
THIS METHOD.
Args:
windex: the index of the browser window to work on.
Default: 0 (first window)
Returns:
an instance of omnibox_info.OmniboxInfo
"""
return omnibox_info.OmniboxInfo(
self._SendJSONRequest(windex,
json.dumps({'command': 'GetOmniboxInfo'}),
self.action_max_timeout_ms()))
def SetOmniboxText(self, text, windex=0):
"""Enter text into the omnibox. This shifts focus to the omnibox.
Args:
text: the text to be set.
windex: the index of the browser window to work on.
Default: 0 (first window)
"""
# Ensure that keyword data is loaded from the profile.
# This would normally be triggered by the user inputting this text.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'})
cmd_dict = {
'command': 'SetOmniboxText',
'text': text,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
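  # Hedged usage sketch, chaining the omnibox helpers in this class:
  #
  #   self.SetOmniboxText('pyauto docs')
  #   self.WaitUntilOmniboxQueryDone()
  #   self.OmniboxAcceptInput()  # loads the selected match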
# TODO(ace): Remove this hack, update bug 62783.
def WaitUntilOmniboxReadyHack(self, windex=0):
"""Wait until the omnibox is ready for input.
This is a hack workaround for linux platform, which returns from
synchronous window creation methods before the omnibox is fully functional.
No-op on non-linux platforms.
Args:
windex: the index of the browser to work on.
"""
if self.IsLinux():
return self.WaitUntil(
lambda : self.GetOmniboxInfo(windex).Properties('has_focus'))
def WaitUntilOmniboxQueryDone(self, windex=0):
"""Wait until omnibox has finished populating results.
Uses WaitUntil() so the wait duration is capped by the timeout values
used by automation, which WaitUntil() uses.
Args:
windex: the index of the browser window to work on.
Default: 0 (first window)
"""
return self.WaitUntil(
lambda : not self.GetOmniboxInfo(windex).IsQueryInProgress())
def OmniboxMovePopupSelection(self, count, windex=0):
"""Move omnibox popup selection up or down.
Args:
count: number of rows by which to move.
-ve implies down, +ve implies up
windex: the index of the browser window to work on.
Default: 0 (first window)
"""
cmd_dict = {
'command': 'OmniboxMovePopupSelection',
'count': count,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def OmniboxAcceptInput(self, windex=0):
"""Accepts the current string of text in the omnibox.
    This is equivalent to clicking or hitting enter on a popup selection.
Blocks until the page loads.
Args:
windex: the index of the browser window to work on.
Default: 0 (first window)
"""
cmd_dict = {
'command': 'OmniboxAcceptInput',
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def GetInstantInfo(self):
"""Return info about the instant overlay tab.
Returns:
A dictionary.
Examples:
{ u'enabled': True,
u'active': True,
u'current': True,
u'loading': True,
u'location': u'http://cnn.com/',
u'showing': False,
u'title': u'CNN.com - Breaking News'},
{ u'enabled': False }
"""
cmd_dict = {'command': 'GetInstantInfo'}
return self._GetResultFromJSONRequest(cmd_dict)['instant']
def GetSearchEngineInfo(self, windex=0):
"""Return info about search engines.
Args:
windex: The window index, default is 0.
Returns:
An ordered list of dictionaries describing info about each search engine.
Example:
[ { u'display_url': u'{google:baseURL}search?q=%s',
u'host': u'www.google.com',
u'in_default_list': True,
u'is_default': True,
u'is_valid': True,
u'keyword': u'google.com',
u'path': u'/search',
u'short_name': u'Google',
u'supports_replacement': True,
u'url': u'{google:baseURL}search?q={searchTerms}'},
{ u'display_url': u'http://search.yahoo.com/search?p=%s',
u'host': u'search.yahoo.com',
u'in_default_list': True,
u'is_default': False,
u'is_valid': True,
u'keyword': u'yahoo.com',
u'path': u'/search',
u'short_name': u'Yahoo!',
u'supports_replacement': True,
u'url': u'http://search.yahoo.com/search?p={searchTerms}'},
"""
# Ensure that the search engine profile is loaded into data model.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'},
windex=windex)
cmd_dict = {'command': 'GetSearchEngineInfo'}
return self._GetResultFromJSONRequest(
cmd_dict, windex=windex)['search_engines']
def AddSearchEngine(self, title, keyword, url, windex=0):
"""Add a search engine, as done through the search engines UI.
Args:
title: name for search engine.
keyword: keyword, used to initiate a custom search from omnibox.
url: url template for this search engine's query.
'%s' is replaced by search query string when used to search.
windex: The window index, default is 0.
"""
# Ensure that the search engine profile is loaded into data model.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'},
windex=windex)
cmd_dict = {'command': 'AddOrEditSearchEngine',
'new_title': title,
'new_keyword': keyword,
'new_url': url}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def EditSearchEngine(self, keyword, new_title, new_keyword, new_url,
windex=0):
"""Edit info for existing search engine.
Args:
keyword: existing search engine keyword.
new_title: new name for this search engine.
new_keyword: new keyword for this search engine.
new_url: new url for this search engine.
windex: The window index, default is 0.
"""
# Ensure that the search engine profile is loaded into data model.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'},
windex=windex)
cmd_dict = {'command': 'AddOrEditSearchEngine',
'keyword': keyword,
'new_title': new_title,
'new_keyword': new_keyword,
'new_url': new_url}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def DeleteSearchEngine(self, keyword, windex=0):
"""Delete search engine with given keyword.
Args:
keyword: the keyword string of the search engine to delete.
windex: The window index, default is 0.
"""
# Ensure that the search engine profile is loaded into data model.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'},
windex=windex)
cmd_dict = {'command': 'PerformActionOnSearchEngine', 'keyword': keyword,
'action': 'delete'}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def MakeSearchEngineDefault(self, keyword, windex=0):
"""Make search engine with given keyword the default search.
Args:
keyword: the keyword string of the search engine to make default.
windex: The window index, default is 0.
"""
# Ensure that the search engine profile is loaded into data model.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'},
windex=windex)
cmd_dict = {'command': 'PerformActionOnSearchEngine', 'keyword': keyword,
'action': 'default'}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
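  # Hedged usage sketch: adding a custom engine and making it the default.
  # The title, keyword, and URL below are illustrative assumptions:
  #
  #   self.AddSearchEngine('Example', 'ex', 'http://example.com/?q=%s')
  #   self.MakeSearchEngineDefault('ex')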
def _EnsureProtectorCheck(self):
"""Ensure that Protector check for changed settings has been performed in
the current browser session.
No-op if Protector is disabled.
"""
# Ensure that check for default search engine change has been performed.
self._GetResultFromJSONRequest({'command': 'LoadSearchEngineInfo'})
def GetProtectorState(self, window_index=0):
"""Returns current Protector state.
This will trigger Protector's check for changed settings if it hasn't been
performed yet.
Args:
window_index: The window index, default is 0.
Returns:
A dictionary.
Example:
{ u'enabled': True,
u'showing_change': False }
"""
self._EnsureProtectorCheck()
cmd_dict = {'command': 'GetProtectorState'}
return self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
def ApplyProtectorChange(self):
"""Applies the change shown by Protector and closes the bubble.
No-op if Protector is not showing any change.
"""
cmd_dict = {'command': 'PerformProtectorAction',
'action': 'apply_change'}
self._GetResultFromJSONRequest(cmd_dict)
def DiscardProtectorChange(self):
"""Discards the change shown by Protector and closes the bubble.
No-op if Protector is not showing any change.
"""
cmd_dict = {'command': 'PerformProtectorAction',
'action': 'discard_change'}
self._GetResultFromJSONRequest(cmd_dict)
def GetLocalStatePrefsInfo(self):
"""Return info about preferences.
This represents a snapshot of the local state preferences. If you expect
local state preferences to have changed, you need to call this method again
to get a fresh snapshot.
Returns:
an instance of prefs_info.PrefsInfo
"""
return prefs_info.PrefsInfo(
self._SendJSONRequest(-1,
json.dumps({'command': 'GetLocalStatePrefsInfo'}),
self.action_max_timeout_ms()))
def SetLocalStatePrefs(self, path, value):
"""Set local state preference for the given path.
Preferences are stored by Chromium as a hierarchical dictionary.
dot-separated paths can be used to refer to a particular preference.
example: "session.restore_on_startup"
Some preferences are managed, that is, they cannot be changed by the
user. It's up to the user to know which ones can be changed. Typically,
the options available via Chromium preferences can be changed.
Args:
      path: the path to the preference key that needs to be changed
example: "session.restore_on_startup"
One of the equivalent names in chrome/common/pref_names.h could
also be used.
value: the value to be set. It could be plain values like int, bool,
string or complex ones like list.
The user has to ensure that the right value is specified for the
right key. It's useful to dump the preferences first to determine
what type is expected for a particular preference path.
"""
cmd_dict = {
'command': 'SetLocalStatePrefs',
'windex': 0,
'path': path,
'value': value,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def GetPrefsInfo(self):
"""Return info about preferences.
This represents a snapshot of the preferences. If you expect preferences
to have changed, you need to call this method again to get a fresh
snapshot.
Returns:
an instance of prefs_info.PrefsInfo
"""
cmd_dict = {
'command': 'GetPrefsInfo',
'windex': 0,
}
return prefs_info.PrefsInfo(
self._SendJSONRequest(-1, json.dumps(cmd_dict),
self.action_max_timeout_ms()))
def SetPrefs(self, path, value, windex=0):
"""Set preference for the given path.
Preferences are stored by Chromium as a hierarchical dictionary.
dot-separated paths can be used to refer to a particular preference.
example: "session.restore_on_startup"
Some preferences are managed, that is, they cannot be changed by the
user. It's up to the user to know which ones can be changed. Typically,
the options available via Chromium preferences can be changed.
Args:
      path: the path to the preference key that needs to be changed
example: "session.restore_on_startup"
One of the equivalent names in chrome/common/pref_names.h could
also be used.
value: the value to be set. It could be plain values like int, bool,
string or complex ones like list.
The user has to ensure that the right value is specified for the
right key. It's useful to dump the preferences first to determine
what type is expected for a particular preference path.
windex: window index to work on. Defaults to 0 (first window).
"""
cmd_dict = {
'command': 'SetPrefs',
'windex': windex,
'path': path,
'value': value,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
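  # Hedged usage sketch, using the pref path cited in the docstring above:
  #
  #   self.SetPrefs('session.restore_on_startup', 1)
  #
  # The value type must match what the pref expects; dumping GetPrefsInfo()
  # first is a reasonable way to check.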
def SendWebkitKeyEvent(self, key_type, key_code, tab_index=0, windex=0):
"""Send a webkit key event to the browser.
Args:
key_type: the raw key type such as 0 for up and 3 for down.
key_code: the hex value associated with the keypress (virtual key code).
tab_index: tab index to work on. Defaults to 0 (first tab).
windex: window index to work on. Defaults to 0 (first window).
"""
cmd_dict = {
'command': 'SendWebkitKeyEvent',
'type': key_type,
'text': '',
'isSystemKey': False,
'unmodifiedText': '',
'nativeKeyCode': 0,
'windowsKeyCode': key_code,
'modifiers': 0,
'windex': windex,
'tab_index': tab_index,
}
# Sending request for key event.
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def SendWebkitCharEvent(self, char, tab_index=0, windex=0):
"""Send a webkit char to the browser.
Args:
char: the char value to be sent to the browser.
tab_index: tab index to work on. Defaults to 0 (first tab).
windex: window index to work on. Defaults to 0 (first window).
"""
cmd_dict = {
'command': 'SendWebkitKeyEvent',
'type': 2, # kCharType
'text': char,
'isSystemKey': False,
'unmodifiedText': char,
'nativeKeyCode': 0,
'windowsKeyCode': ord((char).upper()),
'modifiers': 0,
'windex': windex,
'tab_index': tab_index,
}
# Sending request for a char.
self._GetResultFromJSONRequest(cmd_dict, windex=None)
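  # Hedged usage sketch: typing a single letter usually takes a key-down,
  # a char, and a key-up event, using the raw type values noted in the
  # docstrings above (3 = down, 2 = char, 0 = up) and the virtual key code
  # 0x41 for 'A':
  #
  #   self.SendWebkitKeyEvent(3, 0x41)
  #   self.SendWebkitCharEvent('a')
  #   self.SendWebkitKeyEvent(0, 0x41)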
def WaitForAllDownloadsToComplete(self, pre_download_ids=[], windex=0,
timeout=-1):
"""Wait for all pending downloads to complete.
This function assumes that any downloads to wait for have already been
triggered and have started (it is ok if those downloads complete before this
function is called).
Args:
pre_download_ids: A list of numbers representing the IDs of downloads that
exist *before* downloads to wait for have been
triggered. Defaults to []; use GetDownloadsInfo() to get
these IDs (only necessary if a test previously
downloaded files).
windex: The window index, defaults to 0 (the first window).
timeout: The maximum amount of time (in milliseconds) to wait for
downloads to complete.
"""
cmd_dict = {
'command': 'WaitForAllDownloadsToComplete',
'pre_download_ids': pre_download_ids,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex, timeout=timeout)
def PerformActionOnDownload(self, id, action, window_index=0):
"""Perform the given action on the download with the given id.
Args:
id: The id of the download.
action: The action to perform on the download.
Possible actions:
'open': Opens the download (waits until it has completed first).
'toggle_open_files_like_this': Toggles the 'Always Open Files
Of This Type' option.
'remove': Removes the file from downloads (not from disk).
'decline_dangerous_download': Equivalent to 'Discard' option
after downloading a dangerous download (ex. an executable).
'save_dangerous_download': Equivalent to 'Save' option after
downloading a dangerous file.
'toggle_pause': Toggles the paused state of the download. If the
download completed before this call, it's a no-op.
'cancel': Cancel the download.
window_index: The window index, default is 0.
Returns:
A dictionary representing the updated download item (except in the case
of 'decline_dangerous_download', 'toggle_open_files_like_this', and
'remove', which return an empty dict).
Example dictionary:
{ u'PercentComplete': 100,
u'file_name': u'file.txt',
u'full_path': u'/path/to/file.txt',
u'id': 0,
u'is_otr': False,
u'is_paused': False,
u'is_temporary': False,
u'open_when_complete': False,
u'referrer_url': u'',
u'safety_state': u'SAFE',
u'state': u'COMPLETE',
u'url': u'file://url/to/file.txt'
}
"""
cmd_dict = { # Prepare command for the json interface
'command': 'PerformActionOnDownload',
'id': id,
'action': action
}
return self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
def DownloadAndWaitForStart(self, file_url, windex=0):
"""Trigger download for the given url and wait for downloads to start.
It waits for download by looking at the download info from Chrome, so
anything which isn't registered by the history service won't be noticed.
This is not thread-safe, but it's fine to call this method to start
downloading multiple files in parallel. That is, after starting a
download, it's fine to start another one even if the first one hasn't
completed.
"""
try:
num_downloads = len(self.GetDownloadsInfo(windex).Downloads())
except JSONInterfaceError:
num_downloads = 0
self.NavigateToURL(file_url, windex) # Trigger download.
# It might take a while for the download to kick in, hold on until then.
self.assertTrue(self.WaitUntil(
lambda: len(self.GetDownloadsInfo(windex).Downloads()) >
num_downloads))
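# Download-flow sketch combining the helpers above; the URL is illustrative
# and download dictionaries are assumed to carry the 'id' key shown in the
# PerformActionOnDownload sample.
#
#   pre_ids = [d['id'] for d in self.GetDownloadsInfo().Downloads()]
#   self.DownloadAndWaitForStart('http://example.com/file.zip')
#   self.WaitForAllDownloadsToComplete(pre_download_ids=pre_ids)
#   new = [d for d in self.GetDownloadsInfo().Downloads()
#          if d['id'] not in pre_ids]
#   self.PerformActionOnDownload(new[0]['id'], 'remove')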
def SetWindowDimensions(
self, x=None, y=None, width=None, height=None, windex=0):
"""Set window dimensions.
All args are optional and current values will be preserved.
Arbitrarily large values will be handled gracefully by the browser.
Args:
x: window origin x
y: window origin y
width: window width
height: window height
windex: window index to work on. Defaults to 0 (first window)
"""
cmd_dict = { # Prepare command for the json interface
'command': 'SetWindowDimensions',
}
if x is not None:
cmd_dict['x'] = x
if y is not None:
cmd_dict['y'] = y
if width is not None:
cmd_dict['width'] = width
if height is not None:
cmd_dict['height'] = height
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def WaitForInfobarCount(self, count, windex=0, tab_index=0):
"""Wait until infobar count becomes |count|.
Note: Wait duration is capped by the automation timeout.
Args:
count: requested number of infobars
windex: window index. Defaults to 0 (first window)
tab_index: tab index. Defaults to 0 (first tab)
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# TODO(phajdan.jr): We need a solid automation infrastructure to handle
# these cases. See crbug.com/53647.
def _InfobarCount():
windows = self.GetBrowserInfo()['windows']
if windex >= len(windows): # not enough windows
return -1
tabs = windows[windex]['tabs']
if tab_index >= len(tabs): # not enough tabs
return -1
return len(tabs[tab_index]['infobars'])
return self.WaitUntil(_InfobarCount, expect_retval=count)
def PerformActionOnInfobar(
self, action, infobar_index, windex=0, tab_index=0):
"""Perform actions on an infobar.
Args:
action: the action to be performed.
Actions depend on the type of the infobar. The user needs to
call the right action for the right infobar.
Valid inputs are:
- "dismiss": closes the infobar (for all infobars)
- "accept", "cancel": click accept / cancel (for confirm infobars)
- "allow", "deny": click allow / deny (for media stream infobars)
infobar_index: 0-based index of the infobar on which to perform the action
windex: 0-based window index. Defaults to 0 (first window)
tab_index: 0-based tab index. Defaults to 0 (first tab)
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'PerformActionOnInfobar',
'action': action,
'infobar_index': infobar_index,
'tab_index': tab_index,
}
if action not in ('dismiss', 'accept', 'allow', 'deny', 'cancel'):
raise JSONInterfaceError('Invalid action %s' % action)
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
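# Sketch pairing the infobar helpers above: wait for a confirm infobar
# (e.g. a geolocation prompt) to appear in the first tab, then accept it.
#
#   self.assertTrue(self.WaitForInfobarCount(1))
#   self.PerformActionOnInfobar('accept', infobar_index=0)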
def GetBrowserInfo(self):
"""Return info about the browser.
This includes things like the version number, the executable name,
executable path, pid info about the renderer/plugin/extension processes,
and window dimensions. (See the sample below.)
For notification pid info, see 'GetActiveNotifications'.
Returns:
a dictionary
Sample:
{ u'browser_pid': 93737,
# Child processes are the processes for plugins and other workers.
u'child_process_path': u'.../Chromium.app/Contents/'
'Versions/6.0.412.0/Chromium Helper.app/'
'Contents/MacOS/Chromium Helper',
u'child_processes': [ { u'name': u'Shockwave Flash',
u'pid': 93766,
u'type': u'Plug-in'}],
u'extension_views': [ {
u'name': u'Webpage Screenshot',
u'pid': 93938,
u'extension_id': u'dgcoklnmbeljaehamekjpeidmbicddfj',
u'url': u'chrome-extension://dgcoklnmbeljaehamekjpeidmbicddfj/'
'bg.html',
u'loaded': True,
u'view': {
u'render_process_id': 2,
u'render_view_id': 1},
u'view_type': u'EXTENSION_BACKGROUND_PAGE'}],
u'properties': {
u'BrowserProcessExecutableName': u'Chromium',
u'BrowserProcessExecutablePath': u'Chromium.app/Contents/MacOS/'
'Chromium',
u'ChromeVersion': u'6.0.412.0',
u'HelperProcessExecutableName': u'Chromium Helper',
u'HelperProcessExecutablePath': u'Chromium Helper.app/Contents/'
'MacOS/Chromium Helper',
u'command_line_string': "COMMAND_LINE_STRING --WITH-FLAGS",
u'branding': 'Chromium',
u'is_official': False},
# The order of the windows and tabs listed here will be the same as
# what shows up on screen.
u'windows': [ { u'index': 0,
u'height': 1134,
u'incognito': False,
u'profile_path': u'Default',
u'fullscreen': False,
u'visible_page_actions':
[u'dgcoklnmbeljaehamekjpeidmbicddfj',
u'osfcklnfasdofpcldmalwpicslasdfgd'],
u'selected_tab': 0,
u'tabs': [ {
u'index': 0,
u'infobars': [],
u'pinned': True,
u'renderer_pid': 93747,
u'url': u'http://www.google.com/' }, {
u'index': 1,
u'infobars': [],
u'pinned': False,
u'renderer_pid': 93919,
u'url': u'https://chrome.google.com/'}, {
u'index': 2,
u'infobars': [ {
u'buttons': [u'Allow', u'Deny'],
u'link_text': u'Learn more',
u'text': u'slides.html5rocks.com wants to track '
'your physical location',
u'type': u'confirm_infobar'}],
u'pinned': False,
u'renderer_pid': 93929,
u'url': u'http://slides.html5rocks.com/#slide14'},
],
u'type': u'tabbed',
u'width': 925,
u'x': 26,
u'y': 44}]}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetBrowserInfo',
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
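# Example of walking the GetBrowserInfo() result, mirroring the sample
# above: collect the URL of every tab in every window.
#
#   info = self.GetBrowserInfo()
#   urls = [tab['url'] for win in info['windows'] for tab in win['tabs']]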
def IsAura(self):
"""Is this Aura?"""
return self.GetBrowserInfo()['properties']['aura']
def GetProcessInfo(self):
"""Returns information about browser-related processes that currently exist.
This will also return information about other currently-running browsers
besides just Chrome.
Returns:
A dictionary containing browser-related process information as identified
by class MemoryDetails in src/chrome/browser/memory_details.h. The
dictionary contains a single key 'browsers', mapped to a list of
dictionaries containing information about each browser process name.
Each of those dictionaries contains a key 'processes', mapped to a list
of dictionaries containing the specific information for each process
with the given process name.
The memory values given in |committed_mem| and |working_set_mem| are in
KBytes.
Sample:
{ 'browsers': [ { 'name': 'Chromium',
'process_name': 'chrome',
'processes': [ { 'child_process_type': 'Browser',
'committed_mem': { 'image': 0,
'mapped': 0,
'priv': 0},
'is_diagnostics': False,
'num_processes': 1,
'pid': 7770,
'product_name': '',
'renderer_type': 'Unknown',
'titles': [],
'version': '',
'working_set_mem': { 'priv': 43672,
'shareable': 0,
'shared': 59251}},
{ 'child_process_type': 'Tab',
'committed_mem': { 'image': 0,
'mapped': 0,
'priv': 0},
'is_diagnostics': False,
'num_processes': 1,
'pid': 7791,
'product_name': '',
'renderer_type': 'Tab',
'titles': ['about:blank'],
'version': '',
'working_set_mem': { 'priv': 16768,
'shareable': 0,
'shared': 26256}},
...<more processes>...]}]}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface.
'command': 'GetProcessInfo',
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def GetNavigationInfo(self, tab_index=0, windex=0):
"""Get info about the navigation state of a given tab.
Args:
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
Returns:
a dictionary.
Sample:
{ u'favicon_url': u'https://www.google.com/favicon.ico',
u'page_type': u'NORMAL_PAGE',
u'ssl': { u'displayed_insecure_content': False,
u'ran_insecure_content': False,
u'security_style': u'SECURITY_STYLE_AUTHENTICATED'}}
Values for security_style can be:
SECURITY_STYLE_UNKNOWN
SECURITY_STYLE_UNAUTHENTICATED
SECURITY_STYLE_AUTHENTICATION_BROKEN
SECURITY_STYLE_AUTHENTICATED
Values for page_type can be:
NORMAL_PAGE
ERROR_PAGE
INTERSTITIAL_PAGE
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetNavigationInfo',
'tab_index': tab_index,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def GetHistoryInfo(self, search_text=''):
"""Return info about browsing history.
Args:
search_text: the string to search in history. Defaults to empty string
which means that all history would be returned. This is
functionally equivalent to searching for a text in the
chrome://history UI. So partial matches work too.
When non-empty, the history items returned will contain a
"snippet" field corresponding to the snippet visible in
the chrome://history/ UI.
Returns:
an instance of history_info.HistoryInfo
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetHistoryInfo',
'search_text': search_text,
}
return history_info.HistoryInfo(
self._SendJSONRequest(0, json.dumps(cmd_dict),
self.action_max_timeout_ms()))
def GetTranslateInfo(self, tab_index=0, window_index=0):
"""Returns info about translate for the given page.
If the translate bar is showing, also returns information about the bar.
Args:
tab_index: The tab index, default is 0.
window_index: The window index, default is 0.
Returns:
A dictionary of information about translate for the page. Example:
{ u'always_translate_lang_button_showing': False,
u'never_translate_lang_button_showing': False,
u'can_translate_page': True,
u'original_language': u'es',
u'page_translated': False,
# The below will only appear if the translate bar is showing.
u'translate_bar': { u'bar_state': u'BEFORE_TRANSLATE',
u'original_lang_code': u'es',
u'target_lang_code': u'en'}}
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetTranslateInfo',
'tab_index': tab_index
}
return self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
def ClickTranslateBarTranslate(self, tab_index=0, window_index=0):
"""If the translate bar is showing, clicks the 'Translate' button on the
bar. This will show the 'this page has been translated...' infobar.
Args:
tab_index: The index of the tab, default is 0.
window_index: The index of the window, default is 0.
Returns:
True if the translation was successful or false if there was an error.
Note that an error shouldn't necessarily mean a failed test - retry the
call on error.
Raises:
pyauto_errors.JSONInterfaceError if the automation returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'SelectTranslateOption',
'tab_index': tab_index,
'option': 'translate_page'
}
return self._GetResultFromJSONRequest(
cmd_dict, windex=window_index)['translation_success']
def RevertPageTranslation(self, tab_index=0, window_index=0):
"""Select the 'Show original' button on the 'this page has been
translated...' infobar. This will remove the infobar and revert the
page translation.
Args:
tab_index: The index of the tab, default is 0.
window_index: The index of the window, default is 0.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'SelectTranslateOption',
'tab_index': tab_index,
'option': 'revert_translation'
}
self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
def ChangeTranslateToLanguage(self, new_language, tab_index=0,
window_index=0):
"""Set the target language to be a new language.
This is equivalent to selecting a different language from the 'to'
drop-down menu on the translate bar. If the page was already translated
before calling this function, this will trigger a re-translate to the
new language.
Args:
new_language: The new target language. The string should be equivalent
to the text seen in the translate bar options.
Example: 'English'.
tab_index: The tab index - default is 0.
window_index: The window index - default is 0.
Returns:
False, if a new translation was triggered and the translation failed.
True on success.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'SelectTranslateOption',
'tab_index': tab_index,
'option': 'set_target_language',
'target_language': new_language
}
return self._GetResultFromJSONRequest(
cmd_dict, windex=window_index)['translation_success']
def SelectTranslateOption(self, option, tab_index=0, window_index=0):
"""Selects one of the options in the drop-down menu for the translate bar.
Args:
option: One of 'never_translate_language', 'never_translate_site',
'toggle_always_translate', 'decline_translation',
'click_never_translate_lang_button', or
'click_always_translate_lang_button'. See notes on each below.
tab_index: The index of the tab, default is 0.
window_index: The index of the window, default is 0.
*Notes*
never_translate_language: Selecting this means that no sites in this
language will be translated. This dismisses the infobar.
never_translate_site: Selecting this means that this site will never be
translated, regardless of the language. This dismisses the infobar.
toggle_always_translate: This does not dismiss the infobar or translate the
page. See ClickTranslateBarTranslate and PerformActionOnInfobar to do
those. If a language is selected to be always translated, then whenever
the user visits a page with that language, the infobar will show the
'This page has been translated...' message.
decline_translation: Equivalent to selecting 'Nope' on the translate bar.
click_never_translate_lang_button: This button appears when the user has
declined translation of this language several times. Selecting it causes
the language to never be translated. Look at GetTranslateInfo to
determine if the button is showing.
click_always_translate_lang_button: This button appears when the user has
accepted translation of this language several times. Selecting it causes
the language to always be translated. Look at GetTranslateInfo to
determine if the button is showing.
Raises:
pyauto_errors.JSONInterfaceError if the automation returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'SelectTranslateOption',
'option': option,
'tab_index': tab_index
}
self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
def WaitUntilTranslateComplete(self, tab_index=0, window_index=0):
"""Waits until an attempted translation has finished.
This should be called after navigating to a page that should be translated
automatically (because always-translate is on for its language). It does not
need to be called after 'ClickTranslateBarTranslate'.
Do not call this function if you are not expecting a page translation - it
will hang. If you call it when there is no translate bar, it will return
False.
Args:
tab_index: The tab index, default is 0.
window_index: The window index, default is 0.
Returns:
True if the translation was successful, False if there was an error.
"""
# The 'WaitUntilTranslateComplete' automation hook is not used here;
# instead we poll GetTranslateInfo until the page reports translated.
# TODO(phajdan.jr): We need a solid automation infrastructure to handle
# these cases. See crbug.com/53647.
return self.WaitUntil(
lambda tab_index, window_index: self.GetTranslateInfo(
tab_index=tab_index, window_index=window_index)['page_translated'],
args=[tab_index, window_index])
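# Translate-flow sketch tying the helpers above together, for a page in a
# foreign language (URL illustrative):
#
#   self.NavigateToURL('http://es.wikipedia.org/')
#   if self.GetTranslateInfo()['can_translate_page']:
#     self.assertTrue(self.ClickTranslateBarTranslate())
#     self.RevertPageTranslation()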
def InstallExtension(self, extension_path, with_ui=False, windex=0):
"""Installs an extension from the given path.
The path must be absolute and may be a crx file or an unpacked extension
directory. Returns the extension ID if successfully installed and loaded.
Otherwise, throws an exception. The extension must not already be installed.
Args:
extension_path: The absolute path to the extension to install. If the
extension is packed, it must have a .crx extension.
with_ui: Whether the extension install confirmation UI should be shown.
windex: Integer index of the browser window to use; defaults to 0
(first window).
Returns:
The ID of the installed extension.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'InstallExtension',
'path': extension_path,
'with_ui': with_ui,
'windex': windex,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)['id']
def GetExtensionsInfo(self, windex=0):
"""Returns information about all installed extensions.
Args:
windex: Integer index of the browser window to use; defaults to 0
(first window).
Returns:
A list of dictionaries representing each of the installed extensions.
Example:
[ { u'api_permissions': [u'bookmarks', u'experimental', u'tabs'],
u'background_url': u'',
u'description': u'Bookmark Manager',
u'effective_host_permissions': [u'chrome://favicon/*',
u'chrome://resources/*'],
u'host_permissions': [u'chrome://favicon/*', u'chrome://resources/*'],
u'id': u'eemcgdkfndhakfknompkggombfjjjeno',
u'is_component': True,
u'is_internal': False,
u'name': u'Bookmark Manager',
u'options_url': u'',
u'public_key': u'MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDQcByy+eN9jza\
zWF/DPn7NW47sW7lgmpk6eKc0BQM18q8hvEM3zNm2n7HkJv/R6f\
U+X5mtqkDuKvq5skF6qqUF4oEyaleWDFhd1xFwV7JV+/DU7bZ00\
w2+6gzqsabkerFpoP33ZRIw7OviJenP0c0uWqDWF8EGSyMhB3tx\
qhOtiQIDAQAB',
u'version': u'0.1' },
{ u'api_permissions': [...],
u'background_url': u'chrome-extension://\
lkdedmbpkaiahjjibfdmpoefffnbdkli/\
background.html',
u'description': u'Extension which lets you read your Facebook news \
feed and wall. You can also post status updates.',
u'effective_host_permissions': [...],
u'host_permissions': [...],
u'id': u'lkdedmbpkaiahjjibfdmpoefffnbdkli',
u'name': u'Facebook for Google Chrome',
u'options_url': u'',
u'public_key': u'...',
u'version': u'2.0.9',
u'is_enabled': True,
u'allowed_in_incognito': True} ]
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetExtensionsInfo',
'windex': windex,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)['extensions']
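# Install/inspect/uninstall round trip with the extension helpers above;
# the .crx path is hypothetical.
#
#   ext_id = self.InstallExtension('/path/to/extension.crx')
#   names = [e['name'] for e in self.GetExtensionsInfo()]
#   self.assertTrue(self.UninstallExtensionById(ext_id))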
def UninstallExtensionById(self, id, windex=0):
"""Uninstall the extension with the given id.
Args:
id: The string id of the extension.
windex: Integer index of the browser window to use; defaults to 0
(first window).
Returns:
True, if the extension was successfully uninstalled, or
False, otherwise.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'UninstallExtensionById',
'id': id,
'windex': windex,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)['success']
def SetExtensionStateById(self, id, enable, allow_in_incognito, windex=0):
"""Set extension state: enable/disable, allow/disallow in incognito mode.
Args:
id: The string id of the extension.
enable: A boolean, enable extension.
allow_in_incognito: A boolean, allow extension in incognito.
windex: Integer index of the browser window to use; defaults to 0
(first window).
"""
cmd_dict = { # Prepare command for the json interface
'command': 'SetExtensionStateById',
'id': id,
'enable': enable,
'allow_in_incognito': allow_in_incognito,
'windex': windex,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def TriggerPageActionById(self, id, tab_index=0, windex=0):
"""Trigger page action asynchronously in the active tab.
The page action icon must be displayed before invoking this function.
Args:
id: The string id of the extension.
tab_index: Integer index of the tab to use; defaults to 0 (first tab).
windex: Integer index of the browser window to use; defaults to 0
(first window).
"""
cmd_dict = { # Prepare command for the json interface
'command': 'TriggerPageActionById',
'id': id,
'windex': windex,
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def TriggerBrowserActionById(self, id, tab_index=0, windex=0):
"""Trigger browser action asynchronously in the active tab.
Args:
id: The string id of the extension.
tab_index: Integer index of the tab to use; defaults to 0 (first tab).
windex: Integer index of the browser window to use; defaults to 0
(first window).
"""
cmd_dict = { # Prepare command for the json interface
'command': 'TriggerBrowserActionById',
'id': id,
'windex': windex,
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def UpdateExtensionsNow(self, windex=0):
"""Auto-updates installed extensions.
Waits until all extensions are updated, loaded, and ready for use.
This is equivalent to clicking the "Update extensions now" button on the
chrome://extensions page.
Args:
windex: Integer index of the browser window to use; defaults to 0
(first window).
Raises:
pyauto_errors.JSONInterfaceError if the automation returns an error.
"""
cmd_dict = { # Prepare command for the json interface.
'command': 'UpdateExtensionsNow',
'windex': windex,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def WaitUntilExtensionViewLoaded(self, name=None, extension_id=None,
url=None, view_type=None):
"""Wait for a loaded extension view matching all the given properties.
If no matching extension views are found, wait for one to be loaded.
If more than one extension view matches, one of them is returned arbitrarily.
Uses WaitUntil so timeout is capped by automation timeout.
Refer to extension_view dictionary returned in GetBrowserInfo()
for sample input/output values.
Args:
name: (optional) Name of the extension.
extension_id: (optional) ID of the extension.
url: (optional) URL of the extension view.
view_type: (optional) Type of the extension view.
['EXTENSION_BACKGROUND_PAGE'|'EXTENSION_POPUP'|'EXTENSION_INFOBAR'|
'EXTENSION_DIALOG']
Returns:
The 'view' property of the extension view.
None, if no view loaded.
Raises:
pyauto_errors.JSONInterfaceError if the automation returns an error.
"""
def _GetExtensionViewLoaded():
extension_views = self.GetBrowserInfo()['extension_views']
for extension_view in extension_views:
if ((name and name != extension_view['name']) or
(extension_id and extension_id != extension_view['extension_id']) or
(url and url != extension_view['url']) or
(view_type and view_type != extension_view['view_type'])):
continue
if extension_view['loaded']:
return extension_view['view']
return False
if self.WaitUntil(_GetExtensionViewLoaded):
return _GetExtensionViewLoaded()
return None
def WaitUntilExtensionViewClosed(self, view):
"""Wait for the given extension view to to be closed.
Uses WaitUntil so timeout is capped by automation timeout.
Refer to extension_view dictionary returned by GetBrowserInfo()
for sample input value.
Args:
view: 'view' property of extension view.
Raises:
pyauto_errors.JSONInterfaceError if the automation returns an error.
"""
def _IsExtensionViewClosed():
extension_views = self.GetBrowserInfo()['extension_views']
for extension_view in extension_views:
if view == extension_view['view']:
return False
return True
return self.WaitUntil(_IsExtensionViewClosed)
def FillAutofillProfile(self, profiles=None, credit_cards=None,
tab_index=0, window_index=0):
"""Set the autofill profile to contain the given profiles and credit cards.
If profiles or credit_cards are specified, they will overwrite existing
profiles and credit cards. To update profiles and credit cards, get the
existing ones with the GetAutofillProfile function and then append new
profiles to the list and call this function.
Autofill profiles (not credit cards) support multiple values for some of the
fields. To account for this, all values in a profile must be specified as
a list of strings. If a form field only has a single value associated with
it, that value must still be specified as a list containing a single string.
Args:
profiles: (optional) a list of dictionaries representing each profile to
add. Example:
[{
'NAME_FIRST': ['Bob',],
'NAME_LAST': ['Smith',],
'ADDRESS_HOME_ZIP': ['94043',],
},
{
'EMAIL_ADDRESS': ['[email protected]',],
'COMPANY_NAME': ['Company X',],
}]
Other possible keys are:
'NAME_FIRST', 'NAME_MIDDLE', 'NAME_LAST', 'EMAIL_ADDRESS',
'COMPANY_NAME', 'ADDRESS_HOME_LINE1', 'ADDRESS_HOME_LINE2',
'ADDRESS_HOME_CITY', 'ADDRESS_HOME_STATE', 'ADDRESS_HOME_ZIP',
'ADDRESS_HOME_COUNTRY', 'PHONE_HOME_WHOLE_NUMBER'
credit_cards: (optional) a list of dictionaries representing each credit
card to add. Example:
[{
'CREDIT_CARD_NAME': 'Bob C. Smith',
'CREDIT_CARD_NUMBER': '5555555555554444',
'CREDIT_CARD_EXP_MONTH': '12',
'CREDIT_CARD_EXP_4_DIGIT_YEAR': '2011'
},
{
'CREDIT_CARD_NAME': 'Bob C. Smith',
'CREDIT_CARD_NUMBER': '4111111111111111',
'CREDIT_CARD_TYPE': 'Visa'
}]
Other possible keys are:
'CREDIT_CARD_NAME', 'CREDIT_CARD_NUMBER', 'CREDIT_CARD_EXP_MONTH',
'CREDIT_CARD_EXP_4_DIGIT_YEAR'
All values must be strings.
tab_index: tab index, defaults to 0.
window_index: window index, defaults to 0.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'FillAutofillProfile',
'tab_index': tab_index,
'profiles': profiles,
'credit_cards': credit_cards
}
self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
def GetAutofillProfile(self, tab_index=0, window_index=0):
"""Returns all autofill profile and credit card information.
The format of the returned dictionary is described above in
FillAutofillProfile. The general format is:
{'profiles': [list of profile dictionaries as described above],
'credit_cards': [list of credit card dictionaries as described above]}
Args:
tab_index: tab index, defaults to 0.
window_index: window index, defaults to 0.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetAutofillProfile',
'tab_index': tab_index
}
return self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
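# Round-trip sketch for the two profile helpers above: write one profile,
# then read it back. Keys follow the list documented in FillAutofillProfile.
#
#   profile = {'NAME_FIRST': ['Bob'], 'NAME_LAST': ['Smith'],
#              'ADDRESS_HOME_ZIP': ['94043']}
#   self.FillAutofillProfile(profiles=[profile])
#   saved = self.GetAutofillProfile()['profiles']
#   self.assertEqual(['Bob'], saved[0]['NAME_FIRST'])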
def SubmitAutofillForm(self, js, frame_xpath='', tab_index=0, windex=0):
"""Submits a webpage autofill form and waits for autofill to be updated.
This function should be called when submitting autofill profiles via
webpage forms. It waits until the autofill data has been updated internally
before returning.
Args:
js: The string Javascript code that can be injected into the given webpage
to submit an autofill form. This Javascript MUST submit the form.
frame_xpath: The string xpath for the frame in which to inject javascript.
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to use; defaults to 0
(first window).
"""
cmd_dict = { # Prepare command for the json interface.
'command': 'SubmitAutofillForm',
'javascript': js,
'frame_xpath': frame_xpath,
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def AutofillTriggerSuggestions(self, field_id=None, tab_index=0, windex=0):
"""Focuses a webpage form field and triggers the autofill popup in it.
This function focuses the specified input field in a webpage form, then
causes the autofill popup to appear in that field. The underlying
automation hook sends a "down arrow" keypress event to trigger the autofill
popup. This function waits until the popup is displayed before returning.
Args:
field_id: The string ID of the webpage form field to focus. Can be
None (the default), in which case nothing is focused. This
can be useful if the field has already been focused by other
means.
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to work on; defaults to 0
(first window).
Returns:
True, if no errors were encountered, or False otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# Focus the field with the specified ID, if necessary.
if field_id:
if not self.JavascriptFocusElementById(field_id, tab_index, windex):
return False
# Cause the autofill popup to be shown in the focused form field.
cmd_dict = {
'command': 'AutofillTriggerSuggestions',
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
return True
def AutofillHighlightSuggestion(self, direction, tab_index=0, windex=0):
"""Highlights the previous or next suggestion in an existing autofill popup.
This function assumes that an existing autofill popup is currently displayed
in a webpage form. The underlying automation hook sends either a
"down arrow" or an "up arrow" keypress event to cause the next or previous
suggestion to be highlighted, respectively. This function waits until
autofill displays a preview of the form's filled state before returning.
Use AutofillTriggerSuggestions() to trigger the autofill popup before
calling this function. Use AutofillAcceptSelection() after calling this
function to accept a selection.
Args:
direction: The string direction in which to highlight an autofill
suggestion. Must be either "up" or "down".
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to work on; defaults to 0
(first window).
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
assert direction in ('up', 'down')
cmd_dict = {
'command': 'AutofillHighlightSuggestion',
'direction': direction,
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def AutofillAcceptSelection(self, tab_index=0, windex=0):
"""Accepts the current selection in an already-displayed autofill popup.
This function assumes that a profile is already highlighted in an existing
autofill popup in a webpage form. The underlying automation hook sends a
"return" keypress event to cause the highlighted profile to be accepted.
This function waits for the webpage form to be filled in with autofill data
before returning. This function does not submit the webpage form.
Args:
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to work on; defaults to 0
(first window).
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'AutofillAcceptSelection',
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def AutofillPopulateForm(self, field_id, profile_index=0, tab_index=0,
windex=0):
"""Populates a webpage form using autofill data and keypress events.
This function focuses the specified input field in the form, and then
sends keypress events to the associated tab to cause the form to be
populated with information from the requested autofill profile.
Args:
field_id: The string ID of the webpage form field to focus for autofill
purposes.
profile_index: The index of the profile in the autofill popup to use to
populate the form; defaults to 0 (first profile).
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to work on; defaults to 0
(first window).
Returns:
True, if the webpage form is populated successfully, or False if not.
Raises:
pyauto_errors.JSONInterfaceError if an automation call returns an error.
"""
if not self.AutofillTriggerSuggestions(field_id, tab_index, windex):
return False
for _ in range(profile_index + 1):
self.AutofillHighlightSuggestion('down', tab_index, windex)
self.AutofillAcceptSelection(tab_index, windex)
return True
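# The keypress-driven flow that AutofillPopulateForm wraps, spelled out
# with the lower-level helpers above ('name_first' is a hypothetical form
# field id):
#
#   self.AutofillTriggerSuggestions(field_id='name_first')
#   self.AutofillHighlightSuggestion('down')  # highlight the first profile
#   self.AutofillAcceptSelection()            # fill the form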
def AddHistoryItem(self, item):
"""Forge a history item for Chrome.
Args:
item: a python dictionary representing the history item. Example:
{
# URL is the only mandatory item.
'url': 'http://news.google.com',
# Title is optional.
'title': 'Google News',
# Time is optional; if not set, assume "now". Time is in
# seconds since the Epoch. The python construct to get "Now"
# in the right scale is "time.time()". Can be float or int.
'time': 1271781612
}
"""
cmd_dict = { # Prepare command for the json interface
'command': 'AddHistoryItem',
'item': item
}
if 'url' not in item:
raise JSONInterfaceError('must specify url')
self._GetResultFromJSONRequest(cmd_dict)
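# Sketch: forge a history entry and find it again via GetHistoryInfo();
# the URL and title are illustrative.
#
#   import time
#   self.AddHistoryItem({'url': 'http://news.google.com',
#                        'title': 'Google News',
#                        'time': int(time.time())})
#   history = self.GetHistoryInfo('Google News')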
def GetPluginsInfo(self):
"""Return info about plugins.
This is the info available from about:plugins
Returns:
an instance of plugins_info.PluginsInfo
"""
return plugins_info.PluginsInfo(
self._SendJSONRequest(0, json.dumps({'command': 'GetPluginsInfo'}),
self.action_max_timeout_ms()))
def EnablePlugin(self, path):
"""Enable the plugin at the given path.
Use GetPluginsInfo() to fetch path info about a plugin.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'EnablePlugin',
'path': path,
}
self._GetResultFromJSONRequest(cmd_dict)
def DisablePlugin(self, path):
"""Disable the plugin at the given path.
Use GetPluginsInfo() to fetch path info about a plugin.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'DisablePlugin',
'path': path,
}
self._GetResultFromJSONRequest(cmd_dict)
def GetTabContents(self, tab_index=0, window_index=0):
"""Get the html contents of a tab (a la "view source").
As an implementation detail, this saves the html in a file, reads
the file into a buffer, then deletes it.
Args:
tab_index: tab index, defaults to 0.
window_index: window index, defaults to 0.
Returns:
html content of a page as a string.
"""
tempdir = tempfile.mkdtemp()
filename = os.path.join(tempdir, 'content.html')
cmd_dict = { # Prepare command for the json interface
'command': 'SaveTabContents',
'tab_index': tab_index,
'filename': filename
}
self._GetResultFromJSONRequest(cmd_dict, windex=window_index)
try:
f = open(filename)
all_data = f.read()
f.close()
return all_data
finally:
shutil.rmtree(tempdir, ignore_errors=True)
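# GetTabContents() returns the raw page source, so content checks reduce to
# plain string assertions (URL illustrative):
#
#   self.NavigateToURL('http://www.example.com')
#   self.assertTrue('<title>' in self.GetTabContents())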
def ImportSettings(self, import_from, first_run, import_items):
"""Import the specified import items from the specified browser.
Implements the features available in the "Import Settings" part of the
first-run UI dialog.
Args:
import_from: A string indicating which browser to import from. Possible
strings (depending on which browsers are installed on the
machine) are: 'Mozilla Firefox', 'Google Toolbar',
'Microsoft Internet Explorer', 'Safari'
first_run: A boolean indicating whether this is the first run of
the browser.
If it is not the first run then:
1) Bookmarks are only imported to the bookmarks bar if there
aren't already bookmarks.
2) The bookmark bar is shown.
import_items: A list of strings indicating which items to import.
Strings that can be in the list are:
HISTORY, FAVORITES, PASSWORDS, SEARCH_ENGINES, HOME_PAGE,
ALL (note: COOKIES is not supported by the browser yet)
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'ImportSettings',
'import_from': import_from,
'first_run': first_run,
'import_items': import_items
}
return self._GetResultFromJSONRequest(cmd_dict)
def ClearBrowsingData(self, to_remove, time_period):
"""Clear the specified browsing data. Implements the features available in
the "ClearBrowsingData" UI.
Args:
to_remove: a list of strings indicating which types of browsing data
should be removed. Strings that can be in the list are:
HISTORY, DOWNLOADS, COOKIES, PASSWORDS, FORM_DATA, CACHE
time_period: a string indicating the time period for the removal.
Possible strings are:
LAST_HOUR, LAST_DAY, LAST_WEEK, FOUR_WEEKS, EVERYTHING
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'ClearBrowsingData',
'to_remove': to_remove,
'time_period': time_period
}
return self._GetResultFromJSONRequest(cmd_dict)
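# Sketch: wipe history and cache from the last day, using the string
# constants documented above.
#
#   self.ClearBrowsingData(['HISTORY', 'CACHE'], 'LAST_DAY')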
def AddSavedPassword(self, password_dict, windex=0):
"""Adds the given username-password combination to the saved passwords.
Args:
password_dict: a dictionary that represents a password. Example:
{ 'username_value': '[email protected]', # Required
'password_value': 'test.password', # Required
'signon_realm': 'https://www.example.com/', # Required
'time': 1279317810.0, # Can get from time.time()
'origin_url': 'https://www.example.com/login',
'username_element': 'username', # The HTML element
'password_element': 'password', # The HTML element
'submit_element': 'submit', # The HTML element
'action_target': 'https://www.example.com/login/',
'blacklist': False }
windex: window index; defaults to 0 (first window).
*Blacklist notes* To blacklist a site, add a blacklist password with the
following dictionary items: origin_url, signon_realm, username_element,
password_element, action_target, and 'blacklist': True. Then all sites that
have password forms matching those are blacklisted.
Returns:
True if adding the password succeeded, False otherwise. In incognito
mode, adding the password should fail.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'AddSavedPassword',
'password': password_dict
}
return self._GetResultFromJSONRequest(
cmd_dict, windex=windex)['password_added']
def RemoveSavedPassword(self, password_dict, windex=0):
"""Removes the password matching the provided password dictionary.
Args:
password_dict: A dictionary that represents a password.
For an example, see the dictionary in AddSavedPassword.
windex: The window index, default is 0 (first window).
"""
cmd_dict = { # Prepare command for the json interface
'command': 'RemoveSavedPassword',
'password': password_dict
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def GetSavedPasswords(self):
"""Return the passwords currently saved.
Returns:
A list of dictionaries representing each password. For an example
dictionary see AddSavedPassword documentation. The overall structure will
be:
[ {password1 dictionary}, {password2 dictionary} ]
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetSavedPasswords'
}
return self._GetResultFromJSONRequest(cmd_dict)['passwords']
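# Saved-password round trip with the helpers above; the realm and
# credentials are illustrative (see AddSavedPassword for the full format).
#
#   pw = {'username_value': '[email protected]',
#         'password_value': 'test.password',
#         'signon_realm': 'https://www.example.com/'}
#   self.assertTrue(self.AddSavedPassword(pw))
#   realms = [p['signon_realm'] for p in self.GetSavedPasswords()]
#   self.assertTrue(pw['signon_realm'] in realms)
#   self.RemoveSavedPassword(pw)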
def GetBlockedPopupsInfo(self, tab_index=0, windex=0):
"""Get info about blocked popups in a tab.
Args:
tab_index: 0-based tab index. Default: 0
windex: 0-based window index. Default: 0
Returns:
[a list of property dictionaries for each blocked popup]
Property dictionary contains: title, url
"""
cmd_dict = {
'command': 'GetBlockedPopupsInfo',
'tab_index': tab_index,
}
return self._GetResultFromJSONRequest(cmd_dict,
windex=windex)['blocked_popups']
def UnblockAndLaunchBlockedPopup(self, popup_index, tab_index=0, windex=0):
"""Unblock/launch a poup at the given index.
This is equivalent to clicking on a blocked popup in the UI available
from the omnibox.
"""
cmd_dict = {
'command': 'UnblockAndLaunchBlockedPopup',
'popup_index': popup_index,
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def SetTheme(self, crx_file_path):
"""Installs the given theme synchronously.
A theme file is a file with a .crx suffix, like an extension. The theme
file must be specified with an absolute path. This method call waits until
the theme is installed and will trigger the "theme installed" infobar.
If the install is unsuccessful, an exception is raised.
Uses InstallExtension().
Returns:
The ID of the installed theme.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
return self.InstallExtension(crx_file_path, True)
def WaitUntilDownloadedThemeSet(self, theme_name):
"""Waits until the theme has been set.
This should not be called after SetTheme(). It only needs to be called after
downloading a theme file (which will automatically set the theme).
Uses WaitUntil so timeout is capped by automation timeout.
Args:
theme_name: The name that the theme will have once it is installed.
"""
def _ReturnThemeSet(name):
theme_info = self.GetThemeInfo()
return theme_info and theme_info['name'] == name
return self.WaitUntil(_ReturnThemeSet, args=[theme_name])
def ClearTheme(self):
"""Clear the theme. Resets to default.
Has no effect when the theme is already the default one.
This is a blocking call.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ClearTheme',
}
self._GetResultFromJSONRequest(cmd_dict)
def GetThemeInfo(self):
"""Get info about theme.
This includes info about the theme name, its colors, images, etc.
Returns:
a dictionary containing info about the theme.
empty dictionary if no theme has been applied (default theme).
SAMPLE:
{ u'colors': { u'frame': [71, 105, 91],
u'ntp_link': [36, 70, 0],
u'ntp_section': [207, 221, 192],
u'ntp_text': [20, 40, 0],
u'toolbar': [207, 221, 192]},
u'images': { u'theme_frame': u'images/theme_frame_camo.png',
u'theme_ntp_background': u'images/theme_ntp_background.png',
u'theme_toolbar': u'images/theme_toolbar_camo.png'},
u'name': u'camo theme',
u'tints': {u'buttons': [0.33000000000000002, 0.5, 0.46999999999999997]}}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'GetThemeInfo',
}
return self._GetResultFromJSONRequest(cmd_dict)
def GetActiveNotifications(self):
"""Gets a list of the currently active/shown HTML5 notifications.
Returns:
a list containing info about each active notification, with the
first item in the list being the notification on the bottom of the
notification stack. The 'content_url' key can refer to a URL or a data
URI. The 'pid' key-value pair may be invalid if the notification is
closing.
SAMPLE:
[ { u'content_url': u'data:text/html;charset=utf-8,%3C!DOCTYPE%l%3E%0Atm...',
u'display_source': 'www.corp.google.com',
u'origin_url': 'http://www.corp.google.com/',
u'pid': 8505},
{ u'content_url': 'http://www.gmail.com/special_notification.html',
u'display_source': 'www.gmail.com',
u'origin_url': 'http://www.gmail.com/',
u'pid': 9291}]
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
return [x for x in self.GetAllNotifications() if 'pid' in x]
def GetAllNotifications(self):
"""Gets a list of all active and queued HTML5 notifications.
An active notification is one that is currently shown to the user. Chrome's
notification system will limit the number of notifications shown (currently
by only allowing a certain percentage of the screen to be taken up by them).
A notification will be queued if there are too many active notifications.
Once other notifications are closed, another will be shown from the queue.
Returns:
a list containing info about each notification, with the first
item in the list being the notification on the bottom of the
notification stack. The 'content_url' key can refer to a URL or a data
URI. The 'pid' key-value pair will only be present for active
notifications.
SAMPLE:
[ { u'content_url': u'data:text/html;charset=utf-8,%3C!DOCTYPE%l%3E%0Atm...',
u'display_source': 'www.corp.google.com',
u'origin_url': 'http://www.corp.google.com/',
u'pid': 8505},
{ u'content_url': 'http://www.gmail.com/special_notification.html',
u'display_source': 'www.gmail.com',
u'origin_url': 'http://www.gmail.com/'}]
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'GetAllNotifications',
}
return self._GetResultFromJSONRequest(cmd_dict)['notifications']
def CloseNotification(self, index):
"""Closes the active HTML5 notification at the given index.
Args:
index: the index of the notification to close. 0 refers to the
notification on the bottom of the notification stack.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'CloseNotification',
'index': index,
}
return self._GetResultFromJSONRequest(cmd_dict)
def WaitForNotificationCount(self, count):
"""Waits for the number of active HTML5 notifications to reach the given
count.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'WaitForNotificationCount',
'count': count,
}
self._GetResultFromJSONRequest(cmd_dict)
def FindInPage(self, search_string, forward=True,
match_case=False, find_next=False,
tab_index=0, windex=0, timeout=-1):
"""Find the match count for the given search string and search parameters.
This is equivalent to using the find box.
Args:
search_string: The string to find on the page.
forward: Boolean to set if the search direction is forward or backwards.
match_case: Boolean to set for case sensitive search.
find_next: Boolean to set to continue the search or start from beginning.
tab_index: The tab index, default is 0.
windex: The window index, default is 0.
timeout: request timeout (in milliseconds), default is -1.
Returns:
a dictionary with the match count and the bounds of the active match
SAMPLE:
{ u'match_count': 10,
u'match_left': 100,
u'match_top': 100,
u'match_right': 200,
u'match_bottom': 200}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'FindInPage',
'tab_index' : tab_index,
'search_string' : search_string,
'forward' : forward,
'match_case' : match_case,
'find_next' : find_next,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex,
timeout=timeout)
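# Find-in-page sketch: count case-sensitive matches on the current page,
# then step to the next match (search string illustrative):
#
#   n = self.FindInPage('Example', match_case=True)['match_count']
#   if n > 1:
#     self.FindInPage('Example', match_case=True, find_next=True)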
def AddDomEventObserver(self, event_name='', automation_id=-1,
recurring=False):
"""Adds a DomEventObserver associated with the AutomationEventQueue.
An app raises a matching event in Javascript by calling:
window.domAutomationController.sendWithId(automation_id, event_name)
Args:
event_name: The event name to watch for. By default an event is raised
for any message.
automation_id: The Automation Id of the sent message. By default all
messages sent from the window.domAutomationController are
observed. Note that other PyAuto functions also send
messages through window.domAutomationController with
arbitrary Automation Ids and they will be observed.
recurring: If False the observer will be removed after it generates one
event, otherwise it will continue observing and generating
events until explicitly removed with RemoveEventObserver(id).
Returns:
The id of the created observer, which can be used with GetNextEvent(id)
and RemoveEventObserver(id).
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'AddDomEventObserver',
'event_name': event_name,
'automation_id': automation_id,
'recurring': recurring,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)['observer_id']
def AddDomMutationObserver(self, mutation_type, xpath,
attribute='textContent', expected_value=None,
automation_id=44444,
exec_js=None, **kwargs):
"""Sets up an event observer watching for a specific DOM mutation.
Creates an observer that raises an event when a mutation of the given type
occurs on a DOM node specified by |selector|.
Args:
mutation_type: One of 'add', 'remove', 'change', or 'exists'.
xpath: An xpath specifying the DOM node to watch. The node must already
exist if |mutation_type| is 'change'.
attribute: Attribute to match |expected_value| against, if given. Defaults
to 'textContent'.
expected_value: Optional regular expression to match against the node's
|attribute| after the mutation. Defaults to None.
automation_id: The automation_id used to route the observer javascript
messages. Defaults to 44444.
exec_js: A callable of the form f(self, js, **kwargs) used to inject the
MutationObserver javascript. Defaults to None, which uses
PyUITest.ExecuteJavascript.
Any additional keyword arguments are passed on to ExecuteJavascript and
can be used to select the tab where the DOM MutationObserver is created.
Returns:
The id of the created observer, which can be used with GetNextEvent(id)
and RemoveEventObserver(id).
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
pyauto_errors.JavascriptRuntimeError if the injected javascript
MutationObserver returns an error.
"""
assert mutation_type in ('add', 'remove', 'change', 'exists'), \
'Unexpected value "%s" for mutation_type.' % mutation_type
cmd_dict = {
'command': 'AddDomEventObserver',
'event_name': '__dom_mutation_observer__:$(id)',
'automation_id': automation_id,
'recurring': False,
}
observer_id = (
self._GetResultFromJSONRequest(cmd_dict, windex=None)['observer_id'])
expected_string = ('null' if expected_value is None else '"%s"' %
expected_value.replace('"', r'\"'))
jsfile = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'dom_mutation_observer.js')
with open(jsfile, 'r') as f:
js = ('(' + f.read() + ')(%d, %d, "%s", "%s", "%s", %s);' %
(automation_id, observer_id, mutation_type,
xpath.replace('"', r'\"'), attribute, expected_string))
exec_js = exec_js or PyUITest.ExecuteJavascript
jsreturn = exec_js(self, js, **kwargs)
if jsreturn != 'success':
self.RemoveEventObserver(observer_id)
raise pyauto_errors.JavascriptRuntimeError(jsreturn)
return observer_id
def WaitForDomNode(self, xpath, attribute='textContent',
expected_value=None, exec_js=None, timeout=-1, **kwargs):
"""Waits until a node specified by an xpath exists in the DOM.
NOTE: This does NOT poll. It returns as soon as the node appears, or
immediately if the node already exists.
Args:
xpath: An xpath specifying the DOM node to watch.
attribute: Attribute to match |expected_value| against, if given. Defaults
to 'textContent'.
expected_value: Optional regular expression to match against the node's
|attribute|. Defaults to None.
exec_js: A callable of the form f(self, js, **kwargs) used to inject the
MutationObserver javascript. Defaults to None, which uses
PyUITest.ExecuteJavascript.
timeout: Time to wait for the node to exist before raising an exception,
defaults to the default automation timeout.
Any additional keyword arguments are passed on to ExecuteJavascript and
can be used to select the tab where the DOM MutationObserver is created.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
pyauto_errors.JavascriptRuntimeError if the injected javascript
MutationObserver returns an error.
"""
observer_id = self.AddDomMutationObserver('exists', xpath, attribute,
expected_value, exec_js=exec_js,
**kwargs)
self.GetNextEvent(observer_id, timeout=timeout)
def GetNextEvent(self, observer_id=-1, blocking=True, timeout=-1):
"""Waits for an observed event to occur.
The returned event is removed from the Event Queue. If there is already a
matching event in the queue it is returned immediately, otherwise the call
blocks until a matching event occurs. If blocking is disabled and no
matching event is in the queue this function will immediately return None.
Args:
observer_id: The id of the observer to wait for, matches any event by
default.
blocking: If True waits until there is a matching event in the queue,
if False and there is no event waiting in the queue returns None
immediately.
timeout: Time to wait for a matching event, defaults to the default
automation timeout.
Returns:
Event response dictionary, or None if blocking is disabled and there is no
matching event in the queue.
SAMPLE:
{ 'observer_id': 1,
'name': 'login completed',
'type': 'raised_event'}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'GetNextEvent',
'observer_id' : observer_id,
'blocking' : blocking,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None,
timeout=timeout)
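# Event-queue sketch for the observer helpers above. A page script would
# normally raise the event itself via sendWithId (per AddDomEventObserver);
# injecting it with ExecuteJavascript here, with a trailing send('') so the
# injection call returns, is an illustrative assumption. The event name and
# automation id 4444 are made up.
#
#   obs_id = self.AddDomEventObserver('login done', automation_id=4444)
#   self.ExecuteJavascript(
#       'window.domAutomationController.sendWithId(4444, "login done");'
#       'window.domAutomationController.send("");')
#   event = self.GetNextEvent(obs_id)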
def RemoveEventObserver(self, observer_id):
"""Removes an Event Observer from the AutomationEventQueue.
Expects a valid observer_id.
Args:
observer_id: The id of the observer to remove.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'RemoveEventObserver',
'observer_id' : observer_id,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def ClearEventQueue(self):
"""Removes all events currently in the AutomationEventQueue.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ClearEventQueue',
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def WaitUntilNavigationCompletes(self, tab_index=0, windex=0):
"""Wait until the specified tab is done navigating.
It is safe to call ExecuteJavascript() as soon as the call returns. If
there is no outstanding navigation the call will return immediately.
Args:
tab_index: index of the tab.
windex: index of the window.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'WaitUntilNavigationCompletes',
'tab_index': tab_index,
'windex': windex,
}
return self._GetResultFromJSONRequest(cmd_dict)
def ExecuteJavascript(self, js, tab_index=0, windex=0, frame_xpath=''):
"""Executes a script in the specified frame of a tab.
By default, executes the script in the top frame of the first tab in the
first window. The invoked javascript function must send a result back via
the domAutomationController.send function, or this function will never
return.
Args:
js: script to be executed.
windex: index of the window.
tab_index: index of the tab.
frame_xpath: XPath of the frame to execute the script. Default is no
frame. Example: '//frames[1]'.
Returns:
a value that was sent back via the domAutomationController.send method
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ExecuteJavascript',
'javascript' : js,
'windex' : windex,
'tab_index' : tab_index,
'frame_xpath' : frame_xpath,
}
result = self._GetResultFromJSONRequest(cmd_dict)['result']
# Wrap result in an array before deserializing because valid JSON has an
# array or an object as the root.
json_string = '[' + result + ']'
return json.loads(json_string)[0]
def ExecuteJavascriptInRenderView(self, js, view, frame_xpath=''):
"""Executes a script in the specified frame of an render view.
The invoked javascript function must send a result back via the
domAutomationController.send function, or this function will never return.
Args:
js: script to be executed.
view: A dictionary representing a unique id for the render view, as
returned, for example, by
self.GetBrowserInfo()['extension_views'][]['view'].
Example:
{ 'render_process_id': 1,
'render_view_id' : 2}
frame_xpath: XPath of the frame to execute the script. Default is no
frame. Example:
'//frames[1]'
Returns:
a value that was sent back via the domAutomationController.send method
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ExecuteJavascriptInRenderView',
'javascript' : js,
'view' : view,
'frame_xpath' : frame_xpath,
}
result = self._GetResultFromJSONRequest(cmd_dict, windex=None)['result']
# Wrap result in an array before deserializing because valid JSON has an
# array or an object as the root.
json_string = '[' + result + ']'
return json.loads(json_string)[0]
def ExecuteJavascriptInOOBEWebUI(self, js, frame_xpath=''):
"""Executes a script in the specified frame of the OOBE WebUI.
By default, execute the script in the top frame of the OOBE window. This
also works for all OOBE pages, including the enterprise enrollment
screen and login page. The invoked javascript function must send a result
back via the domAutomationController.send function, or this function will
never return.
Args:
js: Script to be executed.
frame_xpath: XPath of the frame to execute the script. Default is no
frame. Example: '//frames[1]'
Returns:
A value that was sent back via the domAutomationController.send method.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ExecuteJavascriptInOOBEWebUI',
'javascript': js,
'frame_xpath': frame_xpath,
}
result = self._GetResultFromJSONRequest(cmd_dict, windex=None)['result']
# Wrap result in an array before deserializing because valid JSON has an
# array or an object as the root.
return json.loads('[' + result + ']')[0]
def GetDOMValue(self, expr, tab_index=0, windex=0, frame_xpath=''):
"""Executes a Javascript expression and returns the value.
This is a wrapper for ExecuteJavascript, eliminating the need to
explicitly call the domAutomationController.send function.
Args:
expr: expression value to be returned.
tab_index: index of the tab.
windex: index of the window.
frame_xpath: XPath of the frame to execute the script. Default is no
frame. Example: '//frames[1]'.
Returns:
a string that was sent back via the domAutomationController.send method.
"""
js = 'window.domAutomationController.send(%s);' % expr
return self.ExecuteJavascript(js, tab_index, windex, frame_xpath)
def CallJavascriptFunc(self, function, args=[], tab_index=0, windex=0):
"""Executes a script which calls a given javascript function.
The invoked javascript function must send a result back via the
domAutomationController.send function, or this function will never return.
Defaults to first tab in first window.
Args:
function: name of the function.
args: list of all the arguments to pass into the called function. These
should be able to be converted to a string using the |str| function.
tab_index: index of the tab within the given window.
windex: index of the window.
Returns:
a string that was sent back via the domAutomationController.send method
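Example (an illustrative sketch; myPageFunc is a hypothetical page
function that reports its result via domAutomationController.send):
result = self.CallJavascriptFunc('myPageFunc', ['some arg', 42])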
"""
converted_args = map(lambda arg: json.dumps(arg), args)
js = '%s(%s)' % (function, ', '.join(converted_args))
logging.debug('Executing javascript: %s', js)
return self.ExecuteJavascript(js, tab_index, windex)
def JavascriptFocusElementById(self, field_id, tab_index=0, windex=0):
"""Uses Javascript to focus an element with the given ID in a webpage.
Args:
field_id: The string ID of the webpage form field to focus.
tab_index: Integer index of the tab to work on; defaults to 0 (first tab).
windex: Integer index of the browser window to work on; defaults to 0
(first window).
Returns:
True, on success, or False on failure.
"""
focus_field_js = """
var field = document.getElementById("%s");
if (!field) {
window.domAutomationController.send("error");
} else {
field.focus();
window.domAutomationController.send("done");
}
""" % field_id
return self.ExecuteJavascript(focus_field_js, tab_index, windex) == 'done'
def SignInToSync(self, username, password):
"""Signs in to sync using the given username and password.
Args:
username: The account with which to sign in. Example: "[email protected]".
password: Password for the above account. Example: "pa$$w0rd".
Returns:
True, on success.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'SignInToSync',
'username': username,
'password': password,
}
return self._GetResultFromJSONRequest(cmd_dict)['success']
def GetSyncInfo(self):
"""Returns info about sync.
Returns:
A dictionary of info about sync.
Example dictionaries:
{u'summary': u'SYNC DISABLED'}
{ u'authenticated': True,
u'last synced': u'Just now',
u'summary': u'READY',
u'sync url': u'clients4.google.com',
u'updates received': 42,
u'synced datatypes': [ u'Bookmarks',
u'Preferences',
u'Passwords',
u'Autofill',
u'Themes',
u'Extensions',
u'Apps']}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'GetSyncInfo',
}
return self._GetResultFromJSONRequest(cmd_dict)['sync_info']
def AwaitSyncCycleCompletion(self):
"""Waits for the ongoing sync cycle to complete. Must be signed in to sync
before calling this method.
Returns:
True, on success.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'AwaitSyncCycleCompletion',
}
return self._GetResultFromJSONRequest(cmd_dict)['success']
def AwaitSyncRestart(self):
"""Waits for sync to reinitialize itself. Typically used when the browser
is restarted and a full sync cycle is not expected to occur. Must be
previously signed in to sync before calling this method.
Returns:
True, on success.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'AwaitSyncRestart',
}
return self._GetResultFromJSONRequest(cmd_dict)['success']
def EnableSyncForDatatypes(self, datatypes):
"""Enables sync for a given list of sync datatypes. Must be signed in to
sync before calling this method.
Args:
datatypes: A list of strings indicating the datatypes for which to enable
sync. Strings that can be in the list are:
Bookmarks, Preferences, Passwords, Autofill, Themes,
Typed URLs, Extensions, Encryption keys, Sessions, Apps, All.
For an updated list of valid sync datatypes, refer to the
function ModelTypeToString() in the file
chrome/browser/sync/syncable/model_type.cc.
Examples:
['Bookmarks', 'Preferences', 'Passwords']
['All']
Returns:
True, on success.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'EnableSyncForDatatypes',
'datatypes': datatypes,
}
return self._GetResultFromJSONRequest(cmd_dict)['success']
def DisableSyncForDatatypes(self, datatypes):
"""Disables sync for a given list of sync datatypes. Must be signed in to
sync before calling this method.
Args:
datatypes: A list of strings indicating the datatypes for which to
disable sync. Strings that can be in the list are:
Bookmarks, Preferences, Passwords, Autofill, Themes,
Typed URLs, Extensions, Encryption keys, Sessions, Apps, All.
For an updated list of valid sync datatypes, refer to the
function ModelTypeToString() in the file
chrome/browser/sync/syncable/model_type.cc.
Examples:
['Bookmarks', 'Preferences', 'Passwords']
['All']
Returns:
True, on success.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'DisableSyncForDatatypes',
'datatypes': datatypes,
}
return self._GetResultFromJSONRequest(cmd_dict)['success']
def HeapProfilerDump(self, process_type, reason, tab_index=0, windex=0):
"""Dumps a heap profile. It works only on Linux and ChromeOS.
We need an environment variable "HEAPPROFILE" set to a directory and a
filename prefix, for example, "/tmp/prof". In a case of this example,
heap profiles will be dumped into "/tmp/prof.(pid).0002.heap",
"/tmp/prof.(pid).0003.heap", and so on. Nothing happens when this
function is called without the env.
Args:
process_type: A string which is one of 'browser' or 'renderer'.
reason: A string which describes the reason for dumping a heap profile.
The reason will be included in the logged message.
Examples:
'To check memory leaking'
'For PyAuto tests'
tab_index: tab index to work on if 'process_type' == 'renderer'.
Defaults to 0 (first tab).
windex: window index to work on if 'process_type' == 'renderer'.
Defaults to 0 (first window).
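Example (an illustrative sketch; assumes the browser was launched with
HEAPPROFILE set, e.g. HEAPPROFILE=/tmp/prof):
self.HeapProfilerDump('renderer', 'For PyAuto tests')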
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
assert process_type in ('browser', 'renderer')
if self.IsLinux():  # IsLinux() is also True on ChromeOS.
cmd_dict = {
'command': 'HeapProfilerDump',
'process_type': process_type,
'reason': reason,
'windex': windex,
'tab_index': tab_index,
}
self._GetResultFromJSONRequest(cmd_dict)
else:
logging.warn('Heap-profiling is not supported in this OS.')
def GetNTPThumbnails(self):
"""Return a list of info about the sites in the NTP most visited section.
SAMPLE:
[{ u'title': u'Google',
u'url': u'http://www.google.com'},
{
u'title': u'Yahoo',
u'url': u'http://www.yahoo.com'}]
"""
return self._GetNTPInfo()['most_visited']
def GetNTPThumbnailIndex(self, thumbnail):
"""Returns the index of the given NTP thumbnail, or -1 if it is not shown.
Args:
thumbnail: a thumbnail dict received from |GetNTPThumbnails|
"""
thumbnails = self.GetNTPThumbnails()
for i in range(len(thumbnails)):
if thumbnails[i]['url'] == thumbnail['url']:
return i
return -1
def RemoveNTPThumbnail(self, thumbnail):
"""Removes the NTP thumbnail and returns true on success.
Args:
thumbnail: a thumbnail dict received from |GetNTPThumbnails|
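Example (an illustrative sketch; removes the first thumbnail, if any):
thumbnails = self.GetNTPThumbnails()
if thumbnails:
self.RemoveNTPThumbnail(thumbnails[0])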
"""
self._CheckNTPThumbnailShown(thumbnail)
cmd_dict = {
'command': 'RemoveNTPMostVisitedThumbnail',
'url': thumbnail['url']
}
self._GetResultFromJSONRequest(cmd_dict)
def RestoreAllNTPThumbnails(self):
"""Restores all the removed NTP thumbnails.
Note:
the default thumbnails may come back into the Most Visited sites
section after doing this
"""
cmd_dict = {
'command': 'RestoreAllNTPMostVisitedThumbnails'
}
self._GetResultFromJSONRequest(cmd_dict)
def GetNTPDefaultSites(self):
"""Returns a list of URLs for all the default NTP sites, regardless of
whether they are showing or not.
These sites are the ones present in the NTP on a fresh install of Chrome.
"""
return self._GetNTPInfo()['default_sites']
def RemoveNTPDefaultThumbnails(self):
"""Removes all thumbnails for default NTP sites, regardless of whether they
are showing or not."""
cmd_dict = { 'command': 'RemoveNTPMostVisitedThumbnail' }
for site in self.GetNTPDefaultSites():
cmd_dict['url'] = site
self._GetResultFromJSONRequest(cmd_dict)
def GetNTPRecentlyClosed(self):
"""Return a list of info about the items in the NTP recently closed section.
SAMPLE:
[{
u'type': u'tab',
u'url': u'http://www.bing.com',
u'title': u'Bing',
u'timestamp': 2139082.03912, # Seconds since epoch (Jan 1, 1970)
u'direction': u'ltr'},
{
u'type': u'window',
u'timestamp': 2130821.90812,
u'tabs': [
{
u'type': u'tab',
u'url': u'http://www.cnn.com',
u'title': u'CNN',
u'timestamp': 2129082.12098,
u'direction': u'ltr'}]},
{
u'type': u'tab',
u'url': u'http://www.altavista.com',
u'title': u'Altavista',
u'timestamp': 21390820.12903,
u'direction': u'rtl'}]
"""
return self._GetNTPInfo()['recently_closed']
def GetNTPApps(self):
"""Retrieves information about the apps listed on the NTP.
In the sample data below, the "launch_type" will be one of the following
strings: "pinned", "regular", "fullscreen", "window", or "unknown".
SAMPLE:
[
{
u'app_launch_index': 2,
u'description': u'Web Store',
u'icon_big': u'chrome://theme/IDR_APP_DEFAULT_ICON',
u'icon_small': u'chrome://favicon/https://chrome.google.com/webstore',
u'id': u'ahfgeienlihckogmohjhadlkjgocpleb',
u'is_component_extension': True,
u'is_disabled': False,
u'launch_container': 2,
u'launch_type': u'regular',
u'launch_url': u'https://chrome.google.com/webstore',
u'name': u'Chrome Web Store',
u'options_url': u'',
},
{
u'app_launch_index': 1,
u'description': u'A countdown app',
u'icon_big': (u'chrome-extension://aeabikdlfbfeihglecobdkdflahfgcpd/'
u'countdown128.png'),
u'icon_small': (u'chrome://favicon/chrome-extension://'
u'aeabikdlfbfeihglecobdkdflahfgcpd/'
u'launchLocalPath.html'),
u'id': u'aeabikdlfbfeihglecobdkdflahfgcpd',
u'is_component_extension': False,
u'is_disabled': False,
u'launch_container': 2,
u'launch_type': u'regular',
u'launch_url': (u'chrome-extension://aeabikdlfbfeihglecobdkdflahfgcpd/'
u'launchLocalPath.html'),
u'name': u'Countdown',
u'options_url': u'',
}
]
Returns:
A list of dictionaries in which each dictionary contains the information
for a single app that appears in the "Apps" section of the NTP.
"""
return self._GetNTPInfo()['apps']
def _GetNTPInfo(self):
"""Get info about the New Tab Page (NTP).
This does not retrieve the actual info displayed in a particular NTP; it
retrieves the current state of internal data that would be used to display
an NTP. This includes info about the apps, the most visited sites,
the recently closed tabs and windows, and the default NTP sites.
SAMPLE:
{
u'apps': [ ... ],
u'most_visited': [ ... ],
u'recently_closed': [ ... ],
u'default_sites': [ ... ]
}
Returns:
A dictionary containing all the NTP info. See details about the different
sections in their respective methods: GetNTPApps(), GetNTPThumbnails(),
GetNTPRecentlyClosed(), and GetNTPDefaultSites().
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'GetNTPInfo',
}
return self._GetResultFromJSONRequest(cmd_dict)
def _CheckNTPThumbnailShown(self, thumbnail):
if self.GetNTPThumbnailIndex(thumbnail) == -1:
raise NTPThumbnailNotShownError()
def LaunchApp(self, app_id, windex=0):
"""Opens the New Tab Page and launches the specified app from it.
This method will not return until after the contents of a new tab for the
launched app have stopped loading.
Args:
app_id: The string ID of the app to launch.
windex: The index of the browser window to work on. Defaults to 0 (the
first window).
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
self.AppendTab(GURL('chrome://newtab'), windex) # Also activates this tab.
cmd_dict = {
'command': 'LaunchApp',
'id': app_id,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def SetAppLaunchType(self, app_id, launch_type, windex=0):
"""Sets the launch type for the specified app.
Args:
app_id: The string ID of the app whose launch type should be set.
launch_type: The string launch type, which must be one of the following:
'pinned': Launch in a pinned tab.
'regular': Launch in a regular tab.
'fullscreen': Launch in a fullscreen tab.
'window': Launch in a new browser window.
windex: The index of the browser window to work on. Defaults to 0 (the
first window).
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
self.assertTrue(launch_type in ('pinned', 'regular', 'fullscreen',
'window'),
msg='Unexpected launch type value: "%s"' % launch_type)
cmd_dict = {
'command': 'SetAppLaunchType',
'id': app_id,
'launch_type': launch_type,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def GetV8HeapStats(self, tab_index=0, windex=0):
"""Returns statistics about the v8 heap in the renderer process for a tab.
Args:
tab_index: The tab index, default is 0.
window_index: The window index, default is 0.
Returns:
A dictionary containing v8 heap statistics. Memory values are in bytes.
Example:
{ 'renderer_id': 6223,
'v8_memory_allocated': 21803776,
'v8_memory_used': 10565392 }
"""
cmd_dict = { # Prepare command for the json interface.
'command': 'GetV8HeapStats',
'tab_index': tab_index,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def GetFPS(self, tab_index=0, windex=0):
"""Returns the current FPS associated with the renderer process for a tab.
FPS is the rendered frames per second.
Args:
tab_index: The tab index, default is 0.
window_index: The window index, default is 0.
Returns:
A dictionary containing FPS info.
Example:
{ 'renderer_id': 23567,
'routing_id': 1,
'fps': 29.404298782348633 }
"""
cmd_dict = { # Prepare command for the json interface.
'command': 'GetFPS',
'tab_index': tab_index,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def IsFullscreenForBrowser(self, windex=0):
"""Returns true if the window is currently fullscreen and was initially
transitioned to fullscreen by a browser (vs tab) mode transition."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsFullscreenForBrowser' },
windex=windex).get('result')
def IsFullscreenForTab(self, windex=0):
"""Returns true if fullscreen has been caused by a tab."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsFullscreenForTab' },
windex=windex).get('result')
def IsMouseLocked(self, windex=0):
"""Returns true if the mouse is currently locked."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsMouseLocked' },
windex=windex).get('result')
def IsMouseLockPermissionRequested(self, windex=0):
"""Returns true if the user is currently prompted to give permision for
mouse lock."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsMouseLockPermissionRequested' },
windex=windex).get('result')
def IsFullscreenPermissionRequested(self, windex=0):
"""Returns true if the user is currently prompted to give permision for
fullscreen."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsFullscreenPermissionRequested' },
windex=windex).get('result')
def IsFullscreenBubbleDisplayed(self, windex=0):
"""Returns true if the fullscreen and mouse lock bubble is currently
displayed."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsFullscreenBubbleDisplayed' },
windex=windex).get('result')
def IsFullscreenBubbleDisplayingButtons(self, windex=0):
"""Returns true if the fullscreen and mouse lock bubble is currently
displayed and presenting buttons."""
return self._GetResultFromJSONRequest(
{ 'command': 'IsFullscreenBubbleDisplayingButtons' },
windex=windex).get('result')
def AcceptCurrentFullscreenOrMouseLockRequest(self, windex=0):
"""Activate the accept button on the fullscreen and mouse lock bubble."""
return self._GetResultFromJSONRequest(
{ 'command': 'AcceptCurrentFullscreenOrMouseLockRequest' },
windex=windex)
def DenyCurrentFullscreenOrMouseLockRequest(self, windex=0):
"""Activate the deny button on the fullscreen and mouse lock bubble."""
return self._GetResultFromJSONRequest(
{ 'command': 'DenyCurrentFullscreenOrMouseLockRequest' },
windex=windex)
def KillRendererProcess(self, pid):
"""Kills the given renderer process.
This will return only after the browser has received notice of the renderer
close.
Args:
pid: the process id of the renderer to kill
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'KillRendererProcess',
'pid': pid
}
return self._GetResultFromJSONRequest(cmd_dict)
def NewWebDriver(self, port=0):
"""Returns a new remote WebDriver instance.
Args:
port: The port to start WebDriver on; by default the service selects an
open port. It is an error to request a port number and request a
different port later.
Returns:
selenium.webdriver.remote.webdriver.WebDriver instance
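Example (an illustrative sketch; the URL is hypothetical):
driver = self.NewWebDriver()
driver.get('http://www.example.com')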
"""
from chrome_driver_factory import ChromeDriverFactory
global _CHROME_DRIVER_FACTORY
if _CHROME_DRIVER_FACTORY is None:
_CHROME_DRIVER_FACTORY = ChromeDriverFactory(port=port)
self.assertTrue(_CHROME_DRIVER_FACTORY.GetPort() == port or port == 0,
msg='Requested a WebDriver on a specific port while already'
' running on a different port.')
return _CHROME_DRIVER_FACTORY.NewChromeDriver(self)
def CreateNewAutomationProvider(self, channel_id):
"""Creates a new automation provider.
The provider will open a named channel in server mode.
Args:
channel_id: the channel_id to open the server channel with
"""
cmd_dict = {
'command': 'CreateNewAutomationProvider',
'channel_id': channel_id
}
self._GetResultFromJSONRequest(cmd_dict)
def OpenNewBrowserWindowWithNewProfile(self):
"""Creates a new multi-profiles user, and then opens and shows a new
tabbed browser window with the new profile.
This is equivalent to 'Add new user' action with multi-profiles.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'OpenNewBrowserWindowWithNewProfile'
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def GetMultiProfileInfo(self):
"""Fetch info about all multi-profile users.
Returns:
A dictionary.
Sample:
{
'enabled': True,
'profiles': [{'name': 'First user',
'path': '/tmp/.org.chromium.Chromium.Tyx17X/Default'},
{'name': 'User 1',
'path': '/tmp/.org.chromium.Chromium.Tyx17X/profile_1'}],
}
Profiles will be listed in the same order as visible in preferences.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { # Prepare command for the json interface
'command': 'GetMultiProfileInfo'
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def GetPolicyDefinitionList(self):
"""Gets a dictionary of existing policies mapped to their definitions.
SAMPLE OUTPUT:
{
'ShowHomeButton': ['bool', false],
'DefaultSearchProviderSearchURL': ['str', false],
...
}
Returns:
A dictionary mapping each policy name to its value type and a Boolean flag
indicating whether it is a device policy.
"""
cmd_dict = {
'command': 'GetPolicyDefinitionList'
}
return self._GetResultFromJSONRequest(cmd_dict)
def RefreshPolicies(self):
"""Refreshes all the available policy providers.
Each policy provider will reload its policy source and push the updated
policies. This call waits for the new policies to be applied; any policies
installed before this call is issued are guaranteed to be ready after it
returns.
"""
# TODO(craigdh): Determine the root cause of RefreshPolicies' flakiness.
# See crosbug.com/30221
timeout = PyUITest.ActionTimeoutChanger(self, 3 * 60 * 1000)
cmd_dict = { 'command': 'RefreshPolicies' }
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def SubmitForm(self, form_id, tab_index=0, windex=0, frame_xpath=''):
"""Submits the given form ID, and returns after it has been submitted.
Args:
form_id: the id attribute of the form to submit.
Returns:
True on success.
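Example (an illustrative sketch; 'login_form' is a hypothetical id):
self.assertTrue(self.SubmitForm('login_form'))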
"""
js = """
document.getElementById("%s").submit();
window.addEventListener("unload", function() {
window.domAutomationController.send("done");
});
""" % form_id
if self.ExecuteJavascript(js, tab_index, windex, frame_xpath) != 'done':
return False
# Wait until the form is submitted and the page completes loading.
return self.WaitUntil(
lambda: self.GetDOMValue('document.readyState',
tab_index, windex, frame_xpath),
expect_retval='complete')
## ChromeOS section
def GetLoginInfo(self):
"""Returns information about login and screen locker state.
This includes things like whether a user is logged in, the username
of the logged in user, and whether the screen is locked.
Returns:
A dictionary.
Sample:
{ u'is_guest': False,
u'is_owner': True,
u'email': u'[email protected]',
u'is_screen_locked': False,
u'login_ui_type': 'nativeui', # or 'webui'
u'is_logged_in': True}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetLoginInfo' }
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def WaitForSessionManagerRestart(self, function):
"""Call a function and wait for the ChromeOS session_manager to restart.
Args:
function: The function to call.
"""
assert callable(function)
pgrep_process = subprocess.Popen(['pgrep', 'session_manager'],
stdout=subprocess.PIPE)
old_pid = pgrep_process.communicate()[0].strip()
function()
return self.WaitUntil(lambda: self._IsSessionManagerReady(old_pid))
def _WaitForInodeChange(self, path, function):
"""Call a function and wait for the specified file path to change.
Args:
path: The file path to check for changes.
function: The function to call.
"""
assert callable(function)
old_inode = os.stat(path).st_ino
function()
return self.WaitUntil(lambda: self._IsInodeNew(path, old_inode))
def ShowCreateAccountUI(self):
"""Go to the account creation page.
This is the same as clicking the "Create Account" link on the
ChromeOS login screen. Does not actually create a new account.
The login screen should be displayed for this to work.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'ShowCreateAccountUI' }
# See note below under LoginAsGuest(). ShowCreateAccountUI() logs
# the user in as guest in order to access the account creation page.
assert self._WaitForInodeChange(
self._named_channel_id,
lambda: self._GetResultFromJSONRequest(cmd_dict, windex=None)), \
'Chrome did not reopen the testing channel after login as guest.'
self.SetUp()
def LoginAsGuest(self):
"""Login to chromeos as a guest user.
Waits until logged in.
Should be displaying the login screen to work.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'LoginAsGuest' }
# Currently, logging in as guest causes session_manager to
# restart Chrome, which will close the testing channel.
# We need to call SetUp() again to reconnect to the new channel.
assert self._WaitForInodeChange(
self._named_channel_id,
lambda: self._GetResultFromJSONRequest(cmd_dict, windex=None)), \
'Chrome did not reopen the testing channel after login as guest.'
self.SetUp()
def Login(self, username, password):
"""Login to chromeos.
Waits until logged in and browser is ready.
Should be displaying the login screen to work.
Note that in the case of webui auth-extension-based login, gaia auth
errors will not be noticed here, because the browser has no knowledge
of them.
Returns:
An error string if an error occurred.
None otherwise.
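Example (an illustrative sketch; the credentials are placeholders):
error = self.Login('testuser', 'testpass')
self.assertFalse(error, msg='Login failed: %s' % error)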
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'Login',
'username': username,
'password': password,
}
result = self._GetResultFromJSONRequest(cmd_dict, windex=None)
return result.get('error_string')
def Logout(self):
"""Log out from ChromeOS and wait for session_manager to come up.
May return before logout is complete and
gives no indication of success or failure.
Should be logged in to work.
"""
assert self.GetLoginInfo()['is_logged_in'], \
'Trying to log out when already logged out.'
assert self.WaitForSessionManagerRestart(
lambda: self.ApplyAccelerator(IDC_EXIT)), \
'Session manager did not restart after logout.'
self.__SetUp()
def LockScreen(self):
"""Locks the screen on chromeos.
Waits until screen is locked.
Should be logged in and screen should not be locked to work.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'LockScreen' }
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def UnlockScreen(self, password):
"""Unlocks the screen on chromeos, authenticating the user's password first.
Waits until screen is unlocked.
Screen locker should be active for this to work.
Returns:
An error string if an error occurred.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'UnlockScreen',
'password': password,
}
result = self._GetResultFromJSONRequest(
cmd_dict, windex=None, timeout=self.large_test_timeout_ms())
return result.get('error_string')
def SignoutInScreenLocker(self):
"""Signs out of chromeos using the screen locker's "Sign out" feature.
Effectively the same as clicking the "Sign out" link on the screen locker.
Screen should be locked for this to work.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'SignoutInScreenLocker' }
assert self.WaitForSessionManagerRestart(
lambda: self._GetResultFromJSONRequest(cmd_dict, windex=None)), \
'Session manager did not restart after logout.'
self.__SetUp()
def GetBatteryInfo(self):
"""Get details about battery state.
Returns:
A dictionary with the following keys:
'battery_is_present': bool
'line_power_on': bool
if 'battery_is_present':
'battery_percentage': float (0 ~ 100)
'battery_fully_charged': bool
if 'line_power_on':
'battery_time_to_full': int (seconds)
else:
'battery_time_to_empty': int (seconds)
If it is still calculating the time left, 'battery_time_to_full'
and 'battery_time_to_empty' will be absent.
Use 'battery_fully_charged' instead of 'battery_percentage'
or 'battery_time_to_full' to determine whether the battery
is fully charged, since the percentage is only approximate.
Sample:
{ u'battery_is_present': True,
u'line_power_on': False,
u'battery_time_to_empty': 29617,
u'battery_percentage': 100.0,
u'battery_fully_charged': False }
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetBatteryInfo' }
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def GetPanelInfo(self):
"""Get details about open ChromeOS panels.
A panel is actually a type of browser window, so all of
this information is also available using GetBrowserInfo().
Returns:
A dictionary.
Sample:
[{ 'incognito': False,
'renderer_pid': 4820,
'title': u'Downloads',
'url': u'chrome://active-downloads/'}]
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
panels = []
for browser in self.GetBrowserInfo()['windows']:
if browser['type'] != 'panel':
continue
tab = browser['tabs'][0]
panel = {}
panel['incognito'] = browser['incognito']
panel['renderer_pid'] = tab['renderer_pid']
panel['title'] = self.GetActiveTabTitle(browser['index'])
panel['url'] = tab['url']
panels.append(panel)
return panels
def GetNetworkInfo(self):
"""Get details about ethernet, wifi, and cellular networks on chromeos.
Returns:
A dictionary.
Sample:
{ u'cellular_available': True,
u'cellular_enabled': False,
u'connected_ethernet': u'/service/ethernet_abcd',
u'connected_wifi': u'/service/wifi_abcd_1234_managed_none',
u'ethernet_available': True,
u'ethernet_enabled': True,
u'ethernet_networks':
{ u'/service/ethernet_abcd':
{ u'device_path': u'/device/abcdeth',
u'ip_address': u'11.22.33.44',
u'name': u'',
u'service_path':
u'/profile/default/ethernet_abcd',
u'status': u'Connected'}},
u'ip_address': u'11.22.33.44',
u'remembered_wifi':
{ u'/service/wifi_abcd_1234_managed_none':
{ u'device_path': u'',
u'encrypted': False,
u'encryption': u'',
u'ip_address': '',
u'name': u'WifiNetworkName1',
u'status': u'Unknown',
u'strength': 0},
},
u'wifi_available': True,
u'wifi_enabled': True,
u'wifi_networks':
{ u'/service/wifi_abcd_1234_managed_none':
{ u'device_path': u'/device/abcdwifi',
u'encrypted': False,
u'encryption': u'',
u'ip_address': u'123.123.123.123',
u'name': u'WifiNetworkName1',
u'status': u'Connected',
u'strength': 76},
u'/service/wifi_abcd_1234_managed_802_1x':
{ u'encrypted': True,
u'encryption': u'8021X',
u'ip_address': u'',
u'name': u'WifiNetworkName2',
u'status': u'Idle',
u'strength': 79}}}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetNetworkInfo' }
network_info = self._GetResultFromJSONRequest(cmd_dict, windex=None)
# Remembered networks do not have /service/ prepended to the service path
# even though wifi_networks does. We want this prepended to allow for
# consistency and easy string comparison with wifi_networks.
network_info['remembered_wifi'] = dict([('/service/' + k, v) for k, v in
network_info['remembered_wifi'].iteritems()])
return network_info
def GetConnectedWifi(self):
"""Returns the SSID of the currently connected wifi network.
Returns:
The SSID of the connected network or None if we're not connected.
"""
service_list = self.GetNetworkInfo()
connected_service_path = service_list.get('connected_wifi')
if 'wifi_networks' in service_list and \
connected_service_path in service_list['wifi_networks']:
return service_list['wifi_networks'][connected_service_path]['name']
def GetServicePath(self, ssid):
"""Returns the service path associated with an SSID.
Args:
ssid: String defining the SSID we are searching for.
Returns:
The service path or None if SSID does not exist.
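Example (an illustrative sketch; the SSID and passphrase are
placeholders):
service_path = self.GetServicePath('MyHomeNetwork')
if service_path:
error = self.ConnectToWifiNetwork(service_path, password='secret')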
"""
service_list = self.GetNetworkInfo()
service_list = service_list.get('wifi_networks', [])
for service_path, service_obj in service_list.iteritems():
if service_obj['name'] == ssid:
return service_path
return None
def NetworkScan(self):
"""Causes ChromeOS to scan for available wifi networks.
Blocks until scanning is complete.
Returns:
The new list of networks obtained from GetNetworkInfo().
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'NetworkScan' }
self._GetResultFromJSONRequest(cmd_dict, windex=None)
return self.GetNetworkInfo()
def ToggleNetworkDevice(self, device, enable):
"""Enable or disable a network device on ChromeOS.
Valid device names are ethernet, wifi, cellular.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ToggleNetworkDevice',
'device': device,
'enable': enable,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
PROXY_TYPE_DIRECT = 1
PROXY_TYPE_MANUAL = 2
PROXY_TYPE_PAC = 3
def WaitUntilWifiNetworkAvailable(self, ssid, timeout=60, is_hidden=False):
"""Waits until the given network is available.
Routers that have just been turned on may take up to one minute to begin
broadcasting their SSID.
Args:
ssid: SSID of the service we want to connect to.
timeout: timeout (in seconds)
Raises:
Exception if timeout duration has been hit before wifi router is seen.
Returns:
True, when the wifi network is seen within the timeout period.
False, otherwise.
"""
def _GotWifiNetwork():
# Returns non-empty array if desired SSID is available.
try:
return [wifi for wifi in
self.NetworkScan().get('wifi_networks', {}).values()
if wifi.get('name') == ssid]
except pyauto_errors.JSONInterfaceError:
# Temporary fix until crosbug.com/14174 is fixed.
# NetworkScan is only used in updating the list of networks so errors
# thrown by it are not critical to the results of wifi tests that use
# this method.
return False
# Hidden APs are assumed to always be on and ready to connect to.
if is_hidden:
return bool(_GotWifiNetwork())
return self.WaitUntil(_GotWifiNetwork, timeout=timeout, retry_sleep=1)
def GetProxyTypeName(self, proxy_type):
values = { self.PROXY_TYPE_DIRECT: 'Direct Internet connection',
self.PROXY_TYPE_MANUAL: 'Manual proxy configuration',
self.PROXY_TYPE_PAC: 'Automatic proxy configuration' }
return values[proxy_type]
def GetProxySettingsOnChromeOS(self, windex=0):
"""Get current proxy settings on Chrome OS.
Returns:
A dictionary. See SetProxySettings() below
for the full list of possible dictionary keys.
Samples:
{ u'ignorelist': [],
u'single': False,
u'type': 1}
{ u'ignorelist': [u'www.example.com', u'www.example2.com'],
u'single': True,
u'singlehttp': u'24.27.78.152',
u'singlehttpport': 1728,
u'type': 2}
{ u'ignorelist': [],
u'pacurl': u'http://example.com/config.pac',
u'single': False,
u'type': 3}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetProxySettings' }
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def SetProxySettingsOnChromeOS(self, key, value, windex=0):
"""Set a proxy setting on Chrome OS.
Owner must be logged in for these to persist.
If user is not logged in or is logged in as non-owner or guest,
proxy settings do not persist across browser restarts or login/logout.
Valid settings are:
'type': int - Type of proxy. Should be one of:
PROXY_TYPE_DIRECT, PROXY_TYPE_MANUAL, PROXY_TYPE_PAC.
'ignorelist': list - The list of hosts and domains to ignore.
These settings set 'type' to PROXY_TYPE_MANUAL:
'single': boolean - Whether to use the same proxy for all protocols.
These settings set 'single' to True:
'singlehttp': string - If single is true, the proxy address to use.
'singlehttpport': int - If single is true, the proxy port to use.
These settings set 'single' to False:
'httpurl': string - HTTP proxy address.
'httpport': int - HTTP proxy port.
'httpsurl': string - Secure HTTP proxy address.
'httpsport': int - Secure HTTP proxy port.
'ftpurl': string - FTP proxy address.
'ftpport': int - FTP proxy port.
'socks': string - SOCKS host address.
'socksport': int - SOCKS host port.
This setting sets 'type' to PROXY_TYPE_PAC:
'pacurl': string - Autoconfiguration URL.
Examples:
# Sets direct internet connection, no proxy.
self.SetProxySettings('type', self.PROXY_TYPE_DIRECT)
# Sets manual proxy configuration, same proxy for all protocols.
self.SetProxySettings('singlehttp', '24.27.78.152')
self.SetProxySettings('singlehttpport', 1728)
self.SetProxySettings('ignorelist', ['www.example.com', 'example2.com'])
# Sets automatic proxy configuration with the specified PAC url.
self.SetProxySettings('pacurl', 'http://example.com/config.pac')
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'SetProxySettings',
'key': key,
'value': value,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
def ForgetAllRememberedNetworks(self):
"""Forgets all networks that the device has marked as remembered."""
for service in self.GetNetworkInfo()['remembered_wifi']:
self.ForgetWifiNetwork(service)
def ForgetWifiNetwork(self, service_path):
"""Forget a remembered network by its service path.
This function is equivalent to clicking the 'Forget Network' button in the
chrome://settings/internet page. This function does not indicate whether
the forget operation succeeded or failed. It is up to the caller to call
GetNetworkInfo and check the updated remembered_wifi list to verify that
the service has been removed.
Args:
service_path: Flimflam path that defines the remembered network.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# Usually the service_path is prepended with '/service/', such as when the
# service path is retrieved from GetNetworkInfo. ForgetWifiNetwork works
# only for service paths where this has already been stripped.
service_path = service_path.split('/service/')[-1]
cmd_dict = {
'command': 'ForgetWifiNetwork',
'service_path': service_path,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None, timeout=50000)
def ConnectToCellularNetwork(self):
"""Connects to the available cellular network.
Blocks until connection succeeds or fails.
Returns:
An error string if an error occurred.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
# Every device should only have one cellular network present, so we can
# scan for it.
cellular_networks = self.NetworkScan().get('cellular_networks', {}).keys()
self.assertTrue(cellular_networks, 'Could not find cellular service.')
service_path = cellular_networks[0]
cmd_dict = {
'command': 'ConnectToCellularNetwork',
'service_path': service_path,
}
result = self._GetResultFromJSONRequest(
cmd_dict, windex=None, timeout=50000)
return result.get('error_string')
def DisconnectFromCellularNetwork(self):
"""Disconnect from the connected cellular network.
Blocks until disconnect is complete.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'DisconnectFromCellularNetwork',
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def ConnectToWifiNetwork(self, service_path, password='', shared=True):
"""Connect to a wifi network by its service path.
Blocks until connection succeeds or fails.
Args:
service_path: Flimflam path that defines the wifi network.
password: Passphrase for connecting to the wifi network.
shared: Boolean value specifying whether the network should be shared.
Returns:
An error string if an error occurred.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ConnectToWifiNetwork',
'service_path': service_path,
'password': password,
'shared': shared,
}
result = self._GetResultFromJSONRequest(
cmd_dict, windex=None, timeout=50000)
return result.get('error_string')
def ConnectToHiddenWifiNetwork(self, ssid, security, password='',
shared=True, save_credentials=False):
"""Connect to a wifi network by its service path.
Blocks until connection succeeds or fails.
Args:
ssid: The SSID of the network to connect to.
security: The network's security type. One of: 'SECURITY_NONE',
'SECURITY_WEP', 'SECURITY_WPA', 'SECURITY_RSN', 'SECURITY_8021X'
password: Passphrase for connecting to the wifi network.
shared: Boolean value specifying whether the network should be shared.
save_credentials: Boolean value specifying whether 802.1x credentials are
saved.
Returns:
An error string if an error occurred.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
assert security in ('SECURITY_NONE', 'SECURITY_WEP', 'SECURITY_WPA',
'SECURITY_RSN', 'SECURITY_8021X')
cmd_dict = {
'command': 'ConnectToHiddenWifiNetwork',
'ssid': ssid,
'security': security,
'password': password,
'shared': shared,
'save_credentials': save_credentials,
}
result = self._GetResultFromJSONRequest(
cmd_dict, windex=None, timeout=50000)
return result.get('error_string')
def DisconnectFromWifiNetwork(self):
"""Disconnect from the connected wifi network.
Blocks until disconnect is complete.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'DisconnectFromWifiNetwork',
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def AddPrivateNetwork(self,
hostname,
service_name,
provider_type,
username,
password,
cert_nss='',
cert_id='',
key=''):
"""Add and connect to a private network.
Blocks until connection succeeds or fails. This is equivalent to
'Add Private Network' in the network menu UI.
Args:
hostname: Server hostname for the private network.
service_name: Service name that defines the private network. Do not
add multiple services with the same name.
provider_type: Types are L2TP_IPSEC_PSK and L2TP_IPSEC_USER_CERT.
Provider type OPEN_VPN is not yet supported.
Type names returned by GetPrivateNetworkInfo will
also work.
username: Username for connecting to the virtual network.
password: Passphrase for connecting to the virtual network.
cert_nss: Certificate nss nickname for a L2TP_IPSEC_USER_CERT network.
cert_id: Certificate id for a L2TP_IPSEC_USER_CERT network.
key: Pre-shared key for a L2TP_IPSEC_PSK network.
Returns:
An error string if an error occurred.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'AddPrivateNetwork',
'hostname': hostname,
'service_name': service_name,
'provider_type': provider_type,
'username': username,
'password': password,
'cert_nss': cert_nss,
'cert_id': cert_id,
'key': key,
}
result = self._GetResultFromJSONRequest(
cmd_dict, windex=None, timeout=50000)
return result.get('error_string')
def GetPrivateNetworkInfo(self):
"""Get details about private networks on chromeos.
Returns:
A dictionary including information about all remembered virtual networks
as well as the currently connected virtual network, if any.
Sample:
{ u'connected': u'/service/vpn_123_45_67_89_test_vpn'}
u'/service/vpn_123_45_67_89_test_vpn':
{ u'username': u'vpn_user',
u'name': u'test_vpn',
u'hostname': u'123.45.67.89',
u'key': u'abcde',
u'cert_id': u'',
u'password': u'zyxw123',
u'provider_type': u'L2TP_IPSEC_PSK'},
u'/service/vpn_111_11_11_11_test_vpn2':
{ u'username': u'testerman',
u'name': u'test_vpn2',
u'hostname': u'111.11.11.11',
u'key': u'fghijklm',
u'cert_id': u'',
u'password': u'789mnop',
u'provider_type': u'L2TP_IPSEC_PSK'},
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetPrivateNetworkInfo' }
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def ConnectToPrivateNetwork(self, service_path):
"""Connect to a remembered private network by its service path.
Blocks until connection succeeds or fails. The network must have been
previously added with all necessary connection details.
Args:
service_path: Service name that defines the private network.
Returns:
An error string if an error occurred.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'ConnectToPrivateNetwork',
'service_path': service_path,
}
result = self._GetResultFromJSONRequest(
cmd_dict, windex=None, timeout=50000)
return result.get('error_string')
def DisconnectFromPrivateNetwork(self):
"""Disconnect from the active private network.
Expects a private network to be active.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'DisconnectFromPrivateNetwork',
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def IsEnterpriseDevice(self):
"""Check whether the device is managed by an enterprise.
Returns:
True if the device is managed by an enterprise, False otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'IsEnterpriseDevice',
}
result = self._GetResultFromJSONRequest(cmd_dict, windex=None)
return result.get('enterprise')
def GetEnterprisePolicyInfo(self):
"""Get details about enterprise policy on chromeos.
Returns:
A dictionary including information about the enterprise policy.
Sample:
{u'device_token_cache_loaded': True,
u'device_cloud_policy_state': u'success',
u'device_id': u'11111-222222222-33333333-4444444',
u'device_mandatory_policies': {},
u'device_recommended_policies': {},
u'device_token': u'ABjmT7nqGWTHRLO',
u'enterprise_domain': u'example.com',
u'gaia_token': u'',
u'machine_id': u'123456789',
u'machine_model': u'COMPUTER',
u'user_cache_loaded': True,
u'user_cloud_policy_state': u'success',
u'user_mandatory_policies': {u'AuthSchemes': u'',
u'AutoFillEnabled': True,
u'ChromeOsLockOnIdleSuspend': True}
u'user_recommended_policies': {},
u'user_name': u'[email protected]'}
"""
cmd_dict = { 'command': 'GetEnterprisePolicyInfo' }
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def EnableSpokenFeedback(self, enabled):
"""Enables or disables spoken feedback accessibility mode.
Args:
enabled: Boolean value indicating the desired state of spoken feedback.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'EnableSpokenFeedback',
'enabled': enabled,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def IsSpokenFeedbackEnabled(self):
"""Check whether spoken feedback accessibility mode is enabled.
Returns:
True if spoken feedback is enabled, False otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'IsSpokenFeedbackEnabled', }
result = self._GetResultFromJSONRequest(cmd_dict, windex=None)
return result.get('spoken_feedback')
def GetTimeInfo(self, windex=0):
"""Gets info about the ChromeOS status bar clock.
Set the 24-hour clock by using:
self.SetPrefs('settings.clock.use_24hour_clock', True)
Returns:
a dictionary.
Sample:
{u'display_date': u'Tuesday, July 26, 2011',
u'display_time': u'4:30',
u'timezone': u'America/Los_Angeles'}
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetTimeInfo' }
if self.GetLoginInfo()['is_logged_in']:
return self._GetResultFromJSONRequest(cmd_dict, windex=windex)
else:
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def SetTimezone(self, timezone):
"""Sets the timezone on ChromeOS. A user must be logged in.
The timezone is the relative path to the timezone file in
/usr/share/zoneinfo. For example, /usr/share/zoneinfo/America/Los_Angeles
is 'America/Los_Angeles'.
This method does not return an indication of success or failure.
If the timezone is invalid, it falls back to UTC/GMT.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'SetTimezone',
'timezone': timezone,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def EnrollEnterpriseDevice(self, user, password):
"""Enrolls an unenrolled device as an enterprise device.
Expects the device to be unenrolled with the TPM unlocked. This is
equivalent to pressing Ctrl-Alt-e to enroll the device from the login
screen.
Returns:
An error string if the enrollment fails.
None otherwise.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'EnrollEnterpriseDevice',
'user': user,
'password': password,
}
time.sleep(5) # TODO(craigdh): Block until Install Attributes is ready.
result = self._GetResultFromJSONRequest(cmd_dict, windex=None)
return result.get('error_string')
def GetUpdateInfo(self):
"""Gets the status of the ChromeOS updater.
Returns:
a dictionary.
Samples:
{ u'status': u'idle',
u'release_track': u'beta-channel'}
{ u'status': u'downloading',
u'release_track': u'beta-channel',
u'download_progress': 0.1203236708350371, # 0.0 ~ 1.0
u'new_size': 152033593, # size of payload, in bytes
u'last_checked_time': 1302055709} # seconds since UNIX epoch
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetUpdateInfo' }
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def UpdateCheck(self):
"""Checks for a ChromeOS update. Blocks until finished updating.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'UpdateCheck' }
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def SetReleaseTrack(self, track):
"""Sets the release track (channel) of the ChromeOS updater.
Valid values for the track parameter are:
'stable-channel', 'beta-channel', 'dev-channel'
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
assert track in ('stable-channel', 'beta-channel', 'dev-channel'), \
'Attempt to set release track to unknown release track "%s".' % track
cmd_dict = {
'command': 'SetReleaseTrack',
'track': track,
}
self._GetResultFromJSONRequest(cmd_dict, windex=None)
def GetVolumeInfo(self):
"""Gets the volume and whether the device is muted.
Returns:
a tuple.
Sample:
(47.763456790123456, False)
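Example (an illustrative sketch; halves the volume when not muted):
volume, is_muted = self.GetVolumeInfo()
if not is_muted:
self.SetVolume(volume / 2)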
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'GetVolumeInfo' }
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def SetVolume(self, volume):
"""Sets the volume on ChromeOS. Only valid if not muted.
Args:
volume: The desired volume level as a percent from 0 to 100.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
assert 0 <= volume <= 100
cmd_dict = {
'command': 'SetVolume',
'volume': float(volume),
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def SetMute(self, mute):
"""Sets whether ChromeOS is muted or not.
Args:
mute: True to mute, False to unmute.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = {
'command': 'SetMute',
'mute': mute,
}
return self._GetResultFromJSONRequest(cmd_dict, windex=None)
def CaptureProfilePhoto(self):
"""Captures user profile photo on ChromeOS.
This is done by driving the TakePhotoDialog. The image file is
saved on disk and its path is set in the local state preferences.
A user needs to be logged in as a precondition. Note that the UI is not
destroyed afterwards; a browser restart is necessary if you want
to interact with the browser after this call in the same test case.
Raises:
pyauto_errors.JSONInterfaceError if the automation call returns an error.
"""
cmd_dict = { 'command': 'CaptureProfilePhoto' }
return self._GetResultFromJSONRequest(cmd_dict)
def GetMemoryStatsChromeOS(self, duration):
"""Identifies and returns different kinds of current memory usage stats.
This function samples values each second for |duration| seconds, then
outputs the min, max, and ending values for each measurement type.
Args:
duration: The number of seconds to sample data before outputting the
minimum, maximum, and ending values for each measurement type.
Returns:
A dictionary containing memory usage information. Each measurement type
is associated with the min, max, and ending values from among all
sampled values. Values are specified in KB.
{
'gem_obj': { # GPU memory usage.
'min': ...,
'max': ...,
'end': ...,
},
'gtt': { ... }, # GPU memory usage (graphics translation table).
'mem_free': { ... }, # CPU free memory.
'mem_available': { ... }, # CPU available memory.
'mem_shared': { ... }, # CPU shared memory.
'mem_cached': { ... }, # CPU cached memory.
'mem_anon': { ... }, # CPU anon memory (active + inactive).
'mem_file': { ... }, # CPU file memory (active + inactive).
'mem_slab': { ... }, # CPU slab memory.
'browser_priv': { ... }, # Chrome browser private memory.
'browser_shared': { ... }, # Chrome browser shared memory.
'gpu_priv': { ... }, # Chrome GPU private memory.
'gpu_shared': { ... }, # Chrome GPU shared memory.
'renderer_priv': { ... }, # Total private memory of all renderers.
'renderer_shared': { ... }, # Total shared memory of all renderers.
}
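Example (an illustrative sketch; samples for 10 seconds):
stats = self.GetMemoryStatsChromeOS(10)
peak_browser_priv_kb = stats['browser_priv']['max']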
"""
logging.debug('Sampling memory information for %d seconds...' % duration)
stats = {}
for _ in xrange(duration):
# GPU memory.
gem_obj_path = '/sys/kernel/debug/dri/0/i915_gem_objects'
if os.path.exists(gem_obj_path):
p = subprocess.Popen('grep bytes %s' % gem_obj_path,
stdout=subprocess.PIPE, shell=True)
stdout = p.communicate()[0]
gem_obj = re.search(
'\d+ objects, (\d+) bytes\n', stdout).group(1)
if 'gem_obj' not in stats:
stats['gem_obj'] = []
stats['gem_obj'].append(int(gem_obj) / 1024.0)
gtt_path = '/sys/kernel/debug/dri/0/i915_gem_gtt'
if os.path.exists(gtt_path):
p = subprocess.Popen('grep bytes %s' % gtt_path,
stdout=subprocess.PIPE, shell=True)
stdout = p.communicate()[0]
gtt = re.search(
'Total [\d]+ objects, ([\d]+) bytes', stdout).group(1)
if 'gtt' not in stats:
stats['gtt'] = []
stats['gtt'].append(int(gtt) / 1024.0)
# CPU memory.
stdout = ''
with open('/proc/meminfo') as f:
stdout = f.read()
mem_free = re.search('MemFree:\s*([\d]+) kB', stdout).group(1)
if 'mem_free' not in stats:
stats['mem_free'] = []
stats['mem_free'].append(int(mem_free))
mem_dirty = re.search('Dirty:\s*([\d]+) kB', stdout).group(1)
mem_active_file = re.search(
'Active\(file\):\s*([\d]+) kB', stdout).group(1)
mem_inactive_file = re.search(
'Inactive\(file\):\s*([\d]+) kB', stdout).group(1)
with open('/proc/sys/vm/min_filelist_kbytes') as f:
mem_min_file = f.read()
# Available memory =
# MemFree + ActiveFile + InactiveFile - DirtyMem - MinFileMem
if 'mem_available' not in stats:
stats['mem_available'] = []
stats['mem_available'].append(
int(mem_free) + int(mem_active_file) + int(mem_inactive_file) -
int(mem_dirty) - int(mem_min_file))
mem_shared = re.search('Shmem:\s*([\d]+) kB', stdout).group(1)
if 'mem_shared' not in stats:
stats['mem_shared'] = []
stats['mem_shared'].append(int(mem_shared))
mem_cached = re.search('Cached:\s*([\d]+) kB', stdout).group(1)
if 'mem_cached' not in stats:
stats['mem_cached'] = []
stats['mem_cached'].append(int(mem_cached))
mem_anon_active = re.search('Active\(anon\):\s*([\d]+) kB',
stdout).group(1)
mem_anon_inactive = re.search('Inactive\(anon\):\s*([\d]+) kB',
stdout).group(1)
if 'mem_anon' not in stats:
stats['mem_anon'] = []
stats['mem_anon'].append(int(mem_anon_active) + int(mem_anon_inactive))
mem_file_active = re.search('Active\(file\):\s*([\d]+) kB',
stdout).group(1)
mem_file_inactive = re.search('Inactive\(file\):\s*([\d]+) kB',
stdout).group(1)
if 'mem_file' not in stats:
stats['mem_file'] = []
stats['mem_file'].append(int(mem_file_active) + int(mem_file_inactive))
mem_slab = re.search('Slab:\s*([\d]+) kB', stdout).group(1)
if 'mem_slab' not in stats:
stats['mem_slab'] = []
stats['mem_slab'].append(int(mem_slab))
# Chrome process memory.
pinfo = self.GetProcessInfo()['browsers'][0]['processes']
total_renderer_priv = 0
total_renderer_shared = 0
for process in pinfo:
mem_priv = process['working_set_mem']['priv']
mem_shared = process['working_set_mem']['shared']
if process['child_process_type'] == 'Browser':
if 'browser_priv' not in stats:
stats['browser_priv'] = []
stats['browser_priv'].append(int(mem_priv))
if 'browser_shared' not in stats:
stats['browser_shared'] = []
stats['browser_shared'].append(int(mem_shared))
elif process['child_process_type'] == 'GPU':
if 'gpu_priv' not in stats:
stats['gpu_priv'] = []
stats['gpu_priv'].append(int(mem_priv))
if 'gpu_shared' not in stats:
stats['gpu_shared'] = []
stats['gpu_shared'].append(int(mem_shared))
elif process['child_process_type'] == 'Tab':
# Sum the memory of all renderer processes.
total_renderer_priv += int(mem_priv)
total_renderer_shared += int(mem_shared)
if 'renderer_priv' not in stats:
stats['renderer_priv'] = []
stats['renderer_priv'].append(int(total_renderer_priv))
if 'renderer_shared' not in stats:
stats['renderer_shared'] = []
stats['renderer_shared'].append(int(total_renderer_shared))
time.sleep(1)
# Compute min, max, and ending values to return.
result = {}
for measurement_type in stats:
values = stats[measurement_type]
result[measurement_type] = {
'min': min(values),
'max': max(values),
'end': values[-1],
}
return result
## ChromeOS section -- end
class ExtraBrowser(PyUITest):
"""Launches a new browser with some extra flags.
The new browser is launched with its own fresh profile.
This class does not apply to ChromeOS.
"""
def __init__(self, chrome_flags=[], methodName='runTest', **kwargs):
"""Accepts extra chrome flags for launching a new browser instance.
Args:
chrome_flags: list of extra flags when launching a new browser.
"""
assert not PyUITest.IsChromeOS(), \
'This function cannot be used to launch a new browser in ChromeOS.'
PyUITest.__init__(self, methodName=methodName, **kwargs)
self._chrome_flags = chrome_flags
PyUITest.setUp(self)
def __del__(self):
"""Tears down the browser and then calls super class's destructor"""
PyUITest.tearDown(self)
PyUITest.__del__(self)
def ExtraChromeFlags(self):
"""Prepares the browser to launch with specified Chrome flags."""
return PyUITest.ExtraChromeFlags(self) + self._chrome_flags
class _RemoteProxy():
"""Class for PyAuto remote method calls.
Use this class along with RemoteHost.testRemoteHost to establish a PyAuto
connection with another machine and make remote PyAuto calls. The RemoteProxy
mimics a PyAuto object, so all json-style PyAuto calls can be made on it.
The remote host acts as a dumb executor that receives method call requests,
executes them, and sends all of the results back to the RemoteProxy, including
the return value, thrown exceptions, and console output.
The remote host should be running the same version of PyAuto as the proxy.
A mismatch could lead to undefined behavior.
Example usage:
class MyTest(pyauto.PyUITest):
def testRemoteExample(self):
remote = pyauto._RemoteProxy(('127.0.0.1', 7410))
remote.NavigateToURL('http://www.google.com')
title = remote.GetActiveTabTitle()
self.assertEqual(title, 'Google')
"""
class RemoteException(Exception):
pass
def __init__(self, host):
self.RemoteConnect(host)
def RemoteConnect(self, host):
begin = time.time()
while time.time() - begin < 50:
self._socket = socket.socket()
if not self._socket.connect_ex(host):
break
time.sleep(0.25)
else:
# Make one last attempt, but raise a socket error on failure.
self._socket = socket.socket()
self._socket.connect(host)
def RemoteDisconnect(self):
if self._socket:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
def CreateTarget(self, target):
"""Registers the methods and creates a remote instance of a target.
Any RPC calls will then be made on the remote target instance. Note that the
remote instance will be a brand new instance and will have none of the state
of the local instance. The target's class should have a constructor that
takes no arguments.
"""
self._Call('CreateTarget', target.__class__)
self._RegisterClassMethods(target)
def _RegisterClassMethods(self, remote_class):
# Make remote-call versions of all remote_class methods.
for method_name, _ in inspect.getmembers(remote_class, inspect.ismethod):
# Ignore private methods and duplicates.
if method_name[0] in string.letters and \
getattr(self, method_name, None) is None:
setattr(self, method_name, functools.partial(self._Call, method_name))
def _Call(self, method_name, *args, **kwargs):
# Send request.
request = pickle.dumps((method_name, args, kwargs))
if self._socket.send(request) != len(request):
raise self.RemoteException('Error sending remote method call request.')
# Receive response.
response = self._socket.recv(4096)
if not response:
raise self.RemoteException('Client disconnected during method call.')
result, stdout, stderr, exception = pickle.loads(response)
# Print any output the client captured, throw any exceptions, and return.
sys.stdout.write(stdout)
sys.stderr.write(stderr)
if exception:
raise self.RemoteException('%s raised by remote client: %s' %
(exception[0], exception[1]))
return result
class PyUITestSuite(pyautolib.PyUITestSuiteBase, unittest.TestSuite):
"""Base TestSuite for PyAuto UI tests."""
def __init__(self, args):
pyautolib.PyUITestSuiteBase.__init__(self, args)
# Figure out path to chromium binaries
browser_dir = os.path.normpath(os.path.dirname(pyautolib.__file__))
logging.debug('Loading pyauto libs from %s', browser_dir)
self.InitializeWithPath(pyautolib.FilePath(browser_dir))
os.environ['PATH'] = browser_dir + os.pathsep + os.environ['PATH']
unittest.TestSuite.__init__(self)
cr_source_root = os.path.normpath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
self.SetCrSourceRoot(pyautolib.FilePath(cr_source_root))
# Start http server, if needed.
global _OPTIONS
if _OPTIONS and not _OPTIONS.no_http_server:
self._StartHTTPServer()
if _OPTIONS and _OPTIONS.remote_host:
self._ConnectToRemoteHosts(_OPTIONS.remote_host.split(','))
def __del__(self):
# python unittest module is setup such that the suite gets deleted before
# the test cases, which is odd because our test cases depend on
# initializtions like exitmanager, autorelease pool provided by the
# suite. Forcibly delete the test cases before the suite.
del self._tests
pyautolib.PyUITestSuiteBase.__del__(self)
global _HTTP_SERVER
if _HTTP_SERVER:
self._StopHTTPServer()
global _CHROME_DRIVER_FACTORY
if _CHROME_DRIVER_FACTORY is not None:
_CHROME_DRIVER_FACTORY.Stop()
def _StartHTTPServer(self):
"""Start a local file server hosting data files over http://"""
global _HTTP_SERVER
assert not _HTTP_SERVER, 'HTTP Server already started'
http_data_dir = _OPTIONS.http_data_dir
http_server = pyautolib.TestServer(pyautolib.TestServer.TYPE_HTTP,
'127.0.0.1',
pyautolib.FilePath(http_data_dir))
assert http_server.Start(), 'Could not start http server'
_HTTP_SERVER = http_server
logging.debug('Started http server at "%s".', http_data_dir)
def _StopHTTPServer(self):
"""Stop the local http server."""
global _HTTP_SERVER
assert _HTTP_SERVER, 'HTTP Server not yet started'
assert _HTTP_SERVER.Stop(), 'Could not stop http server'
_HTTP_SERVER = None
logging.debug('Stopped http server.')
def _ConnectToRemoteHosts(self, addresses):
"""Connect to remote PyAuto instances using a RemoteProxy.
The RemoteHost instances must already be running."""
global _REMOTE_PROXY
assert not _REMOTE_PROXY, 'Already connected to a remote host.'
_REMOTE_PROXY = []
for address in addresses:
if address == 'localhost' or address == '127.0.0.1':
self._StartLocalRemoteHost()
_REMOTE_PROXY.append(_RemoteProxy((address, 7410)))
def _StartLocalRemoteHost(self):
"""Start a remote PyAuto instance on the local machine."""
# Add the path to our main class to the RemoteHost's
# environment, so it can load that class at runtime.
import __main__
main_path = os.path.dirname(__main__.__file__)
env = os.environ
if env.get('PYTHONPATH', None):
env['PYTHONPATH'] += ':' + main_path
else:
env['PYTHONPATH'] = main_path
# Run it!
subprocess.Popen([sys.executable, os.path.join(os.path.dirname(__file__),
'remote_host.py')], env=env)
class _GTestTextTestResult(unittest._TextTestResult):
"""A test result class that can print formatted text results to a stream.
Results printed in conformance with gtest output format, like:
[ RUN ] autofill.AutofillTest.testAutofillInvalid: "test desc."
[ OK ] autofill.AutofillTest.testAutofillInvalid
[ RUN ] autofill.AutofillTest.testFillProfile: "test desc."
[ OK ] autofill.AutofillTest.testFillProfile
[ RUN ] autofill.AutofillTest.testFillProfileCrazyCharacters: "Test."
[ OK ] autofill.AutofillTest.testFillProfileCrazyCharacters
"""
def __init__(self, stream, descriptions, verbosity):
unittest._TextTestResult.__init__(self, stream, descriptions, verbosity)
def _GetTestURI(self, test):
if sys.version_info[:2] <= (2, 4):
return '%s.%s' % (unittest._strclass(test.__class__),
test._TestCase__testMethodName)
return '%s.%s' % (unittest._strclass(test.__class__), test._testMethodName)
def getDescription(self, test):
return '%s: "%s"' % (self._GetTestURI(test), test.shortDescription())
def startTest(self, test):
unittest.TestResult.startTest(self, test)
self.stream.writeln('[ RUN ] %s' % self.getDescription(test))
def addSuccess(self, test):
unittest.TestResult.addSuccess(self, test)
self.stream.writeln('[ OK ] %s' % self._GetTestURI(test))
def addError(self, test, err):
unittest.TestResult.addError(self, test, err)
self.stream.writeln('[ ERROR ] %s' % self._GetTestURI(test))
def addFailure(self, test, err):
unittest.TestResult.addFailure(self, test, err)
self.stream.writeln('[ FAILED ] %s' % self._GetTestURI(test))
class PyAutoTextTestRunner(unittest.TextTestRunner):
"""Test Runner for PyAuto tests that displays results in textual format.
Results are displayed in conformance with gtest output.
"""
def __init__(self, verbosity=1):
unittest.TextTestRunner.__init__(self,
stream=sys.stderr,
verbosity=verbosity)
def _makeResult(self):
return _GTestTextTestResult(self.stream, self.descriptions, self.verbosity)
# Implementation inspired from unittest.main()
class Main(object):
"""Main program for running PyAuto tests."""
_options, _args = None, None
_tests_filename = 'PYAUTO_TESTS'
_platform_map = {
'win32': 'win',
'darwin': 'mac',
'linux2': 'linux',
'linux3': 'linux',
'chromeos': 'chromeos',
}
def __init__(self):
self._ParseArgs()
self._Run()
def _ParseArgs(self):
"""Parse command line args."""
parser = optparse.OptionParser()
parser.add_option(
'', '--channel-id', type='string', default='',
help='Name of channel id, if using named interface.')
parser.add_option(
'', '--chrome-flags', type='string', default='',
help='Flags passed to Chrome. This is in addition to the usual flags '
'like suppressing first-run dialogs, enabling automation. '
'See chrome/common/chrome_switches.cc for the list of flags '
'chrome understands.')
parser.add_option(
'', '--http-data-dir', type='string',
default=os.path.join('chrome', 'test', 'data'),
help='Relative path from which http server should serve files.')
parser.add_option(
'', '--list-missing-tests', action='store_true', default=False,
help='Print a list of tests not included in PYAUTO_TESTS, and exit')
parser.add_option(
'-L', '--list-tests', action='store_true', default=False,
help='List all tests, and exit.')
parser.add_option(
'--shard',
help='Specify sharding params. Example: 1/3 implies split the list of '
'tests into 3 groups of which this is the 1st.')
parser.add_option(
'', '--log-file', type='string', default=None,
help='Provide a path to a file to which the logger will log')
parser.add_option(
'', '--no-http-server', action='store_true', default=False,
help='Do not start an http server to serve files in data dir.')
parser.add_option(
'', '--remote-host', type='string', default=None,
help='Connect to remote hosts for remote automation. If "localhost" '
'"127.0.0.1" is specified, a remote host will be launched '
'automatically on the local machine.')
parser.add_option(
'', '--repeat', type='int', default=1,
help='Number of times to repeat the tests. Useful to determine '
'flakiness. Defaults to 1.')
parser.add_option(
'-S', '--suite', type='string', default='FULL',
help='Name of the suite to load. Defaults to "FULL".')
parser.add_option(
'-v', '--verbose', action='store_true', default=False,
help='Make PyAuto verbose.')
parser.add_option(
'-D', '--wait-for-debugger', action='store_true', default=False,
help='Block PyAuto on startup for attaching debugger.')
self._options, self._args = parser.parse_args()
global _OPTIONS
_OPTIONS = self._options # Export options so other classes can access.
# Set up logging. All log messages will be prepended with a timestamp.
format = '%(asctime)s %(levelname)-8s %(message)s'
level = logging.INFO
if self._options.verbose:
level=logging.DEBUG
logging.basicConfig(level=level, format=format,
filename=self._options.log_file)
if self._options.list_missing_tests:
self._ListMissingTests()
sys.exit(0)
def TestsDir(self):
"""Returns the path to dir containing tests.
This is typically the dir containing the tests description file.
This method should be overridden by derived class to point to other dirs
if needed.
"""
return os.path.dirname(__file__)
@staticmethod
def _ImportTestsFromName(name):
"""Get a list of all test names from the given string.
Args:
name: dot-separated string for a module, a test case or a test method.
Examples: omnibox (a module)
omnibox.OmniboxTest (a test case)
omnibox.OmniboxTest.testA (a test method)
Returns:
[omnibox.OmniboxTest.testA, omnibox.OmniboxTest.testB, ...]
"""
def _GetTestsFromTestCase(class_obj):
"""Return all test method names from given class object."""
return [class_obj.__name__ + '.' + x for x in dir(class_obj) if
x.startswith('test')]
def _GetTestsFromModule(module):
"""Return all test method names from the given module object."""
tests = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, (type, types.ClassType)) and
issubclass(obj, PyUITest) and obj != PyUITest):
tests.extend([module.__name__ + '.' + x for x in
_GetTestsFromTestCase(obj)])
return tests
module = None
# Locate the module
parts = name.split('.')
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy: raise
# We have the module. Pick the exact test method or class asked for.
parts = parts[1:]
obj = module
for part in parts:
obj = getattr(obj, part)
if type(obj) == types.ModuleType:
return _GetTestsFromModule(obj)
elif (isinstance(obj, (type, types.ClassType)) and
issubclass(obj, PyUITest) and obj != PyUITest):
return [module.__name__ + '.' + x for x in _GetTestsFromTestCase(obj)]
elif type(obj) == types.UnboundMethodType:
return [name]
else:
logging.warn('No tests in "%s"', name)
return []
def _ListMissingTests(self):
"""Print tests missing from PYAUTO_TESTS."""
# Fetch tests from all test scripts
all_test_files = filter(lambda x: x.endswith('.py'),
os.listdir(self.TestsDir()))
all_tests_modules = [os.path.splitext(x)[0] for x in all_test_files]
all_tests = reduce(lambda x, y: x + y,
map(self._ImportTestsFromName, all_tests_modules))
# Fetch tests included by PYAUTO_TESTS
pyauto_tests_file = os.path.join(self.TestsDir(), self._tests_filename)
pyauto_tests = reduce(lambda x, y: x + y,
map(self._ImportTestsFromName,
self._ExpandTestNamesFrom(pyauto_tests_file,
self._options.suite)))
for a_test in all_tests:
if a_test not in pyauto_tests:
print a_test
def _HasTestCases(self, module_string):
"""Determines if we have any PyUITest test case classes in the module
identified by |module_string|."""
module = __import__(module_string)
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, (type, types.ClassType)) and
issubclass(obj, PyUITest)):
return True
return False
def _ExpandTestNames(self, args):
"""Returns a list of tests loaded from the given args.
The given args can be either a module (ex: module1) or a testcase
(ex: module2.MyTestCase) or a test (ex: module1.MyTestCase.testX)
If empty, the tests in the already imported modules are loaded.
Args:
args: [module1, module2, module3.testcase, module4.testcase.testX]
These modules or test cases or tests should be importable
Returns:
a list of expanded test names. Example:
[
'module1.TestCase1.testA',
'module1.TestCase1.testB',
'module2.TestCase2.testX',
'module3.testcase.testY',
'module4.testcase.testX'
]
"""
if not args: # Load tests ourselves
if self._HasTestCases('__main__'): # we are running a test script
module_name = os.path.splitext(os.path.basename(sys.argv[0]))[0]
args.append(module_name) # run the test cases found in it
else: # run tests from the test description file
pyauto_tests_file = os.path.join(self.TestsDir(), self._tests_filename)
logging.debug("Reading %s", pyauto_tests_file)
if not os.path.exists(pyauto_tests_file):
logging.warn("%s missing. Cannot load tests.", pyauto_tests_file)
else:
args = self._ExpandTestNamesFrom(pyauto_tests_file,
self._options.suite)
return args
def _ExpandTestNamesFrom(self, filename, suite):
"""Load test names from the given file.
Args:
filename: the file to read the tests from
suite: the name of the suite to load from |filename|.
Returns:
a list of test names
[module.testcase.testX, module.testcase.testY, ..]
"""
suites = PyUITest.EvalDataFrom(filename)
platform = sys.platform
if PyUITest.IsChromeOS(): # check if it's chromeos
platform = 'chromeos'
assert platform in self._platform_map, '%s unsupported' % platform
def _NamesInSuite(suite_name):
logging.debug('Expanding suite %s', suite_name)
platforms = suites.get(suite_name)
names = platforms.get('all', []) + \
platforms.get(self._platform_map[platform], [])
ret = []
# Recursively include suites if any. Suites begin with @.
for name in names:
if name.startswith('@'): # Include another suite
ret.extend(_NamesInSuite(name[1:]))
else:
ret.append(name)
return ret
assert suite in suites, '%s: No such suite in %s' % (suite, filename)
all_names = _NamesInSuite(suite)
args = []
excluded = []
# Find all excluded tests. Excluded tests begin with '-'.
for name in all_names:
if name.startswith('-'): # Exclude
excluded.extend(self._ImportTestsFromName(name[1:]))
else:
args.extend(self._ImportTestsFromName(name))
for name in excluded:
if name in args:
args.remove(name)
else:
logging.warn('Cannot exclude %s. Not included. Ignoring', name)
if excluded:
logging.debug('Excluded %d test(s): %s', len(excluded), excluded)
return args
def _Run(self):
"""Run the tests."""
if self._options.wait_for_debugger:
raw_input('Attach debugger to process %s and hit <enter> ' % os.getpid())
suite_args = [sys.argv[0]]
chrome_flags = self._options.chrome_flags
# Set CHROME_HEADLESS. It enables crash reporter on posix.
os.environ['CHROME_HEADLESS'] = '1'
os.environ['EXTRA_CHROME_FLAGS'] = chrome_flags
test_names = self._ExpandTestNames(self._args)
# Shard, if requested (--shard).
if self._options.shard:
matched = re.match('(\d+)/(\d+)', self._options.shard)
if not matched:
print >>sys.stderr, 'Invalid sharding params: %s' % self._options.shard
sys.exit(1)
shard_index = int(matched.group(1)) - 1
num_shards = int(matched.group(2))
if shard_index < 0 or shard_index >= num_shards:
print >>sys.stderr, 'Invalid sharding params: %s' % self._options.shard
sys.exit(1)
test_names = pyauto_utils.Shard(test_names, shard_index, num_shards)
test_names *= self._options.repeat
logging.debug("Loading %d tests from %s", len(test_names), test_names)
if self._options.list_tests: # List tests and exit
for name in test_names:
print name
sys.exit(0)
pyauto_suite = PyUITestSuite(suite_args)
loaded_tests = unittest.defaultTestLoader.loadTestsFromNames(test_names)
pyauto_suite.addTests(loaded_tests)
verbosity = 1
if self._options.verbose:
verbosity = 2
result = PyAutoTextTestRunner(verbosity=verbosity).run(pyauto_suite)
del loaded_tests # Need to destroy test cases before the suite
del pyauto_suite
successful = result.wasSuccessful()
if not successful:
pyauto_tests_file = os.path.join(self.TestsDir(), self._tests_filename)
print >>sys.stderr, 'Tests can be disabled by editing %s. ' \
'Ref: %s' % (pyauto_tests_file, _PYAUTO_DOC_URL)
sys.exit(not successful)
if __name__ == '__main__':
Main()
| bsd-3-clause | 3,060,942,454,470,267,000 | 35.915046 | 80 | 0.635997 | false |
thuma/sestationinfo | stationinfo.py | 1 | 1600 | import urllib2
import json
files = '''blataget-gtfs.csv
blekingetrafiken-gtfs.csv
dalatrafik-gtfs.csv
gotlandskommun-gtfs.csv
hallandstrafiken-gtfs.csv
jonkopingslanstrafik-gtfs.csv
kalmarlanstrafik-gtfs.csv
lanstrafikenkronoberg-gtfs.csv
localdata-gtfs.csv
masexpressen.csv
nettbuss-gtfs.csv
nsb-gtfs.csv
ostgotatrafiken-gtfs.csv
pagelinks-gtfs.csv
peopletravelgrouop.csv
rt90cords-gtfs.csv
skanerafiken-gtfs.csv
sl-gtfs.csv
swebus-gtfs.csv
tib-gtfs.csv
treminalmaps-gtfs.csv
trv-gtfs.csv
ul-gtfs.csv
vasttrafik-gtfs.csv
xtrafik-gtfs.csv'''
data = files.split("\n")
print data
alldata = {}
for filename in data:
alldata[filename] = {}
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/'+filename)
downloaded = response.read().split("\n")
rubriker = downloaded[0].split(";")
downloaded[0] = downloaded[1]
for row in downloaded:
parts = row.split(";")
alldata[filename][parts[0]] = {}
for i in range(len(parts)):
alldata[filename][parts[0]][rubriker[i]] = parts[i]
print alldata['hallandstrafiken-gtfs.csv']['7400110']
'''
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/treminalmaps-gtfs.csv')
maps = response.read()
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/treminalmaps-gtfs.csv')
maps = response.read()
response = urllib2.urlopen('https://github.com/thuma/Transit-Stop-Identifier-Conversions-Sweden/raw/master/treminalmaps-gtfs.csv')
maps = response.read()''' | gpl-2.0 | 7,348,605,985,020,554,000 | 28.109091 | 130 | 0.754375 | false |
JasonBristol/spor-ct | spor/research/models.py | 1 | 1571 | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToFill
from django.utils.text import slugify
class Project(models.Model):
title = models.CharField(max_length=50)
tagline = models.CharField(max_length=255)
description = models.TextField()
author = models.ForeignKey(User)
thumbnail = models.ImageField(upload_to="research/img")
thumbnail_200x100 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(200, 100)], format='PNG', options={'quality': 100})
thumbnail_500x300 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(500, 300)], format='PNG', options={'quality': 100})
thumbnail_700x400 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(700, 400)], format='PNG', options={'quality': 100})
thumbnail_750x500 = ImageSpecField(source='thumbnail', processors=[ResizeToFill(750, 500)], format='PNG', options={'quality': 100})
document = models.FileField(upload_to="research/{0}/".format(1), blank=True)
date_published = models.DateTimeField(auto_now=True)
related_projects = models.ManyToManyField('self', blank=True)
publish = models.BooleanField(default=False)
slug = models.SlugField(unique=True, help_text="Only change this if you know what you are doing")
def __unicode__(self):
return self.title
def save(self, *args, **kw):
self.slug = slugify(self.title)
super(Project, self).save(*args, **kw)
| mit | 6,532,919,822,770,132,000 | 48.09375 | 135 | 0.724379 | false |
rboman/progs | sandbox/tkinter/playing_with_tkinter.py | 1 | 4424 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from tkinter import *
from future import standard_library
standard_library.install_aliases()
# nx.set("20")
def sortir():
root.quit()
root = Tk()
root.title('Parameters')
ni = IntVar()
ni.set(50)
nx = StringVar()
nx.set("10")
frame1 = Frame(root)
lab1 = Label(frame1, text="Mailles selon X (nx)", relief=SUNKEN)
# ou relief=RAISED, SUNKEN, FLAT, RIDGE, GROOVE, and SOLID
lab1.pack(side=LEFT)
ent1 = Entry(frame1, textvariable=nx, width=5)
ent1.pack(side=LEFT)
frame1.pack(pady=5)
ny = StringVar()
ny.set("10")
frame2 = Frame(root)
lab2 = Label(frame2, text="Mailles selon Y (ny)", bg='red', fg='yellow')
lab2.pack(side=LEFT)
ent2 = Entry(frame2, textvariable=ny, width=10, state=DISABLED, relief=GROOVE)
# ou state=ACTIVE, NORMAL
ent2.pack(side=LEFT)
frame2.pack(pady=5)
frame3 = Frame(root)
lab3 = Label(frame3, text="Radius", borderwidth=5, font=('Arial', 12, 'bold'))
lab3.pack(side=LEFT)
ent3 = Entry(frame3, textvariable=ny, width=10, justify=RIGHT)
# ou justify=LEFT, RIGHT, CENTER
ent3.pack(side=LEFT)
frame3.pack(pady=5)
frame4 = Frame(root)
lab41 = Label(frame4, text="X Length")
lab41.grid(row=1, column=1)
ent41 = Entry(frame4, width=30)
ent41.grid(row=1, column=2, sticky=W)
lab42 = Label(frame4, text="Y Length")
lab42.grid(row=2, column=1)
ent42 = Entry(frame4, width=10)
ent42.insert(0, "blabla")
ent42.grid(row=2, column=2, sticky=E) # sticky= N,S,E,W ou NS, ou NW, etc
lab43 = Label(frame4, text="Un super long texte")
lab43.grid(row=3, column=1, columnspan=2)
btn = Button(frame4, text="End")
btn.grid(row=4, column=1, columnspan=2)
def stop(event):
print(' click!')
btn.configure(bg='red')
lab42.destroy()
ent42.delete(0, len(ent42.get()))
btn.bind('<Button-1>', stop)
frame4.pack()
def affiche(x):
print(x)
list = ["one", "two", "three"]
dict = {}
for num in list:
def do_this(x=num): return affiche(x)
dict[num] = Button(root, text=num, command=do_this)
dict[num].pack()
but = Button(root, text="Start", command=sortir)
but.pack()
root.bind('q', stop)
root.bind('<Escape>', stop)
# mouse: <Enter>,<Leave>,<Button-3>,<Double-Button-1>,<B1-Motion>,<ButtonRelease>,<Shift-Button-1>
# kb: <Key>,<KeyRelease>,<Return>,...
win2 = Toplevel(root)
win2.title("Toplevels")
win2.maxsize(width=300, height=200)
win2.minsize(width=150, height=100)
win2.resizable(width=YES, height=NO)
def printVal():
print(num_holder.get())
num_holder = IntVar()
rb1 = Radiobutton(win2, text="Five", variable=num_holder,
value=5, command=printVal)
rb2 = Radiobutton(win2, text="Three", variable=num_holder,
value=3, command=printVal)
rb1.pack()
rb2.pack()
def printVal2():
print(txt1_holder.get())
print(txt2_holder.get())
txt1_holder = StringVar()
txt2_holder = StringVar()
rb1 = Checkbutton(win2, text="Five", variable=txt1_holder,
onvalue="FiveOn", offvalue="FiveOff", command=printVal2)
rb2 = Checkbutton(win2, text="Three", variable=txt2_holder,
onvalue="ThreeOn", offvalue="ThreeOff", command=printVal2)
rb1.pack()
rb2.pack()
def printVal3(x):
print(list.curselection())
choices = ["Red", "Orange", "Yellow", "Green", "Blue", "Purple"]
list = Listbox(win2, height=2, selectmode=SINGLE)
list.pack()
for item in choices:
list.insert(END, item)
list.bind('<Button-1>', printVal3)
scroll = Scrollbar(win2, command=list.yview)
list.configure(yscrollcommand=scroll.set)
scroll.pack()
but = Button(win2, text=" ")
but.pack()
def printVal4(x):
print(scale.get())
but.configure(text=scale.get())
scale = Scale(win2, orient=HORIZONTAL, length=100,
from_=0, to=100, tickinterval=50,
command=printVal4)
scale.pack()
Label(win2, bitmap="warning", cursor="pirate").pack()
picture = PhotoImage(file="bouteille.gif")
Label(win2, image=picture, cursor="fleur").pack()
def message():
rt2 = Toplevel(root)
msg = Message(rt2, text="Here is the first line of text. "
"Here is the next line of text. "
"Now we are on line three. "
"Oooh, look mom, line four! "
"Okay, that's enough. Goodbye.", bg="white", fg="red")
msg.pack(fill=BOTH)
rt2.transient(root)
message()
root.mainloop()
root.withdraw()
# root.destroy()
# print 'nx=', ent1.get()
print('nx=', nx.get())
print('ny=', ny.get())
| apache-2.0 | 8,750,211,535,864,202,000 | 21.804124 | 98 | 0.654837 | false |
jittat/ku-eng-direct-admission | application/fields.py | 1 | 1109 | from django.db import models
class IntegerListField(models.Field):
"""
IntegerListField keeps a list of int as a comma-separated string.
>>> g = IntegerListField()
>>> g.get_db_prep_value([1,2,-1,20,30,40,-100])
'1,2,-1,20,30,40,-100'
>>> g.to_python('1,2,-10,3,4,-100,7')
[1,2,-10,3,4,-100,7]
"""
__metaclass__ = models.SubfieldBase
def db_type(self):
return 'text'
def to_python(self, value):
if isinstance(value, list):
return value
if value==None or value=='':
return []
else:
if value[0]=='[':
value = value[1:]
if value[-1]==']':
value = value[:-1]
return [ int(r) for r in value.split(',') ]
def get_db_prep_value(self, value):
return ','.join([str(r) for r in value])
# south introspection
from south.modelsinspector import add_introspection_rules
add_introspection_rules(
[(
[IntegerListField],
[],
{},
),
], ["^application\.fields\.IntegerListField"])
| agpl-3.0 | -7,671,050,940,817,247,000 | 23.644444 | 69 | 0.522092 | false |
p1c2u/openapi-core | tests/unit/unmarshalling/test_validate.py | 1 | 32347 | import datetime
from unittest import mock
import pytest
from openapi_core.extensions.models.models import Model
from openapi_core.spec.paths import SpecPath
from openapi_core.unmarshalling.schemas.exceptions import (
FormatterNotFoundError,
)
from openapi_core.unmarshalling.schemas.exceptions import InvalidSchemaValue
from openapi_core.unmarshalling.schemas.factories import (
SchemaUnmarshallersFactory,
)
from openapi_core.unmarshalling.schemas.util import build_format_checker
class TestSchemaValidate:
@pytest.fixture
def validator_factory(self):
def create_validator(schema):
format_checker = build_format_checker()
return SchemaUnmarshallersFactory(
format_checker=format_checker
).create(schema)
return create_validator
@pytest.mark.parametrize(
"schema_type",
[
"boolean",
"array",
"integer",
"number",
"string",
],
)
def test_null(self, schema_type, validator_factory):
spec = {
"type": schema_type,
}
schema = SpecPath.from_spec(spec)
value = None
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"schema_type",
[
"boolean",
"array",
"integer",
"number",
"string",
],
)
def test_nullable(self, schema_type, validator_factory):
spec = {
"type": schema_type,
"nullable": True,
}
schema = SpecPath.from_spec(spec)
value = None
result = validator_factory(schema).validate(value)
assert result is None
def test_string_format_custom_missing(self, validator_factory):
custom_format = "custom"
spec = {
"type": "string",
"format": custom_format,
}
schema = SpecPath.from_spec(spec)
value = "x"
with pytest.raises(FormatterNotFoundError):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [False, True])
def test_boolean(self, value, validator_factory):
spec = {
"type": "boolean",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 3.14, "true", [True, False]])
def test_boolean_invalid(self, value, validator_factory):
spec = {
"type": "boolean",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [(1, 2)])
def test_array_no_schema(self, value, validator_factory):
spec = {
"type": "array",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[1, 2]])
def test_array(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "integer",
},
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, 1, 3.14, "true", (3, 4)])
def test_array_invalid(self, value, validator_factory):
spec = {
"type": "array",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [1, 3])
def test_integer(self, value, validator_factory):
spec = {
"type": "integer",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, 3.14, "true", [1, 2]])
def test_integer_invalid(self, value, validator_factory):
spec = {
"type": "integer",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_integer_minimum_invalid(self, value, validator_factory):
spec = {
"type": "integer",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [4, 5, 6])
def test_integer_minimum(self, value, validator_factory):
spec = {
"type": "integer",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [4, 5, 6])
def test_integer_maximum_invalid(self, value, validator_factory):
spec = {
"type": "integer",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_integer_maximum(self, value, validator_factory):
spec = {
"type": "integer",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 2, 4])
def test_integer_multiple_of_invalid(self, value, validator_factory):
spec = {
"type": "integer",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [3, 6, 18])
def test_integer_multiple_of(self, value, validator_factory):
spec = {
"type": "integer",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 3.14])
def test_number(self, value, validator_factory):
spec = {
"type": "number",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, "true", [1, 3]])
def test_number_invalid(self, value, validator_factory):
spec = {
"type": "number",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_number_minimum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [3, 4, 5])
def test_number_minimum(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 2, 3])
def test_number_exclusive_minimum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
"exclusiveMinimum": True,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [4, 5, 6])
def test_number_exclusive_minimum(self, value, validator_factory):
spec = {
"type": "number",
"minimum": 3,
"exclusiveMinimum": True,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [4, 5, 6])
def test_number_maximum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [1, 2, 3])
def test_number_maximum(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [3, 4, 5])
def test_number_exclusive_maximum_invalid(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
"exclusiveMaximum": True,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [0, 1, 2])
def test_number_exclusive_maximum(self, value, validator_factory):
spec = {
"type": "number",
"maximum": 3,
"exclusiveMaximum": True,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [1, 2, 4])
def test_number_multiple_of_invalid(self, value, validator_factory):
spec = {
"type": "number",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [3, 6, 18])
def test_number_multiple_of(self, value, validator_factory):
spec = {
"type": "number",
"multipleOf": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", ["true", b"test"])
def test_string(self, value, validator_factory):
spec = {
"type": "string",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [False, 1, 3.14, [1, 3]])
def test_string_invalid(self, value, validator_factory):
spec = {
"type": "string",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
b"true",
"test",
False,
1,
3.14,
[1, 3],
datetime.datetime(1989, 1, 2),
],
)
def test_string_format_date_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "date",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
"1989-01-02",
"2018-01-02",
],
)
def test_string_format_date(self, value, validator_factory):
spec = {
"type": "string",
"format": "date",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"12345678-1234-5678-1234-567812345678",
],
)
def test_string_format_uuid(self, value, validator_factory):
spec = {
"type": "string",
"format": "uuid",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
b"true",
"true",
False,
1,
3.14,
[1, 3],
datetime.date(2018, 1, 2),
datetime.datetime(2018, 1, 2, 23, 59, 59),
],
)
def test_string_format_uuid_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "uuid",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
b"true",
"true",
False,
1,
3.14,
[1, 3],
"1989-01-02",
],
)
def test_string_format_datetime_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "date-time",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
"1989-01-02T00:00:00Z",
"2018-01-02T23:59:59Z",
],
)
@mock.patch(
"openapi_schema_validator._format." "DATETIME_HAS_STRICT_RFC3339", True
)
@mock.patch(
"openapi_schema_validator._format." "DATETIME_HAS_ISODATE", False
)
def test_string_format_datetime_strict_rfc3339(
self, value, validator_factory
):
spec = {
"type": "string",
"format": "date-time",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"1989-01-02T00:00:00Z",
"2018-01-02T23:59:59Z",
],
)
@mock.patch(
"openapi_schema_validator._format." "DATETIME_HAS_STRICT_RFC3339",
False,
)
@mock.patch(
"openapi_schema_validator._format." "DATETIME_HAS_ISODATE", True
)
def test_string_format_datetime_isodate(self, value, validator_factory):
spec = {
"type": "string",
"format": "date-time",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"true",
False,
1,
3.14,
[1, 3],
"1989-01-02",
"1989-01-02T00:00:00Z",
],
)
def test_string_format_binary_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "binary",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
b"stream",
b"text",
],
)
def test_string_format_binary(self, value, validator_factory):
spec = {
"type": "string",
"format": "binary",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
b"dGVzdA==",
"dGVzdA==",
],
)
def test_string_format_byte(self, value, validator_factory):
spec = {
"type": "string",
"format": "byte",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"tsssst",
b"tsssst",
b"tesddddsdsdst",
],
)
def test_string_format_byte_invalid(self, value, validator_factory):
spec = {
"type": "string",
"format": "byte",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
"test",
b"stream",
datetime.date(1989, 1, 2),
datetime.datetime(1989, 1, 2, 0, 0, 0),
],
)
def test_string_format_unknown(self, value, validator_factory):
unknown_format = "unknown"
spec = {
"type": "string",
"format": unknown_format,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(FormatterNotFoundError):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["", "a", "ab"])
def test_string_min_length_invalid(self, value, validator_factory):
spec = {
"type": "string",
"minLength": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["abc", "abcd"])
def test_string_min_length(self, value, validator_factory):
spec = {
"type": "string",
"minLength": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
"",
],
)
def test_string_max_length_invalid_schema(self, value, validator_factory):
spec = {
"type": "string",
"maxLength": -1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["ab", "abc"])
def test_string_max_length_invalid(self, value, validator_factory):
spec = {
"type": "string",
"maxLength": 1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["", "a"])
def test_string_max_length(self, value, validator_factory):
spec = {
"type": "string",
"maxLength": 1,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", ["foo", "bar"])
def test_string_pattern_invalid(self, value, validator_factory):
spec = {
"type": "string",
"pattern": "baz",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", ["bar", "foobar"])
def test_string_pattern(self, value, validator_factory):
spec = {
"type": "string",
"pattern": "bar",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", ["true", False, 1, 3.14, [1, 3]])
def test_object_not_an_object(self, value, validator_factory):
spec = {
"type": "object",
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
Model(),
],
)
def test_object_multiple_one_of(self, value, validator_factory):
one_of = [
{
"type": "object",
},
{
"type": "object",
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_different_type_one_of(self, value, validator_factory):
one_of = [
{
"type": "integer",
},
{
"type": "string",
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_no_one_of(self, value, validator_factory):
one_of = [
{
"type": "object",
"required": [
"test1",
],
"properties": {
"test1": {
"type": "string",
},
},
},
{
"type": "object",
"required": [
"test2",
],
"properties": {
"test2": {
"type": "string",
},
},
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{
"foo": "FOO",
},
{
"foo": "FOO",
"bar": "BAR",
},
],
)
def test_unambiguous_one_of(self, value, validator_factory):
one_of = [
{
"type": "object",
"required": [
"foo",
],
"properties": {
"foo": {
"type": "string",
},
},
"additionalProperties": False,
},
{
"type": "object",
"required": ["foo", "bar"],
"properties": {
"foo": {
"type": "string",
},
"bar": {
"type": "string",
},
},
"additionalProperties": False,
},
]
spec = {
"type": "object",
"oneOf": one_of,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_default_property(self, value, validator_factory):
spec = {
"type": "object",
"default": "value1",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_min_properties_invalid_schema(
self, value, validator_factory
):
spec = {
"type": "object",
"minProperties": 2,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_min_properties_invalid(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"minProperties": 4,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_min_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"minProperties": 1,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{},
],
)
def test_object_max_properties_invalid_schema(
self, value, validator_factory
):
spec = {
"type": "object",
"maxProperties": -1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_max_properties_invalid(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"maxProperties": 0,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"a": 1},
{"a": 1, "b": 2},
{"a": 1, "b": 2, "c": 3},
],
)
def test_object_max_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {k: {"type": "number"} for k in ["a", "b", "c"]},
"maxProperties": 3,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{"additional": 1},
],
)
def test_object_additional_properties(self, value, validator_factory):
spec = {
"type": "object",
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{"additional": 1},
],
)
def test_object_additional_properties_false(
self, value, validator_factory
):
spec = {
"type": "object",
"additionalProperties": False,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{"additional": 1},
],
)
def test_object_additional_properties_object(
self, value, validator_factory
):
additional_properties = {
"type": "integer",
}
spec = {
"type": "object",
"additionalProperties": additional_properties,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize("value", [[], [1], [1, 2]])
def test_list_min_items_invalid(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"minItems": 3,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[], [1], [1, 2]])
def test_list_min_items(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"minItems": 0,
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
[],
],
)
def test_list_max_items_invalid_schema(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"maxItems": -1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(InvalidSchemaValue):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[1, 2], [2, 3, 4]])
def test_list_max_items_invalid(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"maxItems": 1,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
@pytest.mark.parametrize("value", [[1, 2, 1], [2, 2]])
def test_list_unique_items_invalid(self, value, validator_factory):
spec = {
"type": "array",
"items": {
"type": "number",
},
"uniqueItems": True,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
@pytest.mark.parametrize(
"value",
[
{
"someint": 123,
},
{
"somestr": "content",
},
{
"somestr": "content",
"someint": 123,
},
],
)
def test_object_with_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {
"somestr": {
"type": "string",
},
"someint": {
"type": "integer",
},
},
}
schema = SpecPath.from_spec(spec)
result = validator_factory(schema).validate(value)
assert result is None
@pytest.mark.parametrize(
"value",
[
{
"somestr": {},
"someint": 123,
},
{
"somestr": ["content1", "content2"],
"someint": 123,
},
{
"somestr": 123,
"someint": 123,
},
{
"somestr": "content",
"someint": 123,
"not_in_scheme_prop": 123,
},
],
)
def test_object_with_invalid_properties(self, value, validator_factory):
spec = {
"type": "object",
"properties": {
"somestr": {
"type": "string",
},
"someint": {
"type": "integer",
},
},
"additionalProperties": False,
}
schema = SpecPath.from_spec(spec)
with pytest.raises(Exception):
validator_factory(schema).validate(value)
| bsd-3-clause | 4,296,732,595,900,197,400 | 25.866279 | 79 | 0.498532 | false |
freehackquest/backend | fhq-server/templates/tmpl_create_new_storage_update.py | 1 | 4081 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import random
import string
updatespath = "../src/storages/updates/"
fileslist = [f for f in os.listdir(updatespath) if os.path.isfile(os.path.join(updatespath, f))]
pattern = r'.*StorageUpdateBase.*\(.*"([a-zA-Z0-9]*)".*,.*"([a-zA-Z0-9]*)".*,.*\).*'
updates = []
for filename in fileslist:
filepath = os.path.join(updatespath, filename)
# print(filepath);
with open(filepath) as f:
line = f.readline()
while line:
line = line.strip()
if re.match(pattern, line):
versions = re.search(pattern, line, re.IGNORECASE)
if versions:
updates.append({
"from": versions.group(1),
"to": versions.group(2),
})
line = f.readline()
# for tests
'''
updates.append({
"from": "u0100",
"to": "615d8fddd",
})
updates.append({
"from": "615d8fddd",
"to": "995d8fddd",
})'''
# print all updates
# for v in updates:
# print("[" + v["from"] + "] -> [" + v["to"] + "]")
# find the ends in graph
end_points = []
max_weight = 0
def recoursive_search_endpoints(spoint, weight):
global updates, end_points, max_weight
found = False
for v in updates:
if v["from"] == spoint:
found = True
recoursive_search_endpoints(v["to"], weight + 1)
if not found:
if weight > max_weight:
max_weight = weight;
end_points.append({
"point": spoint,
"weight": weight
})
recoursive_search_endpoints("", 0)
print(end_points)
if len(end_points) == 0:
print("Not found updates")
exit(-1)
endpoint = ""
for i in end_points:
if i["weight"] == max_weight and endpoint == "":
endpoint = i["point"]
elif i["weight"] == max_weight and endpoint != "":
print("WARNING: Found points with same weights, will be used first. Ignored: " + i["point"])
print("Found point: " + endpoint + " weight: " + str(max_weight))
newpoint = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(10))
newpoint_upper = newpoint.upper();
endpoint_upper = endpoint.upper();
filename_h = "update_" + endpoint + "_" + newpoint + ".h"
filename_cpp = "update_" + endpoint + "_" + newpoint + ".cpp"
filename_h = os.path.join(updatespath, filename_h)
filename_cpp = os.path.join(updatespath, filename_cpp)
print("Generate header file: " + filename_h)
f_h = open(filename_h, 'w')
f_h.write("#ifndef UPDATE_" + endpoint_upper + "_" + newpoint_upper + "_H\n")
f_h.write("#define UPDATE_" + endpoint_upper + "_" + newpoint_upper + "_H\n")
f_h.write("\n")
f_h.write("#include <storages.h>\n")
f_h.write("\n")
f_h.write("class Update_" + endpoint + "_" + newpoint + " : public StorageUpdateBase {\n")
f_h.write(" public:\n")
f_h.write(" Update_" + endpoint + "_" + newpoint + "();\n")
f_h.write(" virtual bool custom(Storage *pStorage, StorageConnection *pConn, std::string &error);\n")
f_h.write("};\n")
f_h.write("\n")
f_h.write("#endif // UPDATE_" + endpoint_upper + "_" + newpoint_upper + "_H\n")
f_h.close();
print("Generate source file: " + filename_cpp)
f_cpp = open(filename_cpp, 'w')
f_cpp.write("#include \"update_" + endpoint + "_" + newpoint + ".h\"\n")
f_cpp.write("\n")
f_cpp.write("REGISTRY_STORAGE_UPDATE(Update_" + endpoint + "_" + newpoint + ")\n")
f_cpp.write("\n")
f_cpp.write("Update_" + endpoint + "_" + newpoint + "::Update_" + endpoint + "_" + newpoint + "()\n")
f_cpp.write(" : StorageUpdateBase(\"" + endpoint + "\", \"" + newpoint + "\", \"TODO\") {\n")
f_cpp.write(" \n")
f_cpp.write(" // fill the array with struct changes\n")
f_cpp.write("}\n")
f_cpp.write("\n")
f_cpp.write("bool Update_" + endpoint + "_" + newpoint + "::custom(Storage *pStorage, StorageConnection *pConn, std::string &error) {\n")
f_cpp.write(" // here you can migrate data of correction if not just return true;\n")
f_cpp.write(" return true;\n")
f_cpp.write("}\n")
f_cpp.close();
| mit | -4,647,844,123,428,140,000 | 30.152672 | 137 | 0.57976 | false |
dstroppa/openstack-smartos-nova-grizzly | nova/exception.py | 1 | 33666 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Nova base exception handling.
Includes a decorator for re-raising Nova-type exceptions.
SHOULD include dedicated exception logging.
"""
import functools
from oslo.config import cfg
import webob.exc
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import safe_utils
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='make exception message format errors fatal'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
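# When fatal_exception_format_errors is True, a malformed format string or a
# missing kwarg in an exception message raises instead of being logged and
# replaced with the raw message (see NovaException.__init__ below).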
class ConvertedException(webob.exc.WSGIHTTPException):
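    """Internal exception re-expressed as a webob WSGI HTTP error with an
    explicit status code, title and explanation.
    """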
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class ProcessExecutionError(IOError):
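    """Raised when an executed command fails; records the command, its
    exit code and the captured stdout/stderr for later diagnosis.
    """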
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _('Unexpected error while running command.')
if exit_code is None:
exit_code = '-'
message = _('%(description)s\nCommand: %(cmd)s\n'
'Exit code: %(exit_code)s\nStdout: %(stdout)r\n'
'Stderr: %(stderr)r') % locals()
IOError.__init__(self, message)
def _cleanse_dict(original):
    """Strip all keys containing '_pass' (e.g. admin_password, new_pass,
    rescue_pass) from a dict."""
    return dict((k, v) for k, v in original.iteritems() if "_pass" not in k)
def wrap_exception(notifier=None, publisher_id=None, event_type=None,
level=None):
"""This decorator wraps a method to catch any exceptions that may
get thrown. It logs the exception as well as optionally sending
it to the notification system.
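
    Illustrative usage (the notifier object and method shown here are
    hypothetical, not part of this module):

        @wrap_exception(notifier=my_notifier, publisher_id='compute.host1')
        def resize(self, context, instance_id, flavor_id):
            ...

    Any exception raised by the wrapped method is re-raised; if a
    notifier was supplied, it is first sent a payload containing the
    exception and the cleansed call arguments.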
"""
# TODO(sandy): Find a way to import nova.notifier.api so we don't have
# to pass it in as a parameter. Otherwise we get a cyclic import of
# nova.notifier.api -> nova.utils -> nova.exception :(
def inner(f):
def wrapped(self, context, *args, **kw):
# Don't store self or context in the payload, it now seems to
# contain confidential information.
try:
return f(self, context, *args, **kw)
            except Exception as e:
with excutils.save_and_reraise_exception():
if notifier:
payload = dict(exception=e)
call_dict = safe_utils.getcallargs(f, *args, **kw)
cleansed = _cleanse_dict(call_dict)
payload.update({'args': cleansed})
                        # Use temp vars so we don't shadow our
                        # outer definitions.
temp_level = level
if not temp_level:
temp_level = notifier.ERROR
temp_type = event_type
if not temp_type:
# If f has multiple decorators, they must use
# functools.wraps to ensure the name is
# propagated.
temp_type = f.__name__
notifier.notify(context, publisher_id, temp_type,
temp_level, payload)
return functools.wraps(f)(wrapped)
return inner
class NovaException(Exception):
"""Base Nova Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
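
    A minimal sketch (this subclass is illustrative, not defined in
    Nova):

        class WidgetNotFound(NovaException):
            message = _("Widget %(widget_id)s could not be found.")

        raise WidgetNotFound(widget_id=42)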
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
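    # 'safe' is intended to mark whether the formatted message may be
    # exposed to API end users; subclasses override it where appropriate.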
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
message = self.message
super(NovaException, self).__init__(message)
class EC2APIError(NovaException):
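    """Generic error returned through the EC2 API layer, carrying the
    raw message and an optional EC2 error code.
    """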
message = _("Unknown")
def __init__(self, message=None, code=None):
self.msg = message
self.code = code
outstr = '%s' % message
super(EC2APIError, self).__init__(outstr)
class EncryptionFailure(NovaException):
message = _("Failed to encrypt text: %(reason)s")
class DecryptionFailure(NovaException):
message = _("Failed to decrypt text: %(reason)s")
class VirtualInterfaceCreateException(NovaException):
message = _("Virtual Interface creation failed")
class VirtualInterfaceMacAddressException(NovaException):
message = _("5 attempts to create virtual interface"
"with unique mac address failed")
class GlanceConnectionFailed(NovaException):
message = _("Connection to glance host %(host)s:%(port)s failed: "
"%(reason)s")
class NotAuthorized(NovaException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotActive(NovaException):
message = _("Image %(image_id)s is not active.")
class ImageNotAuthorized(NovaException):
message = _("Not authorized for image %(image_id)s.")
class Invalid(NovaException):
message = _("Unacceptable parameters.")
code = 400
class InvalidBDM(Invalid):
message = _("Block Device Mapping is Invalid.")
class InvalidBDMSnapshot(InvalidBDM):
message = _("Block Device Mapping is Invalid: "
"failed to get snapshot %(id)s.")
class InvalidBDMVolume(InvalidBDM):
message = _("Block Device Mapping is Invalid: "
"failed to get volume %(id)s.")
class VolumeUnattached(Invalid):
message = _("Volume %(volume_id)s is not attached to anything")
class InvalidKeypair(Invalid):
message = _("Keypair data is invalid")
class InvalidRequest(Invalid):
message = _("The request is invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received") + ": %(reason)s"
class InvalidVolume(Invalid):
message = _("Invalid volume") + ": %(reason)s"
class InvalidMetadata(Invalid):
message = _("Invalid metadata") + ": %(reason)s"
class InvalidMetadataSize(Invalid):
message = _("Invalid metadata size") + ": %(reason)s"
class InvalidPortRange(Invalid):
message = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
class InvalidIpProtocol(Invalid):
message = _("Invalid IP protocol %(protocol)s.")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidCidr(Invalid):
message = _("Invalid cidr %(cidr)s.")
class InvalidUnicodeParameter(Invalid):
message = _("Invalid Parameter: "
"Unicode is not supported by the current database.")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAggregateAction(Invalid):
message = _("Cannot perform action '%(action)s' on aggregate "
"%(aggregate_id)s. Reason: %(reason)s.")
class InvalidGroup(Invalid):
message = _("Group not valid. Reason: %(reason)s")
class InvalidSortKey(Invalid):
message = _("Sort key supplied was not valid.")
class InstanceInvalidState(Invalid):
message = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
"%(method)s while the instance is in this state.")
class InstanceNotRunning(Invalid):
message = _("Instance %(instance_id)s is not running.")
class InstanceNotInRescueMode(Invalid):
message = _("Instance %(instance_id)s is not in rescue mode")
class InstanceNotRescuable(Invalid):
message = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
class InstanceNotReady(Invalid):
message = _("Instance %(instance_id)s is not ready")
class InstanceSuspendFailure(Invalid):
message = _("Failed to suspend instance") + ": %(reason)s"
class InstanceResumeFailure(Invalid):
message = _("Failed to resume instance: %(reason)s.")
class InstancePowerOnFailure(Invalid):
message = _("Failed to power on instance: %(reason)s.")
class InstancePowerOffFailure(Invalid):
message = _("Failed to power off instance: %(reason)s.")
class InstanceRebootFailure(Invalid):
message = _("Failed to reboot instance") + ": %(reason)s"
class InstanceTerminationFailure(Invalid):
message = _("Failed to terminate instance") + ": %(reason)s"
class InstanceDeployFailure(Invalid):
message = _("Failed to deploy instance") + ": %(reason)s"
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ComputeResourcesUnavailable(ServiceUnavailable):
message = _("Insufficient compute resources.")
class ComputeServiceUnavailable(ServiceUnavailable):
message = _("Compute service of %(host)s is unavailable at this time.")
class UnableToMigrateToSelf(Invalid):
message = _("Unable to migrate instance (%(instance_id)s) "
"to current host (%(host)s).")
class InvalidHypervisorType(Invalid):
message = _("The supplied hypervisor type of is invalid.")
class DestinationHypervisorTooOld(Invalid):
message = _("The instance requires a newer hypervisor version than "
"has been provided.")
class DestinationDiskExists(Invalid):
message = _("The supplied disk path (%(path)s) already exists, "
"it is expected not to exist.")
class InvalidDevicePath(Invalid):
message = _("The supplied device path (%(path)s) is invalid.")
class DevicePathInUse(Invalid):
message = _("The supplied device path (%(path)s) is in use.")
class DeviceIsBusy(Invalid):
message = _("The supplied device (%(device)s) is busy.")
class InvalidCPUInfo(Invalid):
message = _("Unacceptable CPU info") + ": %(reason)s"
class InvalidIpAddressError(Invalid):
message = _("%(address)s is not a valid IP v4/6 address.")
class InvalidVLANTag(Invalid):
message = _("VLAN tag is not appropriate for the port group "
"%(bridge)s. Expected VLAN tag is %(tag)s, "
"but the one associated with the port group is %(pgroup)s.")
class InvalidVLANPortGroup(Invalid):
message = _("vSwitch which contains the port group %(bridge)s is "
"not associated with the desired physical adapter. "
"Expected vSwitch is %(expected)s, but the one associated "
"is %(actual)s.")
class InvalidDiskFormat(Invalid):
message = _("Disk format %(disk_format)s is not acceptable")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class InstanceUnacceptable(Invalid):
message = _("Instance %(instance_id)s is unacceptable: %(reason)s")
class InvalidEc2Id(Invalid):
message = _("Ec2 id %(ec2_id)s is unacceptable.")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class InvalidID(Invalid):
message = _("Invalid ID received %(id)s.")
class InvalidPeriodicTaskArg(Invalid):
message = _("Unexpected argument for periodic task creation: %(arg)s.")
class ConstraintNotMet(NovaException):
message = _("Constraint not met.")
code = 412
class NotFound(NovaException):
message = _("Resource could not be found.")
code = 404
class AgentBuildNotFound(NotFound):
message = _("No agent-build associated with id %(id)s.")
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class DiskNotFound(NotFound):
message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
message = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ImageNotFoundEC2(ImageNotFound):
message = _("Image %(image_id)s could not be found. The nova EC2 API "
"assigns image ids dynamically when they are listed for the "
"first time. Have you listed image ids since adding this "
"image?")
class ProjectNotFound(NotFound):
message = _("Project %(project_id)s could not be found.")
class StorageRepositoryNotFound(NotFound):
message = _("Cannot find SR to read/write VDI.")
class NetworkDuplicated(NovaException):
message = _("Network %(network_id)s is duplicated.")
class NetworkInUse(NovaException):
message = _("Network %(network_id)s is still in use.")
class NetworkNotCreated(NovaException):
message = _("%(req)s is required to create a network.")
class NetworkNotFound(NotFound):
message = _("Network %(network_id)s could not be found.")
class PortNotFound(NotFound):
message = _("Port id %(port_id)s could not be found.")
class NetworkNotFoundForBridge(NetworkNotFound):
message = _("Network could not be found for bridge %(bridge)s")
class NetworkNotFoundForUUID(NetworkNotFound):
message = _("Network could not be found for uuid %(uuid)s")
class NetworkNotFoundForCidr(NetworkNotFound):
message = _("Network could not be found with cidr %(cidr)s.")
class NetworkNotFoundForInstance(NetworkNotFound):
message = _("Network could not be found for instance %(instance_id)s.")
class NoNetworksFound(NotFound):
message = _("No networks defined.")
class NetworkNotFoundForProject(NotFound):
message = _("Either Network uuid %(network_uuid)s is not present or "
"is not assigned to the project %(project_id)s.")
class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
class PortInUse(NovaException):
message = _("Port %(port_id)s is still in use.")
class PortNotUsable(NovaException):
message = _("Port %(port_id)s not usable for instance %(instance)s.")
class PortNotFree(NovaException):
message = _("No free port available for instance %(instance)s.")
class FixedIpNotFound(NotFound):
message = _("No fixed IP associated with id %(id)s.")
class FixedIpNotFoundForAddress(FixedIpNotFound):
message = _("Fixed ip not found for address %(address)s.")
class FixedIpNotFoundForInstance(FixedIpNotFound):
message = _("Instance %(instance_uuid)s has zero fixed ips.")
class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
message = _("Network host %(host)s has zero fixed ips "
"in network %(network_id)s.")
class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
message = _("Instance %(instance_uuid)s doesn't have fixed ip '%(ip)s'.")
class FixedIpNotFoundForNetwork(FixedIpNotFound):
message = _("Fixed IP address (%(address)s) does not exist in "
"network (%(network_uuid)s).")
class FixedIpAlreadyInUse(NovaException):
message = _("Fixed IP address %(address)s is already in use on instance "
"%(instance_uuid)s.")
class FixedIpAssociatedWithMultipleInstances(NovaException):
message = _("More than one instance is associated with fixed ip address "
"'%(address)s'.")
class FixedIpInvalid(Invalid):
message = _("Fixed IP address %(address)s is invalid.")
class NoMoreFixedIps(NovaException):
message = _("Zero fixed ips available.")
class NoFixedIpsDefined(NotFound):
message = _("Zero fixed ips could be found.")
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
pass
class FloatingIpExists(Duplicate):
message = _("Floating ip %(address)s already exists.")
class FloatingIpNotFound(NotFound):
message = _("Floating ip not found for id %(id)s.")
class FloatingIpDNSExists(Invalid):
message = _("The DNS entry %(name)s already exists in domain %(domain)s.")
class FloatingIpNotFoundForAddress(FloatingIpNotFound):
message = _("Floating ip not found for address %(address)s.")
class FloatingIpNotFoundForHost(FloatingIpNotFound):
message = _("Floating ip not found for host %(host)s.")
class FloatingIpMultipleFoundForAddress(NovaException):
message = _("Multiple floating ips are found for address %(address)s.")
class FloatingIpPoolNotFound(NotFound):
message = _("Floating ip pool not found.")
safe = True
class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
safe = True
class FloatingIpAssociated(NovaException):
message = _("Floating ip %(address)s is associated.")
class FloatingIpNotAssociated(NovaException):
message = _("Floating ip %(address)s is not associated.")
class NoFloatingIpsDefined(NotFound):
message = _("Zero floating ips exist.")
class NoFloatingIpInterface(NotFound):
message = _("Interface %(interface)s not found.")
class CannotDisassociateAutoAssignedFloatingIP(NovaException):
message = _("Cannot disassociate auto assigined floating ip")
class KeypairNotFound(NotFound):
message = _("Keypair %(name)s not found for user %(user_id)s")
class CertificateNotFound(NotFound):
message = _("Certificate %(certificate_id)s not found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class ComputeHostNotFound(HostNotFound):
message = _("Compute host %(host)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(NovaException):
message = _("Quota exceeded for resources: %(overs)s")
class SecurityGroupNotFound(NotFound):
message = _("Security group %(security_group_id)s not found.")
class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
message = _("Security group %(security_group_id)s not found "
"for project %(project_id)s.")
class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
message = _("Security group with rule %(rule_id)s not found.")
class SecurityGroupExistsForInstance(Invalid):
message = _("Security group %(security_group_id)s is already associated"
" with the instance %(instance_id)s")
class SecurityGroupNotExistsForInstance(Invalid):
message = _("Security group %(security_group_id)s is not associated with"
" the instance %(instance_id)s")
class SecurityGroupDefaultRuleNotFound(Invalid):
message = _("Security group default rule (%rule_id)s not found.")
class SecurityGroupCannotBeApplied(Invalid):
message = _("Network requires port_security_enabled and subnet associated"
" in order to apply security groups.")
class NoUniqueMatch(NovaException):
message = _("No Unique Match Found.")
code = 409
class MigrationNotFound(NotFound):
message = _("Migration %(migration_id)s could not be found.")
class MigrationNotFoundByStatus(MigrationNotFound):
message = _("Migration not found for instance %(instance_id)s "
"with status %(status)s.")
class ConsolePoolNotFound(NotFound):
message = _("Console pool %(pool_id)s could not be found.")
class ConsolePoolNotFoundForHostType(NotFound):
message = _("Console pool of type %(console_type)s "
"for compute host %(compute_host)s "
"on proxy host %(host)s not found.")
class ConsoleNotFound(NotFound):
message = _("Console %(console_id)s could not be found.")
class ConsoleNotFoundForInstance(ConsoleNotFound):
message = _("Console for instance %(instance_uuid)s could not be found.")
class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
message = _("Console for instance %(instance_uuid)s "
"in pool %(pool_id)s could not be found.")
class ConsoleTypeInvalid(Invalid):
message = _("Invalid console type %(console_type)s")
class InstanceTypeNotFound(NotFound):
message = _("Instance type %(instance_type_id)s could not be found.")
class InstanceTypeNotFoundByName(InstanceTypeNotFound):
message = _("Instance type with name %(instance_type_name)s "
"could not be found.")
class FlavorNotFound(NotFound):
message = _("Flavor %(flavor_id)s could not be found.")
class FlavorAccessNotFound(NotFound):
message = _("Flavor access not found for %(flavor_id)s / "
"%(project_id)s combination.")
class CellNotFound(NotFound):
message = _("Cell %(cell_name)s doesn't exist.")
class CellRoutingInconsistency(NovaException):
message = _("Inconsistency in cell routing: %(reason)s")
class CellServiceAPIMethodNotFound(NotFound):
message = _("Service API method not found: %(detail)s")
class CellTimeout(NotFound):
message = _("Timeout waiting for response from cell")
class CellMaxHopCountReached(NovaException):
message = _("Cell message has reached maximum hop count: %(hop_count)s")
class NoCellsAvailable(NovaException):
message = _("No cells available matching scheduling criteria.")
class CellError(NovaException):
message = _("Exception received during cell processing: %(exc_name)s.")
class InstanceUnknownCell(NotFound):
message = _("Cell is not known for instance %(instance_uuid)s")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerCostFunctionNotFound(NotFound):
message = _("Scheduler cost function %(cost_fn_str)s could"
" not be found.")
class SchedulerWeightFlagNotFound(NotFound):
message = _("Scheduler weight flag not found: %(flag_name)s")
class InstanceMetadataNotFound(NotFound):
message = _("Instance %(instance_uuid)s has no metadata with "
"key %(metadata_key)s.")
class InstanceSystemMetadataNotFound(NotFound):
message = _("Instance %(instance_uuid)s has no system metadata with "
"key %(metadata_key)s.")
class InstanceTypeExtraSpecsNotFound(NotFound):
message = _("Instance Type %(instance_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class NoFilesFound(NotFound):
message = _("Zero files could be found.")
class SwitchNotFoundForNetworkAdapter(NotFound):
message = _("Virtual switch associated with the "
"network adapter %(adapter)s not found.")
class NetworkAdapterNotFound(NotFound):
message = _("Network adapter %(adapter)s could not be found.")
class ClassNotFound(NotFound):
message = _("Class %(class_name)s could not be found: %(exception)s")
class NotAllowed(NovaException):
message = _("Action not allowed.")
class ImageRotationNotAllowed(NovaException):
message = _("Rotation is not allowed for snapshots")
class RotationRequiredForBackup(NovaException):
message = _("Rotation param is required for backup image_type")
class KeyPairExists(Duplicate):
message = _("Key pair %(key_name)s already exists.")
class InstanceExists(Duplicate):
message = _("Instance %(name)s already exists.")
class InstanceTypeExists(Duplicate):
message = _("Instance Type with name %(name)s already exists.")
class InstanceTypeIdExists(Duplicate):
message = _("Instance Type with ID %(flavor_id)s already exists.")
class FlavorAccessExists(Duplicate):
message = _("Flavor access alreay exists for flavor %(flavor_id)s "
"and project %(project_id)s combination.")
class InvalidSharedStorage(NovaException):
message = _("%(path)s is not on shared storage: %(reason)s")
class InvalidLocalStorage(NovaException):
message = _("%(path)s is not on local storage: %(reason)s")
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
# NOTE(johannes): NotFound should only be used when a 404 error is
# appropriate to be returned
class ConfigNotFound(NovaException):
message = _("Could not find config at %(path)s")
class PasteAppNotFound(NovaException):
message = _("Could not load paste app '%(name)s' from %(path)s")
class CannotResizeToSameFlavor(NovaException):
message = _("When resizing, instances must change flavor!")
class ResizeError(NovaException):
message = _("Resize error: %(reason)s")
class ImageTooLarge(NovaException):
message = _("Image is larger than instance type allows")
class InstanceTypeMemoryTooSmall(NovaException):
message = _("Instance type's memory is too small for requested image.")
class InstanceTypeDiskTooSmall(NovaException):
message = _("Instance type's disk is too small for requested image.")
class InsufficientFreeMemory(NovaException):
message = _("Insufficient free memory on compute node to start %(uuid)s.")
class CouldNotFetchMetrics(NovaException):
message = _("Could not fetch bandwidth/cpu/disk metrics for this host.")
class NoValidHost(NovaException):
message = _("No valid host was found. %(reason)s")
class QuotaError(NovaException):
message = _("Quota exceeded") + ": code=%(code)s"
code = 413
headers = {'Retry-After': 0}
safe = True
class TooManyInstances(QuotaError):
message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)d of %(allowed)d %(resource)s")
class FloatingIpLimitExceeded(QuotaError):
message = _("Maximum number of floating ips exceeded")
class MetadataLimitExceeded(QuotaError):
message = _("Maximum number of metadata items exceeds %(allowed)d")
class OnsetFileLimitExceeded(QuotaError):
message = _("Personality file limit exceeded")
class OnsetFilePathLimitExceeded(QuotaError):
message = _("Personality file path too long")
class OnsetFileContentLimitExceeded(QuotaError):
message = _("Personality file content too long")
class KeypairLimitExceeded(QuotaError):
message = _("Maximum number of key pairs exceeded")
class SecurityGroupLimitExceeded(QuotaError):
message = _("Maximum number of security groups or rules exceeded")
class AggregateError(NovaException):
message = _("Aggregate %(aggregate_id)s: action '%(action)s' "
"caused an error: %(reason)s.")
class AggregateNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s could not be found.")
class AggregateNameExists(Duplicate):
message = _("Aggregate %(aggregate_name)s already exists.")
class AggregateHostNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s has no host %(host)s.")
class AggregateMetadataNotFound(NotFound):
message = _("Aggregate %(aggregate_id)s has no metadata with "
"key %(metadata_key)s.")
class AggregateHostExists(Duplicate):
message = _("Aggregate %(aggregate_id)s already has host %(host)s.")
class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
class InstancePasswordSetFailed(NovaException):
message = _("Failed to set admin password on %(instance)s "
"because %(reason)s")
safe = True
class DuplicateVlan(Duplicate):
message = _("Detected existing vlan with id %(vlan)d")
class InstanceNotFound(NotFound):
message = _("Instance %(instance_id)s could not be found.")
class InstanceInfoCacheNotFound(NotFound):
message = _("Info cache for instance %(instance_uuid)s could not be "
"found.")
class NodeNotFound(NotFound):
message = _("Node %(node_id)s could not be found.")
class NodeNotFoundByUUID(NotFound):
message = _("Node with UUID %(node_uuid)s could not be found.")
class MarkerNotFound(NotFound):
message = _("Marker %(marker)s could not be found.")
class InvalidInstanceIDMalformed(Invalid):
message = _("Invalid id: %(val)s (expecting \"i-...\").")
class CouldNotFetchImage(NovaException):
message = _("Could not fetch image %(image_id)s")
class CouldNotUploadImage(NovaException):
message = _("Could not upload image %(image_id)s")
class TaskAlreadyRunning(NovaException):
message = _("Task %(task_name)s is already running on host %(host)s")
class TaskNotRunning(NovaException):
message = _("Task %(task_name)s is not running on host %(host)s")
class InstanceIsLocked(InstanceInvalidState):
message = _("Instance %(instance_uuid)s is locked")
class ConfigDriveMountFailed(NovaException):
message = _("Could not mount vfat config drive. %(operation)s failed. "
"Error: %(error)s")
class ConfigDriveUnknownFormat(NovaException):
message = _("Unknown config drive format %(format)s. Select one of "
"iso9660 or vfat.")
class InterfaceAttachFailed(Invalid):
message = _("Failed to attach network adapter device to %(instance)s")
class InterfaceDetachFailed(Invalid):
message = _("Failed to detach network adapter device from %(instance)s")
class InstanceUserDataTooLarge(NovaException):
message = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")
class InstanceUserDataMalformed(NovaException):
message = _("User data needs to be valid base 64.")
class UnexpectedTaskStateError(NovaException):
message = _("unexpected task state: expecting %(expected)s but "
"the actual state is %(actual)s")
class InstanceActionNotFound(NovaException):
message = _("Action for request_id %(request_id)s on instance"
" %(instance_uuid)s not found")
class InstanceActionEventNotFound(NovaException):
message = _("Event %(event)s not found for action id %(action_id)s")
class CryptoCAFileNotFound(FileNotFound):
message = _("The CA file for %(project)s could not be found")
class CryptoCRLFileNotFound(FileNotFound):
message = _("The CRL file for %(project)s could not be found")
class InstanceRecreateNotSupported(Invalid):
message = _('Instance recreate is not implemented by this virt driver.')
class ServiceGroupUnavailable(NovaException):
message = _("The service from servicegroup driver %(driver) is "
"temporarily unavailable.")
class DBNotAllowed(NovaException):
message = _('%(binary)s attempted direct database access which is '
'not allowed by policy')
class UnsupportedVirtType(Invalid):
message = _("Virtualization type '%(virt)s' is not supported by "
"this compute driver")
class UnsupportedHardware(Invalid):
message = _("Requested hardware '%(model)s' is not supported by "
"the '%(virt)s' virt driver")
| apache-2.0 | -3,833,240,339,379,516,400 | 27.267003 | 79 | 0.67335 | false |
jiangzhengshen/Interesting | Crawler/crawler_enhanced.py | 1 | 10134 | import argparse
import hashlib
import logging
import os
import queue
import socket
from urllib.parse import quote, urlsplit
from urllib.request import Request, urlopen
from urllib.error import URLError
from pyquery import PyQuery
''' Crawl web pages according to the input URL and crawl depth.
For PHP pages the real site address has to be resolved first.
next:
parallelize the crawl
support resuming an interrupted crawl
URLCrawler: store the results as bookmark-style HTML
'''
class Crawler(object):
def __init__(self, args):
        # logging setup
log_file = 'crawler.log'
logging.basicConfig(filename=log_file,
format='%(asctime)s -> %(levelname)s %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.DEBUG)
logging.info('\n=================== New Session =====================')
        ''' Set a timeout (in seconds) on the whole socket layer; any later socket use need not set it again. '''
socket.setdefaulttimeout(30)
        ''' Parameters that never change are prefixed with "_" '''
self._init_urls = args.init_urls.split(";")
self._depth = args.depth
self._out_dir = args.out_dir if args.out_dir[-1] in ['/', '\\'] else args.out_dir + '/'
if not os.path.exists(self._out_dir):
os.mkdir(self._out_dir)
self.current_depth = 0
        self.url_queue = queue.Queue()  # queue of URLs to crawl; entries are (url, depth)
for url in self._init_urls:
self.url_queue.put((url, self.current_depth))
        self.cached_urls = {}  # raw URLs of all crawled pages; maps url -> [count, depth]
@staticmethod
def __get_html(url):
try:
return PyQuery(url=url, parser='html')
except Exception as e:
logging.warning('PyQuery: %s : %s\n\tURL: %s', type(e), e, url)
return None
@staticmethod
def __get_real_url(raw_url):
try:
'''
            Mirrors the characters that JavaScript's encodeURI leaves unencoded.
            escape leaves 69 characters unencoded: *,+,-,.,/,@,_,0-9,a-z,A-Z
            encodeURI leaves 82 characters unencoded: !,#,$,&,',(,),*,+,,,-,.,/,:,;,=,?,@,_,~,0-9,a-z,A-Z
            encodeURIComponent leaves 71 characters unencoded: !, ',(,),*,-,.,_,~,0-9,a-z,A-Z
'''
url = quote(raw_url, safe='!#$&()*+,-./:;=?@_~\'', encoding='utf-8')
req = Request(url)
response = urlopen(req)
new_url = response.geturl()
response.close()
return new_url
except URLError as e:
logging.warning('Request: URLError: %s\n\tRaw URL: %s', e.reason, raw_url)
return ''
except Exception as e:
logging.warning('Request: %s : %s\n\tRaw URL: %s', type(e), e, raw_url)
return ''
def __extract_url(self, html_pyquery):
""" extract all the urls from html, except for the cached urls """
try:
html_pyquery.make_links_absolute()
all_urls = html_pyquery('a').map(lambda i, element: PyQuery(element)('a').attr('href'))
url_list = set()
for url in all_urls:
real_url = self.__get_real_url(url)
if not real_url:
continue
if real_url in self.cached_urls:
self.cached_urls[real_url][0] += 1
else:
url_list.add(real_url)
return list(url_list)
except Exception as e:
logging.warning('PyQuery: %s : %s', type(e), e)
return []
def __dump_cached_urls(self):
with open('cached_urls.txt', 'w') as dump_file:
for url in self.cached_urls:
dump_file.write(
url + '\t' + str(self.cached_urls[url][0]) + '\t' + str(self.cached_urls[url][1]) + '\n')
@staticmethod
def filter_url(urls):
""" could be personalized implemented """
return urls
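    # Example override (illustrative host name): a subclass could keep only
    # links on one site:
    #
    #   @staticmethod
    #   def filter_url(urls):
    #       return [u for u in urls if urlsplit(u)[1] == 'www.example.com']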
def save_content(self, url, depth, html_pyquery):
""" could be personalized implemented """
pass
def run(self):
while not self.url_queue.empty() and self.current_depth <= self._depth:
url_info = self.url_queue.get()
url = url_info[0]
depth = url_info[1]
self.current_depth = depth
logging.info('Depth: %d, URL: %s', depth, url)
''' get html content from the url '''
html_pyquery = self.__get_html(url)
if not html_pyquery:
continue
''' save the needed information from the html content, e.g., images, href, etc. '''
self.save_content(url, depth, html_pyquery)
''' cache the crawled urls '''
if url in self.cached_urls:
logging.warning('URL: %s -> There should not be cached urls in the queue, check your code !!!', url)
break
else:
self.cached_urls[url] = [1, depth]
''' extract urls from the html content, except for the cached urls '''
extracted_urls = []
if self.current_depth < self._depth:
extracted_urls = self.__extract_url(html_pyquery)
''' only retain the needed urls, and put them into the queue '''
filtered_urls = self.filter_url(extracted_urls)
for new_url in filtered_urls:
self.url_queue.put((new_url, depth + 1))
self.__dump_cached_urls()
class URLCrawler(Crawler):
def save_content(self, url, depth, html_pyquery):
parse_list = urlsplit(url)
host = parse_list[0] + '://' + parse_list[1]
with open(self._out_dir + 'savedLinks.txt', 'a') as outfile:
outfile.write(host + '\n')
class ImageCrawler(Crawler):
def save_content(self, url, depth, html_pyquery):
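        # Download every <img> source and name each saved file by the MD5 of
        # its content, so identical images reached via different URLs are
        # stored only once.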
all_imgs = html_pyquery('img').map(lambda i, element: PyQuery(element)('img').attr('src'))
for raw_url in all_imgs:
image_name = raw_url.split('/')[-1]
words = image_name.split('.')
suffix = ''
if len(words) > 1:
suffix = words[-1]
print(image_name + ', ' + suffix)
try:
img_url = quote(raw_url, safe='!#$&()*+,-./:;=?@_~\'', encoding='utf-8')
req = Request(img_url)
response = urlopen(req)
content = response.read()
m = hashlib.md5()
m.update(content)
content_hash = m.hexdigest()
filename = content_hash + '.' + suffix
if os.path.exists(self._out_dir + filename):
continue
with open(self._out_dir + filename, 'wb') as image_file:
image_file.write(content)
except URLError as e:
logging.warning('Request: URLError: %s\n\tRaw URL: %s', e.reason, raw_url)
continue
except Exception as e:
logging.warning('Request: %s : %s\n\tRaw URL: %s', type(e), e, raw_url)
continue
class ICML2019Crawler(Crawler):
"""
Parameters: -a https://icml.cc/Conferences/2019/Schedule?type=Poster -d 0
"""
@staticmethod
def heading_author(_, element):
entry = PyQuery(element)
heading = entry('.maincardBody').text()
author_list = entry('.maincardFooter').text().split(' · ')
return heading, author_list
def save_content(self, url, depth, html_pyquery):
all_entries = html_pyquery('.maincard')
heading_authors = all_entries.map(ICML2019Crawler.heading_author)
with open(self._out_dir + 'ICML2019.csv', 'w', encoding='utf8') as outfile:
for heading, author_list in heading_authors:
outfile.write('\t'.join([heading] + author_list) + '\n')
class CVPR2019Crawler(Crawler):
"""
Parameters: -a http://cvpr2019.thecvf.com/program/main_conference -d 0
"""
def save_content(self, url, depth, html_pyquery):
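        # Layout assumed from the code below: every table row flattens into
        # six <td> cells, where cell 0 carries the topic, 3 the title, 4 the
        # author list and 5 the paper id.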
topic_title_author_dic = {}
all_tables = html_pyquery('table')
for a_table in all_tables.items():
entries = a_table('tr').filter(lambda i, this: PyQuery(this).attr('class') != 'blue-bottom')
current_topic = ''
current_id = 0
current_title = ''
current_authors = []
for idx, field in enumerate(entries('td')):
if idx % 6 == 0:
if field.text is not None:
current_topic = field.text
elif idx % 6 == 3:
current_title = field.text
elif idx % 6 == 4:
current_authors = field.text.split(';')
elif idx % 6 == 5:
current_id = field.text
if current_id not in topic_title_author_dic.keys():
topic_title_author_dic[current_id] = [current_topic, current_title] + current_authors
with open(self._out_dir + 'CVPR2019.csv', 'w', encoding='utf8') as outfile:
for id, topic_title_author_list in topic_title_author_dic.items():
outfile.write('\t'.join([id] + topic_title_author_list) + '\n')
def main():
# if sys.modules['idlelib']:
# sys.argv.extend(input("Args: ").split())
# args.init_urls = 'http://www.baidu.com'
# args.depth = 3
parser = argparse.ArgumentParser(description='A crawler for website')
parser.add_argument('-a', type=str, required=True, metavar='WebAddr', dest='init_urls',
help='Specify the Website Address')
parser.add_argument('-d', type=int, default=1, metavar='CrawlDepth', dest='depth', help='Specify the Crawler Depth')
parser.add_argument('-o', type=str, default='./', metavar='OutputDir', dest='out_dir',
help='Specify the Output Directory')
args = parser.parse_args()
crawler = CVPR2019Crawler(args)
crawler.run()
if __name__ == '__main__':
main()
| mit | -901,503,238,482,499,200 | 36.588462 | 120 | 0.527883 | false |
jacobwindsor/pubchem-ranker | CompoundRanker/DataManipulators/CIDGatherer.py | 1 | 2702 | import sys
import time

from CompoundRanker.database import get_db, query_db
from CompoundRanker import app
from requests import exceptions, get
class CIDGatherer(object):
def harvest(self, dataset_id):
"""
Harvest all of the CIDs from PubChem
:return: List of tuples [(cid, metab_id),]
"""
# Query only returns the metabolites that don't already have CIDs associated
query = "SELECT t1.id, t1.cas from metabolites t1 " \
"LEFT JOIN pubchem_compounds t2 ON t2.metab_ID = t1.id " \
"WHERE t2.metab_ID is NULL AND t1.dataset_id is ?"
results = query_db(query, dataset_id)
count = len(results)
since_wait = 0
since_report = 0
cid_metab_id_map = [] # List of tuples
for i, result in enumerate(results):
since_wait += 1
since_report += 1
            if since_wait > 2:
                sys.stdout.write("Waiting 1 second \n")
                sys.stdout.flush()
                time.sleep(1)  # actually pause, to respect PubChem's rate limit
                since_wait = 0
if since_report > 49:
sys.stdout.write(str(cid_metab_id_map))
sys.stdout.write("\n")
sys.stdout.flush()
since_report = 0
cids = self.get_cids(result['cas'])
metab_id = result['id']
if cids:
for cid in cids:
cid_metab_id_map.append((cid, metab_id))
# Progress
perc = ((i+1)/count) * 100
sys.stdout.write("%s%% \n" % perc)
sys.stdout.flush()
return cid_metab_id_map
def get_cids(self, cas):
"""
Use the PubChem API to get the CID
:param cas: string - CAS identifier
:return: list of CIDs
"""
uri = "http://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/%s/cids/json" \
"?email=%s"
try:
response = get((uri % (cas, app.config['ADMIN_EMAIL']))).json()
try:
cids = response['IdentifierList']['CID']
return cids
except KeyError:
return None
except (exceptions.ConnectionError, TimeoutError, exceptions.Timeout,
exceptions.ConnectTimeout, exceptions.ReadTimeout) as e:
            # Log the error and the CAS number it occurred on
            sys.stderr.write("Error: %s. Occurred on CAS: %s\n" % (e, cas))
sys.stderr.flush()
sys.stdout.flush()
def save(self, cid_metab_id_map):
insert_query = "INSERT INTO pubchem_compounds(CID, metab_ID) VALUES (?, ?)"
return query_db(insert_query, cid_metab_id_map, many=True)
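# Illustrative pipeline (hypothetical dataset id):
#
#   gatherer = CIDGatherer()
#   gatherer.save(gatherer.harvest(dataset_id=1))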
| mit | 3,315,396,116,340,576,000 | 31.95122 | 85 | 0.53738 | false |
FescueFungiShare/hydroshare | hs_core/tests/api/native/test_publish_resource.py | 1 | 2487 |
import unittest
from django.contrib.auth.models import Group
from django.test import TestCase
from hs_core import hydroshare
from hs_core.testing import MockIRODSTestCaseMixin
class TestPublishResource(MockIRODSTestCaseMixin, TestCase):
def setUp(self):
super(TestPublishResource, self).setUp()
self.group, _ = Group.objects.get_or_create(name='Hydroshare Author')
# create a user
self.user = hydroshare.create_account(
'[email protected]',
username='creator',
first_name='Creator_FirstName',
last_name='Creator_LastName',
superuser=False,
groups=[]
)
# create a resource
self.res = hydroshare.create_resource(
'GenericResource',
self.user,
'Test Resource'
)
# TODO: This test needs to be enabled once the publish_resource() function is updated to register resource DOI
# with crossref. At that point the call to crossref needs to be mocked here in this test (IMPORTANT)
@unittest.skip
def test_publish_resource(self):
# check status prior to publishing the resource
self.assertFalse(
self.res.raccess.published,
msg='The resource is published'
)
self.assertFalse(
self.res.raccess.immutable,
msg='The resource is frozen'
)
self.assertIsNone(
self.res.doi,
msg='doi is assigned'
)
# there should not be published date type metadata element
self.assertFalse(self.res.metadata.dates.filter(type='published').exists())
# publish resource - this is the api we are testing
hydroshare.publish_resource(self.res.short_id)
self.pub_res = hydroshare.get_resource_by_shortkey(self.res.short_id)
# test publish state
self.assertTrue(
self.pub_res.raccess.published,
msg='The resource is not published'
)
# test frozen state
self.assertTrue(
self.pub_res.raccess.immutable,
msg='The resource is not frozen'
)
# test if doi is assigned
self.assertIsNotNone(
self.pub_res.doi,
msg='No doi is assigned with the published resource.'
)
        # there should now be a published date type metadata element
self.assertTrue(self.pub_res.metadata.dates.filter(type='published').exists())
| bsd-3-clause | 1,755,957,264,904,521,700 | 30.884615 | 114 | 0.617209 | false |
derekmd/opentag-presenter | tags/DefaultDocument.py | 1 | 4289 | from PresentationObject import PresentationObject
class DefaultDocument( PresentationObject ):
"""
Contains the hard-coded defaults for properties in a presentation.
This internal tag shouldn't be used in a presentation.
"""
def __init__( self ):
"""
Set the properties dictionary of the default settings for
every PresentationObject type in the Presenter application. If
        you add a new PresentationObject type, set its default settings
in this class.
"""
properties = {
"animation" : "", # direction to animate an object
# from
"animationdelay" : 0, # number of seconds between
# refreshing an object
"animationspeed" : 1, # pixels to move for each object
# refresh
"align" : "", # position to align an object
"background" : "", # background image
"bgcolour" : "white", # background colour
"bgcolor" : "white", # colour alias
"bgpattern" : "SolidPattern", # table bgcolor pattern
"bgstyle" : "SolidPattern", # box background style
"stretchbackground" : "no", # stretch background to
# fit to the canvas
"face" : "Times New Roman", # font family
"color" : "black", # font colour
"colour" : "black", # colour alias
"size" : 12, # point/pixel/relative size
"colorize" : "yes", # colourize bullet images
"colourize" : "yes", # colour alias
"indent" : 0, # pixels from marginleft for a <p>
# first line or a <bullet>
"linespacing" : 0, # number of lines to space between
# lines.
#
# i.e., if a line is 16 pixels
# high and linespacing is 0.5,
# the space between that line and
# the next will be 8 pixels.
"margin" : 32, # pixels from edge of screen for
# left, right, top, & bottom
"marginleft" : 32, # pixels from left of screen
"marginright" : 32, # pixels from right of screen
"margintop" : 32, # pixels from top of screen
"marginbottom" : 32, # pixels from bottom of screen
"displaynumber" : "no", # display slide number
"displaynumberof" : "no", # display slide number of
# i.e., x of X
"border" : 0, # border size in pixels
"bordercolour" : "", # border colour
"bordercolor" : "", # colour alias
"borderstyle" : "SolidLine", # border style constant
"cellspacing" : 8, # pixels between cells
"cellpadding" : 8, # pixels from cell edges
# (aka a margin)
"padding" : "yes", # y-padding for <title> tag
"src" : "", # location of image on disk
"title" : "", # slide title
"transition" : "immediate", # slide transition type
"transitiondelay" : 0.01, # slide transition refresh
# timeout
"type" : "circle", # bullet type
"titleface" : None, # title font face
"titleborder" : None, # titlebox border size
"titlebordercolor" : None, # titlebox bordercolor
"titlebordercolour" : None, # titlebox bordercolor
"titlebgcolor" : None, # titlebox bgcolor
"titlebgcolour" : None, # titlebox bgcolor
"titlebgpattern" : None, # titlebox bgpattern
"titlecolor" : None, # title font colour
"titlecolour" : None, # title font colour
"titlesize" : None, # title font size
"titlemargin" : None, # titlebox margin
"titlewidth" : None, # titlebox width
"titleignore" : "no", # ignore title* properties
"zborder" : 2,
"zbordercolor" : None,
"zbordercolour" : None,
"zcellpadding" : 32,
"zcellspacing" : 0
}
PresentationObject.__init__( self, None, None, properties )
def getSlideshow( self ):
"""
Get the <slideshow> for this presentation. It SHOULD be the
        first tag defined in the XML document.
Returns a slideshow instance.
"""
from slideshow import slideshow
if isinstance(self.getContent(0), slideshow):
return self.getContent(0)
else:
return None
| bsd-2-clause | 1,632,470,191,671,452,200 | 36.622807 | 67 | 0.569363 | false |
sertansenturk/symbtrdataextractor | symbtrdataextractor/unittests/extractor_tests.py | 1 | 3913 | import json
import os
from symbtrdataextractor.dataextractor import DataExtractor
from symbtrdataextractor.reader.mu2 import Mu2Reader
_curr_folder = os.path.dirname(os.path.abspath(__file__))
def _basic_txt_extractor(score_name, use_name=True):
txt_filename = os.path.join(_curr_folder, 'data', score_name + '.txt')
symbtr_name = score_name if use_name is True else None
# initialize the extractor
extractor = DataExtractor(
extract_all_labels=False, melody_sim_thres=0.7, lyrics_sim_thres=0.7,
save_structure_sim=True, get_recording_rels=False, print_warnings=True)
# extract txt_data
txt_data, is_data_valid = extractor.extract(txt_filename,
symbtr_name=symbtr_name)
# compare with a previously saved result
score_data_file = os.path.join(_curr_folder, 'data', score_name + '.json')
saved_data = json.load(open(score_data_file))
assert saved_data == txt_data, u"{0:s}: the result is different".format(
score_name)
assert is_data_valid, "The data is not valid (or the validations failed.)"
def test_with_instrumental():
"""
Tests the result of a instrumental score
"""
scorename = 'ussak--sazsemaisi--aksaksemai----neyzen_aziz_dede'
_basic_txt_extractor(scorename)
def test_without_name():
"""
Tests the result of a score without the symbtr_name input given
"""
scorename = 'ussak--sazsemaisi--aksaksemai----neyzen_aziz_dede'
_basic_txt_extractor(scorename, use_name=False)
def test_with_free_usul():
"""
Tests the result of a score with free (serbest) usul
"""
scorename = 'saba--miraciye--serbest--pes_heman--nayi_osman_dede'
_basic_txt_extractor(scorename)
def test_with_phrase_annotation():
"""
Tests the result of a score with phrase_annotations
"""
scorename = 'huzzam--sarki--curcuna--guzel_gun_gormedi--haci_arif_bey'
_basic_txt_extractor(scorename)
def test_with_vocal_section_starting_mid_measure():
"""
Tests the result with the score of a vocal composition in which some of
the lyrics lines start in middle of the measure
"""
scorename = 'hicaz_humayun--beste--hafif--olmada_diller--abdulhalim_aga'
_basic_txt_extractor(scorename)
def test_with_full_input():
"""
Tests the result with complete information available, i.e. mbid, phrase
annotation and user provided segmentation
"""
# inputs
scorename = 'kurdilihicazkar--sarki--agiraksak--ehl-i_askin--tatyos_efendi'
txt_filename = os.path.join(_curr_folder, 'data', scorename + '.txt')
mbid = 'b43fd61e-522c-4af4-821d-db85722bf48c'
auto_seg_file = os.path.join(_curr_folder, 'data', scorename + '.autoSeg')
auto_seg_bounds = json.load(open(auto_seg_file, 'r'))['boundary_noteIdx']
mu2_filename = os.path.join(_curr_folder, 'data', scorename + '.mu2')
# initialize the extractor
extractor = DataExtractor(
extract_all_labels=False, melody_sim_thres=0.75, lyrics_sim_thres=0.75,
save_structure_sim=True, get_recording_rels=False, print_warnings=True)
# extract txt_data
txt_data, is_data_valid = extractor.extract(
txt_filename, symbtr_name=scorename, mbid=mbid,
segment_note_bound_idx=auto_seg_bounds)
# extract mu2 header metadata
mu2_header, header_row, is_header_valid = Mu2Reader.read_header(
mu2_filename, symbtr_name=scorename)
# merge
data = DataExtractor.merge(txt_data, mu2_header)
is_valid = is_data_valid and is_header_valid
# compare with a previously saved result
score_data_file = os.path.join(_curr_folder, 'data', scorename + '.json')
saved_data = json.load(open(score_data_file))
assert saved_data == data, u"{0:s}: the result is different".format(
scorename)
assert is_valid, "The data is not valid (or the validations failed.)"
| agpl-3.0 | 6,322,922,692,640,797,000 | 31.882353 | 79 | 0.675185 | false |
takeshixx/deen | deen/plugins/codecs/plugin_url.py | 1 | 1130 | try:
# Python 3
import urllib.parse as urllibparse
except ImportError:
# Python 2
import urllib as urllibparse
from .. import DeenPlugin
class DeenPluginUrl(DeenPlugin):
name = 'url'
display_name = 'URL'
cmd_name = 'url'
    cmd_help = 'URL encode/decode data'
def __init__(self):
super(DeenPluginUrl, self).__init__()
def process(self, data):
super(DeenPluginUrl, self).process(data)
try:
            # quote_plus expects a str, so decode the bytes first
data = urllibparse.quote_plus(data.decode())
data = data.encode()
except Exception as e:
self.error = e
self.log.error(self.error)
self.log.debug(self.error, exc_info=True)
return data
def unprocess(self, data):
super(DeenPluginUrl, self).unprocess(data)
try:
data = urllibparse.unquote_plus(data.decode())
data = data.encode()
except (UnicodeDecodeError, TypeError) as e:
self.error = e
self.log.error(self.error)
self.log.debug(self.error, exc_info=True)
return data
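# Minimal usage sketch (assumes deen's plugin loader instantiates the class):
#
#   plugin = DeenPluginUrl()
#   plugin.process(b'a b&c')      # -> b'a+b%26c'
#   plugin.unprocess(b'a+b%26c')  # -> b'a b&c'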
| apache-2.0 | -2,507,691,634,774,062,600 | 26.560976 | 58 | 0.582301 | false |
darknight-007/Firmware | testScripts/testOffboardPositionControlWithGainAndIntertialParamChange.py | 1 | 5733 | """
testing offboard position control with a simple takeoff script
"""
import rospy
from mavros_msgs.msg import State
from geometry_msgs.msg import PoseStamped, Point, Quaternion
import math
import numpy
from gazebo_msgs.srv import SetLinkProperties
from gazebo_msgs.srv import SetLinkPropertiesRequest
from gazebo_msgs.srv import GetLinkProperties
from gazebo_msgs.srv import GetLinkPropertiesRequest
from gazebo_msgs.srv import GetLinkPropertiesResponse
from sensor_msgs.msg import Joy
from mavros_msgs.srv import ParamSetRequest
from mavros_msgs.srv import ParamSet
from mavros_msgs.msg import ParamValue
class OffboardPosCtlWithOnlineDynamicalUpdates:
curr_pose = PoseStamped()
waypointIndex = 0
distThreshold = 0.4
sim_ctr = 1
des_pose = PoseStamped()
isReadyToFly = False
locations = numpy.matrix([[2, 0, 1, 0, 0, -0.48717451, -0.87330464],
[0, 2, 1, 0, 0, 0, 1],
[-2, 0, 1, 0., 0., 0.99902148, -0.04422762],
[0, -2, 1, 0, 0, 0, 0],
])
MPC_PITCH_P = 0
MPC_PITCH_D = 1
MPC_ROLL_P = 2
MPC_ROLL_D = 3
MPC_PITCHRATE_P = 4
MPC_PITCHRATE_D = 5
MPC_ROLLRATE_P = 6
MPC_ROLLRATE_D = 7
MPC_XY_CRUISE = 8
def __init__(self):
rospy.init_node('offboard_test', anonymous=True)
pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10)
mocap_sub = rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback=self.mocap_cb)
state_sub = rospy.Subscriber('/mavros/state', State, callback=self.state_cb)
nanokontrolSub = rospy.Subscriber('/nanokontrol/nanokontrol', Joy, callback=self.nanokontrolCallback)
gazebo_service_set_link_properties = rospy.ServiceProxy('/gazebo/set_link_properties', SetLinkProperties)
gazebo_service_get_link_properties = rospy.ServiceProxy('/gazebo/get_link_properties', GetLinkProperties)
self.param_service = rospy.ServiceProxy('/mavros/param/set', ParamSet)
rate = rospy.Rate(10) # Hz
rate.sleep()
self.des_pose = self.copy_pose(self.curr_pose)
shape = self.locations.shape
while not rospy.is_shutdown():
#print self.sim_ctr, shape[0], self.waypointIndex
if self.waypointIndex is shape[0]:
self.waypointIndex = 0
self.sim_ctr += 1
if self.isReadyToFly:
des_x = self.locations[self.waypointIndex, 0]
des_y = self.locations[self.waypointIndex, 1]
des_z = self.locations[self.waypointIndex, 2]
self.des_pose.pose.position.x = des_x
self.des_pose.pose.position.y = des_y
self.des_pose.pose.position.z = des_z
self.des_pose.pose.orientation.x = self.locations[self.waypointIndex, 3]
self.des_pose.pose.orientation.y = self.locations[self.waypointIndex, 4]
self.des_pose.pose.orientation.z = self.locations[self.waypointIndex, 5]
self.des_pose.pose.orientation.w = self.locations[self.waypointIndex, 6]
curr_x = self.curr_pose.pose.position.x
curr_y = self.curr_pose.pose.position.y
curr_z = self.curr_pose.pose.position.z
dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) + (curr_z - des_z)*(curr_z - des_z))
if dist < self.distThreshold:
self.waypointIndex += 1
#des_params = self.updateUAVInertialParam(gazebo_service_get_link_properties)
# print dist, curr_x, curr_y, curr_z, self.waypointIndex
pose_pub.publish(self.des_pose)
rate.sleep()
def updateUAVInertialParam(self, gazebo_service_get_link_properties):
# current_params = GetLinkPropertiesResponse()
# current_params = gazebo_service_get_link_properties.call(GetLinkPropertiesRequest('base_link'))
# des_params = current_params
# des_params = SetLinkPropertiesRequest()
# des_params.mass = current_params.mass + 0.3
# des_params.gravity_mode = current_params.gravity_mode
# des_params.com = current_params.com
# des_params.ixx = current_params.ixx
# des_params.ixy = current_params.ixy
# des_params.ixz = current_params.ixz
# des_params.iyy = current_params.iyy
# des_params.iyz = current_params.ixz
# des_params.izz = current_params.izz
# des_params.link_name = 'base_link'
# gazebo_service_set_link_properties.call(des_params)
des_params = 0
return des_params
def copy_pose(self, pose):
pt = pose.pose.position
quat = pose.pose.orientation
copied_pose = PoseStamped()
copied_pose.header.frame_id = pose.header.frame_id
copied_pose.pose.position = Point(pt.x, pt.y, pt.z)
copied_pose.pose.orientation = Quaternion(quat.x, quat.y, quat.z, quat.w)
return copied_pose
def mocap_cb(self, msg):
# print msg
self.curr_pose = msg
    def state_cb(self, msg):
        print msg.mode
        if msg.mode == 'OFFBOARD':
self.isReadyToFly = True
print "readyToFly"
    def nanokontrolCallback(self, msg):
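        # Map the first controller axis from [-1, 1] (assumed joystick range)
        # to a cruise speed in [0, 8] m/s and push it to MPC_XY_CRUISE below.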
velocity = (((msg.axes[0])+1)*4)
param = ParamValue()
param.real = velocity
paramReq = ParamSetRequest()
paramReq.param_id = 'MPC_XY_CRUISE'
paramReq.value = param
self.param_service.call(paramReq)
if __name__ == "__main__":
OffboardPosCtlWithOnlineDynamicalUpdates()
| mit | -5,823,130,695,435,417,000 | 35.987097 | 139 | 0.618524 | false |
ytsarev/rally | rally/openstack/common/config/generator.py | 1 | 10531 | # Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from rally.openstack.common import gettextutils
from rally.openstack.common import importutils
gettextutils.install('rally')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
DICTOPT: 'dict value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT, DICTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
raise
def generate(argv):
parser = argparse.ArgumentParser(
description='generate sample configuration file',
)
parser.add_argument('-m', dest='modules', action='append')
parser.add_argument('-l', dest='libraries', action='append')
parser.add_argument('srcfiles', nargs='*')
parsed_args = parser.parse_args(argv)
mods_by_pkg = dict()
for filepath in parsed_args.srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
if parsed_args.modules:
for module_name in parsed_args.modules:
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
# Look for entry points defined in libraries (or applications) for
# option discovery, and include their return values in the output.
#
# Each entry point should be a function returning an iterable
# of pairs with the group name (or None for the default group)
# and the list of Opt instances for that group.
if parsed_args.libraries:
loader = stevedore.named.NamedExtensionManager(
'oslo.config.opts',
names=list(set(parsed_args.libraries)),
invoke_on_load=False,
on_load_failure_callback=raise_extension_exception
)
for ext in loader:
for group, opts in ext.plugin():
opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
opt_list.append((ext.name, opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group in sorted(opts_by_group.keys()):
print_group_opts(group, opts_by_group[group])
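# Example invocation (illustrative module/library names):
#
#   python generator.py -m rally.common.opts -l oslo.messaging \
#       rally/*.py > etc/rally/rally.conf.sample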
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
        # a newly released oslo.config supports the '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for value in cfg.CONF.values():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
return 'rally'
elif value.strip() != value:
return '"%s"' % value
return value
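# Illustrative examples of the sanitizer above (hypothetical values, assuming
# sys.prefix == '/usr/local'):
#   _sanitize_default('state_path', '/usr/local/lib/rally') -> '/usr/lib/rally'
#   _sanitize_default('host', socket.gethostname())         -> 'rally'
#   _sanitize_default('padded', ' value ')                  -> '" value "'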
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help = u'%s (%s)' % (opt_help,
OPT_TYPES[opt_type])
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == DICTOPT:
assert(isinstance(opt_default, dict))
opt_default_strlist = [str(key) + ':' + str(value)
for (key, value) in opt_default.items()]
print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
| apache-2.0 | 3,891,286,383,344,923,600 | 33.302932 | 79 | 0.578103 | false |
all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/resource.py | 1 | 1819 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Resource) on 2019-05-07.
# 2019, SMART Health IT.
from . import fhirabstractresource
class Resource(fhirabstractresource.FHIRAbstractResource):
""" Base Resource.
This is the base resource type for everything.
"""
resource_type = "Resource"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.id = None
""" Logical id of this artifact.
Type `str`. """
self.implicitRules = None
""" A set of rules under which this content was created.
Type `str`. """
self.language = None
""" Language of the resource content.
Type `str`. """
self.meta = None
""" Metadata about the resource.
Type `Meta` (represented as `dict` in JSON). """
super(Resource, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(Resource, self).elementProperties()
js.extend([
("id", "id", str, False, None, False),
("implicitRules", "implicitRules", str, False, None, False),
("language", "language", str, False, None, False),
("meta", "meta", meta.Meta, False, None, False),
])
return js
import sys
try:
from . import meta
except ImportError:
meta = sys.modules[__package__ + '.meta']
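# --- illustrative usage sketch (assumes the usual fhirclient API) ---
# resource = Resource({'id': 'example-1', 'language': 'en'})
# assert resource.id == 'example-1'
# resource.as_json()  # round-trips back to a JSON dictionary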
| bsd-3-clause | -6,473,601,764,319,109,000 | 29.830508 | 105 | 0.586586 | false |
gouthambs/qtk-python | qtk/creators/indexes.py | 1 | 3937 | import QuantLib as ql
from .common import CreatorBase
from qtk.templates import Template as T
from qtk.fields import Field as F
class USDLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_USDLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.USDLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "USD"}
@classmethod
def set_info(cls):
cls.desc("Creates USD LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class CADLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_CADLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.CADLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "CAD"}
@classmethod
def set_info(cls):
cls.desc("Creates CAD LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class GBPLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_GBPLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.GBPLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "GBP"}
@classmethod
def set_info(cls):
cls.desc("Creates GBP LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class AUDLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_AUDLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.AUDLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "AUD"}
@classmethod
def set_info(cls):
cls.desc("Creates AUD LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
class JPYLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_JPYLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.JPYLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "JPY"}
@classmethod
def set_info(cls):
cls.desc("Creates JPY LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index")
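# --- illustrative sketch (plain QuantLib, not part of the qtk API) ---
# What a creator's _create() ultimately builds: an IBOR index linked to a
# forecasting curve. The flat 2% curve below is a made-up example.
def _example_build_usd_libor():
    curve = ql.FlatForward(0, ql.TARGET(), 0.02, ql.Actual360())
    handle = ql.YieldTermStructureHandle(curve)
    return ql.USDLibor(ql.Period('3M'), handle)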
class EURLiborCreator(CreatorBase):
_templates = [T.INDEX_IBOR_EURLIBOR]
_req_fields = [F.YIELD_CURVE, F.TENOR]
_opt_fields = []
def _create(self, asof_date):
yield_curve = self[F.YIELD_CURVE]
tenor = self[F.TENOR]
yield_handle = ql.YieldTermStructureHandle(yield_curve)
return ql.EURLibor(tenor, yield_handle)
def defaults(self):
return {F.CURRENCY.id: "EUR"}
@classmethod
def set_info(cls):
cls.desc("Creates EUR LIBOR index")
cls.field(F.YIELD_CURVE, "The reference yield curve")
cls.field(F.TENOR, "The reference tenor of the index") | mit | 3,303,185,191,772,193,300 | 29.292308 | 63 | 0.637795 | false |
veltzer/demos-python | src/examples/short/object_oriented/construction/return_weird_things.py | 1 | 1225 | #!/usr/bin/env python
"""
Trying to return weird things from the constructor
Conclusions:
- you can only return None or nothing (which is None)
from a constructor.
- this helps you to get out early if you don't want to
execute the rest of the constructor code.
- you cannot return 'self' since the __init__ method is
not really a constructor in object oriented terminology
but rather an initializer function. The object on which
it works is already alive and well and is already determined
and the initializer function has no say in that matter.
That is why its return value is uninteresting (it is not
even returned to the programmer).
"""
class A:
# noinspection PyReturnFromInit
def __init__(self):
return 0
class B:
# noinspection PyReturnFromInit
def __init__(self):
return None
class C:
# noinspection PyReturnFromInit
def __init__(self):
return self
class D:
# noinspection PyReturnFromInit
def __init__(self):
return
try:
a = A()
except Exception as e:
print('yes, got the exception [{0}]...'.format(str(e)))
b = B()
try:
c = C()
except Exception as e:
print('yes, got the exception [{0}]...'.format(str(e)))
d = D()
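# Illustrative addendum (not part of the original demo): object identity is
# decided by __new__, not __init__, so __new__ is the hook that may actually
# return a different object.
class E:
    def __new__(cls):
        print('E.__new__ decides which object gets created')
        return super().__new__(cls)
e = E()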
| gpl-3.0 | 7,359,768,143,197,327,000 | 21.685185 | 60 | 0.682449 | false |
felixboes/hosd | experimental/compute_morse.py | 1 | 3618 | #!/usr/bin/env python
# The software pyradbar is a bunch of programs to compute the homology of
# Sullivan diagrams.
# Copyright (C) 2015 - 2017 Felix Boes
#
# This file is part of pyradbar.
#
# pyradbar is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyradbar is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyradbar. If not, see <http://www.gnu.org/licenses/>.
import argparse
import pyradbar
import subprocess
import sys
import os
import time
import inspect
def call_sage(g=1, m=2, more_verbose=None, result_file=None, sage_path=None):
script_path = './pyradbar/morse_computation.py'
sys.stdout.write('Calling ' + sage_path + ' -python ' + script_path + ' ' + str(g) + ' ' + str(m) + ' ' + str(more_verbose) + ' ' + str(result_file) + '\n')
sys.stdout.flush()
cmd = [sage_path, "-python", script_path, str(g), str(m), str(more_verbose), str(result_file)]
subprocess.call(cmd)
def main():
# check for correct version.
major, minor = sys.version_info[0], sys.version_info[1]
if major < 2 or (major == 2 and minor < 7):
raise "Python >= 2.7 is required for argument parsing."
# Use the argparse library. The library optparse is deprecated since version 2.7.
# Compare the documentation: https://docs.python.org/2/library/argparse.html
# Create the argument parser.
# Note: Config files can be processed. In order to do so, we have to give fromfile_prefix_chars='_' with _ a symbol.
parser = argparse.ArgumentParser(
add_help = True,
fromfile_prefix_chars='@',
description='Compute the homology of the compactification of the unilevel radial slit domains aka sulivan diagrams.'
)
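    # Because of fromfile_prefix_chars='@', arguments can also be read from a
    # file, e.g. (hypothetical): compute_morse.py @args.txt, where args.txt
    # contains one token per line such as '-g', '1', '-m', '2'.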
# Provide all arguments.
# Note: we provide nargs=N and the N arguments from the command line will be gathered together into a list.
# Thus, we supress nargs.
parser.add_argument('-g', '--gen', required=True, action='store', type=int, dest='g', metavar='arg', help='The genus of the Riemann surfaces')
parser.add_argument('-m', '--pun', required=True, action='store', type=int, dest='m', metavar='arg', help='The number of punctures of the Riemann surfaces')
parser.add_argument('-v', action='store_true', dest='more_verbose', help='Print more status information.', default=False)
parser.add_argument('--sage', action='store', type=str, dest='sage_path', metavar='path', help='The Path to the sage executable', default='./sage-6.8-x86_64-Linux/sage')
args=vars( parser.parse_args() )
# The name of the results file.
args['result_file'] = './results/' + ''.join( [str(param).replace(' ', '_') for param in sys.argv if str(param) ] )
tee = pyradbar.Tee(args['result_file'], 'w')
pre, valid = pyradbar.preamble( args['sage_path'] )
sys.stdout.write(pre + '\n')
sys.stdout.flush()
    if not valid:
        print("Could not initialize everything. Aborting.")
        return 1
call_sage( **args )
sys.stdout.write('\n\n\n')
sys.stdout.flush()
if __name__ == "__main__":
main()
| gpl-3.0 | 4,686,020,170,505,466,000 | 43.121951 | 200 | 0.651741 | false |
mariocesar/pengbot | src/pengbot/adapters/base.py | 1 | 2798 | import asyncio
from collections import defaultdict
from functools import wraps
from pengbot import logger
from pengbot.context import Context
from pengbot.utils import isbound
class UnknownCommand(Exception):
pass
class BaseAdapter:
    # NOTE: these registries live on the class, so they are shared by every
    # adapter instance; `name` is provided by the property defined below.
    handlers = []
    signals = {}
    running = False
    loop = None
def __init__(self, setup_method, **kwargs):
self.context = Context()
self.setup_method = setup_method
def __call__(self, *args, **kwargs):
try:
self.run()
except KeyboardInterrupt:
exit(0)
@property
def name(self):
return self.context.get('name', None) or self.setup_method.__name__
def run(self):
self.setup_method()
self.receive()
def receive(self, *args):
self.loop = asyncio.get_event_loop()
self.loop.set_debug(True)
try:
self.loop.run_until_complete(self.handle_message(*args))
finally:
self.loop.close()
    async def handle_message(self, *args):
        for handler in self.handlers:
            coroutine = handler(*args)
            logger.debug('handler=%r coroutine=%r', handler, coroutine)
            task = self.emit(coroutine)
            logger.debug('task=%r', task)
    def emit(self, coroutine):
        logger.debug('emit=%r', coroutine)
        # Return the task so callers can inspect or await it.
        return self.loop.create_task(coroutine)
def send(self, message):
raise NotImplementedError()
def say(self, *args, **kwargs):
raise NotImplementedError()
# Directives
def signal(self):
adapter = self
def decorator(func):
@wraps(func)
            async def wrapper(*args, **kwargs):
                logger.debug('signal func=%r', func)
                result = await func(*args, **kwargs)
                for listener in adapter.signals.get(func.__qualname__, []):
                    logger.debug('notifying listener=%r', listener)
                    if isinstance(result, tuple):
                        adapter.emit(listener(*result))
                    else:
                        adapter.emit(listener(result))
                return result
return wrapper
return decorator
def listen(self, signal=None):
def decorator(func):
@wraps(func)
def callback(*args, **kwargs):
return func(*args, **kwargs)
            if not signal:
                self.handlers.append(callback)
            else:
                # Keys are qualnames, so test membership by qualname too.
                self.signals.setdefault(signal.__qualname__, []).append(callback)
            # Return the wrapper so the decorated name stays callable.
            return callback
return decorator
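    # --- illustrative usage sketch (hypothetical bot, not part of pengbot) ---
    # @bot.signal()
    # async def greeted(message):
    #     return message
    #
    # @bot.listen(greeted)
    # async def on_greeted(message):
    #     bot.say('hello back: %s' % message)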
class SocketAdapter(BaseAdapter):
pass
class ProcessAdapter(BaseAdapter):
pass
| mit | 7,566,608,231,802,680,000 | 23.330435 | 75 | 0.546104 | false |
kittiu/sale-workflow | sale_automatic_workflow_payment_mode/models/automatic_workflow_job.py | 1 | 2353 | # -*- coding: utf-8 -*-
# © 2016 Camptocamp SA, Sodexis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
import logging
from odoo import models, api, fields
from odoo.tools.safe_eval import safe_eval
from odoo.addons.sale_automatic_workflow.models.automatic_workflow_job \
import savepoint
_logger = logging.getLogger(__name__)
class AutomaticWorkflowJob(models.Model):
_inherit = 'automatic.workflow.job'
@api.model
def run_with_workflow(self, sale_wkf):
workflow_domain = [('workflow_process_id', '=', sale_wkf.id)]
res = super(AutomaticWorkflowJob, self).run_with_workflow(sale_wkf)
if sale_wkf.register_payment:
self._register_payments(
safe_eval(sale_wkf.payment_filter_id.domain) +
workflow_domain)
return res
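    # Example (hypothetical) of the combined search domain evaluated above:
    #   safe_eval("[('state', '=', 'open')]")
    #       + [('workflow_process_id', '=', 7)]
    # i.e. only open invoices attached to this workflow get a payment.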
@api.model
def _register_payments(self, payment_filter):
invoice_obj = self.env['account.invoice']
invoices = invoice_obj.search(payment_filter)
_logger.debug('Invoices to Register Payment: %s', invoices.ids)
for invoice in invoices:
            partner_type = ('customer'
                            if invoice.type in ('out_invoice', 'out_refund')
                            else 'supplier')
payment_mode = invoice.payment_mode_id
if not payment_mode.fixed_journal_id:
_logger.debug('Unable to Register Payment for invoice %s: '
'Payment mode %s must have fixed journal',
invoice.id, payment_mode.id)
                # Skip just this invoice instead of aborting the whole batch.
                continue
with savepoint(self.env.cr):
payment = self.env['account.payment'].create({
'invoice_ids': [(6, 0, invoice.ids)],
'amount': invoice.residual,
'payment_date': fields.Date.context_today(self),
'communication': invoice.reference or invoice.number,
'partner_id': invoice.partner_id.id,
'partner_type': partner_type,
'payment_type': payment_mode.payment_type,
'payment_method_id': payment_mode.payment_method_id.id,
'journal_id': payment_mode.fixed_journal_id.id,
'currency_id': invoice.currency_id.id,
})
payment.post()
return
| agpl-3.0 | -7,847,073,784,139,087,000 | 40.263158 | 78 | 0.576105 | false |
warrenspe/NanoDB | NanoQueries/Alter.py | 1 | 1805 | # Standard imports
import os
# Project imports
from _BaseQuery import BaseQuery
import NanoIO.Table
import NanoIO.File
class Alter(BaseQuery):
name = None
addColumns = None
removeColumns = None
modifyColumns = None
addIndex = None
removeIndex = None
grammar = """
"table"
<name: _>
{
(addColumns: "add" "column" <name: _> <type: _>)
(removeColumns: "remove" "column" <name: _>)
(modifyColumns: "modify" "column" <name: _> <newName: _> <newType: _>)
["add" "index" <addIndex: _>]
["remove" "index" <removeIndex: _>]
}
"""
def executeQuery(self, conn):
# Get tableIO object
tableIO = conn._getTable(self.name)
# Back up the TableIO object
#NanoIO.File._renameTable(tableIO, "_NanoDB_Backup
# Create a new TableIO object
# Overwrite our connections tableio object for this table
# Add columns as desired to our new table io object
# Remove columns as desired from this table io object
# Modify columns as desired to this table io object
# Add indices as desired to this table io object
# Remove indices as desired from this table io object
# Serialize our new table io object
# Copy data from our old table to our new table
# Delete our old table IO object
tmpTableName = "_tmp_alter_table_" + tableIO.tableName
        NanoIO.File.createTable(tableIO.dbName, tmpTableName)
newTableIO = NanoIO.Table.TableIO(tableIO.dbName, tmpTableName)
        # TODO: copy the serialized config and table definition onto
        # newTableIO, apply the requested column and index changes, copy the
        # rows across, then swap the temporary table in for the original.
| gpl-3.0 | -183,135,863,889,413,900 | 26.348485 | 89 | 0.580609 | false |
amenonsen/ansible | lib/ansible/modules/storage/netapp/na_ontap_fcp.py | 2 | 7005 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_ontap_fcp
short_description: NetApp ONTAP Start, Stop and Enable FCP services.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Start, Stop and Enable FCP services.
options:
state:
description:
- Whether the FCP should be enabled or not.
choices: ['present', 'absent']
default: present
status:
description:
- Whether the FCP should be up or down
choices: ['up', 'down']
default: up
vserver:
description:
- The name of the vserver to use.
required: true
'''
EXAMPLES = """
- name: create FCP
na_ontap_fcp:
state: present
status: down
hostname: "{{hostname}}"
username: "{{username}}"
password: "{{password}}"
vserver: "{{vservername}}"
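- name: remove FCP (illustrative addition; values are placeholders)
  na_ontap_fcp:
    state: absent
    hostname: "{{hostname}}"
    username: "{{username}}"
    password: "{{password}}"
    vserver: "{{vservername}}"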
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapFCP(object):
"""
Enable and Disable FCP
"""
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present', 'absent'], default='present'),
vserver=dict(required=True, type='str'),
status=dict(required=False, choices=['up', 'down'], default='up')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
return
def create_fcp(self):
"""
        Creates and starts an FCP service
:return: none
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-create'), True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error creating FCP: %s' %
(to_native(error)),
exception=traceback.format_exc())
def start_fcp(self):
"""
Starts an existing FCP
:return: none
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-start'), True)
except netapp_utils.zapi.NaApiError as error:
# Error 13013 denotes fcp service already started.
if to_native(error.code) == "13013":
return None
else:
self.module.fail_json(msg='Error starting FCP %s' % (to_native(error)),
exception=traceback.format_exc())
def stop_fcp(self):
"""
        Stops an existing FCP service
:return: none
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-stop'), True)
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error stopping FCP %s' %
(to_native(error)),
exception=traceback.format_exc())
def destroy_fcp(self):
"""
Destroys an already stopped FCP
:return:
"""
try:
self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-destroy'), True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error destroying FCP %s' %
(to_native(error)),
exception=traceback.format_exc())
def get_fcp(self):
fcp_obj = netapp_utils.zapi.NaElement('fcp-service-get-iter')
fcp_info = netapp_utils.zapi.NaElement('fcp-service-info')
fcp_info.add_new_child('vserver', self.parameters['vserver'])
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(fcp_info)
fcp_obj.add_child_elem(query)
result = self.server.invoke_successfully(fcp_obj, True)
        # There can be at most one FCP service per vserver, so any match
        # means the service is already set up.
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) >= 1:
return True
else:
return False
def current_status(self):
try:
status = self.server.invoke_successfully(netapp_utils.zapi.NaElement('fcp-service-status'), True)
return status.get_child_content('is-available') == 'true'
except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting FCP status: %s' %
(to_native(error)),
exception=traceback.format_exc())
def apply(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_fcp", cserver)
exists = self.get_fcp()
changed = False
if self.parameters['state'] == 'present':
if exists:
if self.parameters['status'] == 'up':
if not self.current_status():
self.start_fcp()
changed = True
else:
if self.current_status():
self.stop_fcp()
changed = True
else:
self.create_fcp()
if self.parameters['status'] == 'up':
self.start_fcp()
elif self.parameters['status'] == 'down':
self.stop_fcp()
changed = True
else:
if exists:
if self.current_status():
self.stop_fcp()
self.destroy_fcp()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Start, Stop and Enable FCP services.
"""
obj = NetAppOntapFCP()
obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | -6,642,160,182,949,105,000 | 32.357143 | 114 | 0.561599 | false |
mgeorgehansen/FIFE_Technomage | engine/python/fife/extensions/pychan/widgets/basictextwidget.py | 1 | 2140 | # -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2009 by the FIFE team
# http://www.fifengine.de
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from widget import Widget
from common import *
class BasicTextWidget(Widget):
"""
	The base class for widgets which display a string - L{Label}, L{ClickLabel}, L{Button}, etc.
Do not use directly.
New Attributes
==============
- text: The text (depends on actual widget)
Data
====
The text can be set via the L{distributeInitialData} method.
"""
ATTRIBUTES = Widget.ATTRIBUTES + [UnicodeAttr('text')]
DEFAULT_HEXPAND = 1
DEFAULT_VEXPAND = 0
def __init__(self, text = u"",**kwargs):
self.margins = (5,5)
self.text = text
super(BasicTextWidget,self).__init__(**kwargs)
# Prepare Data collection framework
self.accepts_initial_data = True
self._realSetInitialData = self._setText
def _getText(self): return gui2text(self.real_widget.getCaption())
def _setText(self,text): self.real_widget.setCaption(text2gui(text))
text = property(_getText,_setText)
def resizeToContent(self, recurse = True):
self.height = self.real_font.getHeight() + self.margins[1]*2
self.width = self.real_font.getWidth(text2gui(self.text)) + self.margins[0]*2
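# --- illustrative sketch (not in this file; pychan's Label subclasses this) ---
# class Label(BasicTextWidget):
#     """Concrete text widget: inherits the text property and resizing."""
#     pass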
| lgpl-2.1 | -1,168,638,644,344,881,700 | 32.516129 | 91 | 0.648598 | false |
HMRecord/website | python/admin.py | 1 | 2656 | from functools import wraps
from flask import Blueprint, request, abort, Response, make_response
# flask.ext was removed in Flask 1.0; import the extension directly.
from flask_httpauth import HTTPBasicAuth
from bson.json_util import loads, dumps
import database as db
import configparser
adminAPI = Blueprint('adminAPI', __name__)
auth = HTTPBasicAuth()
config = configparser.ConfigParser()
config.read("../config.ini")
ADMIN_PASSWORD = config.get("admin", "password")
def checkAuth(username, password):
return username == 'admin' and password == ADMIN_PASSWORD
def authenticate():
return Response('Unauthorized access', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'})
def requiresAuth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not checkAuth(auth.username, auth.password):
return authenticate()
return f(*args, **kwargs)
return decorated
@adminAPI.errorhandler(400)
def bad_request(error):
return make_response('Bad request', 400)
@adminAPI.errorhandler(404)
def not_found(error):
return make_response('Not found', 404)
@adminAPI.route('/api/admin/article', methods=['POST'])
@requiresAuth
def createArticle():
if not request.json or not db.createArticle(loads(dumps(request.json))):
abort(400)
return "good"
@adminAPI.route('/api/admin/article/<articleID>', methods=['DELETE'])
@requiresAuth
def deleteArticle(articleID):
db.deleteArticle(articleID)
return "good"
@adminAPI.route('/api/admin/staff', methods=['POST'])
@requiresAuth
def createStaff():
if not request.json or not db.createStaff(loads(dumps(request.json))):
abort(400)
return "good"
@adminAPI.route('/api/admin/staff', methods=['PUT'])
@requiresAuth
def updateStaff():
print(loads(dumps(request.json)))
if not request.json or not db.updateStaff(loads(dumps(request.json))):
abort(400)
return "good"
@adminAPI.route('/api/admin/section', methods=['POST'])
@requiresAuth
def createSection():
print(request.json)
if not request.json or not db.createSection(loads(dumps(request.json))):
abort(400)
return "good"
@adminAPI.route('/api/admin/section/<sectionID>', methods=['DELETE'])
@requiresAuth
def deleteSection(sectionID):
db.deleteSection(sectionID)
return "good"
@adminAPI.route('/api/admin/file', methods=['POST'])
@requiresAuth
def file():
print(request.files)
filename = db.saveFile(request.files['file'])
print(filename)
if filename is not None:
return filename
return "bad"
@adminAPI.route('/api/admin/test', methods=['GET'])
@requiresAuth
def test():
return ""
| gpl-3.0 | 1,334,948,711,134,836,700 | 24.538462 | 101 | 0.698419 | false |
ingadhoc/account-payment | account_check/models/account_chart_template.py | 1 | 3098 | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields
import logging
_logger = logging.getLogger(__name__)
class AccountChartTemplate(models.Model):
_inherit = 'account.chart.template'
rejected_check_account_id = fields.Many2one(
'account.account.template',
'Rejected Check Account',
help='Rejection Checks account, for eg. "Rejected Checks"',
# domain=[('type', 'in', ['other'])],
)
deferred_check_account_id = fields.Many2one(
'account.account.template',
'Deferred Check Account',
help='Deferred Checks account, for eg. "Deferred Checks"',
# domain=[('type', 'in', ['other'])],
)
holding_check_account_id = fields.Many2one(
'account.account.template',
'Holding Check Account',
help='Holding Checks account for third checks, '
'for eg. "Holding Checks"',
# domain=[('type', 'in', ['other'])],
)
def _load_template(self, company, code_digits=None, account_ref=None, taxes_ref=None):
account_ref, taxes_ref = super()._load_template(
company, code_digits=code_digits, account_ref=account_ref, taxes_ref=taxes_ref)
for field in [
'rejected_check_account_id',
'deferred_check_account_id',
'holding_check_account_id']:
account_field = self[field]
# TODO we should send it in the context and overwrite with
# lower hierichy values
if account_field:
company[field] = account_ref[account_field.id]
return account_ref, taxes_ref
def _create_bank_journals(self, company, acc_template_ref):
"""
        Bank and cash journals are created with this method.
        We inherit it in order to add check payment methods to the cash and
        bank journals: this module is usually installed before the chart of
        accounts is loaded, so the check methods are disabled by default.
"""
res = super(
AccountChartTemplate, self)._create_bank_journals(
company, acc_template_ref)
        # create a journal for third-party checks
received_third_check = self.env.ref(
'account_check.account_payment_method_received_third_check')
delivered_third_check = self.env.ref(
'account_check.account_payment_method_delivered_third_check')
self.env['account.journal'].create({
'name': 'Cheques de Terceros',
'type': 'cash',
'company_id': company.id,
'inbound_payment_method_ids': [
(4, received_third_check.id, None)],
'outbound_payment_method_ids': [
(4, delivered_third_check.id, None)],
})
self.env['account.journal'].with_context(
force_company_id=company.id)._enable_issue_check_on_bank_journals()
return res
| agpl-3.0 | 7,020,589,236,783,189,000 | 39.763158 | 91 | 0.573273 | false |
deepmind/reverb | reverb/trajectory_writer.py | 1 | 27672 | # Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the TrajectoryWriter."""
import datetime
import itertools
from typing import Any, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
import numpy as np
from reverb import errors
from reverb import pybind
import tree
class TrajectoryWriter:
"""The TrajectoryWriter is used to write data to tables at a Reverb server.
At a high level, the process of inserting trajectories can be summarized as:
* Structured data is appended to an internal buffer using the `append`
method and the caller receives a reference to each element (i.e leaf node)
in the original data.
* Compatible data referenced (i.e same dtype and compatible shapes) are
concatenated into `TrajectoryColumn`s which in turn are combined into a
trajectory and inserted into a table using the `create_item` method.
It is important to understand that the structure of the data provided to
`append` does NOT need to match the structure of the trajectory which the
sampler will receive when it samples the item. To illustrate, consider a
  scenario where we want to sample a SARS (state-action-reward-state) style
  trajectory of length 5. Furthermore, we would like a trajectory to start at
  every step.
It would look something like this:
```python
client = Client(...)
env = .... # Construct the environment
policy = .... # Construct the agent's policy
with client.trajectory_writer(num_keep_alive_refs=5) as writer:
for episode in range(NUM_EPISODES):
timestep = env.reset()
# You probably have strong opinions of whether the actions should be
# aligned with the step it originated from or the destination. In this
# example we'll align it with the destination state and thus we'll start
# off by appending the timestep WITHOUT an action.
writer.append({
'observation': timestep.observation,
})
while not timestep.last():
# Select the action according to your policy and act it out in the
# environment.
action = policy(timestep)
timestep = env.step(action)
# Now we have both an action and the state it resulted in. We append
# both of these together to the writer. This will result in them
# sharing the same index in `writer.history`.
writer.append({
'observation': timestep.observation,
'reward': timestep.reward,
'action': action,
})
# Once we have seen at least 5 timesteps (including the first one
# which does not have an aligned action) then we can start inserting
# items that reference the last 5 timesteps and the last 4 actions.
if writer.episode_steps >= 5:
trajectory = {
'states': writer.history['observation'][-5:],
'rewards': writer.history['reward'][-4:],
'actions': writer.history['action'][-4:],
}
writer.create_item(
table='my_table',
priority=calc_priority(trajectory),
trajectory=trajectory)
# Block until all pending items have been sent to the server and
# inserted into 'my_table'. This also clears the buffers so history will
# once again be empty and `writer.episode_steps` is 0.
writer.end_episode()
```
"""
def __init__(self, internal_writer: pybind.TrajectoryWriter):
"""Constructor of TrajectoryWriter (must only be called by `Client`)."""
self._writer = internal_writer
# The union of the structures of all data passed to `append`. The structure
    # grows every time the provided data contains one or more fields which were
# not present in any of the data seen before.
self._structure = None
# References to all data seen since the writer was constructed or last reset
# (through end_episode). The number of columns always matches the number of
# leaf nodes in `_structure` but the order is not (necessarily) the same as
# `tree.flatten(_structure)` since the structure may evolve over time.
# Instead the mapping is controlled by `_path_to_column_index`. See
    # `_reorder_like_flat_structure` and `_unflatten` for more details.
self._column_history: List[_ColumnHistory] = []
# Mapping from structured paths (i.e as received from
# `tree.flatten_with_path`) to position in `_column_history`. This is used
    # in `_reorder_like_flat_structure`.
self._path_to_column_index: Mapping[str, int] = {}
    # The inverse of `_path_to_column_index`. That is, the mapping describes the
# swaps required to go from the order of `column_history` (and the C++
# writer) to the order of a sequence which can be unflattened into
# `_structure`. This is used in `_unflatten`.
self._column_index_to_flat_structure_index: Mapping[int, int] = {}
self._path_to_column_config = {}
# Set when `append` called with `partial_step=True`. Remains set until
# `append` called with `partial_step=False`. This is used to control where
# new data references are added to the history (i.e whether a new step
# should be created).
self._last_step_is_open = False
def __enter__(self) -> 'TrajectoryWriter':
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
if exc_type is None or errors.ReverbError not in exc_type.mro():
self.flush()
def __del__(self):
self.close()
@property
def history(self):
"""References to data, grouped by column and structured like appended data.
Allows recently added data references to be accesses with list indexing
semantics. However, instead of returning the raw references, the result is
wrapped in a TrajectoryColumn object before being returned to the caller.
```python
writer = TrajectoryWriter(...)
# Add three steps worth of data.
writer.append({'a': 1, 'b': 100})
writer.append({'a': 2, 'b': 200})
writer.append({'a': 3, 'b': 300})
# Create a trajectory using the _ColumnHistory helpers.
from_history = {
'all_a': writer.history['a'][:],
'first_b': writer.history['b'][0],
'last_b': writer.history['b'][-1],
}
writer.create_item(table='name', priority=1.0, trajectory=from_history)
```
Raises:
RuntimeError: If `append` hasn't been called at least once before.
"""
if not self._column_history:
raise RuntimeError(
'history cannot be accessed before `append` is called at least once.')
return self._unflatten(self._column_history)
@property
def episode_steps(self) -> int:
"""Number of append calls since last `end_episode` call.
This does not count partial calls to append, i.e. ones with
`partial_step=True`.
"""
if not self._column_history:
return 0
else:
return len(self._column_history[0]) - int(self._last_step_is_open)
def configure(self, path: Tuple[Union[int, str], ...],
*,
num_keep_alive_refs: int,
max_chunk_length: Optional[int]):
"""Override chunking options for a single column.
Args:
path: Structured path to the column to configure.
num_keep_alive_refs: Override value for `num_keep_alive_refs` i.e the size
of the circular buffer of the most recently added data.
max_chunk_length: Override value for the chunk length used by this column.
When set to None, an auto tuned chunk length is used. When set to a
number, a constant chunk length is used.
Raises:
ValueError: If num_keep_alive_refs is < 1.
ValueError: If max_chunk_length set to a value < 1 or to a value > than
num_keep_alive_refs.
"""
if num_keep_alive_refs < 1:
raise ValueError(
f'num_keep_alive_refs ({num_keep_alive_refs}) must be a positive '
f'integer')
if max_chunk_length is not None and (
max_chunk_length < 1 or max_chunk_length > num_keep_alive_refs):
raise ValueError(
f'max_chunk_length ({max_chunk_length}) must be None or a positive '
f'integer <= num_keep_alive_refs ({num_keep_alive_refs})')
if max_chunk_length is None:
chunker_options = pybind.AutoTunedChunkerOptions(
num_keep_alive_refs=num_keep_alive_refs,
throughput_weight=1.0)
else:
chunker_options = pybind.ConstantChunkerOptions(
max_chunk_length=max_chunk_length,
num_keep_alive_refs=num_keep_alive_refs)
if path in self._path_to_column_index:
self._writer.ConfigureChunker(self._path_to_column_index[path],
chunker_options)
else:
self._path_to_column_config[path] = chunker_options
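    # Example (hypothetical column and numbers):
    #   writer.configure(('observation',),
    #                    num_keep_alive_refs=100,
    #                    max_chunk_length=10)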
def append(self, data: Any, *, partial_step: bool = False):
"""Columnwise append of data leaf nodes to internal buffers.
If `data` includes fields or sub structures which haven't been present in
any previous calls then the types and shapes of the new fields are extracted
and used to validate future `append` calls. The structure of `history` is
also updated to include the union of the structure across all `append`
calls.
When new fields are added after the first step then the newly created
history field will be filled with `None` in all preceding positions. This
    results in equal indexing across columns. That is, `a[i]` and `b[i]`
    reference the same step in the sequence even if `b` was first observed
after `a` had already been seen.
It is possible to create a "step" using more than one `append` call by
setting the `partial_step` flag. Partial steps can be used when some parts
of the step becomes available only as a result of inserting (and learning
from) trajectories that include the fields available first (e.g learn from
the SARS trajectory to select the next action in an on-policy agent). In the
final `append` call of the step, `partial_step` must be set to False.
Failing to "close" the partial step will result in error as the same field
must NOT be provided more than once in the same step.
Args:
data: The (possibly nested) structure to make available for new items to
reference.
partial_step: If `True` then the step is not considered "done" with this
call. See above for more details. Defaults to `False`.
Raises:
ValueError: If the same column is provided more than once in the same
step.
"""
# Unless it is the first step, check that the structure is the same.
if self._structure is None:
self._update_structure(tree.map_structure(lambda _: None, data))
data_with_path_flat = tree.flatten_with_path(data)
try:
# Use our custom mapping to flatten the expanded structure into columns.
flat_column_data = self._reorder_like_flat_structure(data_with_path_flat)
except KeyError:
# `data` contains fields which haven't been observed before so we need
# expand the spec using the union of the history and `data`.
self._update_structure(
_tree_union(self._structure,
tree.map_structure(lambda x: None, data)))
flat_column_data = self._reorder_like_flat_structure(data_with_path_flat)
# If the last step is still open then verify that already populated columns
# are None in the new `data`.
if self._last_step_is_open:
for i, (column, column_data) in enumerate(
zip(self._column_history, flat_column_data)):
if column_data is None or column.can_set_last:
continue
raise ValueError(
f'Field {self._get_path_for_column_index(i)} has already been set '
f'in the active step by previous (partial) append call and thus '
f'must be omitted or set to None but got: {column_data}')
# Flatten the data and pass it to the C++ writer for column wise append.
if partial_step:
flat_column_data_references = self._writer.AppendPartial(flat_column_data)
else:
flat_column_data_references = self._writer.Append(flat_column_data)
# Append references to respective columns. Note that we use the expanded
# structure in order to populate the columns missing from the data with
# None.
for column, data_reference in zip(self._column_history,
flat_column_data_references):
# If the last step is still open (i.e `partial_step` was set) then we
# populate that step instead of creating a new one.
if not self._last_step_is_open:
column.append(data_reference)
elif data_reference is not None:
column.set_last(data_reference)
# Save the flag so the next `append` call either populates the same step
# or begins a new step.
self._last_step_is_open = partial_step
def create_item(self, table: str, priority: float, trajectory: Any):
"""Enqueue insertion of an item into `table` referencing `trajectory`.
Note! This method does NOT BLOCK and therefore is not impacted by the table
rate limiter. To prevent unwanted runahead, `flush` must be called.
Before creating an item, `trajectory` is validated.
* Only contain `TrajectoryColumn` objects.
* All data references must be alive (i.e not yet expired).
* Data references within a column must have the same dtype and shape.
Args:
table: Name of the table to insert the item into.
priority: The priority used for determining the sample probability of the
new item.
trajectory: A structure of `TrajectoryColumn` objects. The structure is
flattened before passed to the C++ writer.
Raises:
TypeError: If trajectory is invalid.
"""
flat_trajectory = tree.flatten(trajectory)
if not all(isinstance(col, TrajectoryColumn) for col in flat_trajectory):
raise TypeError(
f'All leaves of `trajectory` must be `TrajectoryColumn` but got '
f'{trajectory}')
# Pass the flatten trajectory to the C++ writer where it will be validated
# and if successful then the item is created and enqued for the background
# worker to send to the server.
self._writer.CreateItem(table, priority,
[list(column) for column in flat_trajectory],
[column.is_squeezed for column in flat_trajectory])
def flush(self,
block_until_num_items: int = 0,
timeout_ms: Optional[int] = None):
"""Block until all but `block_until_num_items` confirmed by the server.
There are two ways that an item could be "pending":
1. Some of the data elements referenced by the item have not yet been
finalized (and compressed) as a `ChunkData`.
2. The item has been written to the gRPC stream but the response
confirming the insertion has not yet been received.
Type 1 pending items are transformed into type 2 when flush is called by
forcing (premature) chunk finalization of the data elements referenced by
the items. This will allow the background worker to write the data and items
to the gRPC stream and turn them into type 2 pending items.
The time it takes for type 2 pending items to be confirmed is primarily
due to the state of the table rate limiter. After the items have been
written to the gRPC stream then all we can do is wait (GIL is not held).
Args:
block_until_num_items: If > 0 then this many pending items will be allowed
to remain as type 1. If the number of type 1 pending items is less than
`block_until_num_items` then we simply wait until the total number of
pending items is <= `block_until_num_items`.
timeout_ms: (optional, default is no timeout) Maximum time to block for
before unblocking and raising a `DeadlineExceededError` instead. Note
that although the block is interrupted, the insertion of the items will
proceed in the background.
Raises:
ValueError: If block_until_num_items < 0.
DeadlineExceededError: If operation did not complete before the timeout.
"""
if block_until_num_items < 0:
raise ValueError(
f'block_until_num_items must be >= 0, got {block_until_num_items}')
if timeout_ms is None:
timeout_ms = -1
try:
self._writer.Flush(block_until_num_items, timeout_ms)
except RuntimeError as e:
if 'Timeout exceeded' in str(e) and timeout_ms is not None:
raise errors.DeadlineExceededError(
f'Flush call did not complete within provided timeout of '
f'{datetime.timedelta(milliseconds=timeout_ms)}')
raise
def end_episode(self,
clear_buffers: bool = True,
timeout_ms: Optional[int] = None):
"""Flush all pending items and generate a new episode ID.
Args:
clear_buffers: Whether the history should be cleared or not. Buffers
should only not be cleared when trajectories spanning multiple episodes
are used.
timeout_ms: (optional, default is no timeout) Maximum time to block for
before unblocking and raising a `DeadlineExceededError` instead. Note
that although the block is interrupted, the buffers and episode ID are
reset all the same and the insertion of the items will proceed in the
background thread.
Raises:
DeadlineExceededError: If operation did not complete before the timeout.
"""
try:
self._writer.EndEpisode(clear_buffers, timeout_ms)
except RuntimeError as e:
if 'Timeout exceeded' in str(e) and timeout_ms is not None:
raise errors.DeadlineExceededError(
f'End episode call did not complete within provided timeout of '
f'{datetime.timedelta(milliseconds=timeout_ms)}')
raise
if clear_buffers:
for column in self._column_history:
column.reset()
def close(self):
self._writer.Close()
def _reorder_like_flat_structure(self, data_with_path_flat):
flat_data = [None] * len(self._path_to_column_index)
for path, value in data_with_path_flat:
flat_data[self._path_to_column_index[path]] = value
return flat_data
def _unflatten(self, flat_data):
reordered_flat_data = [
flat_data[self._column_index_to_flat_structure_index[i]]
for i in range(len(flat_data))
]
return tree.unflatten_as(self._structure, reordered_flat_data)
def _get_path_for_column_index(self, column_index):
i = self._column_index_to_flat_structure_index[column_index]
return tree.flatten_with_path(self._structure)[i][0]
def _update_structure(self, new_structure: Any):
"""Replace the existing structure with a superset of the current one.
Since the structure is allowed to evolve over time we are unable to simply
map flattened data to column indices. For example, if the first step is
`{'a': 1, 'c': 101}` and the second step is `{'a': 2, 'b': 12, 'c': 102}`
then the flatten data would be `[1, 101]` and `[2, 12, 102]`. This will
result in invalid behaviour as the second column (index 1) would receive `c`
in the first step and `b` in the second.
To mitigate this we maintain an explicit mapping from path -> column. The
mapping is allowed to grow over time and would in the above example be
`{'a': 0, 'c': 1}` and `{'a': 0, 'b': 2, 'c': 1}` after the first and second
step resp. Data would thus be flatten as `[1, 101]` and `[2, 102, 12]` which
means that the columns in the C++ layer only receive data from a single
field in the structure even if it evolves over time.
Args:
new_structure: The new structure to use. Must be a superset of the
previous structure.
"""
new_structure_with_path_flat = tree.flatten_with_path(new_structure)
# Evolve the mapping from structure path to column index.
for path, _ in new_structure_with_path_flat:
if path not in self._path_to_column_index:
self._path_to_column_index[path] = len(self._path_to_column_index)
# If an explicit config have been provided for the column then forward
# it to the C++ writer so it will be applied when the column chunker is
# created.
if path in self._path_to_column_config:
self._writer.ConfigureChunker(self._path_to_column_index[path],
self._path_to_column_config[path])
# Recalculate the reverse mapping, i.e column index to index within the
# flatten structure.
self._column_index_to_flat_structure_index = {
i: self._path_to_column_index[path]
for i, (path, _) in enumerate(new_structure_with_path_flat)
}
# New columns are always added to the back so all we need to do expand the
# history structure is to append one column for every field added by this
# `_update_structure` call. In order to align indexing across all columns
# we init the new fields with None for all steps up until this.
history_length = len(self._column_history[0]) if self._column_history else 0
while len(self._column_history) < len(new_structure_with_path_flat):
column_index = len(self._column_history)
self._column_history.append(
_ColumnHistory(new_structure_with_path_flat[column_index][0],
history_length))
# With the mapping and history updated the structure can be set.
self._structure = new_structure
class _ColumnHistory:
"""Utility class for building `TrajectoryColumn`s from structured history."""
def __init__(self,
path: Tuple[Union[str, int], ...],
history_padding: int = 0):
"""Constructor for _ColumnHistory.
Args:
path: A Tuple of strings and ints that represents which leaf-node this
column represents in TrajectoryWriter._structure.
history_padding: The number of Nones used to forward-pad the column's
history.
"""
self._path = path
self._data_references: Sequence[Optional[pybind.WeakCellRef]] = (
[None] * history_padding)
def append(self, ref: Optional[pybind.WeakCellRef]):
self._data_references.append(ref)
def reset(self):
self._data_references = []
def set_last(self, ref: pybind.WeakCellRef):
if not self._data_references:
raise RuntimeError('set_last called on empty history column')
if self._data_references[-1] is not None:
raise RuntimeError('set_last called on already set cell')
self._data_references[-1] = ref
@property
def can_set_last(self) -> bool:
return self._data_references and self._data_references[-1] is None
def __len__(self) -> int:
return len(self._data_references)
def __iter__(self) -> Iterator[Optional[pybind.WeakCellRef]]:
return iter(self._data_references)
def __getitem__(self, val) -> 'TrajectoryColumn':
path = self._path + (val,)
if isinstance(val, int):
return TrajectoryColumn([self._data_references[val]],
squeeze=True,
path=path)
elif isinstance(val, slice):
return TrajectoryColumn(
self._data_references[val], path=path)
else:
raise TypeError(
f'_ColumnHistory indices must be integers, not {type(val)}')
def __str__(self):
name = f'{self.__class__.__module__}.{self.__class__.__name__}'
return f'{name}(path={self._path}, refs={self._data_references})'
class TrajectoryColumn:
"""Column used for building trajectories referenced by table items."""
def __init__(self,
data_references: Sequence[pybind.WeakCellRef],
*,
squeeze: bool = False,
path: Optional[Tuple[Union[str, int, slice], ...]] = None):
if squeeze and len(data_references) != 1:
raise ValueError(
f'Columns must contain exactly one data reference when squeeze set, '
f'got {len(data_references)}')
if any(ref is None for ref in data_references):
raise ValueError('TrajectoryColumns cannot contain any None data '
f'references, got {data_references} for '
f'TrajectoryColumn at path {path}')
self._data_references = tuple(data_references)
self.is_squeezed = squeeze
def __len__(self) -> int:
return len(self._data_references)
def __iter__(self) -> Iterator[pybind.WeakCellRef]:
return iter(self._data_references)
def __getitem__(self, val) -> 'TrajectoryColumn':
if isinstance(val, int):
return TrajectoryColumn([self._data_references[val]], squeeze=True)
elif isinstance(val, slice):
return TrajectoryColumn(self._data_references[val])
else:
raise TypeError(
f'TrajectoryColumn indices must be integers or slices, '
f'not {type(val)}')
@property
def shape(self) -> Tuple[Optional[int], ...]:
if self.is_squeezed:
return self._data_references[0].shape
else:
return (len(self._data_references), *self._data_references[0].shape)
@property
def dtype(self) -> np.dtype:
return self._data_references[0].dtype
def numpy(self) -> np.ndarray:
"""Gets and stacks all the referenced data.
Data is copied from buffers in the C++ layers and may involve decompression
of already created chunks. This can be quite a memory intensive operation
when used on large arrays.
Returns:
All referenced data stacked in a single numpy array if column isn't
squeezed. If the column is squeezed then the value is returned without
stacking.
Raises:
RuntimeError: If any data reference has expired.
"""
if any(reference.expired for reference in self._data_references):
raise RuntimeError(
'Cannot convert TrajectoryColumn with expired data references to '
'numpy array.')
if self.is_squeezed:
return self._data_references[0].numpy()
return np.stack([ref.numpy() for ref in self._data_references])
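# Example (hypothetical (84, 84) observations): a squeezed column such as
# history['obs'][-1] returns a single (84, 84) array from numpy(), while
# history['obs'][-3:].numpy() stacks the references into (3, 84, 84).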
def _tree_filter(source, filter_wih_path_flat):
"""Extract `filter_` from `source`."""
path_to_index = {
path: i for i, (path, _) in enumerate(filter_wih_path_flat)
}
flat_target = [None] * len(path_to_index)
for path, leaf in tree.flatten_with_path(source):
if path in path_to_index:
flat_target[path_to_index[path]] = leaf
return flat_target
def _is_named_tuple(x):
# Classes that look syntactically as if they inherit from `NamedTuple` in
# fact end up not doing so, so use this heuristic to detect them.
return isinstance(x, Tuple) and hasattr(x, '_fields')
def _tree_union(a, b):
"""Compute the disjunction of two trees with None leaves."""
if a is None:
return a
if _is_named_tuple(a):
return type(a)(**_tree_union(a._asdict(), b._asdict()))
if isinstance(a, (List, Tuple)):
return type(a)(
_tree_union(aa, bb) for aa, bb in itertools.zip_longest(a, b))
merged = {**a}
for k, v in b.items():
if k in a:
merged[k] = _tree_union(a[k], v)
else:
merged[k] = v
return type(a)(**merged)
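# Example: _tree_union({'a': None}, {'a': None, 'b': None})
# returns {'a': None, 'b': None}.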
| apache-2.0 | -4,714,772,191,559,620,000 | 39.279476 | 81 | 0.664751 | false |
euronmetaliaj/MarketAnalyzer | core/social/Objects/rake.py | 1 | 6665 | # Implementation of RAKE - Rapid Automtic Keyword Exraction algorithm
# as described in:
# Rose, S., D. Engel, N. Cramer, and W. Cowley (2010).
# Automatic keyword extraction from individual documents.
# In M. W. Berry and J. Kogan (Eds.), Text Mining: Applications and Theory.
# John Wiley and Sons, Ltd.
import re
import operator
debug = False
test = True
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
@param text The text that must be split in to words.
@param min_word_return_size The minimum no of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
#leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
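# Illustrative example (hypothetical input): separate_words("Compatibility of 2 linear systems", 0)
# returns ['compatibility', 'of', 'linear', 'systems'] -- the standalone "2" is
# dropped because pure numbers are skipped, and all words are lower-cased.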
def split_sentences(text):
"""
Utility function to return a list of sentences.
    @param text The text that must be split into sentences.
"""
sentence_delimiters = re.compile(u'[.!?,;:\t\\\\"\\(\\)\\\'\u2019\u2013]|\\s\\-\\s')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_file_path):
stop_word_list = load_stop_words(stop_word_file_path)
stop_word_regex_list = []
for word in stop_word_list:
word_regex = r'\b' + word + r'(?![\w-])' # added look ahead for hyphen
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
def generate_candidate_keywords(sentence_list, stopword_pattern):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "":
phrase_list.append(phrase)
return phrase_list
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
#if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree #orig.
#word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate word scores = deg(w)/freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) #orig.
#word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
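# Worked example (illustrative phrases, not from the paper): for the phrase list
# ["deep learning", "learning"], freq(deep)=1 and freq(learning)=2; the two-word
# phrase adds a co-occurrence degree of 1 to each of its words, and frequency is
# then added on top, giving deg(deep)=2 and deg(learning)=3. The resulting
# scores are deep = 2/1 = 2.0 and learning = 3/2 = 1.5, so words that appear in
# longer phrases score higher via deg(w)/freq(w).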
def generate_candidate_keyword_scores(phrase_list, word_score):
keyword_candidates = {}
for phrase in phrase_list:
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
class Rake(object):
def __init__(self, stop_words_path):
self.stop_words_path = stop_words_path
self.__stop_words_pattern = build_stop_word_regex(stop_words_path)
def run(self, text):
sentence_list = split_sentences(text)
phrase_list = generate_candidate_keywords(sentence_list, self.__stop_words_pattern)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores)
sorted_keywords = sorted(keyword_candidates.iteritems(), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if test:
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
# Split text into sentences
sentenceList = split_sentences(text)
#stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = "SmartStoplist.txt" #SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
stopwordpattern = build_stop_word_regex(stoppath)
# generate candidate keywords
phraseList = generate_candidate_keywords(sentenceList, stopwordpattern)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug: print keywordcandidates
sortedKeywords = sorted(keywordcandidates.iteritems(), key=operator.itemgetter(1), reverse=True)
if debug: print sortedKeywords
totalKeywords = len(sortedKeywords)
if debug: print totalKeywords
print sortedKeywords[0:(totalKeywords / 3)]
rake = Rake("SmartStoplist.txt")
keywords = rake.run(text)
print keywords | mit | -2,379,889,619,676,009,000 | 38.678571 | 580 | 0.674269 | false |
dsapandora/die_hard | backup.py | 1 | 4670 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import urllib
import requests
import math
from flask import Flask, request
app = Flask(__name__)
API_KEY = 'ebc2ccbd44d15f282010c6f3514c5c02'
API_URL = 'http://api.openweathermap.org/data/2.5/weather?'
API_QUERY = 'lat={lat}&lon={lon}&appid={api}'
# SAMPLE REQUEST
# http://
# api.openweathermap.org/data/2.5/weather?q=London,uk&appid=ebc2ccbd44d15f282010c6f3514c5c02
# API_KEY='E8D05ADD-DF71-3D14-3794-93FAF8ED8F5'
# API_URL='https://api.airmap.io/data/v1/status'
"""
curl -v -L -G \
--header "X-API-Key: 'E8D05ADD-DF71-3D14-3794-93FAF8ED8F5'" \
-d "latitude=33.9425&longitude=-118.4081&unique_id=laxexample" \
https://api.airmap.io/data/v1/status
"""
"""
curl -v -L -G \
--header "X-API-Key: fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146" \
-d "latitude=8.985955&longitude=-79.529316&radius=100000&unique_id=colexample&weather=true" \
https://api.airmap.io/data/v1/status
"""
"""
curl -v -L -G \
--header "X-API-Key: fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146" \
-d "latitude=8.983258&longitude=-79.557281&radius=100000&unique_id=colexample&weather=true" \
https://api.airmap.io/data/v1/status
Airport Tocumen:
latitude=9.088791&longitude=-79.384632
Airport Gelabert:
8.983258, -79.557281
curl -G \
--header "X-API-Key: fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146" \
-d "latitude=8.985955&longitude=-79.529316&radius=100000&unique_id=colexample&weather=true" \
https://api.airmap.io/data/v1/status
"""
@app.route('/')
def hello_world():
return 'Flask Dockerized'
@app.route('/get_data')
def get_data():
R = 6378.1 #Radius of the Earth
brng = 1.57 #Bearing is 90 degrees converted to radians.
d = 15 #Distance in km
lat1 = math.radians(52.20472) #Current lat point converted to radians
lon1 = math.radians(0.14056) #Current long point converted to radians
lat2 = math.asin( math.sin(lat1)*math.cos(d/R) +
math.cos(lat1)*math.sin(d/R)*math.cos(brng))
lon2 = lon1 + math.atan2(math.sin(brng)*math.sin(d/R)*math.cos(lat1),
math.cos(d/R)-math.sin(lat1)*math.sin(lat2))
lat2 = math.degrees(lat2)
lon2 = math.degrees(lon2)
print(lat2)
print(lon2)
return 'Get data route %s %s' % (lat2, lon2)
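# Sanity check (approximate, assuming the fixed inputs above): starting at
# (52.20472, 0.14056) with a bearing of 90 degrees and d = 15 km, one degree of
# longitude at this latitude spans about 111.32 * cos(52.2) ~= 68 km, so the
# longitude should shift by roughly 15 / 68 ~= 0.22 degrees while the latitude
# stays nearly unchanged.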
@app.route('/get_something_else')
def get_something_else():
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
if latitude is None:
latitude = 8.985955
if longitude is None:
longitude = -79.529316
url = API_URL + API_QUERY.format(lat=latitude, lon=longitude, api=API_KEY)
values = urllib2.urlopen(url).read()
return values
@app.route('/get_flight_zones')
def get_flight_zones():
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
if latitude is None:
latitude = 8.985955
if longitude is None:
longitude = -79.529316
url = 'https://api.airmap.io/data/v1/status?radius=360&latitude=%s&longitude=%s&unique_id=sample&weather=true' % (latitude, longitude)
headers = { 'X-API-Key': 'fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146' }
req = requests.get(url,headers=headers)
    no_flight_near_me = map(lambda x: x['name'], req.json()['nearest_advisories'])
    # Return the advisory names so the Flask route has a response body.
    return str(no_flight_near_me)
@app.route('/get_weather_data')
def get_weather_data():
"""
Weather parameters of wind speed and direction, gust speed potential, dew point, temperature and visibility.
"""
latitude = request.args.get('latitude')
longitude = request.args.get('longitude')
if latitude is None:
latitude = 8.985955
if longitude is None:
longitude = -79.529316
url = 'https://api.airmap.io/data/v1/status?radius=360&latitude=%s&longitude=%s&unique_id=sample&weather=true' % (latitude, longitude)
headers = { 'X-API-Key': 'fd94daed750a375ef87d87445090cc8fab3bf3f62796ac37698b6f7b3add3146' }
req = requests.get(url,headers=headers)
return str(req.json()['weather'])
# Haversine formula example in Python
# Author: Wayne Dyck
def distance(origin, destination):
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
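# Illustrative usage (coordinates taken from the airport comments above):
#   distance((8.983258, -79.557281), (9.088791, -79.384632))
# returns roughly 22 (km, since the Earth radius above is given in km).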
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0') | gpl-3.0 | 4,456,546,718,283,175,000 | 30.993151 | 138 | 0.679872 | false |
ayberkt/2048 | game.py | 1 | 3825 | from random import randint
from view import GridView
class Grid(object):
def __init__(self):
self.matrix = [ [2, 0, 2, 0],
[0, 0, 0, 8],
[0, 2, 0, 0],
[0, 0, 2, 4]]
self.score = 0
print "Play with WASD!"
def begin(self):
'''Start the game.'''
self.grid_view = GridView(self)
self.grid_view.initUI(self.matrix)
self.grid_view.mainloop()
def get_column(self, nth):
'''Get column at index.'''
column = []
for row in self.matrix:
column.append(row[nth])
return column
def set_column(self, nth, column):
'''Replace a column at index "nth".'''
for i in range(4):
self.matrix[i][nth] = column[i]
def insert_random_num(self):
'''Insert a random number to the grid.'''
x = randint(0, 3)
y = randint(0, 3)
while not self.matrix[y][x] == 0:
x = randint(0, 3)
y = randint(0, 3)
self.matrix[y][x] = 2
def control_state(self):
print "Score: " + str(self.score)
self.insert_random_num()
self.grid_view.layout_matrix(self.matrix)
def slide(self, direction):
        ''' Apply the corresponding shift to a column or row.
        Columns are treated as rows, thus sliding a column up is
        the same as shifting it left.
        direction must be one of "up", "down", "left" or "right". '''
if direction == "up":
for i in range(4):
column = self.get_column(i)
column = self.shift(column, "left")
self.set_column(i, column)
elif direction == "right":
for i in range(4):
row = self.matrix[i]
row = self.shift(row, "right")
self.matrix[i] = row
elif direction == "down":
for i in range(4):
column = self.get_column(i)
column = self.shift(column, "right")
self.set_column(i, column)
elif direction == "left":
for i in range(4):
row = self.matrix[i]
row = self.shift(row, "left")
self.matrix[i] = row
self.control_state()
def shift(self, array, direction):
'''Shift an array left or right specified with the "direction" arg.
If the input array is [2, 2, 4, 8] the result would be equal
to [4, 4, 8] after a left-shift is applied.'''
# Direction should be specified as either left or right.
assert(direction == 'left' or direction == 'right')
if sum(array) == 0: return array
if direction == 'right': array = array[::-1]
array = filter(lambda x: x != 0, array)
for index in range(1, len(array)):
if array[index - 1] == array[index]:
array[index - 1] += array[index]
self.score += array[index - 1]
array[index] = 0
array = filter(lambda x: x != 0, array)
while len(array) < 4:
array.append(0)
if direction == 'left': return array
if direction == 'right': return array[::-1]
def matrix_str(self):
'''Create a string representation of the matrix
in the current state. This method is to be used
for debugging purposes.'''
matrix_str = ""
for row in self.matrix:
row_str = ""
for num in row:
if num == 0:
row_str += " . "
else:
row_str += " " + str(num) + " "
row_str += "\n"
matrix_str += row_str
return matrix_str
if __name__ == "__main__":
game_grid = Grid()
game_grid.begin()
| mit | 2,469,207,557,474,849,300 | 29.6 | 75 | 0.491242 | false |
zimonkaizoku/GMTcsh2dos | GMTcsh2dos.py | 1 | 3970 | #!/usr/bin/env python
################################################################################
# GMTcsh2dos.py
# -------------------------------------------------
# Version: 0.1
# Author: Simon Dreutter
# License: GNU Generic Public License v3.0 / 2015
# -------------------------------------------------
################################################################################
#
# This is a script for translating simple GMT (Generic Mapping Tools) Unix
# csh scripts into DOS batch files. It will do some simple changes in the
# Syntax (comments, etc.) to ensure the compatibility. It is not meant for
# translating unix2dos in general, since this is not possible!
#
# Run script like so:
# GMTcsh2dos.py <Inputfile>
#
################################################################################
# import modules
import sys
#=================================================
# open GMT csh script:
#=================================================
try:
filename = sys.argv[1]
print('\nInput file: ' + filename)
except IndexError:
    print('\nNo Input file specified. Cancelled!\n')
sys.exit()
f = open(filename,'rb')
csh = f.read()
f.close()
#=================================================
# start with some simple replacement:
#=================================================
# ('\n','') for multiline commands
# ('\t','') for tabs inbetween command lines
# ('>!','>') for first time calling of the PS_FILE
# ('= ','=') to avoid spaces in the variable settings
# ('=>','= >') to recover '= >' in the -T option of grd2cpt
# ('rm -f','del') unix2dos syntax for deleting files
lines = csh.replace('\\\n','').replace('>!','>').replace('= ','=').replace('=>','= >').replace('rm -f','del').split('\n')
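# Illustrative translation (hypothetical GMT command line):
#   csh: psxy data.txt -R0/10/0/10 -JX10c >! map.ps
#   bat: psxy data.txt -R0/10/0/10 -JX10c > map.ps
# and a cleanup line such as "rm -f tmp.grd" becomes "del tmp.grd".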
#=================================================
# go on with some more complicated replacements:
#=================================================
# counter
i = 0
# list of script variables
var = []
# loop through all lines and do stuff
for line in lines:
# delete \t in lines that are not comments
if not line.startswith('#'):
lines[i] = line.replace('\t','')
line = lines[i]
# check for lines that contain a command and a following comment and
# get rid of the comment
if '#' in line and not line.startswith('#'):
lines[i] = line.split('#')[0]
line = lines[i]
# replace comment symbols ('#','REM ')
if line.startswith('#'):
lines[i] = line.replace('#','REM ',1)
line = lines[i]
# look for variable settings and append the to var list
if line.startswith('set'):
var.append(line.split('=')[0].split(' ')[1])
# loop through all variables in each line to change '$VAR' to '%VAR%'
for v in var:
v = '$'+v
if v in line:
lines[i] = line.replace(v,'%'+v[1:]+'%')
line = lines[i]
# DOS does not accept variables within " ", therefore get rid of them
if '"%' in line:
lines[i] = line.replace('"%','%')
line = lines[i]
if '%"' in line:
lines[i] = line.replace('%"','%')
line = lines[i]
# count up
i = i + 1
#=================================================
# write .bat file:
#=================================================
# file handling
filename = filename.split('.')[0] + '.bat'
f = open(filename,'wb')
# 'echo off' to make echos visible in DOS cmd
f.write('@echo off\r\n')
# write lines but skip initial '#! /bin/csh' line and 'Preview' command line
for line in lines:
if '! /bin' in line:
continue
if 'Preview' in line:
continue
f.write(line + '\r\n')
# 'echo on'
f.write('@echo on\r\n')
# close file
f.close()
#=================================================
# all done:
#=================================================
print('Output file: ' + filename)
print('\nAll Done!\n')
| gpl-3.0 | -2,424,475,750,428,373,000 | 29.775194 | 121 | 0.455416 | false |
Yelp/kafka-utils | kafka_utils/kafka_consumer_manager/commands/list_topics.py | 1 | 2354 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import sys
import six
from .offset_manager import OffsetManagerBase
from kafka_utils.util.client import KafkaToolClient
class ListTopics(OffsetManagerBase):
@classmethod
def setup_subparser(cls, subparsers):
parser_list_topics = subparsers.add_parser(
"list_topics",
description="List topics by consumer group.",
add_help=False
)
parser_list_topics.add_argument(
"-h", "--help", action="help",
help="Show this help message and exit."
)
parser_list_topics.add_argument(
'groupid',
help="Consumer Group ID whose topics shall be fetched."
)
parser_list_topics.set_defaults(command=cls.run)
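    # Illustrative CLI invocation (assuming the kafka-consumer-manager entry
    # point shipped with kafka-utils; cluster flags vary by deployment):
    #   kafka-consumer-manager --cluster-type=sample_type list_topics my_group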
@classmethod
def run(cls, args, cluster_config):
# Setup the Kafka client
client = KafkaToolClient(cluster_config.broker_list)
client.load_metadata_for_topics()
topics_dict = cls.preprocess_args(
groupid=args.groupid,
topic=None,
partitions=None,
cluster_config=cluster_config,
client=client,
fail_on_error=False,
use_admin_client=args.use_admin_client,
)
if not topics_dict:
print("Consumer Group ID: {group} does not exist.".format(
group=args.groupid,
))
sys.exit(1)
print("Consumer Group ID: {groupid}".format(groupid=args.groupid))
for topic, partitions in six.iteritems(topics_dict):
print("\tTopic: {topic}".format(topic=topic))
print("\t\tPartitions: {partitions}".format(partitions=partitions))
| apache-2.0 | 1,139,298,315,094,970,400 | 33.115942 | 79 | 0.639337 | false |
Intel-Corporation/tensorflow | tensorflow/python/eager/def_function_test.py | 1 | 17245 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
class _ModelWithOptimizer(training.Model):
def __init__(self):
super(_ModelWithOptimizer, self).__init__()
self.dense = core.Dense(1)
self.optimizer = adam.AdamOptimizer(0.01)
@def_function.function(
input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.float32)))
def call(self, x, y):
with backprop.GradientTape() as tape:
loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
trainable_variables = self.trainable_variables
gradients = tape.gradient(loss, trainable_variables)
self.optimizer.apply_gradients(zip(gradients, trainable_variables))
return {'loss': loss}
class _HasDecoratedMethod(object):
@def_function.function
def f(self, x):
return x * 3.
# pylint: disable=bad-continuation,anomalous-backslash-in-string
MIXING_GRAPH_EAGER_TENSORS_ERROR = (
"""An op outside of the function building code is being passed
a "Graph" tensor. It is possible to have Graph tensors
leak out of the function building context by including a
tf.init_scope in your function building code.
For example, the following function will fail:
@tf.function
def has_init_scope\(\):
my_constant = tf.constant\(1.\)
with tf.init_scope\(\):
added = my_constant \* 2
The graph tensor has name: Const:0""")
# pylint: enable=bad-continuation,anomalous-backslash-in-string
class DefFunctionTest(test.TestCase):
def testNoVariables(self):
@def_function.function
def fn(x):
return 2 * x
self.assertAllEqual(fn(constant_op.constant(4.0)), 8.0)
def testFailIfVariablesAreCreatedMoreThanOnce(self):
@def_function.function
def fn(x):
return variables.Variable(1.0) + x
with self.assertRaises(ValueError):
fn(1.0)
def testFailIfVariablesAreCreatedMoreThanOnceNoWeakRef(self):
state = []
@def_function.function
def fn(x):
state.append(variables.Variable(1.0))
return state[-1] + x
with self.assertRaises(ValueError):
fn(1.0)
def testCorrectVariableCreation(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testFunctionInitializer(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(lambda: 2.0))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
def testFunctionInitializationFunction(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
init_fn = fn.get_initialization_function(constant_op.constant(1.0))
self.assertEqual(len(state), 1)
self.assertFalse(
resource_variable_ops.var_is_initialized_op(state[0].handle))
init_fn()
self.assertEqual(state[0].numpy(), 2.0)
def testVariableInitializerNotConstant(self):
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
self.assertAllEqual(fn(constant_op.constant(1.0)), 2.0)
self.assertAllEqual(fn(constant_op.constant(3.0)), 6.0)
def testLegacyGraphModeVariables(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 2.0)
self.assertAllEqual(self.evaluate(result), 6.0)
def testLegacyGraphModeVariablesNonTrivialInitializer(self):
with ops.Graph().as_default(), self.test_session() as sess:
state = []
@def_function.function
def fn(x):
if not state:
two = constant_op.constant(2.0)
four = two * two
two_again = math_ops.sqrt(four)
state.append(variables.Variable(two_again + four))
return state[0] * x
result = fn(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(sess.run(state[0]), 6.0)
self.assertAllEqual(self.evaluate(result), 18.0)
def testLegacyGraphModeInputDependentInitializerFails(self):
with ops.Graph().as_default():
state = []
@def_function.function
def fn(x):
if not state:
state.append(variables.Variable(2.0 * x))
return state[0] * x
with self.assertRaisesRegexp(
lift_to_graph.UnliftableError, r'transitively.* mul .* x'):
fn(constant_op.constant(3.0))
def testMethod(self):
class MyModel(object):
def __init__(self):
self.var = None
@def_function.function
def apply(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return self.var * x
m0 = MyModel()
self.assertAllEqual(m0.apply(3.0), 6.0)
# Calling twice to exercise that we do not recreate variables.
m0.var.assign(3.0)
self.assertAllEqual(m0.apply(3.0), 9.0)
m1 = MyModel()
self.assertAllEqual(m1.apply(3.0), 6.0)
def test_functools_partial(self):
self.assertAllClose(
3.,
def_function.function(functools.partial(lambda x, y: x + y, 1.))(
constant_op.constant(2.)))
def test_functools_partial_new_default(self):
def f(x=3, y=7):
return x + y
func = def_function.function(functools.partial(f, y=6))
self.assertEqual(func().numpy(), 9)
self.assertEqual(func(y=8).numpy(), 11)
def test_functools_partial_keywords(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, x=array_ops.zeros([1]), y=array_ops.zeros([1])))
self.assertAllEqual(func(), [0.0])
def test_functools_partial_single_positional(self):
def f(x, y):
return x + y
func = def_function.function(
functools.partial(f, constant_op.constant(1)))
self.assertAllEqual(func(5), 6)
def test_unspecified_default_argument(self):
wrapped = def_function.function(
lambda x, y=2: x + y,
input_signature=[tensor_spec.TensorSpec((), dtypes.int32)])
self.assertEqual(3, wrapped(constant_op.constant(1)).numpy())
def test_optimizer(self):
x = constant_op.constant([[3., 4.]])
y = constant_op.constant([2.])
model = _ModelWithOptimizer()
model(x, y)
def test_concrete_function_from_signature(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
def compute(x):
return 2. * x
concrete = compute.get_concrete_function()
self.assertAllClose(1., concrete(constant_op.constant(0.5)))
concrete = compute.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32))
self.assertAllClose(4., concrete(constant_op.constant(2.)))
signature_args, _ = concrete.structured_input_signature
self.assertEqual(signature_args,
(tensor_spec.TensorSpec(
None, dtypes.float32, name='x'),))
def test_concrete_function_keyword_arguments(self):
@def_function.function
def f(x):
return x
conc = f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32, 'y'))
conc(y=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('y', signature_args[0].name)
conc = f.get_concrete_function(tensor_spec.TensorSpec(None, dtypes.float32))
conc(x=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('x', signature_args[0].name)
@def_function.function
def g(x):
return x[0]
conc = g.get_concrete_function(
[tensor_spec.TensorSpec(None, dtypes.float32, 'z'), 2])
conc(z=constant_op.constant(3.0))
signature_args, _ = conc.structured_input_signature
self.assertEqual('z', signature_args[0][0].name)
with self.assertRaisesRegexp(
ValueError, 'either zero or all names have to be specified'):
conc = g.get_concrete_function([
tensor_spec.TensorSpec(None, dtypes.float32, 'z'),
tensor_spec.TensorSpec(None, dtypes.float32),
])
def test_error_inner_capture(self):
@def_function.function
def f(inputs):
num_steps, _ = inputs.shape[:2]
outputs = []
for t in math_ops.range(num_steps):
outputs.append(inputs[t])
return outputs
with self.assertRaisesRegexp(ValueError, 'inner'):
f(array_ops.zeros(shape=(8, 42, 3)))
def testRuntimeErrorNotSticky(self):
@def_function.function
def fail(i):
control_flow_ops.Assert(math_ops.equal(i, 0), ['ick'])
fail(constant_op.constant(0)) # OK
with self.assertRaises(errors.InvalidArgumentError):
fail(constant_op.constant(1)) # InvalidArgument: "ick"
fail(constant_op.constant(0)) # OK
def testUnderscoreName(self):
@def_function.function
def f(_):
return _ + _
self.assertAllEqual(2.0, f(constant_op.constant(1.0)))
def test_serialization_signature_cache(self):
@def_function.function
def f(x, y):
return x, y
f(constant_op.constant([[3., 4.]]), constant_op.constant([2.]))
f(constant_op.constant([[3, 4, 5]]), constant_op.constant([2]))
signatures_args = set()
concrete_functions = f._list_all_concrete_functions_for_serialization()
for concrete_function in concrete_functions:
args, kwargs = concrete_function.structured_input_signature
signatures_args.add(args)
self.assertEqual(dict(), kwargs)
self.assertEqual(
signatures_args,
set(((tensor_spec.TensorSpec([1, 2], dtypes.float32, name='x'),
tensor_spec.TensorSpec([1], dtypes.float32, name='y')),
(tensor_spec.TensorSpec([1, 3], dtypes.int32, name='x'),
tensor_spec.TensorSpec([1], dtypes.int32, name='y')))))
@test_util.assert_no_garbage_created
def testFunctionReferenceCycles(self):
fn = def_function.function(lambda x: 2. * x)
fn(constant_op.constant(4.0))
weak_fn = weakref.ref(fn)
del fn
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
@test_util.assert_no_garbage_created
def testMethodReferenceCycles(self):
has_decorated_method = _HasDecoratedMethod()
has_decorated_method.f(constant_op.constant(5.))
weak_fn = weakref.ref(has_decorated_method.f)
del has_decorated_method
# Tests that the weak reference we made to the function is now dead, which
# means the object has been deleted. This should be true as long as the
# function itself is not involved in a reference cycle.
self.assertIs(None, weak_fn())
def testErrorMessageWhenGraphTensorIsPassedToEager(self):
@def_function.function
def failing_function():
a = constant_op.constant(1.)
with ops.init_scope():
_ = a + a
with self.assertRaisesRegexp(TypeError, MIXING_GRAPH_EAGER_TENSORS_ERROR):
failing_function()
def testVariableCreatorScope(self):
created_variables = []
captured_variables = []
@def_function.function
def f():
if not created_variables:
created_variables.append(variables.Variable(1.))
return created_variables[0] + 1.
def capture_creator(next_creator, **kwargs):
created = next_creator(**kwargs)
captured_variables.append(created)
return created
with variable_scope.variable_creator_scope(capture_creator):
f()
self.assertEqual(created_variables, captured_variables)
def testVarAlreadyInitializedNoClobbering(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
add_var.get_concrete_function(constant_op.constant(2.))
self.assertAllClose([13., 14.], add_var(constant_op.constant(2.)))
def testSameVariableTwice(self):
v = variables.Variable(1.0)
@def_function.function
def add(a, b):
return a + b
self.assertAllEqual(add(v, v), 2.0)
def testShapeCache(self):
@def_function.function
def func(x):
return 2 * x
func_a = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
func_b = func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32))
self.assertIs(func_a, func_b)
def testInitializationInNestedCall(self):
v_holder = []
@def_function.function
def add_var(x):
if not v_holder:
v = variables.Variable([1., 2.])
v_holder.append(v)
already_initialized = variables.Variable(3.)
with ops.init_scope():
already_initialized.assign(10.)
v_holder.append(already_initialized)
return v_holder[0] + v_holder[1] + x
@def_function.function
def wrapper(x):
return add_var(x)
self.assertAllClose([13., 14.], wrapper(constant_op.constant(2.)))
v_holder[1].assign(11.)
self.assertAllClose([14., 15.], wrapper(constant_op.constant(2.)))
@test_util.run_gpu_only
def testDeviceAnnotationRespected(self):
a = []
@def_function.function()
def create_variable():
with ops.init_scope():
initial_value = random_ops.random_uniform(
(2, 2), maxval=1000000, dtype=dtypes.int64)
if not a:
with ops.device("CPU:0"):
a.append(resource_variable_ops.ResourceVariable(initial_value))
return a[0].read_value()
created_variable_read = create_variable()
self.assertRegexpMatches(created_variable_read.device, "CPU")
def testDecorate(self):
func = def_function.function(lambda: 1)
def decorator(f):
return lambda: 1 + f()
func._decorate(decorator)
self.assertEqual(func().numpy(), 2)
def testLiftPlaceholderInitializedVariable(self):
with ops.Graph().as_default():
var_list = []
@def_function.function
def use_variable():
if not var_list:
initial_value = array_ops.placeholder(shape=[], dtype=dtypes.float32)
v = variables.Variable(initial_value)
var_list.append(v)
return var_list[0] + 1.
var_plus_one = use_variable()
with self.session() as session:
init_op = var_list[0].initializer
session.run(init_op, feed_dict={init_op.inputs[1]: 2.})
self.assertEqual(3., session.run(var_plus_one))
def testDecorate_rejectedAfterTrace(self):
func = def_function.function(lambda: 1)
self.assertEqual(func().numpy(), 1)
msg = 'Functions cannot be decorated after they have been traced.'
with self.assertRaisesRegexp(ValueError, msg):
func._decorate(lambda f: f)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 | -179,380,147,259,264,420 | 29.905018 | 80 | 0.662221 | false |
sveetch/djangocodemirror | tests/001_manifest/test_004_config.py | 1 | 3056 | """
Tests against manifest for CodeMirror configurations
"""
import pytest
from djangocodemirror.exceptions import NotRegisteredError
def test_raw_config_empty(manifesto):
"""Empty config"""
manifesto.autoregister()
config = manifesto.get_codemirror_parameters('empty')
assert config == {}
def test_get_config_success(settings, manifesto):
"""Use get_config on non registered name"""
manifesto.autoregister()
assert manifesto.get_config('empty') == {
'modes': [],
'addons': [],
'themes': [],
'css_bundle_name': 'dcm-empty_css',
'js_bundle_name': 'dcm-empty_js',
}
def test_get_config_error(settings, manifesto):
"""Use get_config on non registered name"""
manifesto.autoregister()
with pytest.raises(NotRegisteredError):
registred = manifesto.get_config('nope')
def test_get_configs_single_success(settings, manifesto):
"""Use get_configs for one registred config"""
manifesto.register('empty')
manifesto.register('basic')
registred = manifesto.get_configs('empty')
assert registred == {
'empty': {
'modes': [],
'addons': [],
'themes': [],
'css_bundle_name': 'dcm-empty_css',
'js_bundle_name': 'dcm-empty_js',
},
}
def test_get_configs_multiple_success(settings, manifesto):
"""Use get_configs for all registred configs"""
manifesto.register('empty')
manifesto.register('basic')
registred = manifesto.get_configs()
assert registred == {
'empty': {
'modes': [],
'addons': [],
'themes': [],
'css_bundle_name': 'dcm-empty_css',
'js_bundle_name': 'dcm-empty_js',
},
'basic': {
'mode': 'rst',
'modes': [],
'addons': [],
'themes': [],
'css_bundle_name': 'dcm-basic_css',
'js_bundle_name': 'dcm-basic_js',
},
}
def test_get_configs_single_error(settings, manifesto):
"""Use get_configs on single non registered name"""
manifesto.autoregister()
with pytest.raises(NotRegisteredError):
registred = manifesto.get_configs('nope')
@pytest.mark.parametrize('name,options', [
(
'empty',
{},
),
(
'basic',
{
'mode': 'rst',
},
),
(
'with-options',
{
'mode': 'rst',
'lineWrapping': True,
'lineNumbers': True,
},
),
(
'with-themes',
{
'mode': 'rst',
'theme': 'elegant',
},
),
(
'with-all',
{
'mode': 'rst',
'lineWrapping': True,
'lineNumbers': True,
'theme': 'neat',
},
),
])
def test_codemirror_parameters(manifesto, name, options):
"""CodeMirror parameters"""
manifesto.autoregister()
config = manifesto.get_codemirror_parameters(name)
assert config == options
| mit | 3,459,122,332,573,493,000 | 22.507692 | 59 | 0.531414 | false |
Ogreman/django-termsearch | docs/conf.py | 1 | 8138 | # -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
cwd = os.getcwd()
parent = os.path.dirname(cwd)
sys.path.append(parent)
import termsearch
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'dj-termsearch'
copyright = u'2014, James Cox'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = termsearch.__version__
# The full version, including alpha/beta/rc tags.
release = termsearch.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'dj-termsearchdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'dj-termsearch.tex', u'dj-termsearch Documentation',
u'James Cox', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dj-termsearch', u'dj-termsearch Documentation',
[u'James Cox'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'dj-termsearch', u'dj-termsearch Documentation',
u'James Cox', 'dj-termsearch', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | bsd-3-clause | 2,248,664,516,863,073,500 | 31.043307 | 80 | 0.706439 | false |
jlmadurga/django-telegram-bot | telegrambot/bot_views/generic/responses.py | 1 | 1680 | from django.template import RequestContext, TemplateDoesNotExist
from django.template.loader import get_template
from telegram import ReplyKeyboardMarkup, ReplyKeyboardRemove
import ast
import logging
from django.http.request import HttpRequest
logger = logging.getLogger(__name__)
class TemplateResponse(object):
def __init__(self, template_name, ctx=None):
self.template_name = template_name
if ctx is None:
self.ctx = {}
else:
self.ctx = ctx
def render(self):
if not self.template_name:
return None
try:
logger.debug("Template name: %s" % self.template_name)
template = get_template(self.template_name)
except TemplateDoesNotExist:
logger.debug("Template not found: %s" % self.template_name)
return None
        # TODO: Avoid using a null HttpRequest to context processors
ctx = RequestContext(HttpRequest(), self.ctx)
return template.render(ctx)
class TextResponse(TemplateResponse):
def __init__(self, template_text, ctx=None):
super(TextResponse, self).__init__(template_text, ctx)
class KeyboardResponse(TemplateResponse):
def __init__(self, template_keyboard, ctx=None):
super(KeyboardResponse, self).__init__(template_keyboard, ctx)
def render(self):
keyboard = super(KeyboardResponse, self).render()
if keyboard:
keyboard = ast.literal_eval(keyboard)
keyboard = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
else:
keyboard = ReplyKeyboardRemove()
return keyboard | bsd-3-clause | -3,643,803,183,915,614,000 | 33.306122 | 74 | 0.641667 | false |
ntt-sic/heat | heat/tests/test_sqlalchemy_api.py | 1 | 59442 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from datetime import datetime
from datetime import timedelta
import fixtures
from json import loads
from json import dumps
import mock
import mox
from heat.db.sqlalchemy import api as db_api
from heat.engine import environment
from heat.tests.v1_1 import fakes
from heat.engine.resource import Resource
from heat.common import context
from heat.common import exception
from heat.common import template_format
from heat.engine.resources import instance as instances
from heat.engine import parser
from heat.engine import scheduler
from heat.openstack.common import timeutils
from heat.tests.common import HeatTestCase
from heat.tests import utils
from heat.engine.clients import novaclient
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"UserData" : "wordpress"
}
}
}
}
'''
UUIDs = (UUID1, UUID2, UUID3) = sorted([str(uuid.uuid4())
for x in range(3)])
class MyResource(Resource):
properties_schema = {
'ServerName': {'Type': 'String', 'Required': True},
'Flavor': {'Type': 'String', 'Required': True},
'ImageName': {'Type': 'String', 'Required': True},
'UserData': {'Type': 'String'},
'PublicKey': {'Type': 'String'}
}
@property
def my_secret(self):
return db_api.resource_data_get(self, 'my_secret')
@my_secret.setter
def my_secret(self, my_secret):
db_api.resource_data_set(self, 'my_secret', my_secret, True)
class SqlAlchemyTest(HeatTestCase):
def setUp(self):
super(SqlAlchemyTest, self).setUp()
self.fc = fakes.FakeClient()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.ctx = utils.dummy_context()
def tearDown(self):
super(SqlAlchemyTest, self).tearDown()
def _setup_test_stack(self, stack_name, stack_id=None, owner_id=None):
t = template_format.parse(wp_template)
template = parser.Template(t)
stack_id = stack_id or str(uuid.uuid4())
stack = parser.Stack(self.ctx, stack_name, template,
environment.Environment({'KeyName': 'test'}),
owner_id=owner_id)
with utils.UUIDStub(stack_id):
stack.store()
return (t, stack)
def _mock_create(self, mocks):
fc = fakes.FakeClient()
mocks.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().MultipleTimes().AndReturn(fc)
mocks.StubOutWithMock(fc.servers, 'create')
fc.servers.create(image=744, flavor=3, key_name='test',
name=mox.IgnoreArg(),
security_groups=None,
userdata=mox.IgnoreArg(), scheduler_hints=None,
meta=None, nics=None,
availability_zone=None).MultipleTimes().AndReturn(
fc.servers.list()[4])
return fc
def _mock_delete(self, mocks):
fc = fakes.FakeClient()
mocks.StubOutWithMock(instances.Instance, 'nova')
instances.Instance.nova().MultipleTimes().AndReturn(fc)
mocks.StubOutWithMock(fc.client, 'get_servers_9999')
get = fc.client.get_servers_9999
get().MultipleTimes().AndRaise(novaclient.exceptions.NotFound(404))
@mock.patch.object(db_api, '_paginate_query')
def test_filter_and_page_query_paginates_query(self, mock_paginate_query):
query = mock.Mock()
db_api._filter_and_page_query(self.ctx, query)
assert mock_paginate_query.called
@mock.patch.object(db_api.db_filters, 'exact_filter')
def test_filter_and_page_query_handles_no_filters(self, mock_db_filter):
query = mock.Mock()
db_api._filter_and_page_query(self.ctx, query)
mock_db_filter.assert_called_once_with(mock.ANY, mock.ANY, {})
@mock.patch.object(db_api.db_filters, 'exact_filter')
def test_filter_and_page_query_applies_filters(self, mock_db_filter):
query = mock.Mock()
filters = {'foo': 'bar'}
db_api._filter_and_page_query(self.ctx, query, filters=filters)
assert mock_db_filter.called
@mock.patch.object(db_api, '_paginate_query')
def test_filter_and_page_query_whitelists_sort_keys(self,
mock_paginate_query):
query = mock.Mock()
sort_keys = ['name', 'foo']
db_api._filter_and_page_query(self.ctx, query, sort_keys=sort_keys)
args, _ = mock_paginate_query.call_args
self.assertIn(['name'], args)
@mock.patch.object(db_api.utils, 'paginate_query')
def test_paginate_query_default_sorts_by_created_at_and_id(
self, mock_paginate_query):
query = mock.Mock()
model = mock.Mock()
db_api._paginate_query(self.ctx, query, model, sort_keys=None)
args, _ = mock_paginate_query.call_args
self.assertIn(['created_at', 'id'], args)
@mock.patch.object(db_api.utils, 'paginate_query')
def test_paginate_query_default_sorts_dir_by_desc(self,
mock_paginate_query):
query = mock.Mock()
model = mock.Mock()
db_api._paginate_query(self.ctx, query, model, sort_dir=None)
args, _ = mock_paginate_query.call_args
self.assertIn('desc', args)
@mock.patch.object(db_api.utils, 'paginate_query')
def test_paginate_query_uses_given_sort_plus_id(self,
mock_paginate_query):
query = mock.Mock()
model = mock.Mock()
db_api._paginate_query(self.ctx, query, model, sort_keys=['name'])
args, _ = mock_paginate_query.call_args
self.assertIn(['name', 'id'], args)
@mock.patch.object(db_api.utils, 'paginate_query')
@mock.patch.object(db_api, 'model_query')
def test_paginate_query_gets_model_marker(self, mock_query,
mock_paginate_query):
query = mock.Mock()
model = mock.Mock()
marker = mock.Mock()
mock_query_object = mock.Mock()
mock_query_object.get.return_value = 'real_marker'
mock_query.return_value = mock_query_object
db_api._paginate_query(self.ctx, query, model, marker=marker)
mock_query_object.get.assert_called_once_with(marker)
args, _ = mock_paginate_query.call_args
self.assertIn('real_marker', args)
@mock.patch.object(db_api.utils, 'paginate_query')
def test_paginate_query_raises_invalid_sort_key(self, mock_paginate_query):
query = mock.Mock()
model = mock.Mock()
mock_paginate_query.side_effect = db_api.utils.InvalidSortKey()
self.assertRaises(exception.Invalid, db_api._paginate_query,
self.ctx, query, model, sort_keys=['foo'])
def test_filter_sort_keys_returns_empty_list_if_no_keys(self):
sort_keys = None
whitelist = None
filtered_keys = db_api._filter_sort_keys(sort_keys, whitelist)
self.assertEqual([], filtered_keys)
def test_filter_sort_keys_whitelists_single_key(self):
sort_key = 'foo'
whitelist = ['foo']
filtered_keys = db_api._filter_sort_keys(sort_key, whitelist)
self.assertEqual(['foo'], filtered_keys)
def test_filter_sort_keys_whitelists_multiple_keys(self):
sort_keys = ['foo', 'bar', 'nope']
whitelist = ['foo', 'bar']
filtered_keys = db_api._filter_sort_keys(sort_keys, whitelist)
self.assertIn('foo', filtered_keys)
self.assertIn('bar', filtered_keys)
self.assertNotIn('nope', filtered_keys)
def test_encryption(self):
stack_name = 'test_encryption'
(t, stack) = self._setup_test_stack(stack_name)
cs = MyResource('cs_encryption',
t['Resources']['WebServer'],
stack)
# This gives the fake cloud server an id and created_time attribute
cs._store_or_update(cs.CREATE, cs.IN_PROGRESS, 'test_store')
cs.my_secret = 'fake secret'
rs = db_api.resource_get_by_name_and_stack(self.ctx,
'cs_encryption',
stack.id)
encrypted_key = rs.data[0]['value']
self.assertNotEqual(encrypted_key, "fake secret")
# Test private_key property returns decrypted value
self.assertEqual("fake secret", cs.my_secret)
        # Do this twice to verify that the ORM does not commit the
        # unencrypted value.
self.assertEqual("fake secret", cs.my_secret)
scheduler.TaskRunner(cs.destroy)()
def test_resource_data_delete(self):
stack = self._setup_test_stack('stack', UUID1)[1]
self._mock_create(self.m)
self.m.ReplayAll()
stack.create()
rsrc = stack['WebServer']
db_api.resource_data_set(rsrc, 'test', 'test_data')
self.assertEqual('test_data', db_api.resource_data_get(rsrc, 'test'))
db_api.resource_data_delete(rsrc, 'test')
self.assertRaises(exception.NotFound,
db_api.resource_data_get, rsrc, 'test')
def test_stack_get_by_name(self):
stack = self._setup_test_stack('stack', UUID1)[1]
st = db_api.stack_get_by_name(self.ctx, 'stack')
self.assertEqual(UUID1, st.id)
stack.delete()
st = db_api.stack_get_by_name(self.ctx, 'stack')
self.assertIsNone(st)
def test_nested_stack_get_by_name(self):
stack1 = self._setup_test_stack('stack1', UUID1)[1]
stack2 = self._setup_test_stack('stack2', UUID2,
owner_id=stack1.id)[1]
result = db_api.stack_get_by_name(self.ctx, 'stack2')
self.assertEqual(UUID2, result.id)
stack2.delete()
result = db_api.stack_get_by_name(self.ctx, 'stack2')
self.assertIsNone(result)
def test_stack_get_by_name_and_owner_id(self):
stack1 = self._setup_test_stack('stack1', UUID1)[1]
stack2 = self._setup_test_stack('stack2', UUID2,
owner_id=stack1.id)[1]
result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
None)
self.assertIsNone(result)
result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
stack1.id)
self.assertEqual(UUID2, result.id)
stack2.delete()
result = db_api.stack_get_by_name_and_owner_id(self.ctx, 'stack2',
stack1.id)
self.assertIsNone(result)
def test_stack_get(self):
stack = self._setup_test_stack('stack', UUID1)[1]
st = db_api.stack_get(self.ctx, UUID1, show_deleted=False)
self.assertEqual(UUID1, st.id)
stack.delete()
st = db_api.stack_get(self.ctx, UUID1, show_deleted=False)
self.assertIsNone(st)
st = db_api.stack_get(self.ctx, UUID1, show_deleted=True)
self.assertEqual(UUID1, st.id)
def test_stack_get_show_deleted_context(self):
stack = self._setup_test_stack('stack', UUID1)[1]
self.assertFalse(self.ctx.show_deleted)
st = db_api.stack_get(self.ctx, UUID1)
self.assertEqual(UUID1, st.id)
stack.delete()
st = db_api.stack_get(self.ctx, UUID1)
self.assertIsNone(st)
self.ctx.show_deleted = True
st = db_api.stack_get(self.ctx, UUID1)
self.assertEqual(UUID1, st.id)
def test_stack_get_all(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_get_all(self.ctx)
self.assertEqual(3, len(st_db))
stacks[0].delete()
st_db = db_api.stack_get_all(self.ctx)
self.assertEqual(2, len(st_db))
stacks[1].delete()
st_db = db_api.stack_get_all(self.ctx)
self.assertEqual(1, len(st_db))
def test_stack_get_all_by_tenant(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_get_all_by_tenant(self.ctx)
self.assertEqual(3, len(st_db))
stacks[0].delete()
st_db = db_api.stack_get_all_by_tenant(self.ctx)
self.assertEqual(2, len(st_db))
stacks[1].delete()
st_db = db_api.stack_get_all_by_tenant(self.ctx)
self.assertEqual(1, len(st_db))
def test_stack_get_all_by_tenant_and_filters(self):
self._setup_test_stack('foo', UUID1)
self._setup_test_stack('bar', UUID2)
filters = {'name': 'foo'}
results = db_api.stack_get_all_by_tenant(self.ctx,
filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('foo', results[0]['name'])
def test_stack_get_all_by_tenant_filter_matches_in_list(self):
self._setup_test_stack('foo', UUID1)
self._setup_test_stack('bar', UUID2)
filters = {'name': ['bar', 'quux']}
results = db_api.stack_get_all_by_tenant(self.ctx,
filters=filters)
self.assertEqual(1, len(results))
self.assertEqual('bar', results[0]['name'])
def test_stack_get_all_by_tenant_returns_all_if_no_filters(self):
self._setup_test_stack('foo', UUID1)
self._setup_test_stack('bar', UUID2)
filters = None
results = db_api.stack_get_all_by_tenant(self.ctx,
filters=filters)
self.assertEqual(2, len(results))
def test_stack_get_all_by_tenant_default_sort_keys_and_dir(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_get_all_by_tenant(self.ctx)
self.assertEqual(3, len(st_db))
self.assertEqual(stacks[2].id, st_db[0].id)
self.assertEqual(stacks[1].id, st_db[1].id)
self.assertEqual(stacks[0].id, st_db[2].id)
def test_stack_get_all_by_tenant_default_sort_dir(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_get_all_by_tenant(self.ctx, sort_dir='asc')
self.assertEqual(3, len(st_db))
self.assertEqual(stacks[0].id, st_db[0].id)
self.assertEqual(stacks[1].id, st_db[1].id)
self.assertEqual(stacks[2].id, st_db[2].id)
def test_stack_get_all_by_tenant_str_sort_keys(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_get_all_by_tenant(self.ctx,
sort_keys='created_at')
self.assertEqual(3, len(st_db))
self.assertEqual(stacks[0].id, st_db[0].id)
self.assertEqual(stacks[1].id, st_db[1].id)
self.assertEqual(stacks[2].id, st_db[2].id)
@mock.patch.object(db_api.utils, 'paginate_query')
def test_stack_get_all_by_tenant_filters_sort_keys(self, mock_paginate):
sort_keys = ['name', 'status', 'created_at', 'updated_at', 'username']
db_api.stack_get_all_by_tenant(self.ctx,
sort_keys=sort_keys)
args, _ = mock_paginate.call_args
used_sort_keys = set(args[3])
expected_keys = set(['name', 'status', 'created_at',
'updated_at', 'id'])
self.assertEqual(expected_keys, used_sort_keys)
def test_stack_get_all_by_tenant_marker(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_get_all_by_tenant(self.ctx, marker=stacks[1].id)
self.assertEqual(1, len(st_db))
self.assertEqual(stacks[0].id, st_db[0].id)
def test_stack_get_all_by_tenant_non_existing_marker(self):
[self._setup_test_stack('stack', x)[1] for x in UUIDs]
        nonexistent_uuid = 'this stack doesnt exist'
        st_db = db_api.stack_get_all_by_tenant(self.ctx, marker=nonexistent_uuid)
self.assertEqual(3, len(st_db))
def test_stack_get_all_by_tenant_doesnt_mutate_sort_keys(self):
[self._setup_test_stack('stack', x)[1] for x in UUIDs]
sort_keys = ['id']
db_api.stack_get_all_by_tenant(self.ctx, sort_keys=sort_keys)
self.assertEqual(['id'], sort_keys)
def test_stack_count_all_by_tenant(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
st_db = db_api.stack_count_all_by_tenant(self.ctx)
self.assertEqual(3, st_db)
stacks[0].delete()
st_db = db_api.stack_count_all_by_tenant(self.ctx)
self.assertEqual(2, st_db)
stacks[1].delete()
st_db = db_api.stack_count_all_by_tenant(self.ctx)
self.assertEqual(1, st_db)
def test_stack_count_all_by_tenant_with_filters(self):
self._setup_test_stack('foo', UUID1)
self._setup_test_stack('bar', UUID2)
self._setup_test_stack('bar', UUID3)
filters = {'name': 'bar'}
st_db = db_api.stack_count_all_by_tenant(self.ctx, filters=filters)
self.assertEqual(2, st_db)
def test_event_get_all_by_stack(self):
stack = self._setup_test_stack('stack', UUID1)[1]
self._mock_create(self.m)
self.m.ReplayAll()
stack.create()
self.m.UnsetStubs()
events = db_api.event_get_all_by_stack(self.ctx, UUID1)
self.assertEqual(2, len(events))
self._mock_delete(self.m)
self.m.ReplayAll()
stack.delete()
events = db_api.event_get_all_by_stack(self.ctx, UUID1)
self.assertEqual(4, len(events))
self.m.VerifyAll()
def test_event_count_all_by_stack(self):
stack = self._setup_test_stack('stack', UUID1)[1]
self._mock_create(self.m)
self.m.ReplayAll()
stack.create()
self.m.UnsetStubs()
num_events = db_api.event_count_all_by_stack(self.ctx, UUID1)
self.assertEqual(2, num_events)
self._mock_delete(self.m)
self.m.ReplayAll()
stack.delete()
num_events = db_api.event_count_all_by_stack(self.ctx, UUID1)
self.assertEqual(4, num_events)
self.m.VerifyAll()
def test_event_get_all_by_tenant(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
self._mock_create(self.m)
self.m.ReplayAll()
[s.create() for s in stacks]
self.m.UnsetStubs()
events = db_api.event_get_all_by_tenant(self.ctx)
self.assertEqual(6, len(events))
self._mock_delete(self.m)
self.m.ReplayAll()
[s.delete() for s in stacks]
events = db_api.event_get_all_by_tenant(self.ctx)
self.assertEqual(0, len(events))
self.m.VerifyAll()
def test_event_get_all(self):
stacks = [self._setup_test_stack('stack', x)[1] for x in UUIDs]
self._mock_create(self.m)
self.m.ReplayAll()
[s.create() for s in stacks]
self.m.UnsetStubs()
events = db_api.event_get_all(self.ctx)
self.assertEqual(6, len(events))
self._mock_delete(self.m)
self.m.ReplayAll()
stacks[0].delete()
events = db_api.event_get_all(self.ctx)
self.assertEqual(4, len(events))
self.m.VerifyAll()
def test_user_creds_password(self):
self.ctx.trust_id = None
db_creds = db_api.user_creds_create(self.ctx)
load_creds = db_api.user_creds_get(db_creds.id)
self.assertEqual('test_username', load_creds.get('username'))
self.assertEqual('password', load_creds.get('password'))
self.assertEqual('test_tenant', load_creds.get('tenant'))
self.assertEqual('test_tenant_id', load_creds.get('tenant_id'))
self.assertIsNotNone(load_creds.get('created_at'))
self.assertIsNone(load_creds.get('updated_at'))
self.assertEqual('http://server.test:5000/v2.0',
load_creds.get('auth_url'))
self.assertIsNone(load_creds.get('trust_id'))
self.assertIsNone(load_creds.get('trustor_user_id'))
def test_user_creds_trust(self):
self.ctx.username = None
self.ctx.password = None
self.ctx.trust_id = 'atrust123'
self.ctx.trustor_user_id = 'atrustor123'
self.ctx.tenant_id = 'atenant123'
self.ctx.tenant = 'atenant'
db_creds = db_api.user_creds_create(self.ctx)
load_creds = db_api.user_creds_get(db_creds.id)
self.assertIsNone(load_creds.get('username'))
self.assertIsNone(load_creds.get('password'))
self.assertIsNotNone(load_creds.get('created_at'))
self.assertIsNone(load_creds.get('updated_at'))
self.assertIsNone(load_creds.get('auth_url'))
self.assertEqual('atenant123', load_creds.get('tenant_id'))
self.assertEqual('atenant', load_creds.get('tenant'))
self.assertEqual('atrust123', load_creds.get('trust_id'))
self.assertEqual('atrustor123', load_creds.get('trustor_user_id'))
def test_user_creds_none(self):
self.ctx.username = None
self.ctx.password = None
self.ctx.trust_id = None
db_creds = db_api.user_creds_create(self.ctx)
load_creds = db_api.user_creds_get(db_creds.id)
self.assertIsNone(load_creds.get('username'))
self.assertIsNone(load_creds.get('password'))
self.assertIsNone(load_creds.get('trust_id'))
def test_software_config_create(self):
tenant_id = self.ctx.tenant_id
config = db_api.software_config_create(
self.ctx, {'name': 'config_mysql',
'tenant': tenant_id})
self.assertIsNotNone(config)
self.assertEqual('config_mysql', config.name)
self.assertEqual(tenant_id, config.tenant)
def test_software_config_get(self):
self.assertIsNone(
db_api.software_config_get(self.ctx, str(uuid.uuid4())))
io = {'inputs': [{'name': 'foo'}, {'name': 'bar'}],
'outputs': [{'name': 'result'}]}
tenant_id = self.ctx.tenant_id
conf = ('#!/bin/bash\n'
'echo "$bar and $foo"\n')
values = {'name': 'config_mysql',
'tenant': tenant_id,
'group': 'Heat::Shell',
'config': conf,
'io': io}
config = db_api.software_config_create(
self.ctx, values)
config_id = config.id
config = db_api.software_config_get(self.ctx, config_id)
self.assertIsNotNone(config)
self.assertEqual('config_mysql', config.name)
self.assertEqual(tenant_id, config.tenant)
self.assertEqual('Heat::Shell', config.group)
self.assertEqual(conf, config.config)
self.assertEqual(io, config.io)
self.ctx.tenant_id = None
config = db_api.software_config_get(self.ctx, config_id)
self.assertIsNone(config)
def test_software_config_delete(self):
tenant_id = self.ctx.tenant_id
config = db_api.software_config_create(
self.ctx, {'name': 'config_mysql',
'tenant': tenant_id})
config_id = config.id
db_api.software_config_delete(self.ctx, config_id)
config = db_api.software_config_get(self.ctx, config_id)
self.assertIsNone(config)
err = self.assertRaises(
exception.NotFound, db_api.software_config_delete,
self.ctx, config_id)
self.assertIn(config_id, str(err))
def _deployment_values(self):
tenant_id = self.ctx.tenant_id
config_id = db_api.software_config_create(
self.ctx, {'name': 'config_mysql', 'tenant': tenant_id}).id
server_id = str(uuid.uuid4())
input_values = {'foo': 'fooooo', 'bar': 'baaaaa'}
values = {
'tenant': tenant_id,
'config_id': config_id,
'server_id': server_id,
'input_values': input_values
}
return values
def test_software_deployment_create(self):
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
self.assertIsNotNone(deployment)
self.assertEqual(values['tenant'], deployment.tenant)
def test_software_deployment_get(self):
self.assertIsNone(
db_api.software_deployment_get(self.ctx, str(uuid.uuid4())))
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
self.assertIsNotNone(deployment)
deployment_id = deployment.id
deployment = db_api.software_deployment_get(self.ctx, deployment_id)
self.assertIsNotNone(deployment)
self.assertEqual(values['tenant'], deployment.tenant)
self.assertEqual(values['config_id'], deployment.config_id)
self.assertEqual(values['server_id'], deployment.server_id)
self.assertEqual(values['input_values'], deployment.input_values)
self.ctx.tenant_id = None
deployment = db_api.software_deployment_get(self.ctx, deployment_id)
self.assertIsNone(deployment)
def test_software_deployment_get_all(self):
self.assertEqual([], db_api.software_deployment_get_all(self.ctx))
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
self.assertIsNotNone(deployment)
        deployments = db_api.software_deployment_get_all(self.ctx)
        self.assertEqual(1, len(deployments))
        self.assertEqual(deployment, deployments[0])
        deployments = db_api.software_deployment_get_all(
            self.ctx, server_id=values['server_id'])
        self.assertEqual(1, len(deployments))
        self.assertEqual(deployment, deployments[0])
        deployments = db_api.software_deployment_get_all(
            self.ctx, server_id=str(uuid.uuid4()))
        self.assertEqual([], deployments)
def test_software_deployment_update(self):
deployment_id = str(uuid.uuid4())
err = self.assertRaises(exception.NotFound,
db_api.software_deployment_update,
self.ctx, deployment_id, values={})
self.assertIn(deployment_id, str(err))
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
deployment_id = deployment.id
values = {'status': 'COMPLETED'}
deployment = db_api.software_deployment_update(
self.ctx, deployment_id, values)
self.assertIsNotNone(deployment)
self.assertEqual(values['status'], deployment.status)
def test_software_deployment_delete(self):
deployment_id = str(uuid.uuid4())
err = self.assertRaises(exception.NotFound,
db_api.software_deployment_delete,
self.ctx, deployment_id)
self.assertIn(deployment_id, str(err))
values = self._deployment_values()
deployment = db_api.software_deployment_create(self.ctx, values)
deployment_id = deployment.id
deployment = db_api.software_deployment_get(self.ctx, deployment_id)
self.assertIsNotNone(deployment)
db_api.software_deployment_delete(self.ctx, deployment_id)
deployment = db_api.software_deployment_get(self.ctx, deployment_id)
self.assertIsNone(deployment)
def create_raw_template(context, **kwargs):
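    """Create a raw_template DB record from wp_template; kwargs override the defaults."""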
t = template_format.parse(wp_template)
template = {
'template': t,
'files': {'foo': 'bar'}
}
template.update(kwargs)
return db_api.raw_template_create(context, template)
def create_user_creds(ctx, **kwargs):
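    """Create a user_creds DB record from the given context updated with kwargs."""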
ctx_dict = ctx.to_dict()
ctx_dict.update(kwargs)
ctx = context.RequestContext.from_dict(ctx_dict)
return db_api.user_creds_create(ctx)
def create_stack(ctx, template, user_creds, **kwargs):
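    """Create a stack DB record tied to the given raw template and user creds."""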
values = {
'name': 'db_test_stack_name',
'raw_template_id': template.id,
'username': ctx.username,
'tenant': ctx.tenant_id,
'action': 'create',
'status': 'complete',
'status_reason': 'create_complete',
'parameters': {},
'user_creds_id': user_creds.id,
'owner_id': None,
'timeout': '60',
'disable_rollback': 0
}
values.update(kwargs)
return db_api.stack_create(ctx, values)
def create_resource(ctx, stack, **kwargs):
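    """Create a resource DB record belonging to the given stack."""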
values = {
'name': 'test_resource_name',
'nova_instance': UUID1,
'action': 'create',
'status': 'complete',
'status_reason': 'create_complete',
'rsrc_metadata': loads('{"foo": "123"}'),
'stack_id': stack.id
}
values.update(kwargs)
return db_api.resource_create(ctx, values)
def create_resource_data(ctx, resource, **kwargs):
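    """Set a resource_data key/value pair on the given resource."""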
values = {
'key': 'test_resource_key',
'value': 'test_value',
'redact': 0,
}
values.update(kwargs)
return db_api.resource_data_set(resource, **values)
def create_event(ctx, **kwargs):
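    """Create an event DB record; kwargs override the defaults."""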
values = {
'stack_id': 'test_stack_id',
'resource_action': 'create',
'resource_status': 'complete',
'resource_name': 'res',
'physical_resource_id': UUID1,
'resource_status_reason': "create_complete",
'resource_properties': {'name': 'foo'}
}
values.update(kwargs)
return db_api.event_create(ctx, values)
def create_watch_rule(ctx, stack, **kwargs):
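    """Create a watch_rule DB record tied to the given stack."""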
values = {
'name': 'test_rule',
'rule': loads('{"foo": "123"}'),
'state': 'normal',
'last_evaluated': timeutils.utcnow(),
'stack_id': stack.id,
}
values.update(kwargs)
return db_api.watch_rule_create(ctx, values)
def create_watch_data(ctx, watch_rule, **kwargs):
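    """Create a watch_data DB record tied to the given watch rule."""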
values = {
'data': loads('{"foo": "bar"}'),
'watch_rule_id': watch_rule.id
}
values.update(kwargs)
return db_api.watch_data_create(ctx, values)
class DBAPIRawTemplateTest(HeatTestCase):
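    """Tests for the raw_template functions of db_api."""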
def setUp(self):
super(DBAPIRawTemplateTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
def test_raw_template_create(self):
t = template_format.parse(wp_template)
tp = create_raw_template(self.ctx, template=t)
self.assertIsNotNone(tp.id)
self.assertEqual(t, tp.template)
self.assertEqual({'foo': 'bar'}, tp.files)
def test_raw_template_get(self):
t = template_format.parse(wp_template)
tp = create_raw_template(self.ctx, template=t)
template = db_api.raw_template_get(self.ctx, tp.id)
self.assertEqual(tp.id, template.id)
self.assertEqual(tp.template, template.template)
class DBAPIUserCredsTest(HeatTestCase):
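    """Tests for the user_creds functions of db_api."""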
def setUp(self):
super(DBAPIUserCredsTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
def test_user_creds_create_trust(self):
user_creds = create_user_creds(self.ctx, trust_id='test_trust_id',
trustor_user_id='trustor_id')
self.assertIsNotNone(user_creds.id)
self.assertEqual('test_trust_id',
db_api._decrypt(user_creds.trust_id,
user_creds.decrypt_method))
self.assertEqual('trustor_id', user_creds.trustor_user_id)
self.assertIsNone(user_creds.username)
self.assertIsNone(user_creds.password)
self.assertEqual(self.ctx.tenant, user_creds.tenant)
self.assertEqual(self.ctx.tenant_id, user_creds.tenant_id)
def test_user_creds_create_password(self):
user_creds = create_user_creds(self.ctx)
self.assertIsNotNone(user_creds.id)
self.assertEqual(self.ctx.password,
db_api._decrypt(user_creds.password,
user_creds.decrypt_method))
def test_user_creds_get(self):
user_creds = create_user_creds(self.ctx)
ret_user_creds = db_api.user_creds_get(user_creds.id)
self.assertEqual(db_api._decrypt(user_creds.password,
user_creds.decrypt_method),
ret_user_creds['password'])
class DBAPIStackTest(HeatTestCase):
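    """Tests for the stack functions of db_api."""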
def setUp(self):
super(DBAPIStackTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
def test_stack_create(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
self.assertIsNotNone(stack.id)
self.assertEqual('db_test_stack_name', stack.name)
self.assertEqual(self.template.id, stack.raw_template_id)
self.assertEqual(self.ctx.username, stack.username)
self.assertEqual(self.ctx.tenant_id, stack.tenant)
self.assertEqual('create', stack.action)
self.assertEqual('complete', stack.status)
self.assertEqual('create_complete', stack.status_reason)
self.assertEqual({}, stack.parameters)
self.assertEqual(self.user_creds.id, stack.user_creds_id)
self.assertIsNone(stack.owner_id)
self.assertEqual('60', stack.timeout)
self.assertFalse(stack.disable_rollback)
def test_stack_delete(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
stack_id = stack.id
resource = create_resource(self.ctx, stack)
db_api.stack_delete(self.ctx, stack_id)
self.assertIsNone(db_api.stack_get(self.ctx, stack_id,
show_deleted=False))
self.assertRaises(exception.NotFound, db_api.resource_get,
self.ctx, resource.id)
self.assertRaises(exception.NotFound, db_api.stack_delete,
self.ctx, stack_id)
        # Testing soft delete
ret_stack = db_api.stack_get(self.ctx, stack_id, show_deleted=True)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack_id, ret_stack.id)
self.assertEqual('db_test_stack_name', ret_stack.name)
        # Testing child resource deletion
self.assertRaises(exception.NotFound, db_api.resource_get,
self.ctx, resource.id)
def test_stack_update(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
values = {
'name': 'db_test_stack_name2',
'action': 'update',
'status': 'failed',
'status_reason': "update_failed",
'timeout': '90',
}
db_api.stack_update(self.ctx, stack.id, values)
stack = db_api.stack_get(self.ctx, stack.id)
self.assertEqual('db_test_stack_name2', stack.name)
self.assertEqual('update', stack.action)
self.assertEqual('failed', stack.status)
self.assertEqual('update_failed', stack.status_reason)
self.assertEqual('90', stack.timeout)
self.assertRaises(exception.NotFound, db_api.stack_update, self.ctx,
UUID2, values)
def test_stack_get_returns_a_stack(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
ret_stack = db_api.stack_get(self.ctx, stack.id, show_deleted=False)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack.id, ret_stack.id)
self.assertEqual('db_test_stack_name', ret_stack.name)
def test_stack_get_returns_none_if_stack_does_not_exist(self):
stack = db_api.stack_get(self.ctx, UUID1, show_deleted=False)
self.assertIsNone(stack)
def test_stack_get_returns_none_if_tenant_id_does_not_match(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
self.ctx.tenant_id = 'abc'
        ret_stack = db_api.stack_get(self.ctx, stack.id, show_deleted=False)
        self.assertIsNone(ret_stack)
def test_stack_get_can_return_a_stack_from_different_tenant(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
self.ctx.tenant_id = 'abc'
ret_stack = db_api.stack_get(self.ctx, stack.id,
show_deleted=False, tenant_safe=False)
self.assertEqual(stack.id, ret_stack.id)
self.assertEqual('db_test_stack_name', ret_stack.name)
def test_stack_get_by_name(self):
stack = create_stack(self.ctx, self.template, self.user_creds)
ret_stack = db_api.stack_get_by_name(self.ctx, stack.name)
self.assertIsNotNone(ret_stack)
self.assertEqual(stack.id, ret_stack.id)
self.assertEqual('db_test_stack_name', ret_stack.name)
self.assertIsNone(db_api.stack_get_by_name(self.ctx, 'abc'))
self.ctx.tenant_id = 'abc'
self.assertIsNone(db_api.stack_get_by_name(self.ctx, 'abc'))
def test_stack_get_all(self):
values = [
{'name': 'stack1'},
{'name': 'stack2'},
{'name': 'stack3'},
{'name': 'stack4'}
]
[create_stack(self.ctx, self.template, self.user_creds,
**val) for val in values]
ret_stacks = db_api.stack_get_all(self.ctx)
self.assertEqual(4, len(ret_stacks))
names = [ret_stack.name for ret_stack in ret_stacks]
[self.assertIn(val['name'], names) for val in values]
def test_stack_get_all_by_owner_id(self):
parent_stack1 = create_stack(self.ctx, self.template, self.user_creds)
parent_stack2 = create_stack(self.ctx, self.template, self.user_creds)
values = [
{'owner_id': parent_stack1.id},
{'owner_id': parent_stack1.id},
{'owner_id': parent_stack2.id},
{'owner_id': parent_stack2.id},
]
[create_stack(self.ctx, self.template, self.user_creds,
**val) for val in values]
stack1_children = db_api.stack_get_all_by_owner_id(self.ctx,
parent_stack1.id)
self.assertEqual(2, len(stack1_children))
stack2_children = db_api.stack_get_all_by_owner_id(self.ctx,
parent_stack2.id)
self.assertEqual(2, len(stack2_children))
def test_stack_get_all_by_tenant(self):
values = [
{'tenant': UUID1},
{'tenant': UUID1},
{'tenant': UUID2},
{'tenant': UUID2},
{'tenant': UUID2},
]
[create_stack(self.ctx, self.template, self.user_creds,
**val) for val in values]
self.ctx.tenant_id = UUID1
stacks = db_api.stack_get_all_by_tenant(self.ctx)
self.assertEqual(2, len(stacks))
self.ctx.tenant_id = UUID2
stacks = db_api.stack_get_all_by_tenant(self.ctx)
self.assertEqual(3, len(stacks))
self.ctx.tenant_id = UUID3
self.assertEqual([], db_api.stack_get_all_by_tenant(self.ctx))
def test_stack_count_all_by_tenant(self):
values = [
{'tenant': self.ctx.tenant_id},
{'tenant': self.ctx.tenant_id},
]
[create_stack(self.ctx, self.template, self.user_creds,
**val) for val in values]
self.assertEqual(2, db_api.stack_count_all_by_tenant(self.ctx))
def test_purge_deleted(self):
now = datetime.now()
delta = timedelta(seconds=3600 * 7)
deleted = [now - delta * i for i in range(1, 6)]
templates = [create_raw_template(self.ctx) for i in range(5)]
creds = [create_user_creds(self.ctx) for i in range(5)]
stacks = [create_stack(self.ctx, templates[i], creds[i],
deleted_at=deleted[i]) for i in range(5)]
class MyDatetime():
def now(self):
return now
self.useFixture(fixtures.MonkeyPatch('heat.db.sqlalchemy.api.datetime',
MyDatetime()))
db_api.purge_deleted(age=1, granularity='days')
        self._deleted_stack_existence(utils.dummy_context(), stacks,
                                      (0, 1, 2), (3, 4))
        db_api.purge_deleted(age=22, granularity='hours')
        self._deleted_stack_existence(utils.dummy_context(), stacks,
                                      (0, 1, 2), (3, 4))
        db_api.purge_deleted(age=1100, granularity='minutes')
        self._deleted_stack_existence(utils.dummy_context(), stacks,
                                      (0, 1), (2, 3, 4))
        db_api.purge_deleted(age=3600, granularity='seconds')
        self._deleted_stack_existence(utils.dummy_context(), stacks,
                                      (), (0, 1, 2, 3, 4))
    def _deleted_stack_existence(self, ctx, stacks, existing, deleted):
for s in existing:
self.assertIsNotNone(db_api.stack_get(ctx, stacks[s].id,
show_deleted=True))
for s in deleted:
self.assertIsNone(db_api.stack_get(ctx, stacks[s].id,
show_deleted=True))
class DBAPIResourceTest(HeatTestCase):
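    """Tests for the resource functions of db_api."""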
def setUp(self):
super(DBAPIResourceTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
self.stack = create_stack(self.ctx, self.template, self.user_creds)
def test_resource_create(self):
res = create_resource(self.ctx, self.stack)
ret_res = db_api.resource_get(self.ctx, res.id)
self.assertIsNotNone(ret_res)
self.assertEqual('test_resource_name', ret_res.name)
self.assertEqual(UUID1, ret_res.nova_instance)
self.assertEqual('create', ret_res.action)
self.assertEqual('complete', ret_res.status)
self.assertEqual('create_complete', ret_res.status_reason)
self.assertEqual('{"foo": "123"}', dumps(ret_res.rsrc_metadata))
self.assertEqual(self.stack.id, ret_res.stack_id)
def test_resource_get(self):
res = create_resource(self.ctx, self.stack)
ret_res = db_api.resource_get(self.ctx, res.id)
self.assertIsNotNone(ret_res)
self.assertRaises(exception.NotFound, db_api.resource_get,
self.ctx, UUID2)
def test_resource_get_by_name_and_stack(self):
create_resource(self.ctx, self.stack)
ret_res = db_api.resource_get_by_name_and_stack(self.ctx,
'test_resource_name',
self.stack.id)
self.assertIsNotNone(ret_res)
self.assertEqual('test_resource_name', ret_res.name)
self.assertEqual(self.stack.id, ret_res.stack_id)
self.assertIsNone(db_api.resource_get_by_name_and_stack(self.ctx,
'abc',
self.stack.id))
def test_resource_get_by_physical_resource_id(self):
create_resource(self.ctx, self.stack)
ret_res = db_api.resource_get_by_physical_resource_id(self.ctx, UUID1)
self.assertIsNotNone(ret_res)
self.assertEqual(UUID1, ret_res.nova_instance)
self.assertIsNone(db_api.resource_get_by_physical_resource_id(self.ctx,
UUID2))
def test_resource_get_all(self):
values = [
{'name': 'res1'},
{'name': 'res2'},
{'name': 'res3'},
]
[create_resource(self.ctx, self.stack, **val) for val in values]
resources = db_api.resource_get_all(self.ctx)
self.assertEqual(3, len(resources))
names = [resource.name for resource in resources]
[self.assertIn(val['name'], names) for val in values]
def test_resource_get_all_by_stack(self):
self.stack1 = create_stack(self.ctx, self.template, self.user_creds)
self.stack2 = create_stack(self.ctx, self.template, self.user_creds)
values = [
{'name': 'res1', 'stack_id': self.stack.id},
{'name': 'res2', 'stack_id': self.stack.id},
{'name': 'res3', 'stack_id': self.stack1.id},
]
[create_resource(self.ctx, self.stack, **val) for val in values]
stacks = db_api.resource_get_all_by_stack(self.ctx, self.stack.id)
self.assertEqual(2, len(stacks))
self.assertRaises(exception.NotFound, db_api.resource_get_all_by_stack,
self.ctx, self.stack2.id)
class DBAPIStackLockTest(HeatTestCase):
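    """Tests for the stack_lock functions of db_api."""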
def setUp(self):
super(DBAPIStackLockTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
self.stack = create_stack(self.ctx, self.template, self.user_creds)
def test_stack_lock_create_success(self):
observed = db_api.stack_lock_create(self.stack.id, UUID1)
self.assertIsNone(observed)
def test_stack_lock_create_fail_double_same(self):
db_api.stack_lock_create(self.stack.id, UUID1)
observed = db_api.stack_lock_create(self.stack.id, UUID1)
self.assertEqual(UUID1, observed)
def test_stack_lock_create_fail_double_different(self):
db_api.stack_lock_create(self.stack.id, UUID1)
observed = db_api.stack_lock_create(self.stack.id, UUID2)
self.assertEqual(UUID1, observed)
def test_stack_lock_steal_success(self):
db_api.stack_lock_create(self.stack.id, UUID1)
observed = db_api.stack_lock_steal(self.stack.id, UUID1, UUID2)
self.assertIsNone(observed)
def test_stack_lock_steal_fail_gone(self):
db_api.stack_lock_create(self.stack.id, UUID1)
db_api.stack_lock_release(self.stack.id, UUID1)
observed = db_api.stack_lock_steal(self.stack.id, UUID1, UUID2)
self.assertTrue(observed)
def test_stack_lock_steal_fail_stolen(self):
db_api.stack_lock_create(self.stack.id, UUID1)
# Simulate stolen lock
db_api.stack_lock_release(self.stack.id, UUID1)
db_api.stack_lock_create(self.stack.id, UUID2)
observed = db_api.stack_lock_steal(self.stack.id, UUID3, UUID2)
self.assertEqual(UUID2, observed)
def test_stack_lock_release_success(self):
db_api.stack_lock_create(self.stack.id, UUID1)
observed = db_api.stack_lock_release(self.stack.id, UUID1)
self.assertIsNone(observed)
def test_stack_lock_release_fail_double(self):
db_api.stack_lock_create(self.stack.id, UUID1)
db_api.stack_lock_release(self.stack.id, UUID1)
observed = db_api.stack_lock_release(self.stack.id, UUID1)
self.assertTrue(observed)
def test_stack_lock_release_fail_wrong_engine_id(self):
db_api.stack_lock_create(self.stack.id, UUID1)
observed = db_api.stack_lock_release(self.stack.id, UUID2)
self.assertTrue(observed)
class DBAPIResourceDataTest(HeatTestCase):
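    """Tests for the resource_data functions of db_api."""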
def setUp(self):
super(DBAPIResourceDataTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
self.stack = create_stack(self.ctx, self.template, self.user_creds)
self.resource = create_resource(self.ctx, self.stack)
self.resource.context = self.ctx
def test_resource_data_set_get(self):
create_resource_data(self.ctx, self.resource)
val = db_api.resource_data_get(self.resource, 'test_resource_key')
self.assertEqual('test_value', val)
        # Updating existing resource data
        create_resource_data(self.ctx, self.resource, value='foo')
        val = db_api.resource_data_get(self.resource, 'test_resource_key')
        self.assertEqual('foo', val)
        # Testing with an encrypted value
        create_resource_data(self.ctx, self.resource,
                             key='encrypted_resource_key', redact=True)
        val = db_api.resource_data_get(self.resource, 'encrypted_resource_key')
        self.assertEqual('test_value', val)
        vals = db_api.resource_data_get_all(self.resource)
        self.assertEqual(2, len(vals))
        self.assertEqual('foo', vals.get('test_resource_key'))
        self.assertEqual('test_value', vals.get('encrypted_resource_key'))
def test_resource_data_delete(self):
create_resource_data(self.ctx, self.resource)
res_data = db_api.resource_data_get_by_key(self.ctx, self.resource.id,
'test_resource_key')
self.assertIsNotNone(res_data)
self.assertEqual('test_value', res_data.value)
db_api.resource_data_delete(self.resource, 'test_resource_key')
self.assertRaises(exception.NotFound, db_api.resource_data_get_by_key,
self.ctx, self.resource.id, 'test_resource_key')
self.assertIsNotNone(res_data)
class DBAPIEventTest(HeatTestCase):
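    """Tests for the event functions of db_api."""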
def setUp(self):
super(DBAPIEventTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
def test_event_create_get(self):
event = create_event(self.ctx)
ret_event = db_api.event_get(self.ctx, event.id)
self.assertIsNotNone(ret_event)
self.assertEqual('test_stack_id', ret_event.stack_id)
self.assertEqual('create', ret_event.resource_action)
self.assertEqual('complete', ret_event.resource_status)
self.assertEqual('res', ret_event.resource_name)
self.assertEqual(UUID1, ret_event.physical_resource_id)
self.assertEqual('create_complete', ret_event.resource_status_reason)
self.assertEqual({'name': 'foo'}, ret_event.resource_properties)
def test_event_get_all(self):
self.stack1 = create_stack(self.ctx, self.template, self.user_creds,
tenant='tenant1')
self.stack2 = create_stack(self.ctx, self.template, self.user_creds,
tenant='tenant2')
values = [
{'stack_id': self.stack1.id, 'resource_name': 'res1'},
{'stack_id': self.stack1.id, 'resource_name': 'res2'},
{'stack_id': self.stack2.id, 'resource_name': 'res3'},
]
[create_event(self.ctx, **val) for val in values]
events = db_api.event_get_all(self.ctx)
self.assertEqual(3, len(events))
stack_ids = [event.stack_id for event in events]
res_names = [event.resource_name for event in events]
[(self.assertIn(val['stack_id'], stack_ids),
self.assertIn(val['resource_name'], res_names)) for val in values]
def test_event_get_all_by_tenant(self):
self.stack1 = create_stack(self.ctx, self.template, self.user_creds,
tenant='tenant1')
self.stack2 = create_stack(self.ctx, self.template, self.user_creds,
tenant='tenant2')
values = [
{'stack_id': self.stack1.id, 'resource_name': 'res1'},
{'stack_id': self.stack1.id, 'resource_name': 'res2'},
{'stack_id': self.stack2.id, 'resource_name': 'res3'},
]
[create_event(self.ctx, **val) for val in values]
self.ctx.tenant_id = 'tenant1'
events = db_api.event_get_all_by_tenant(self.ctx)
self.assertEqual(2, len(events))
self.ctx.tenant_id = 'tenant2'
events = db_api.event_get_all_by_tenant(self.ctx)
self.assertEqual(1, len(events))
def test_event_get_all_by_stack(self):
self.stack1 = create_stack(self.ctx, self.template, self.user_creds)
self.stack2 = create_stack(self.ctx, self.template, self.user_creds)
values = [
{'stack_id': self.stack1.id, 'resource_name': 'res1'},
{'stack_id': self.stack1.id, 'resource_name': 'res2'},
{'stack_id': self.stack2.id, 'resource_name': 'res3'},
]
[create_event(self.ctx, **val) for val in values]
self.ctx.tenant_id = 'tenant1'
events = db_api.event_get_all_by_stack(self.ctx, self.stack1.id)
self.assertEqual(2, len(events))
self.ctx.tenant_id = 'tenant2'
events = db_api.event_get_all_by_stack(self.ctx, self.stack2.id)
self.assertEqual(1, len(events))
def test_event_count_all_by_stack(self):
self.stack1 = create_stack(self.ctx, self.template, self.user_creds)
self.stack2 = create_stack(self.ctx, self.template, self.user_creds)
values = [
{'stack_id': self.stack1.id, 'resource_name': 'res1'},
{'stack_id': self.stack1.id, 'resource_name': 'res2'},
{'stack_id': self.stack2.id, 'resource_name': 'res3'},
]
[create_event(self.ctx, **val) for val in values]
self.assertEqual(2, db_api.event_count_all_by_stack(self.ctx,
self.stack1.id))
self.assertEqual(1, db_api.event_count_all_by_stack(self.ctx,
self.stack2.id))
class DBAPIWatchRuleTest(HeatTestCase):
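    """Tests for the watch_rule functions of db_api."""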
def setUp(self):
super(DBAPIWatchRuleTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
self.stack = create_stack(self.ctx, self.template, self.user_creds)
def test_watch_rule_create_get(self):
watch_rule = create_watch_rule(self.ctx, self.stack)
ret_wr = db_api.watch_rule_get(self.ctx, watch_rule.id)
self.assertIsNotNone(ret_wr)
self.assertEqual('test_rule', ret_wr.name)
self.assertEqual('{"foo": "123"}', dumps(ret_wr.rule))
self.assertEqual('normal', ret_wr.state)
self.assertEqual(self.stack.id, ret_wr.stack_id)
def test_watch_rule_get_by_name(self):
watch_rule = create_watch_rule(self.ctx, self.stack)
ret_wr = db_api.watch_rule_get_by_name(self.ctx, watch_rule.name)
self.assertIsNotNone(ret_wr)
self.assertEqual('test_rule', ret_wr.name)
def test_watch_rule_get_all(self):
values = [
{'name': 'rule1'},
{'name': 'rule2'},
{'name': 'rule3'},
]
[create_watch_rule(self.ctx, self.stack, **val) for val in values]
wrs = db_api.watch_rule_get_all(self.ctx)
self.assertEqual(3, len(wrs))
names = [wr.name for wr in wrs]
[self.assertIn(val['name'], names) for val in values]
def test_watch_rule_get_all_by_stack(self):
self.stack1 = create_stack(self.ctx, self.template, self.user_creds)
values = [
{'name': 'rule1', 'stack_id': self.stack.id},
{'name': 'rule2', 'stack_id': self.stack1.id},
{'name': 'rule3', 'stack_id': self.stack1.id},
]
[create_watch_rule(self.ctx, self.stack, **val) for val in values]
wrs = db_api.watch_rule_get_all_by_stack(self.ctx, self.stack.id)
self.assertEqual(1, len(wrs))
wrs = db_api.watch_rule_get_all_by_stack(self.ctx, self.stack1.id)
self.assertEqual(2, len(wrs))
def test_watch_rule_update(self):
watch_rule = create_watch_rule(self.ctx, self.stack)
values = {
'name': 'test_rule_1',
'rule': loads('{"foo": "bar"}'),
'state': 'nodata',
}
db_api.watch_rule_update(self.ctx, watch_rule.id, values)
watch_rule = db_api.watch_rule_get(self.ctx, watch_rule.id)
self.assertEqual('test_rule_1', watch_rule.name)
self.assertEqual('{"foo": "bar"}', dumps(watch_rule.rule))
self.assertEqual('nodata', watch_rule.state)
self.assertRaises(exception.NotFound, db_api.watch_rule_update,
self.ctx, UUID2, values)
def test_watch_rule_delete(self):
watch_rule = create_watch_rule(self.ctx, self.stack)
create_watch_data(self.ctx, watch_rule)
db_api.watch_rule_delete(self.ctx, watch_rule.id)
self.assertIsNone(db_api.watch_rule_get(self.ctx, watch_rule.id))
self.assertRaises(exception.NotFound, db_api.watch_rule_delete,
self.ctx, UUID2)
        # Testing associated watch data deletion
self.assertEqual([], db_api.watch_data_get_all(self.ctx))
class DBAPIWatchDataTest(HeatTestCase):
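    """Tests for the watch_data functions of db_api."""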
def setUp(self):
super(DBAPIWatchDataTest, self).setUp()
self.ctx = utils.dummy_context()
utils.setup_dummy_db()
utils.reset_dummy_db()
self.template = create_raw_template(self.ctx)
self.user_creds = create_user_creds(self.ctx)
self.stack = create_stack(self.ctx, self.template, self.user_creds)
self.watch_rule = create_watch_rule(self.ctx, self.stack)
def test_watch_data_create(self):
create_watch_data(self.ctx, self.watch_rule)
ret_data = db_api.watch_data_get_all(self.ctx)
self.assertEqual(1, len(ret_data))
self.assertEqual('{"foo": "bar"}', dumps(ret_data[0].data))
self.assertEqual(self.watch_rule.id, ret_data[0].watch_rule_id)
def test_watch_data_get_all(self):
values = [
{'data': loads('{"foo": "d1"}')},
{'data': loads('{"foo": "d2"}')},
{'data': loads('{"foo": "d3"}')}
]
[create_watch_data(self.ctx, self.watch_rule, **val) for val in values]
watch_data = db_api.watch_data_get_all(self.ctx)
self.assertEqual(3, len(watch_data))
data = [wd.data for wd in watch_data]
[self.assertIn(val['data'], data) for val in values]
| apache-2.0 | -8,190,972,192,729,894,000 | 38.183916 | 79 | 0.592628 | false |
tangentlabs/django-oscar-fancypages | oscar_sandbox/sandbox/settings.py | 1 | 6442 | # Django settings for sandbox project.
import os
import oscar_fancypages.utils as ofp
PROJECT_DIR = os.path.dirname(__file__)
location = lambda x: os.path.join(os.path.dirname(os.path.realpath(__file__)), "../%s" % x)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
USE_LESS = True
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(os.path.dirname(__file__), 'db.sqlite3'),
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Australia/Melbourne'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = location('public/media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
STATIC_URL = '/static/'
STATICFILES_DIRS = [
location('static/'),
] + ofp.get_oscar_fancypages_paths('static')
STATIC_ROOT = location('public')
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'sba9ti)x&^fkod-g91@^_yi6y_#&3mo#m5@n)i&k+0h=+zsfkb'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.request",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.contrib.messages.context_processors.messages",
# Oscar specific
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.apps.customer.notifications.context_processors.notifications',
'oscar.core.context_processors.metadata',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
'fancypages.middleware.EditorMiddleware',
)
ROOT_URLCONF = 'sandbox.ofp_urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sandbox.wsgi.application'
# Compressor and pre-compiler settings for django-compressor
COMPRESS_ENABLED = DEBUG
COMPRESS_OUTPUT_DIR = 'cache'
COMPRESS_OFFLINE = False
COMPRESS_PRECOMPILERS = (
('text/coffeescript', 'coffee --compile --stdio'),
('text/less', 'lessc {infile} {outfile}'),
)
if DEBUG:
COMPRESS_JS_FILTERS = []
from oscar import OSCAR_MAIN_TEMPLATE_DIR
TEMPLATE_DIRS = [
location('templates'),
os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),
OSCAR_MAIN_TEMPLATE_DIR,
] + ofp.get_oscar_fancypages_paths('templates')
DJANGO_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'django.contrib.admin',
]
THIRD_PARTY_APPS = [
'debug_toolbar',
]
OFP_APPS = ofp.get_required_apps() + ofp.get_oscar_fancypages_apps()
from oscar import get_core_apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + OFP_APPS + get_core_apps()
AUTHENTICATION_BACKENDS = (
'oscar.apps.customer.auth_backends.Emailbackend',
'django.contrib.auth.backends.ModelBackend',
)
LOGIN_REDIRECT_URL = '/accounts/'
APPEND_SLASH = True
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
# Oscar settings
from oscar.defaults import *
from oscar_fancypages.defaults import *
OSCAR_ALLOW_ANON_CHECKOUT = True
OSCAR_SHOP_NAME = 'FancyPages Sandbox'
OSCAR_SHOP_TAGLINE = 'Make your pages sparkle and shine!'
# Haystack settings
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
INTERNAL_IPS = ('127.0.0.1',)
| bsd-3-clause | -5,179,257,276,437,302,000 | 28.550459 | 91 | 0.705061 | false |
CZ-NIC/foris | foris/config_handlers/wan.py | 1 | 17358 | # Foris - web administration interface for OpenWrt based on NETCONF
# Copyright (C) 2017, 2020 CZ.NIC, z.s.p.o. <http://www.nic.cz>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from .base import BaseConfigHandler
from foris import fapi, validators
from foris.state import current_state
from foris.form import Checkbox, Dropdown, Textbox, Number, PasswordWithHide
from foris.utils.translators import gettext_dummy as gettext, _
class WanHandler(BaseConfigHandler):
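    """Configuration handler that renders and saves the WAN/WAN6/MAC settings form."""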
userfriendly_title = gettext("WAN")
def __init__(self, *args, **kwargs):
# Do not display "none" options for WAN protocol if hide_no_wan is True
self.hide_no_wan = kwargs.pop("hide_no_wan", False)
self.status_data = current_state.backend.perform("wan", "get_wan_status")
self.backend_data = current_state.backend.perform("wan", "get_settings")
super(WanHandler, self).__init__(*args, **kwargs)
@staticmethod
def _convert_backend_data_to_form_data(data):
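        """Flatten the nested backend reply (wan_settings, wan6_settings,
        mac_settings) into the flat field names used by the form."""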
res = {}
# WAN
# Convert none (initial setup) to dhcp (default)
res["proto"] = (
"dhcp"
if data["wan_settings"]["wan_type"] == "none"
else data["wan_settings"]["wan_type"]
)
if res["proto"] == "dhcp":
res["hostname"] = data["wan_settings"].get("wan_dhcp", {}).get("hostname", "")
elif res["proto"] == "static":
res["ipaddr"] = data["wan_settings"]["wan_static"]["ip"]
res["netmask"] = data["wan_settings"]["wan_static"]["netmask"]
res["gateway"] = data["wan_settings"]["wan_static"]["gateway"]
res["ipv4_dns1"] = data["wan_settings"]["wan_static"].get("dns1", "")
res["ipv4_dns2"] = data["wan_settings"]["wan_static"].get("dns2", "")
elif res["proto"] == "pppoe":
res["username"] = data["wan_settings"]["wan_pppoe"]["username"]
res["password"] = data["wan_settings"]["wan_pppoe"]["password"]
# WAN6
res["wan6_proto"] = data["wan6_settings"]["wan6_type"]
if res["wan6_proto"] == "static":
res["ip6addr"] = data["wan6_settings"]["wan6_static"]["ip"]
res["ip6prefix"] = data["wan6_settings"]["wan6_static"]["network"]
res["ip6gw"] = data["wan6_settings"]["wan6_static"]["gateway"]
res["ipv6_dns1"] = data["wan6_settings"]["wan6_static"].get("dns1", "")
res["ipv6_dns2"] = data["wan6_settings"]["wan6_static"].get("dns2", "")
elif res["wan6_proto"] == "dhcpv6":
res["ip6duid"] = data["wan6_settings"]["wan6_dhcpv6"]["duid"]
elif res["wan6_proto"] == "6to4":
res["6to4_ipaddr"] = data["wan6_settings"]["wan6_6to4"]["ipv4_address"]
elif res["wan6_proto"] == "6in4":
res["6in4_mtu"] = data["wan6_settings"]["wan6_6in4"]["mtu"]
res["6in4_server_ipv4"] = data["wan6_settings"]["wan6_6in4"]["server_ipv4"]
res["6in4_ipv6_prefix"] = data["wan6_settings"]["wan6_6in4"]["ipv6_prefix"]
res["6in4_dynamic_enabled"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"enabled"
]
if res["6in4_dynamic_enabled"]:
res["6in4_tunnel_id"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"tunnel_id"
]
res["6in4_username"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"username"
]
res["6in4_key"] = data["wan6_settings"]["wan6_6in4"]["dynamic_ipv4"][
"password_or_key"
]
# MAC
res["custom_mac"] = data["mac_settings"]["custom_mac_enabled"]
res["macaddr"] = data["mac_settings"].get("custom_mac", "")
return res
@staticmethod
def _convert_form_data_to_backend_data(data):
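        """Rebuild the nested backend structure (wan_settings, wan6_settings,
        mac_settings) from the flat form data."""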
res = {"wan_settings": {}, "wan6_settings": {}, "mac_settings": {}}
# WAN
res["wan_settings"]["wan_type"] = data["proto"]
if data["proto"] == "dhcp":
hostname = data.get("hostname", False)
res["wan_settings"]["wan_dhcp"] = {"hostname": hostname} if hostname else {}
elif data["proto"] == "static":
res["wan_settings"]["wan_static"] = {}
res["wan_settings"]["wan_static"]["ip"] = data["ipaddr"]
res["wan_settings"]["wan_static"]["netmask"] = data["netmask"]
res["wan_settings"]["wan_static"]["gateway"] = data["gateway"]
dns1 = data.get("ipv4_dns1", None)
dns2 = data.get("ipv4_dns2", None)
res["wan_settings"]["wan_static"].update(
{k: v for k, v in {"dns1": dns1, "dns2": dns2}.items() if v}
)
elif data["proto"] == "pppoe":
res["wan_settings"]["wan_pppoe"] = {}
res["wan_settings"]["wan_pppoe"]["username"] = data["username"]
res["wan_settings"]["wan_pppoe"]["password"] = data["password"]
# WAN6
res["wan6_settings"]["wan6_type"] = data["wan6_proto"]
if data["wan6_proto"] == "static":
res["wan6_settings"]["wan6_static"] = {}
res["wan6_settings"]["wan6_static"]["ip"] = data["ip6addr"]
res["wan6_settings"]["wan6_static"]["network"] = data["ip6prefix"]
res["wan6_settings"]["wan6_static"]["gateway"] = data["ip6gw"]
dns1 = data.get("ipv6_dns1", None)
dns2 = data.get("ipv6_dns2", None)
res["wan6_settings"]["wan6_static"].update(
{k: v for k, v in {"dns1": dns1, "dns2": dns2}.items() if v}
)
if data["wan6_proto"] == "dhcpv6":
res["wan6_settings"]["wan6_dhcpv6"] = {"duid": data.get("ip6duid", "")}
if data["wan6_proto"] == "6to4":
res["wan6_settings"]["wan6_6to4"] = {"ipv4_address": data.get("6to4_ipaddr", "")}
if data["wan6_proto"] == "6in4":
dynamic = {"enabled": data.get("6in4_dynamic_enabled", False)}
if dynamic["enabled"]:
dynamic["tunnel_id"] = data.get("6in4_tunnel_id")
dynamic["username"] = data.get("6in4_username")
dynamic["password_or_key"] = data.get("6in4_key")
res["wan6_settings"]["wan6_6in4"] = {
"mtu": int(data.get("6in4_mtu")),
"ipv6_prefix": data.get("6in4_ipv6_prefix"),
"server_ipv4": data.get("6in4_server_ipv4"),
"dynamic_ipv4": dynamic,
}
# MAC
res["mac_settings"] = (
{"custom_mac_enabled": True, "custom_mac": data["macaddr"]}
if "custom_mac" in data and data["custom_mac"]
else {"custom_mac_enabled": False}
)
return res
def get_form(self):
data = WanHandler._convert_backend_data_to_form_data(self.backend_data)
if self.data:
# Update from post
data.update(self.data)
# WAN
wan_form = fapi.ForisForm("wan", data)
wan_main = wan_form.add_section(
name="set_wan",
title=_(self.userfriendly_title),
description=_(
"Here you specify your WAN port settings. Usually, you can leave these "
"options untouched unless instructed otherwise by your internet service "
"provider. Also, in case there is a cable or DSL modem connecting your "
"router to the network, it is usually not necessary to change this "
"setting."
),
)
WAN_DHCP = "dhcp"
WAN_STATIC = "static"
WAN_PPPOE = "pppoe"
WAN_OPTIONS = (
(WAN_DHCP, _("DHCP (automatic configuration)")),
(WAN_STATIC, _("Static IP address (manual configuration)")),
(WAN_PPPOE, _("PPPoE (for DSL bridges, Modem Turris, etc.)")),
)
WAN6_NONE = "none"
WAN6_DHCP = "dhcpv6"
WAN6_STATIC = "static"
WAN6_6TO4 = "6to4"
WAN6_6IN4 = "6in4"
WAN6_OPTIONS = (
(WAN6_DHCP, _("DHCPv6 (automatic configuration)")),
(WAN6_STATIC, _("Static IP address (manual configuration)")),
(WAN6_6TO4, _("6to4 (public IPv4 address required)")),
(WAN6_6IN4, _("6in4 (public IPv4 address required)")),
)
if not self.hide_no_wan:
WAN6_OPTIONS = ((WAN6_NONE, _("Disable IPv6")),) + WAN6_OPTIONS
# protocol
wan_main.add_field(
Dropdown, name="proto", label=_("IPv4 protocol"), args=WAN_OPTIONS, default=WAN_DHCP
)
# static ipv4
wan_main.add_field(
Textbox,
name="ipaddr",
label=_("IP address"),
required=True,
validators=validators.IPv4(),
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox,
name="netmask",
label=_("Network mask"),
required=True,
validators=validators.IPv4Netmask(),
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox, name="gateway", label=_("Gateway"), required=True, validators=validators.IPv4()
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox,
name="hostname",
label=_("DHCP hostname"),
validators=validators.Domain(),
hint=_("Hostname which will be provided to DHCP server."),
).requires("proto", WAN_DHCP)
# DNS servers
wan_main.add_field(
Textbox,
name="ipv4_dns1",
label=_("DNS server 1 (IPv4)"),
validators=validators.IPv4(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("proto", WAN_STATIC)
wan_main.add_field(
Textbox,
name="ipv4_dns2",
label=_("DNS server 2 (IPv4)"),
validators=validators.IPv4(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("proto", WAN_STATIC)
# xDSL settings
wan_main.add_field(
Textbox, name="username", label=_("PAP/CHAP username"), required=True,
).requires("proto", WAN_PPPOE)
wan_main.add_field(
PasswordWithHide, name="password", label=_("PAP/CHAP password"), required=True,
).requires("proto", WAN_PPPOE)
# IPv6 configuration
wan_main.add_field(
Dropdown,
name="wan6_proto",
label=_("IPv6 protocol"),
args=WAN6_OPTIONS,
default=WAN6_NONE,
)
wan_main.add_field(
Textbox,
name="ip6addr",
label=_("IPv6 address"),
validators=validators.IPv6Prefix(),
required=True,
hint=_(
"IPv6 address and prefix length for WAN interface, " "e.g. 2001:db8:be13:37da::1/64"
),
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ip6gw",
label=_("IPv6 gateway"),
validators=validators.IPv6(),
required=True,
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ip6prefix",
label=_("IPv6 prefix"),
validators=validators.IPv6Prefix(),
hint=_("Address range for local network, " "e.g. 2001:db8:be13:37da::/64"),
).requires("wan6_proto", WAN6_STATIC)
# DNS servers
wan_main.add_field(
Textbox,
name="ipv6_dns1",
label=_("DNS server 1 (IPv6)"),
validators=validators.IPv6(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ipv6_dns2",
label=_("DNS server 2 (IPv6)"),
validators=validators.IPv6(),
hint=_(
"DNS server address is not required as the built-in "
"DNS resolver is capable of working without it."
),
).requires("wan6_proto", WAN6_STATIC)
wan_main.add_field(
Textbox,
name="ip6duid",
label=_("Custom DUID"),
validators=validators.Duid(),
placeholder=self.status_data["last_seen_duid"],
hint=_("DUID which will be provided to the DHCPv6 server."),
).requires("wan6_proto", WAN6_DHCP)
wan_main.add_field(
Textbox,
name="6to4_ipaddr",
label=_("Public IPv4"),
validators=validators.IPv4(),
hint=_(
"In order to use 6to4 protocol, you might need to specify your public IPv4 "
"address manually (e.g. when your WAN interface has a private address which "
"is mapped to public IP)."
),
placeholder=_("use autodetection"),
required=False,
).requires("wan6_proto", WAN6_6TO4)
wan_main.add_field(
Textbox,
name="6in4_server_ipv4",
label=_("Provider IPv4"),
validators=validators.IPv4(),
hint=_("This address will be used as a endpoint of the tunnel on the provider's side."),
required=True,
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Textbox,
name="6in4_ipv6_prefix",
label=_("Routed IPv6 prefix"),
validators=validators.IPv6Prefix(),
hint=_("IPv6 addresses which will be routed to your network."),
required=True,
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Number,
name="6in4_mtu",
label=_("MTU"),
validators=validators.InRange(1280, 1500),
hint=_("Maximum Transmission Unit in the tunnel (in bytes)."),
required=True,
default="1480",
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Checkbox,
name="6in4_dynamic_enabled",
label=_("Dynamic IPv4 handling"),
hint=_(
"Some tunnel providers allow you to have public dynamic IPv4. "
"Note that you need to fill in some extra fields to make it work."
),
default=False,
).requires("wan6_proto", WAN6_6IN4)
wan_main.add_field(
Textbox,
name="6in4_tunnel_id",
label=_("Tunnel ID"),
validators=validators.NotEmpty(),
hint=_("ID of your tunnel which was assigned to you by the provider."),
required=True,
).requires("6in4_dynamic_enabled", True)
wan_main.add_field(
Textbox,
name="6in4_username",
label=_("Username"),
validators=validators.NotEmpty(),
hint=_("Username which will be used to provide credentials to your tunnel provider."),
required=True,
).requires("6in4_dynamic_enabled", True)
wan_main.add_field(
Textbox,
name="6in4_key",
label=_("Key"),
validators=validators.NotEmpty(),
hint=_("Key which will be used to provide credentials to your tunnel provider."),
required=True,
).requires("6in4_dynamic_enabled", True)
# custom MAC
wan_main.add_field(
Checkbox,
name="custom_mac",
label=_("Custom MAC address"),
hint=_(
"Useful in cases, when a specific MAC address is required by "
"your internet service provider."
),
)
wan_main.add_field(
Textbox,
name="macaddr",
label=_("MAC address"),
validators=validators.MacAddress(),
required=True,
hint=_("Colon is used as a separator, for example 00:11:22:33:44:55"),
).requires("custom_mac", True)
def wan_form_cb(data):
backend_data = WanHandler._convert_form_data_to_backend_data(data)
res = current_state.backend.perform("wan", "update_settings", backend_data)
return "save_result", res # store {"result": ...} to be used later...
wan_form.add_callback(wan_form_cb)
return wan_form
| gpl-3.0 | -9,020,859,925,960,720,000 | 39.842353 | 100 | 0.532204 | false |
BorgERP/borg-erp-6of3 | verticals/garage61/acy_vat_number_truncate/partner.py | 1 | 1631 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class res_partner(osv.osv):
_inherit = 'res.partner'
    def _vat_truncate(self, cr, uid, ids, name, arg, context=None):
        res = {}
        for partner in self.browse(cr, uid, ids, context):
            if not partner.vat:
                # No VAT number set, so there is nothing to truncate.
                res[partner.id] = ''
            elif partner.country == partner.company_id.partner_id.country:
                # Domestic partner: drop the two-letter country prefix.
                res[partner.id] = partner.vat[2:]
            else:
                res[partner.id] = partner.vat
        return res
_columns = {
'vat_truncate': fields.function(_vat_truncate, method=True, type='char', size=32, string='VAT Truncate', readonly=True),
}
res_partner() | agpl-3.0 | 3,920,011,529,964,214,000 | 36.953488 | 128 | 0.600858 | false |
feend78/evennia | evennia/commands/cmdset.py | 1 | 23699 | """
A Command Set (CmdSet) holds a set of commands. The Cmdsets can be
merged and combined to create new sets of commands in a
non-destructive way. This makes them very powerful for implementing
custom game states where different commands (or different variations
of commands) are available to the accounts depending on circumstance.
The available merge operations are partly borrowed from mathematical
Set theory.
* Union The two command sets are merged so that as many commands as
possible of each cmdset ends up in the merged cmdset. Same-name
commands are merged by priority. This is the most common default.
Ex: A1,A3 + B1,B2,B4,B5 = A1,B2,A3,B4,B5
* Intersect - Only commands found in *both* cmdsets (i.e. which have
same names) end up in the merged cmdset, with the higher-priority
cmdset replacing the lower one. Ex: A1,A3 + B1,B2,B4,B5 = A1
* Replace - The commands of this cmdset completely replaces the
lower-priority cmdset's commands, regardless of if same-name commands
exist. Ex: A1,A3 + B1,B2,B4,B5 = A1,A3
* Remove - This removes the relevant commands from the
lower-priority cmdset completely. They are not replaced with
anything, so this in effects uses the high-priority cmdset as a filter
to affect the low-priority cmdset. Ex: A1,A3 + B1,B2,B4,B5 = B2,B4,B5
"""
from future.utils import listvalues, with_metaclass
from weakref import WeakKeyDictionary
from django.utils.translation import ugettext as _
from evennia.utils.utils import inherits_from, is_iter
__all__ = ("CmdSet",)
class _CmdSetMeta(type):
"""
This metaclass makes some minor on-the-fly convenience fixes to
the cmdset class.
"""
def __init__(cls, *args, **kwargs):
"""
Fixes some things in the cmdclass
"""
# by default we key the cmdset the same as the
# name of its class.
if not hasattr(cls, 'key') or not cls.key:
cls.key = cls.__name__
cls.path = "%s.%s" % (cls.__module__, cls.__name__)
if not isinstance(cls.key_mergetypes, dict):
cls.key_mergetypes = {}
super(_CmdSetMeta, cls).__init__(*args, **kwargs)
class CmdSet(with_metaclass(_CmdSetMeta, object)):
"""
This class describes a unique cmdset that understands priorities.
CmdSets can be merged and made to perform various set operations
on each other. CmdSets have priorities that affect which of their
ingoing commands gets used.
In the examples, cmdset A always have higher priority than cmdset B.
key - the name of the cmdset. This can be used on its own for game
operations
mergetype (partly from Set theory):
Union - The two command sets are merged so that as many
commands as possible of each cmdset ends up in the
merged cmdset. Same-name commands are merged by
priority. This is the most common default.
Ex: A1,A3 + B1,B2,B4,B5 = A1,B2,A3,B4,B5
Intersect - Only commands found in *both* cmdsets
(i.e. which have same names) end up in the merged
cmdset, with the higher-priority cmdset replacing the
lower one. Ex: A1,A3 + B1,B2,B4,B5 = A1
Replace - The commands of this cmdset completely replaces
the lower-priority cmdset's commands, regardless
of if same-name commands exist.
Ex: A1,A3 + B1,B2,B4,B5 = A1,A3
Remove - This removes the relevant commands from the
lower-priority cmdset completely. They are not
replaced with anything, so this in effects uses the
high-priority cmdset as a filter to affect the
low-priority cmdset.
Ex: A1,A3 + B1,B2,B4,B5 = B2,B4,B5
Note: Commands longer than 2 characters and starting
with double underscrores, like '__noinput_command'
are considered 'system commands' and are
excempt from all merge operations - they are
ALWAYS included across mergers and only affected
if same-named system commands replace them.
priority- All cmdsets are always merged in pairs of two so that
the higher set's mergetype is applied to the
lower-priority cmdset. Default commands have priority 0,
high-priority ones like Exits and Channels have 10 and 9.
Priorities can be negative as well to give default
commands preference.
duplicates - determines what happens when two sets of equal
priority merge. Default has the first of them in the
merger (i.e. A above) automatically taking
precedence. But if allow_duplicates is true, the
result will be a merger with more than one of each
name match. This will usually lead to the account
receiving a multiple-match error higher up the road,
but can be good for things like cmdsets on non-account
objects in a room, to allow the system to warn that
more than one 'ball' in the room has the same 'kick'
command defined on it, so it may offer a chance to
select which ball to kick ... Allowing duplicates
only makes sense for Union and Intersect, the setting
is ignored for the other mergetypes.
key_mergetype (dict) - allows the cmdset to define a unique
mergetype for particular cmdsets. Format is
{CmdSetkeystring:mergetype}. Priorities still apply.
Example: {'Myevilcmdset','Replace'} which would make
sure for this set to always use 'Replace' on
Myevilcmdset no matter what overall mergetype this set
has.
no_objs - don't include any commands from nearby objects
when searching for suitable commands
no_exits - ignore the names of exits when matching against
commands
no_channels - ignore the name of channels when matching against
commands (WARNING- this is dangerous since the
account can then not even ask staff for help if
something goes wrong)
"""
key = "Unnamed CmdSet"
mergetype = "Union"
priority = 0
# These flags, if set to None, will allow "pass-through" of lower-prio settings
# of True/False. If set to True/False, will override lower-prio settings.
no_exits = None
no_objs = None
no_channels = None
# same as above, but if left at None in the final merged set, the
# cmdhandler will auto-assume True for Objects and stay False for all
# other entities.
duplicates = None
permanent = False
key_mergetypes = {}
errmessage = ""
# pre-store properties to duplicate straight off
to_duplicate = ("key", "cmdsetobj", "no_exits", "no_objs",
"no_channels", "permanent", "mergetype",
"priority", "duplicates", "errmessage")
def __init__(self, cmdsetobj=None, key=None):
"""
Creates a new CmdSet instance.
Args:
cmdsetobj (Session, Account, Object, optional): This is the database object
to which this particular instance of cmdset is related. It
is often a character but may also be a regular object, Account
or Session.
key (str, optional): The idenfier for this cmdset. This
helps if wanting to selectively remov cmdsets.
"""
if key:
self.key = key
self.commands = []
self.system_commands = []
self.actual_mergetype = self.mergetype
self.cmdsetobj = cmdsetobj
# this is set only on merged sets, in cmdhandler.py, in order to
# track, list and debug mergers correctly.
self.merged_from = []
# initialize system
self.at_cmdset_creation()
self._contains_cache = WeakKeyDictionary() # {}
# Priority-sensitive merge operations for cmdsets
def _union(self, cmdset_a, cmdset_b):
"""
Merge two sets using union merger
Args:
cmdset_a (Cmdset): Cmdset given higher priority in the case of a tie.
cmdset_b (Cmdset): Cmdset given lower priority in the case of a tie.
Returns:
cmdset_c (Cmdset): The result of A U B operation.
Notes:
Union, C = A U B, means that C gets all elements from both A and B.
"""
cmdset_c = cmdset_a._duplicate()
# we make copies, not refs by use of [:]
cmdset_c.commands = cmdset_a.commands[:]
if cmdset_a.duplicates and cmdset_a.priority == cmdset_b.priority:
cmdset_c.commands.extend(cmdset_b.commands)
else:
cmdset_c.commands.extend([cmd for cmd in cmdset_b
if cmd not in cmdset_a])
return cmdset_c
def _intersect(self, cmdset_a, cmdset_b):
"""
Merge two sets using intersection merger
Args:
cmdset_a (Cmdset): Cmdset given higher priority in the case of a tie.
cmdset_b (Cmdset): Cmdset given lower priority in the case of a tie.
Returns:
cmdset_c (Cmdset): The result of A (intersect) B operation.
Notes:
Intersection, C = A (intersect) B, means that C only gets the
parts of A and B that are the same (that is, the commands
of each set having the same name. Only the one of these
having the higher prio ends up in C).
"""
cmdset_c = cmdset_a._duplicate()
if cmdset_a.duplicates and cmdset_a.priority == cmdset_b.priority:
for cmd in [cmd for cmd in cmdset_a if cmd in cmdset_b]:
cmdset_c.add(cmd)
cmdset_c.add(cmdset_b.get(cmd))
else:
cmdset_c.commands = [cmd for cmd in cmdset_a if cmd in cmdset_b]
return cmdset_c
def _replace(self, cmdset_a, cmdset_b):
"""
Replace the contents of one set with another
Args:
cmdset_a (Cmdset): Cmdset replacing
cmdset_b (Cmdset): Cmdset to replace
Returns:
cmdset_c (Cmdset): This is indentical to cmdset_a.
Notes:
C = A, where B is ignored.
"""
cmdset_c = cmdset_a._duplicate()
cmdset_c.commands = cmdset_a.commands[:]
return cmdset_c
def _remove(self, cmdset_a, cmdset_b):
"""
Filter a set by another.
Args:
cmdset_a (Cmdset): Cmdset acting as a removal filter.
cmdset_b (Cmdset): Cmdset to filter
Returns:
cmdset_c (Cmdset): B, with all matching commands from A removed.
Notes:
C = B - A, where A is used to remove the commands of B.
"""
cmdset_c = cmdset_a._duplicate()
cmdset_c.commands = [cmd for cmd in cmdset_b if cmd not in cmdset_a]
return cmdset_c
def _instantiate(self, cmd):
"""
checks so that object is an instantiated command and not, say
a cmdclass. If it is, instantiate it. Other types, like
strings, are passed through.
Args:
cmd (any): Entity to analyze.
Returns:
result (any): An instantiated Command or the input unmodified.
"""
try:
return cmd()
except TypeError:
return cmd
def _duplicate(self):
"""
Returns a new cmdset with the same settings as this one (no
actual commands are copied over)
Returns:
cmdset (Cmdset): A copy of the current cmdset.
"""
cmdset = CmdSet()
for key, val in ((key, getattr(self, key)) for key in self.to_duplicate):
if val != getattr(cmdset, key):
# only copy if different from default; avoid turning
# class-vars into instance vars
setattr(cmdset, key, val)
cmdset.key_mergetypes = self.key_mergetypes.copy()
return cmdset
def __str__(self):
"""
Show all commands in cmdset when printing it.
Returns:
commands (str): Representation of commands in Cmdset.
"""
return ", ".join([str(cmd) for cmd in sorted(self.commands, key=lambda o:o.key)])
def __iter__(self):
"""
Allows for things like 'for cmd in cmdset':
Returns:
iterable (iter): Commands in Cmdset.
"""
return iter(self.commands)
def __contains__(self, othercmd):
"""
Returns True if this cmdset contains the given command (as
defined by command name and aliases). This allows for things
like 'if cmd in cmdset'
"""
ret = self._contains_cache.get(othercmd)
if ret is None:
ret = othercmd in self.commands
self._contains_cache[othercmd] = ret
return ret
def __add__(self, cmdset_a):
"""
Merge this cmdset (B) with another cmdset (A) using the + operator,
C = B + A
Here, we (by convention) say that 'A is merged onto B to form
C'. The actual merge operation used in the 'addition' depends
on which priorities A and B have. The one of the two with the
highest priority will apply and give its properties to C. In
the case of a tie, A takes priority and replaces the
same-named commands in B unless A has the 'duplicate' variable
set (which means both sets' commands are kept).
"""
# It's okay to merge with None
if not cmdset_a:
return self
sys_commands_a = cmdset_a.get_system_cmds()
sys_commands_b = self.get_system_cmds()
if self.priority <= cmdset_a.priority:
# A higher or equal priority to B
# preserve system __commands
sys_commands = sys_commands_a + [cmd for cmd in sys_commands_b
if cmd not in sys_commands_a]
mergetype = cmdset_a.key_mergetypes.get(self.key, cmdset_a.mergetype)
if mergetype == "Intersect":
cmdset_c = self._intersect(cmdset_a, self)
elif mergetype == "Replace":
cmdset_c = self._replace(cmdset_a, self)
elif mergetype == "Remove":
cmdset_c = self._remove(cmdset_a, self)
else: # Union
cmdset_c = self._union(cmdset_a, self)
# pass through options whenever they are set, unless the merging or higher-prio
# set changes the setting (i.e. has a non-None value). We don't pass through
# the duplicates setting; that is per-merge
cmdset_c.no_channels = self.no_channels if cmdset_a.no_channels is None else cmdset_a.no_channels
cmdset_c.no_exits = self.no_exits if cmdset_a.no_exits is None else cmdset_a.no_exits
cmdset_c.no_objs = self.no_objs if cmdset_a.no_objs is None else cmdset_a.no_objs
else:
# B higher priority than A
# preserver system __commands
sys_commands = sys_commands_b + [cmd for cmd in sys_commands_a
if cmd not in sys_commands_b]
mergetype = self.key_mergetypes.get(cmdset_a.key, self.mergetype)
if mergetype == "Intersect":
cmdset_c = self._intersect(self, cmdset_a)
elif mergetype == "Replace":
cmdset_c = self._replace(self, cmdset_a)
elif mergetype == "Remove":
cmdset_c = self._remove(self, cmdset_a)
else: # Union
cmdset_c = self._union(self, cmdset_a)
# pass through options whenever they are set, unless the higher-prio
# set changes the setting (i.e. has a non-None value). We don't pass through
# the duplicates setting; that is per-merge
cmdset_c.no_channels = cmdset_a.no_channels if self.no_channels is None else self.no_channels
cmdset_c.no_exits = cmdset_a.no_exits if self.no_exits is None else self.no_exits
cmdset_c.no_objs = cmdset_a.no_objs if self.no_objs is None else self.no_objs
# we store actual_mergetype since key_mergetypes
# might be different from the main mergetype.
# This is used for diagnosis.
cmdset_c.actual_mergetype = mergetype
# print "__add__ for %s (prio %i) called with %s (prio %i)." % (self.key, self.priority, cmdset_a.key, cmdset_a.priority)
# return the system commands to the cmdset
cmdset_c.add(sys_commands)
return cmdset_c
def add(self, cmd):
"""
Add a new command or commands to this CmdSetcommand, a list of
commands or a cmdset to this cmdset. Note that this is *not*
a merge operation (that is handled by the + operator).
Args:
cmd (Command, list, Cmdset): This allows for adding one or
more commands to this Cmdset in one go. If another Cmdset
is given, all its commands will be added.
Notes:
If cmd already exists in set, it will replace the old one
(no priority checking etc happens here). This is very useful
when overloading default commands).
If cmd is another cmdset class or -instance, the commands of
that command set is added to this one, as if they were part of
the original cmdset definition. No merging or priority checks
are made, rather later added commands will simply replace
existing ones to make a unique set.
"""
if inherits_from(cmd, "evennia.commands.cmdset.CmdSet"):
# cmd is a command set so merge all commands in that set
# to this one. We raise a visible error if we created
# an infinite loop (adding cmdset to itself somehow)
try:
cmd = self._instantiate(cmd)
except RuntimeError:
string = "Adding cmdset %(cmd)s to %(class)s lead to an "
string += "infinite loop. When adding a cmdset to another, "
string += "make sure they are not themself cyclically added to "
string += "the new cmdset somewhere in the chain."
raise RuntimeError(_(string) % {"cmd": cmd,
"class": self.__class__})
cmds = cmd.commands
elif is_iter(cmd):
cmds = [self._instantiate(c) for c in cmd]
else:
cmds = [self._instantiate(cmd)]
commands = self.commands
system_commands = self.system_commands
for cmd in cmds:
# add all commands
if not hasattr(cmd, 'obj'):
cmd.obj = self.cmdsetobj
try:
ic = commands.index(cmd)
commands[ic] = cmd # replace
except ValueError:
commands.append(cmd)
# extra run to make sure to avoid doublets
self.commands = list(set(commands))
# add system_command to separate list as well,
# for quick look-up
if cmd.key.startswith("__"):
try:
ic = system_commands.index(cmd)
system_commands[ic] = cmd # replace
except ValueError:
system_commands.append(cmd)
def remove(self, cmd):
"""
Remove a command instance from the cmdset.
Args:
cmd (Command or str): Either the Command object to remove
or the key of such a command.
"""
cmd = self._instantiate(cmd)
if cmd.key.startswith("__"):
try:
ic = self.system_commands.index(cmd)
del self.system_commands[ic]
except ValueError:
# ignore error
pass
else:
self.commands = [oldcmd for oldcmd in self.commands if oldcmd != cmd]
def get(self, cmd):
"""
Get a command from the cmdset. This is mostly useful to
check if the command is part of this cmdset or not.
Args:
cmd (Command or str): Either the Command object or its key.
Returns:
cmd (Command): The first matching Command in the set.
"""
cmd = self._instantiate(cmd)
for thiscmd in self.commands:
if thiscmd == cmd:
return thiscmd
return None
def count(self):
"""
Number of commands in set.
Returns:
N (int): Number of commands in this Cmdset.
"""
return len(self.commands)
def get_system_cmds(self):
"""
Get system commands in cmdset
Returns:
sys_cmds (list): The system commands in the set.
Notes:
As far as the Cmdset is concerned, system commands are any
commands with a key starting with double underscore __.
These are excempt from merge operations.
"""
return self.system_commands
def make_unique(self, caller):
"""
Remove duplicate command-keys (unsafe)
Args:
caller (object): Commands on this object will
get preference in the duplicate removal.
Notes:
This is an unsafe command meant to clean out a cmdset of
doublet commands after it has been created. It is useful
for commands inheriting cmdsets from the cmdhandler where
obj-based cmdsets always are added double. Doublets will
be weeded out with preference to commands defined on
caller, otherwise just by first-come-first-served.
"""
unique = {}
for cmd in self.commands:
if cmd.key in unique:
ocmd = unique[cmd.key]
if (hasattr(cmd, 'obj') and cmd.obj == caller) and not \
(hasattr(ocmd, 'obj') and ocmd.obj == caller):
unique[cmd.key] = cmd
else:
unique[cmd.key] = cmd
self.commands = listvalues(unique)
def get_all_cmd_keys_and_aliases(self, caller=None):
"""
Collects keys/aliases from commands
Args:
caller (Object, optional): If set, this is used to check access permissions
on each command. Only commands that pass are returned.
Returns:
names (list): A list of all command keys and aliases in this cmdset. If `caller`
was given, this list will only contain commands to which `caller` passed
the `call` locktype check.
"""
names = []
if caller:
[names.extend(cmd._keyaliases) for cmd in self.commands
if cmd.access(caller)]
else:
[names.extend(cmd._keyaliases) for cmd in self.commands]
return names
def at_cmdset_creation(self):
"""
Hook method - this should be overloaded in the inheriting
class, and should take care of populating the cmdset by use of
self.add().
"""
pass
| bsd-3-clause | -331,896,046,485,190,660 | 37.347896 | 130 | 0.581417 | false |
endee1/gtv | script.gtvtvguide/ResetDatabase.py | 1 | 2238 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Sean Poyser and Richard Dean ([email protected])
#
# Modified for FTV Guide (09/2014 onwards)
# by Thomas Geppert [bluezed] - [email protected]
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import xbmc
import xbmcgui
import xbmcaddon
def deleteDB():
try:
xbmc.log("[script.gtvtvguide] Deleting database...", xbmc.LOGDEBUG)
dbPath = xbmc.translatePath(xbmcaddon.Addon(id = 'script.gtvtvguide').getAddonInfo('profile'))
dbPath = os.path.join(dbPath, 'source.db')
delete_file(dbPath)
passed = not os.path.exists(dbPath)
if passed:
xbmc.log("[script.gtvtvguide] Deleting database...PASSED", xbmc.LOGDEBUG)
else:
xbmc.log("[script.gtvtvguide] Deleting database...FAILED", xbmc.LOGDEBUG)
return passed
except Exception, e:
xbmc.log('[script.gtvtvguide] Deleting database...EXCEPTION', xbmc.LOGDEBUG)
return False
def delete_file(filename):
tries = 10
while os.path.exists(filename) and tries > 0:
try:
os.remove(filename)
break
except:
tries -= 1
if __name__ == '__main__':
if deleteDB():
d = xbmcgui.Dialog()
d.ok('gtvTV Guide', 'The database has been successfully deleted.', 'It will be re-created next time you start the guide')
else:
d = xbmcgui.Dialog()
d.ok('gtvTV Guide', 'Failed to delete database.', 'Database may be locked,', 'please restart XBMC and try again')
| gpl-3.0 | -9,127,050,276,214,440,000 | 32.909091 | 129 | 0.668007 | false |
klamparski/Mrf.NodeTypesBuilder | Documentation/conf.py | 1 | 10297 | # -*- coding: utf-8 -*-
#
# Flow Framework documentation build configuration file, created by
# sphinx-quickstart on Mon Jun 08 11:09:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Mrf.NodeTypesBuilder'
copyright = u'2016 and onwards by the authors'
author = u'Author and Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'dev-master'
# The full version, including alpha/beta/rc tags.
release = 'dev-master'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mrf.NodeTypesBuilder'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Mrf.NodeTypesBuilder.tex', u'Mrf.NodeTypesBuilder Documentation',
u'The author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = True
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Mrf.NodeTypesBuilder', u'Mrf.NodeTypesBuilder Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Mrf.NodeTypesBuilder', u'Mrf.NodeTypesBuilder Documentation',
author, 'Mrf.NodeTypesBuilder', '',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# load PhpLexer
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
# enable highlighting for PHP code not between <?php ... ?> by default
lexers['php'] = PhpLexer(startinline=True)
lexers['php-annotations'] = PhpLexer(startinline=True)
# Use PHP syntax highlighting in code examples by default
highlight_language='php'
| mit | -2,739,290,844,536,983,600 | 31.897764 | 98 | 0.71312 | false |
pombredanne/invenio | modules/webjournal/lib/webjournal_washer.py | 1 | 4833 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal input parameters washing related functions
"""
import time
import re
from invenio.webjournal_config import \
InvenioWebJournalIssueNumberBadlyFormedError, \
InvenioWebJournalNoArticleNumberError, \
InvenioWebJournalArchiveDateWronglyFormedError, \
InvenioWebJournalNoPopupRecordError, \
InvenioWebJournalNoCategoryError
from invenio.webjournal_utils import \
get_current_issue, \
guess_journal_name, \
get_journal_categories, \
get_journal_nb_issues_per_year
from invenio.config import CFG_SITE_LANG
# precompiled patterns for the parameters
issue_number_pattern = re.compile("^\d{1,3}/\d{4}$")
def wash_journal_language(ln):
"""
Washes the language parameter. If there is a language, return this,
otherwise return CFG_SITE_LANG constant
"""
if ln == "":
return CFG_SITE_LANG
else:
return ln
def wash_journal_name(ln, journal_name, guess=True):
"""
Washes the journal name parameter. In case of non-empty string,
returns it, otherwise redirects to a guessing function.
If 'guess' is True the function tries to fix the capitalization of
the journal name.
"""
if guess or not journal_name:
return guess_journal_name(ln, journal_name)
else:
return journal_name
def wash_issue_number(ln, journal_name, issue_number):
"""
Washes an issue number to fit the pattern ww/YYYY, e.g. 50/2007
w/YYYY is also accepted and transformed to 0w/YYYY, e.g. 2/2007 -> 02/2007
If no issue number is found, tries to get the current issue
"""
if issue_number == "":
return get_current_issue(ln, journal_name)
else:
issue_number_match = issue_number_pattern.match(issue_number)
if issue_number_match:
issue_number = issue_number_match.group()
number, year = issue_number.split('/')
number_issues_per_year = get_journal_nb_issues_per_year(journal_name)
precision = len(str(number_issues_per_year))
return ("%0" + str(precision) + "i/%s") % (int(number), year)
else:
raise InvenioWebJournalIssueNumberBadlyFormedError(ln,
issue_number)
def wash_category(ln, category, journal_name, issue):
"""
Washes a category name.
"""
categories = get_journal_categories(journal_name, issue=None)
if category in categories:
return category
elif category == "" and len(categories) > 0:
return categories[0]
else:
raise InvenioWebJournalNoCategoryError(ln,
category,
categories)
def wash_article_number(ln, number, journal_name):
"""
Washes an article number. First checks if it is non-empty, then if it is
convertable to int. If all passes, returns the number, else throws
exception.
"""
if number == "":
raise InvenioWebJournalNoArticleNumberError(ln, journal_name)
try:
int(number)
except:
raise InvenioWebJournalNoArticleNumberError(ln, journal_name)
return number
def wash_popup_record(ln, record, journal_name):
"""
"""
if record == "":
raise InvenioWebJournalNoPopupRecordError(ln, journal_name,
"no recid")
try:
int(record)
except:
raise InvenioWebJournalNoPopupRecordError(ln, journal_name,
record)
return record
def wash_archive_date(ln, journal_name, archive_date):
"""
Washes an archive date to the form dd/mm/yyyy or empty.
"""
if archive_date == "":
return ""
try:
time.strptime(archive_date, "%d/%m/%Y")
except:
raise InvenioWebJournalArchiveDateWronglyFormedError(ln,
archive_date)
return archive_date
| gpl-2.0 | 580,070,241,764,031,600 | 34.8 | 81 | 0.634802 | false |
lsaffre/lino | lino/utils/cycler.py | 1 | 1933 | # -*- coding: UTF-8 -*-
# Copyright 2013-2014 by Rumma & Ko Ltd.
# License: BSD, see LICENSE for more details.
"""
Turns a list of items into an endless loop.
Useful when generating demo fixtures.
>>> from lino.utils import Cycler
>>> def myfunc():
... yield "a"
... yield "b"
... yield "c"
>>> c = Cycler(myfunc())
>>> s = ""
>>> for i in range(10):
... s += c.pop()
>>> print (s)
abcabcabca
An empty Cycler or a Cycler on an empty list will endlessly pop None values:
>>> c = Cycler()
>>> print (c.pop(), c.pop(), c.pop())
None None None
>>> c = Cycler([])
>>> print (c.pop(), c.pop(), c.pop())
None None None
>>> c = Cycler(None)
>>> print (c.pop(), c.pop(), c.pop())
None None None
"""
from __future__ import unicode_literals
from __future__ import print_function
from builtins import object
class Cycler(object):
def __init__(self, *args):
"""
If there is exactly one argument, then this must be an iterable
and will be used as the list of items to cycle on.
If there is more than one positional argument, then these
arguments themselves will be the list of items.
"""
if len(args) == 0:
self.items = []
elif len(args) == 1:
if args[0] is None:
self.items = []
else:
self.items = list(args[0])
else:
self.items = args
self.current = 0
def pop(self):
if len(self.items) == 0:
return None
item = self.items[self.current]
self.current += 1
if self.current >= len(self.items):
self.current = 0
if isinstance(item, Cycler):
return item.pop()
return item
def __len__(self):
return len(self.items)
def reset(self):
self.current = 0
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| bsd-2-clause | 4,371,008,635,208,604,000 | 21.476744 | 76 | 0.54837 | false |
sbremer/hybrid_rs | hybrid_model/models/sigmoid_item_asymfactoring.py | 1 | 3419 | import numpy as np
from keras.layers import Embedding, Input, Flatten, Dense
from keras.layers.merge import Concatenate, Dot, Add
from keras.models import Model
from keras.regularizers import l2
from util.layers_custom import BiasLayer
from hybrid_model.models.abstract import AbstractModelCF, bias_init
class SigmoidItemAsymFactoring(AbstractModelCF):
def __init__(self, n_users, n_items, config=None):
super().__init__(n_users, n_items, config)
self.implicit = np.zeros((self.n_users, self.n_items))
# Defaults
default = {'n_factors': 40, 'reg_bias': 0.00005, 'reg_latent': 0.00003, 'implicit_thresh': 4.0,
'implicit_thresh_crosstrain': 4.75}
default.update(self.config)
self.config = default
n_factors = self.config['n_factors']
reg_bias = l2(self.config['reg_bias'])
reg_latent = l2(self.config['reg_latent'])
self.implicit_thresh = self.config.get('implicit_thresh', 4.0)
self.implicit_thresh_crosstrain = self.config.get('implicit_thresh_crosstrain', 4.75)
input_u = Input((1,))
input_i = Input((1,))
vec_i = Embedding(self.n_items, n_factors, input_length=1, embeddings_regularizer=reg_latent)(input_i)
vec_i_r = Flatten()(vec_i)
vec_implicit = Embedding(self.n_users, self.n_items, input_length=1, trainable=False, name='implicit')(
input_u)
implicit_factors = Dense(n_factors, kernel_initializer='normal', activation='linear',
kernel_regularizer=reg_latent)(vec_implicit)
implicit_factors = Flatten()(implicit_factors)
mf = Dot(1)([implicit_factors, vec_i_r])
bias_u = Embedding(self.n_users, 1, input_length=1, embeddings_initializer='zeros',
embeddings_regularizer=reg_bias)(input_u)
bias_u_r = Flatten()(bias_u)
bias_i = Embedding(self.n_items, 1, input_length=1, embeddings_initializer='zeros',
embeddings_regularizer=reg_bias)(input_i)
bias_i_r = Flatten()(bias_i)
added = Concatenate()([bias_u_r, bias_i_r, mf])
mf_out = BiasLayer(bias_initializer=bias_init, name='bias', activation='sigmoid')(added)
self.model = Model(inputs=[input_u, input_i], outputs=mf_out)
self.compile()
def recompute_implicit(self, x, y, transformed=False, crosstrain=False):
if transformed:
if crosstrain:
thresh = self.transformation.transform(self.implicit_thresh_crosstrain)
else:
thresh = self.transformation.transform(self.implicit_thresh)
else:
if crosstrain:
thresh = self.implicit_thresh_crosstrain
else:
thresh = self.implicit_thresh
inds_u, inds_i = x
# Use ratings over the threshold as implicit feedback
for u, i, r in zip(inds_u, inds_i, y):
if r >= thresh:
self.implicit[u, i] = 1.0
# Normalize using sqrt (ref. SVD++ paper)
implicit_norm = self.implicit / np.sqrt(np.maximum(1, np.sum(self.implicit, axis=1)[:, None]))
self.model.get_layer('implicit').set_weights([implicit_norm])
def fit(self, x_train, y_train, **kwargs):
self.recompute_implicit(x_train, y_train)
return super().fit(x_train, y_train, **kwargs)
| apache-2.0 | -9,212,830,444,832,515,000 | 37.41573 | 111 | 0.61246 | false |
kyle8998/Practice-Coding-Questions | leetcode/23-Hard-Merge-K-Sorted-Lists/answer.py | 1 | 2603 | #!/usr/bin/env python3
#-------------------------------------------------------------------------------
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
#-------------------------------------------------------------------------------
# Merge Sort Solution
#-------------------------------------------------------------------------------
class Solution:
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if not lists: return None
def mergeTwoLists(node1, node2):
dummy = ListNode(0)
cur, cur1, cur2 = dummy, node1, node2
while cur1 and cur2:
cur.next = cur1 if cur1.val < cur2.val else cur2
if cur.next == cur1:
cur1 = cur1.next
else:
cur2 = cur2.next
cur = cur.next
cur.next = cur1 or cur2
return [dummy.next]
def mergelists(Lists):
if len(Lists) == 1:
return Lists
elif len(Lists) == 2:
return mergeTwoLists(Lists[0], Lists[1])
else:
low, high = 0, len(Lists)
mid = (low+high)//2
return mergeTwoLists(mergelists(Lists[low:mid])[0], mergelists(Lists[mid:high])[0])
return mergelists(lists)[0]
#-------------------------------------------------------------------------------
# First Solution (Time Limit Exceeded)
#-------------------------------------------------------------------------------
class Solution:
def mergeKLists(self, lists):
"""
:type lists: List[ListNode]
:rtype: ListNode
"""
if not lists:
return None
for i in range(len(lists)-1, -1, -1):
if not lists[i]:
lists.pop(i)
dummy = ListNode(None)
curr = dummy
while lists:
smallest = float('inf')
idx = 0
for i in range(len(lists)-1, -1, -1):
if lists[i] and lists[i].val < smallest:
smallest = lists[i].val
idx = i
curr.next = ListNode(smallest)
curr = curr.next
lists[idx] = lists[idx].next
for i in range(len(lists)-1, -1, -1):
if not lists[i]:
lists.pop(i)
return dummy.next
#-------------------------------------------------------------------------------
| unlicense | -1,056,980,143,340,566,400 | 33.25 | 99 | 0.384172 | false |
google/pinject | tests/object_graph_test.py | 1 | 31697 | """Copyright 2013 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from pinject import bindings
from pinject import decorators
from pinject import errors
from pinject import object_graph
from pinject import scoping
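# Tests for new_object_graph(): implicit class bindings, binding specs
# and their dependencies, custom scopes, and argument validation.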
class NewObjectGraphTest(unittest.TestCase):
def test_can_create_object_graph_with_all_defaults(self):
_ = object_graph.new_object_graph()
def test_creates_object_graph_using_given_modules(self):
obj_graph = object_graph.new_object_graph(modules=[errors])
self.assertIsInstance(obj_graph.provide(errors.Error),
errors.Error)
def test_creates_object_graph_using_given_classes(self):
class SomeClass(object):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass])
self.assertIsInstance(obj_graph.provide(SomeClass), SomeClass)
def test_creates_object_graph_using_given_binding_specs(self):
class ClassWithFooInjected(object):
def __init__(self, foo):
pass
class SomeClass(object):
pass
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', to_class=SomeClass)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassWithFooInjected],
binding_specs=[SomeBindingSpec()])
self.assertIsInstance(obj_graph.provide(ClassWithFooInjected),
ClassWithFooInjected)
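    # A binding spec can declare other specs via dependencies(); the
    # bindings from those specs are installed alongside its own.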
def test_uses_binding_spec_dependencies(self):
class BindingSpecOne(bindings.BindingSpec):
def configure(self, bind):
bind('foo', to_instance='a-foo')
class BindingSpecTwo(bindings.BindingSpec):
def configure(self, bind):
bind('bar', to_instance='a-bar')
def dependencies(self):
return [BindingSpecOne()]
class SomeClass(object):
def __init__(self, foo, bar):
self.foobar = '{0}{1}'.format(foo, bar)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass], binding_specs=[BindingSpecTwo()])
some_class = obj_graph.provide(SomeClass)
self.assertEqual('a-fooa-bar', some_class.foobar)
def test_allows_dag_binding_spec_dependencies(self):
class CommonBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', to_instance='a-foo')
class BindingSpecOne(bindings.BindingSpec):
def dependencies(self):
return [CommonBindingSpec()]
class BindingSpecTwo(bindings.BindingSpec):
def dependencies(self):
return [CommonBindingSpec()]
class RootBindingSpec(bindings.BindingSpec):
def dependencies(self):
return [BindingSpecOne(), BindingSpecTwo()]
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[RootBindingSpec()])
some_class = obj_graph.provide(SomeClass)
self.assertEqual('a-foo', some_class.foo)
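    # configure_method_name and dependencies_method_name let callers
    # rename the standard binding-spec hook methods.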
def test_allows_customizing_binding_spec_standard_method_names(self):
class BindingSpecOne(bindings.BindingSpec):
def Configure(self, bind):
bind('foo', to_instance='a-foo')
def Dependencies(self):
return []
class BindingSpecTwo(bindings.BindingSpec):
def Configure(self, bind):
pass
def Dependencies(self):
return [BindingSpecOne()]
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass], binding_specs=[BindingSpecTwo()],
configure_method_name='Configure',
dependencies_method_name='Dependencies')
some_class = obj_graph.provide(SomeClass)
self.assertEqual('a-foo', some_class.foo)
    def test_customizing_binding_spec_method_names_allows_method_omission(self):
class BindingSpecOne(bindings.BindingSpec):
def Configure(self, bind):
bind('foo', to_instance='a-foo')
# Dependencies() omitted
class BindingSpecTwo(bindings.BindingSpec):
# Configure() omitted
def Dependencies(self):
return [BindingSpecOne()]
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass], binding_specs=[BindingSpecTwo()],
configure_method_name='Configure',
dependencies_method_name='Dependencies')
some_class = obj_graph.provide(SomeClass)
self.assertEqual('a-foo', some_class.foo)
def test_allows_binding_spec_with_only_provider_methods(self):
class ClassWithFooInjected(object):
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self):
return 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassWithFooInjected],
binding_specs=[SomeBindingSpec()],
configure_method_name='Configure',
dependencies_method_name='Dependencies')
self.assertEqual('a-foo', obj_graph.provide(ClassWithFooInjected).foo)
def test_raises_error_if_binding_spec_is_empty(self):
class EmptyBindingSpec(bindings.BindingSpec):
pass
self.assertRaises(errors.EmptyBindingSpecError,
object_graph.new_object_graph, modules=None,
classes=None, binding_specs=[EmptyBindingSpec()])
def test_creates_object_graph_using_given_scopes(self):
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
@decorators.provides(in_scope='foo-scope')
def provide_foo(self):
return object()
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()],
id_to_scope={'foo-scope': scoping.SingletonScope()})
some_class_one = obj_graph.provide(SomeClass)
some_class_two = obj_graph.provide(SomeClass)
self.assertIs(some_class_one.foo, some_class_two.foo)
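    # new_object_graph() validates the types of its keyword arguments.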
def test_raises_exception_if_modules_is_wrong_type(self):
self.assertRaises(errors.WrongArgTypeError,
object_graph.new_object_graph, modules=42)
def test_raises_exception_if_classes_is_wrong_type(self):
self.assertRaises(errors.WrongArgTypeError,
object_graph.new_object_graph, classes=42)
def test_raises_exception_if_binding_specs_is_wrong_type(self):
self.assertRaises(errors.WrongArgTypeError,
object_graph.new_object_graph, binding_specs=42)
def test_raises_exception_if_get_arg_names_from_class_name_is_wrong_type(self):
self.assertRaises(errors.WrongArgTypeError,
object_graph.new_object_graph,
get_arg_names_from_class_name=42)
def test_raises_exception_if_get_arg_names_from_provider_fn_name_is_wrong_type(self):
self.assertRaises(errors.WrongArgTypeError,
object_graph.new_object_graph,
get_arg_names_from_provider_fn_name=42)
def test_raises_exception_if_is_scope_usable_from_scope_is_wrong_type(self):
self.assertRaises(errors.WrongArgTypeError,
object_graph.new_object_graph,
is_scope_usable_from_scope=42)
def test_raises_exception_if_configure_method_has_no_expected_args(self):
class SomeBindingSpec(bindings.BindingSpec):
def configure(self):
pass
self.assertRaises(errors.ConfigureMethodMissingArgsError,
object_graph.new_object_graph,
modules=None, binding_specs=[SomeBindingSpec()])
def test_raises_exception_if_required_binding_missing(self):
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, require):
require('foo')
self.assertRaises(
errors.MissingRequiredBindingError, object_graph.new_object_graph,
modules=None, binding_specs=[SomeBindingSpec()])
def test_raises_exception_if_required_binding_conflicts(self):
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, require):
require('foo')
class Foo(object):
pass
class _Foo(object):
pass
self.assertRaises(errors.ConflictingRequiredBindingError,
object_graph.new_object_graph,
modules=None, classes=[Foo, _Foo],
binding_specs=[SomeBindingSpec()])
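# _pare_to_present_args() trims a kwargs dict down to only the args
# that the given function actually declares.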
class PareToPresentArgsTest(unittest.TestCase):
def test_removes_only_args_not_present(self):
def fn(self, present):
pass
self.assertEqual(
{'present': 'a-present-value'},
object_graph._pare_to_present_args(
{'present': 'a-present-value', 'missing': 'a-missing-value'},
fn))
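# Tests for ObjectGraph.provide(): implicit bindings, provider methods,
# annotations, scoping, and explicit-only binding mode.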
class ObjectGraphProvideTest(unittest.TestCase):
def test_can_provide_trivial_class(self):
class ExampleClassWithInit(object):
def __init__(self):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ExampleClassWithInit])
        self.assertIsInstance(obj_graph.provide(ExampleClassWithInit),
                              ExampleClassWithInit)
def test_can_provide_class_without_own_init(self):
class ExampleClassWithoutInit(object):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ExampleClassWithoutInit])
self.assertIsInstance(obj_graph.provide(ExampleClassWithoutInit),
ExampleClassWithoutInit)
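    # Both _CollidingExampleClass and CollidingExampleClass map to the
    # arg name 'colliding_example_class', so injecting by that arg name
    # is ambiguous, but providing either class directly still works.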
def test_can_directly_provide_class_with_colliding_arg_name(self):
class _CollidingExampleClass(object):
pass
class CollidingExampleClass(object):
pass
obj_graph = object_graph.new_object_graph(
modules=None,
classes=[_CollidingExampleClass, CollidingExampleClass])
self.assertIsInstance(obj_graph.provide(CollidingExampleClass),
CollidingExampleClass)
def test_can_provide_class_that_itself_requires_injection(self):
class ClassOne(object):
def __init__(self, class_two):
pass
class ClassTwo(object):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo])
self.assertIsInstance(obj_graph.provide(ClassOne), ClassOne)
def test_raises_error_if_arg_is_ambiguously_injectable(self):
class _CollidingExampleClass(object):
pass
class CollidingExampleClass(object):
pass
class AmbiguousParamClass(object):
def __init__(self, colliding_example_class):
pass
obj_graph = object_graph.new_object_graph(
modules=None,
classes=[_CollidingExampleClass, CollidingExampleClass,
AmbiguousParamClass])
self.assertRaises(errors.AmbiguousArgNameError,
obj_graph.provide, AmbiguousParamClass)
def test_raises_error_if_arg_refers_to_no_known_class(self):
class UnknownParamClass(object):
def __init__(self, unknown_class):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[UnknownParamClass])
self.assertRaises(errors.NothingInjectableForArgError,
obj_graph.provide, UnknownParamClass)
def test_raises_error_if_injection_cycle(self):
class ClassOne(object):
def __init__(self, class_two):
pass
class ClassTwo(object):
def __init__(self, class_one):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo])
self.assertRaises(errors.CyclicInjectionError,
obj_graph.provide, ClassOne)
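    # Binding-spec methods named provide_<arg> act as providers for
    # that arg name, and their own parameters are injected too.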
def test_injects_args_of_provider_fns(self):
class ClassOne(object):
pass
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self, class_one):
class_one.three = 3
return class_one
class ClassTwo(object):
def __init__(self, foo):
self.foo = foo
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo],
binding_specs=[SomeBindingSpec()])
class_two = obj_graph.provide(ClassTwo)
self.assertEqual(3, class_two.foo.three)
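    # An arg named provide_<binding> is injected with a zero-arg
    # provider function rather than with the instance itself.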
def test_injects_provider_fn_if_so_named(self):
class ClassOne(object):
def __init__(self):
self.forty_two = 42
class ClassTwo(object):
def __init__(self, provide_class_one):
self.provide_class_one = provide_class_one
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo])
class_two = obj_graph.provide(ClassTwo)
self.assertEqual(42, class_two.provide_class_one().forty_two)
def test_can_provide_arg_with_annotation(self):
class ClassOne(object):
@decorators.annotate_arg('foo', 'an-annotation')
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', annotated_with='an-annotation', to_instance='a-foo')
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()])
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo', class_one.foo)
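    # SINGLETON scope reuses one instance across injections, while
    # PROTOTYPE creates a fresh instance for each injection.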
def test_all_parts_of_provide_decorator_are_used(self):
class SomeClass(object):
@decorators.annotate_arg('foo', 'specific-foo')
@decorators.annotate_arg('bar', 'specific-bar')
def __init__(self, foo, bar):
self.foo = foo
self.bar = bar
class SomeBindingSpec(bindings.BindingSpec):
@decorators.provides('foo', annotated_with='specific-foo',
in_scope=scoping.SINGLETON)
def provide_foo(self):
return object()
@decorators.provides('bar', annotated_with='specific-bar',
in_scope=scoping.PROTOTYPE)
def provide_bar(self):
return object()
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
class_one = obj_graph.provide(SomeClass)
class_two = obj_graph.provide(SomeClass)
self.assertIs(class_one.foo, class_two.foo)
self.assertIsNot(class_one.bar, class_two.bar)
def test_singleton_classes_are_singletons_across_arg_names(self):
class InjectedClass(object):
pass
class SomeClass(object):
def __init__(self, foo, bar):
self.foo = foo
self.bar = bar
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', to_class=InjectedClass, in_scope=scoping.SINGLETON)
bind('bar', to_class=InjectedClass, in_scope=scoping.SINGLETON)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
some_class = obj_graph.provide(SomeClass)
self.assertIs(some_class.foo, some_class.bar)
def test_raises_error_if_only_binding_has_different_annotation(self):
class ClassOne(object):
@decorators.annotate_arg('foo', 'an-annotation')
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', annotated_with='other-annotation',
to_instance='a-foo')
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()])
self.assertRaises(errors.NothingInjectableForArgError,
obj_graph.provide, ClassOne)
def test_raises_error_if_only_binding_has_no_annotation(self):
class ClassOne(object):
@decorators.annotate_arg('foo', 'an-annotation')
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', to_instance='a-foo')
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()])
self.assertRaises(errors.NothingInjectableForArgError,
obj_graph.provide, ClassOne)
def test_can_provide_using_provider_fn(self):
class ClassOne(object):
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self):
return 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()])
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo', class_one.foo)
def test_provider_fn_overrides_implicit_class_binding(self):
class ClassOne(object):
def __init__(self, foo):
self.foo = foo
class Foo(object):
pass
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self):
return 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, Foo],
binding_specs=[SomeBindingSpec()])
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo', class_one.foo)
def test_autoinjects_args_of_provider_fn(self):
class ClassOne(object):
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self, bar):
return 'a-foo with {0}'.format(bar)
def provide_bar(self):
return 'a-bar'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()])
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo with a-bar', class_one.foo)
def test_can_use_annotate_arg_with_provides(self):
class ClassOne(object):
@decorators.annotate_arg('foo', 'an-annotation')
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
@decorators.provides(annotated_with='an-annotation')
@decorators.annotate_arg('bar', 'another-annotation')
def provide_foo(self, bar):
return 'a-foo with {0}'.format(bar)
@decorators.provides(annotated_with='another-annotation')
def provide_bar(self):
return 'a-bar'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()])
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo with a-bar', class_one.foo)
def test_injectable_decorated_class_can_be_directly_provided(self):
class SomeClass(object):
@decorators.injectable
def __init__(self):
self.foo = 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass], only_use_explicit_bindings=True)
class_one = obj_graph.provide(SomeClass)
self.assertEqual('a-foo', class_one.foo)
def test_inject_decorated_class_can_be_directly_provided(self):
class SomeClass(object):
@decorators.inject()
def __init__(self):
self.foo = 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass], only_use_explicit_bindings=True)
class_one = obj_graph.provide(SomeClass)
self.assertEqual('a-foo', class_one.foo)
def test_non_explicitly_injectable_class_cannot_be_directly_provided(self):
class SomeClass(object):
def __init__(self):
self.foo = 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass], only_use_explicit_bindings=True)
self.assertRaises(
errors.NonExplicitlyBoundClassError, obj_graph.provide, SomeClass)
def test_injectable_decorated_class_is_explicitly_bound(self):
class ClassOne(object):
@decorators.injectable
def __init__(self, class_two):
self.class_two = class_two
class ClassTwo(object):
@decorators.injectable
def __init__(self):
self.foo = 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo],
only_use_explicit_bindings=True)
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo', class_one.class_two.foo)
def test_inject_decorated_class_is_explicitly_bound(self):
class ClassOne(object):
@decorators.inject()
def __init__(self, class_two):
self.class_two = class_two
class ClassTwo(object):
@decorators.inject()
def __init__(self):
self.foo = 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo],
only_use_explicit_bindings=True)
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-foo', class_one.class_two.foo)
def test_explicit_binding_is_explicitly_bound(self):
class ClassOne(object):
@decorators.injectable
def __init__(self, class_two):
self.class_two = class_two
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('class_two', to_instance='a-class-two')
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()],
only_use_explicit_bindings=True)
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-class-two', class_one.class_two)
def test_provider_fn_is_explicitly_bound(self):
class ClassOne(object):
@decorators.injectable
def __init__(self, class_two):
self.class_two = class_two
class SomeBindingSpec(bindings.BindingSpec):
def provide_class_two(self):
return 'a-class-two'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne], binding_specs=[SomeBindingSpec()],
only_use_explicit_bindings=True)
class_one = obj_graph.provide(ClassOne)
self.assertEqual('a-class-two', class_one.class_two)
def test_non_bound_non_decorated_class_is_not_explicitly_bound(self):
class ClassOne(object):
@decorators.injectable
def __init__(self, class_two):
self.class_two = class_two
class ClassTwo(object):
def __init__(self):
self.foo = 'a-foo'
obj_graph = object_graph.new_object_graph(
modules=None, classes=[ClassOne, ClassTwo],
only_use_explicit_bindings=True)
self.assertRaises(errors.NothingInjectableForArgError,
obj_graph.provide, ClassOne)
def test_can_pass_direct_args_to_provider_fn(self):
class SomeBindingSpec(bindings.BindingSpec):
@decorators.inject(['injected'])
def provide_foo(self, passed_directly_parg, passed_directly_kwarg,
injected):
return passed_directly_parg + passed_directly_kwarg + injected
def configure(self, bind):
bind('injected', to_instance=2)
class SomeClass(object):
def __init__(self, provide_foo):
self.foo = provide_foo(30, passed_directly_kwarg=10)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
some_class = obj_graph.provide(SomeClass)
self.assertEqual(42, some_class.foo)
def test_can_pass_kwargs_to_provider_fn(self):
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self, injected, **kwargs):
return injected + kwargs['kwarg']
def configure(self, bind):
bind('injected', to_instance=2)
class SomeClass(object):
def __init__(self, provide_foo):
self.foo = provide_foo(kwarg=40)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
some_class = obj_graph.provide(SomeClass)
self.assertEqual(42, some_class.foo)
def test_cannot_pass_injected_args_to_provider_fn(self):
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self, injected):
return 'unused'
def configure(self, bind):
bind('injected', to_instance=2)
class SomeClass(object):
def __init__(self, provide_foo):
self.foo = provide_foo(injected=40)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
self.assertRaises(errors.DirectlyPassingInjectedArgsError,
obj_graph.provide, SomeClass)
def test_cannot_pass_non_existent_args_to_provider_fn(self):
class SomeBindingSpec(bindings.BindingSpec):
@decorators.inject(['injected'])
def provide_foo(self, injected):
pass
def configure(self, bind):
bind('injected', to_instance=2)
class SomeClass(object):
def __init__(self, provide_foo):
self.foo = provide_foo(non_existent=40)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
self.assertRaises(TypeError, obj_graph.provide, SomeClass)
def test_inject_decorator_works_on_initializer(self):
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('injected', to_instance=2)
class Foo(object):
@decorators.inject(['injected'])
def __init__(self, passed_directly_parg, passed_directly_kwarg,
injected):
self.forty_two = (passed_directly_parg +
passed_directly_kwarg + injected)
class SomeClass(object):
def __init__(self, provide_foo):
self.foo = provide_foo(30, passed_directly_kwarg=10)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass, Foo],
binding_specs=[SomeBindingSpec()])
some_class = obj_graph.provide(SomeClass)
self.assertEqual(42, some_class.foo.forty_two)
def test_cannot_pass_non_existent_args_to_provider_fn_for_instance(self):
class SomeBindingSpec(bindings.BindingSpec):
def configure(self, bind):
bind('foo', to_instance='a-foo')
class SomeClass(object):
def __init__(self, provide_foo):
self.foo = provide_foo(non_existent=42)
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
self.assertRaises(TypeError, obj_graph.provide, SomeClass)
def test_cannot_directly_inject_something_expecting_direct_args(self):
class SomeBindingSpec(bindings.BindingSpec):
@decorators.inject(['injected'])
def provide_foo(self, passed_directly, injected):
return passed_directly + injected
def configure(self, bind):
bind('injected', to_instance=2)
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()])
self.assertRaises(errors.OnlyInstantiableViaProviderFunctionError,
obj_graph.provide, SomeClass)
def test_can_inject_none_when_allowing_injecting_none(self):
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self):
return None
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()], allow_injecting_none=True)
some_class = obj_graph.provide(SomeClass)
self.assertIsNone(some_class.foo)
def test_cannot_inject_none_when_disallowing_injecting_none(self):
class SomeClass(object):
def __init__(self, foo):
self.foo = foo
class SomeBindingSpec(bindings.BindingSpec):
def provide_foo(self):
return None
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass],
binding_specs=[SomeBindingSpec()], allow_injecting_none=False)
self.assertRaises(errors.InjectingNoneDisallowedError,
obj_graph.provide, SomeClass)
def test_raises_exception_if_trying_to_provide_nonclass(self):
class SomeClass(object):
pass
obj_graph = object_graph.new_object_graph(
modules=None, classes=[SomeClass])
self.assertRaises(errors.WrongArgTypeError, obj_graph.provide, 42)
| apache-2.0 | -2,696,896,613,695,787,500 | 42.539835 | 89 | 0.599741 | false |
fkie-cad/FACT_core | src/plugins/analysis/qemu_exec/routes/routes.py | 1 | 3219 | import os
from contextlib import suppress
from flask import render_template_string
from flask_restx import Resource, Namespace
from helperFunctions.database import ConnectTo
from helperFunctions.fileSystem import get_src_dir
from storage.db_interface_frontend import FrontEndDbInterface
from web_interface.components.component_base import ComponentBase
from web_interface.rest.helper import error_message, success_message
from web_interface.security.decorator import roles_accepted
from web_interface.security.privileges import PRIVILEGES
from ..code.qemu_exec import AnalysisPlugin
def get_analysis_results_for_included_uid(uid, config): # pylint: disable=invalid-name
results = {}
with ConnectTo(FrontEndDbInterface, config) as db:
this_fo = db.get_object(uid)
if this_fo is not None:
for parent_uid in _get_parent_uids_from_virtual_path(this_fo):
parent_fo = db.get_object(parent_uid)
parent_results = _get_results_from_parent_fo(parent_fo, uid)
if parent_results:
results[parent_uid] = parent_results
return results
def _get_parent_uids_from_virtual_path(file_object):
result = set()
for path_list in file_object.virtual_file_path.values():
for virtual_path in path_list:
with suppress(IndexError):
result.add(virtual_path.split("|")[-2])
return result
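# Illustrative example (the uid values are made up): FACT virtual file paths
# are pipe-separated uid chains such as 'firmware_uid|parent_uid|this_uid',
# so the second-to-last element names the direct parent:
#
#     'fw42|c0ffee|deadbeef'.split('|')[-2]  # -> 'c0ffee'
#
# Paths with fewer than two elements raise IndexError and are skipped by the
# suppress() above.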
def _get_results_from_parent_fo(parent_fo, uid):
if parent_fo is not None and \
AnalysisPlugin.NAME in parent_fo.processed_analysis and \
'files' in parent_fo.processed_analysis[AnalysisPlugin.NAME] and \
uid in parent_fo.processed_analysis[AnalysisPlugin.NAME]['files']:
return parent_fo.processed_analysis[AnalysisPlugin.NAME]['files'][uid]
return None
class PluginRoutes(ComponentBase):
def _init_component(self):
self._app.add_url_rule('/plugins/qemu_exec/ajax/<uid>', 'plugins/qemu_exec/ajax/<uid>', self._get_analysis_results_of_parent_fo)
@roles_accepted(*PRIVILEGES['view_analysis'])
def _get_analysis_results_of_parent_fo(self, uid):
results = get_analysis_results_for_included_uid(uid, self._config)
return render_template_string(self._load_view(), results=results)
@staticmethod
def _load_view():
path = os.path.join(get_src_dir(), 'plugins/analysis/{}/routes/ajax_view.html'.format(AnalysisPlugin.NAME))
with open(path, "r") as fp:
return fp.read()
api = Namespace('/plugins/qemu_exec/rest')
@api.hide
class QemuExecRoutesRest(Resource):
ENDPOINTS = [('/plugins/qemu_exec/rest/<uid>', ['GET'])]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config = kwargs.get('config', None)
@roles_accepted(*PRIVILEGES['view_analysis'])
def get(self, uid):
results = get_analysis_results_for_included_uid(uid, self.config)
endpoint = self.ENDPOINTS[0][0]
if not results:
            return error_message('no results found for uid {}'.format(uid), endpoint, request_data={'uid': uid})
return success_message({AnalysisPlugin.NAME: results}, endpoint, request_data={'uid': uid})
| gpl-3.0 | 604,691,004,250,413,800 | 37.783133 | 136 | 0.679714 | false |
ericwhyne/datapop | datapop-publish.py | 1 | 1446 | #!/usr/bin/python
import sqlite3
import datapop
import sys
import codecs
import re
import time
current_milli_time = lambda: int(round(time.time() * 1000))
outfilename = 'index.html'
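# timestamp_ms values in links.db are in milliseconds (hence
# current_milli_time above), so this window covers roughly the last 3 hours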
interval = 3 * 60 * 60 * 1000
start_time = current_milli_time() - interval
sys.stdout = codecs.getwriter('utf8')(sys.stdout)
databasefile = 'links.db'
# Connect to local database
conn = sqlite3.connect(databasefile)
c = conn.cursor()
urls = []
query = 'SELECT url, count(url), sum(retweets), sum(favorites), sum(followers) FROM twitterlinks WHERE timestamp_ms > ' + str(start_time) + ' GROUP BY url ORDER BY count(url) DESC LIMIT 50'
print query
for row in c.execute(query):
(url, count, retweets, favorites, followers) = row
urls.append({'url': url, 'count': count, 'retweets': retweets, 'favorites': favorites, 'followers': followers})
conn.close()
content = []
for url in urls:
title = datapop.fetch_title(url['url'])
if title:
print url['count'], url['retweets'], url['favorites'], url['followers'], "\t", title, url['url']
title = re.sub('\|','',title)
content.append(str(url['count']) + ' | ' + title + ' | ' + "<a href='" + url['url'] + "'>" + url['url'] + "</a>")
print "\n\nWriting to file..."
outfile = codecs.open(outfilename,'w',encoding='utf8')
outfile.write("<html><h2>What's Popular in the Data World</h2><br>\n")
outfile.write("<br>\n".join(content))
outfile.write("</html>")
| apache-2.0 | 6,614,672,527,995,097,000 | 35.15 | 188 | 0.644537 | false |
sbobovyc/GameTools | TSW/src/idx.py | 1 | 6707 | """
Copyright (C) 2013 Stanislav Bobovych
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import sys
import struct
class IDX_bundle_entry:
def __init__(self):
self.RDB_type = None
self.RDB_id = None
def unpack(self, file_pointer, verbose=False):
#print hex(file_pointer.tell())
self.RDB_type, self.RDB_id = struct.unpack("<II", file_pointer.read(8))
if verbose:
print "\tRDB Type: %i RDB ID: %i" % (self.RDB_type, self.RDB_id)
class IDX_bundle_data:
def __init__(self):
self.name_length = None
self.name = None
self.num_entries = None
self.bundle_entries = []
def unpack(self, file_pointer, verbose=False):
self.name_length, = struct.unpack("<I", file_pointer.read(4))
self.name = file_pointer.read(self.name_length)
self.num_entries, = struct.unpack("<I", file_pointer.read(4))
self.num_entries /= 256
if verbose:
print "Bundle name:", self.name, "Entry count: ", self.num_entries
for entry in range(0, self.num_entries):
            self.bundle_entries.append(IDX_bundle_entry().unpack(file_pointer, verbose))
        return self
class IDX_bundles:
def __init__(self):
self.num_bundles = None
self.bundle_data = []
def unpack(self, file_pointer, verbose=False):
self.num_bundles, = struct.unpack("<I", file_pointer.read(4))
if verbose:
print "Number of bundles", self.num_bundles
for bundle in range(0, self.num_bundles):
self.bundle_data.append(IDX_bundle_data().unpack(file_pointer, verbose))
        file_pointer.read(1)
        return self
class IDX_entry_details:
def __init__(self):
self.RDB_file_number = None
self.unknown1 = None #Flags?
self.unknown2 = None #????
self.unknown3 = None #????
self.rdbdata_offset = None
self.entry_length = None
self.md5hash = None
def unpack(self, file_pointer, verbose=False):
self.RDB_file_number, self.unknown1, self.unknown2, self.unknown3 = struct.unpack("BBBB", file_pointer.read(4))
self.rdbdata_offset, = struct.unpack("<I", file_pointer.read(4))
self.entry_length, = struct.unpack("<I", file_pointer.read(4))
# unpack md5 hash
self.md5hash, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash = self.md5hash << 64
md5hash_lower, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash |= md5hash_lower
if verbose:
print "\tRDB file number: %i" % (self.RDB_file_number)
print "\tFlags???: 0x%x" % (self.unknown1)
print "\tUnknown: 0x%x" % (self.unknown2)
print "\tUnknown: 0x%x" % (self.unknown3)
print "\tOffset in rdbdata file: 0x%x" % (self.rdbdata_offset)
print "\tLength of entry data: %i" % (self.entry_length)
print "\tMD5:", str(hex(self.md5hash)).strip('L')
return self
class IDX_index:
def __init__(self):
self.RDB_type = None
self.RDB_id = None
def unpack(self, file_pointer, verbose=False):
self.RDB_type, self.RDB_id = struct.unpack("<II", file_pointer.read(8))
if verbose:
print "\tRDB Type: %i RDB ID: %i" % (self.RDB_type, self.RDB_id)
return self
class IDX_index_header:
def __init__(self):
self.magic = None # IBDR
self.version = None # 0x07
self.md5hash = None
self.num_indeces = None
def unpack(self, file_pointer, dest_filepath, verbose=False):
self.magic, = struct.unpack("4s", file_pointer.read(4))
self.version, = struct.unpack("<I", file_pointer.read(4))
# unpack md5 hash
self.md5hash, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash = self.md5hash << 64
md5hash_lower, = struct.unpack("!Q", file_pointer.read(8))
self.md5hash |= md5hash_lower
self.num_indeces, = struct.unpack("<I", file_pointer.read(4))
if verbose:
print "Magic: ", self.magic
print "Version: ", self.version
print "MD5 of index data: ", str(hex(self.md5hash)).strip('L')
print "Number of indeces: ", self.num_indeces
class IDX_index_file:
def __init__(self, filepath=None):
self.filepath = filepath
self.header = None
self.indeces = []
self.entry_details = []
self.bundles = None
if self.filepath != None:
self.open(filepath)
def open(self, filepath=None):
if filepath == None and self.filepath == None:
print "File path is empty"
return
if self.filepath == None:
self.filepath = filepath
def dump(self, dest_filepath=os.getcwd(), verbose=False):
with open(self.filepath, "rb") as f:
self.header = IDX_index_header()
self.header.unpack(f, dest_filepath, verbose)
for index in range(0, self.header.num_indeces):
if verbose:
print "\tIndex: ", index
self.indeces.append(IDX_index().unpack(f, verbose))
for index in range(0, self.header.num_indeces):
if verbose:
print "Index: ", index
self.entry_details.append(IDX_entry_details().unpack(f, verbose))
self.bundles = IDX_bundles().unpack(f, verbose)
def get_indeces(self, RDB_type):
id2index = {}
for i in range(0, self.header.num_indeces):
if self.indeces[i].RDB_type == RDB_type:
id2index[self.indeces[i].RDB_id] = i
return id2index
def get_entry_details(self, index):
entry_detail = self.entry_details[index]
filename = "%02i.rdbdata" % (entry_detail.RDB_file_number)
return (filename, entry_detail.rdbdata_offset, entry_detail.entry_length)
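# Hypothetical usage sketch (the file name and RDB type below are
# illustrative, not taken from real TSW data): once dump() has parsed the
# index, the two helpers above locate an entry's payload on disk.
#
#     idx = IDX_index_file("le.idx")
#     idx.dump()
#     for rdb_id, index in idx.get_indeces(1000624).items():
#         filename, offset, length = idx.get_entry_details(index)
#         # read `length` bytes at `offset` inside that .rdbdata file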
if __name__ == "__main__":
filepath = sys.argv[1]
idx = IDX_index_file(filepath)
idx.dump(verbose=True)
| gpl-3.0 | 6,742,526,433,551,087,000 | 36.892655 | 119 | 0.591472 | false |
Lysxia/dissemin | papers/utils.py | 1 | 16403 | # -*- encoding: utf-8 -*-
# Dissemin: open access policy enforcement tool
# Copyright (C) 2014 Antonin Delpeuch
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
from __future__ import unicode_literals
import re
import hashlib
import datetime
import unicode_tex
import unicodedata
from unidecode import unidecode
from lxml.html.clean import Cleaner
from lxml.html import fromstring, _transform_result
from lxml import etree
from io import StringIO
from titlecase import titlecase
### General string utilities ###
filter_punctuation_alphanum_regex = re.compile(r'.*\w')
def filter_punctuation(lst):
"""
:param lst: list of strings
:returns: all the strings that contain at least one alphanumeric character
>>> filter_punctuation([u'abc',u'ab.',u'/,',u'a-b',u'#=', u'0'])
[u'abc', u'ab.', u'a-b', u'0']
"""
return filter(lambda x: filter_punctuation_alphanum_regex.match(x) is not None,
lst)
def nocomma(lst):
"""
Join fields using ',' ensuring that it does not appear in the fields
This is used to output similarity graphs to be visualized with Gephi.
:param lst: list of strings
:returns: these strings joined by commas, ensuring they do not contain
commas themselves
>>> nocomma([u'a',u'b',u'cd'])
u'a,b,cd'
>>> nocomma([u'a,',u'b'])
u'a,b'
>>> nocomma([u'abc',u'',u'\\n',u'def'])
u'abc, , ,def'
"""
    lst = map(lambda x: unicode(x).replace(',','').replace('\n',''), lst)
lst = [x or ' ' for x in lst]
return ','.join(lst)
def ulower(s):
"""
Converts to unicode and lowercase.
:param s: a string
:return: unicode(s).lower()
>>> ulower('abSc')
u'absc'
>>> ulower(None)
u'none'
>>> ulower(89)
u'89'
"""
return unicode(s).lower()
def nstrip(s):
"""
Just like unicode.strip(), but works for None too.
>>> nstrip(None) is None
True
>>> nstrip(u'aa')
u'aa'
>>> nstrip(u' aa \\n')
u'aa'
"""
return s.strip() if s else None
def remove_diacritics(s):
"""
Removes diacritics using the `unidecode` package.
:param: an str or unicode string
:returns: if str: the same string. if unicode: the unidecoded string.
>>> remove_diacritics(u'aéèï')
'aeei'
>>> remove_diacritics(u'aéè'.encode('utf-8'))
'a\\xc3\\xa9\\xc3\\xa8'
"""
return unidecode(s) if type(s) == unicode else s
def iunaccent(s):
"""
Removes diacritics and case.
>>> iunaccent(u'BÉPO forever')
'bepo forever'
"""
return remove_diacritics(s).lower()
tokenize_space_re = re.compile(r'\s+')
def tokenize(l):
"""
A (very very simple) tokenizer.
>>> tokenize(u'Hello world!')
[u'Hello', u'world!']
>>> tokenize(u'99\\tbottles\\nof beeron \\tThe Wall')
[u'99', u'bottles', u'of', u'beeron', u'The', u'Wall']
"""
return tokenize_space_re.split(l)
def maybe_recapitalize_title(title):
"""
Recapitalize a title if it is mostly uppercase
(number of uppercase letters > number of lowercase letters)
>>> maybe_recapitalize_title(u'THIS IS CALLED SCREAMING')
u'This Is Called Screaming'
>>> maybe_recapitalize_title(u'This is just a normal title')
u'This is just a normal title'
>>> maybe_recapitalize_title(u'THIS IS JUST QUITE Awkward')
u'THIS IS JUST QUITE Awkward'
"""
nb_upper, nb_lower = 0, 0
for letter in title:
if letter.isupper():
nb_upper += 1
elif letter.islower():
nb_lower += 1
if nb_upper > nb_lower:
return titlecase(title)
else:
return title
## HTML sanitizing for the title
overescaped_re = re.compile(r'&amp;#(\d+);')
unicode4_re = re.compile(r'(\\u[0-9A-Z]{4})(?![0-9A-Z])')
whitespace_re = re.compile(r'\s+')
html_cleaner = Cleaner()
html_cleaner.allow_tags = ['sub','sup','b','span']
html_cleaner.remove_unknown_tags = False
html_killer = Cleaner()
html_killer.allow_tags = ['div']
html_killer.remove_unknown_tags = False
latexmath_re = re.compile(r'\$(\S[^$]*?\S|\S)\$')
def remove_latex_math_dollars(string):
"""
Removes LaTeX dollar tags.
>>> remove_latex_math_dollars(u'This is $\\\\beta$-reduction explained')
u'This is \\\\beta-reduction explained'
>>> remove_latex_math_dollars(u'Compare $\\\\frac{2}{3}$ to $\\\\pi$')
u'Compare \\\\frac{2}{3} to \\\\pi'
>>> remove_latex_math_dollars(u'Click here to win $100')
u'Click here to win $100'
>>> remove_latex_math_dollars(u'What do you prefer, $50 or $100?')
u'What do you prefer, $50 or $100?'
"""
return latexmath_re.sub(r'\1', string)
latex_command_re = re.compile(r'(?P<command>\\([a-zA-Z]+|[.=\'\`"])({[^}]*})*)(?P<letter>[a-zA-Z])?')
def unescape_latex(s):
"""
Replaces LaTeX symbols by their unicode counterparts using
the `unicode_tex` package.
>>> unescape_latex(u'the $\\\\alpha$-rays of $\\\\Sigma$-algebras')
u'the $\\u03b1$-rays of $\\u03a3$-algebras'
    >>> unescape_latex(u'$\\\\textit{K}$ -trivial')
    u'$\\\\textit{K}$ -trivial'
"""
def conditional_replace(fragment):
cmd = fragment.group('command')
letter = fragment.group('letter') or ''
rep = unicode_tex.tex_to_unicode_map.get(cmd) or cmd
# We inverse the order to handle accents.
if cmd == r"\'" or cmd == r"\`":
# We normalize back to the normal form to get only one unicode character.
return unicodedata.normalize('NFC', letter + rep)
else:
# Let's just concat.
return rep + letter
return latex_command_re.sub(conditional_replace, s)
latex_one_character_braces_re = re.compile(r'(^|(^|[^\\])\b(\w+)){(.)}', re.UNICODE)
latex_full_line_braces_re = re.compile(r'^{(.*)}$')
latex_word_braces_re = re.compile(r'(^|\s){(\w+)}($|\s)', re.UNICODE)
def remove_latex_braces(s):
"""
Removes spurious braces such as in "Th{é}odore" or "a {CADE} conference"
This should be run *after* unescape_latex
>>> remove_latex_braces(u'Th{é}odore')
u'Th\\xe9odore'
>>> remove_latex_braces(u'the {CADE} conference')
u'the CADE conference'
>>> remove_latex_braces(u'consider 2^{a+b}')
u'consider 2^{a+b}'
>>> remove_latex_braces(u'{why these braces?}')
u'why these braces?'
"""
s = latex_full_line_braces_re.sub(r'\1', s)
s = latex_word_braces_re.sub(r'\1\2\3', s)
s = latex_one_character_braces_re.sub(r'\1\4', s)
s = latex_one_character_braces_re.sub(r'\1\4', s)
s = latex_one_character_braces_re.sub(r'\1\4', s)
return s
def sanitize_html(s):
"""
Removes most HTML tags, keeping the harmless ones.
This also renders some LaTeX characters with `unescape_latex`,
fixes overescaped HTML characters, and a few other fixes.
>>> sanitize_html('My title<sub>is</sub><a href="http://dissem.in"><sup>nice</sup></a>')
u'My title<sub>is</sub><sup>nice</sup>'
>>> sanitize_html('$\\\\alpha$-conversion')
u'$\\u03b1$-conversion'
>>> sanitize_html('$$\\\\eta + \\\\omega$$')
u'$\\u03b7 + \\u03c9$'
"""
s = overescaped_re.sub(r'&#\1;', s)
s = unicode4_re.sub(lambda x: x.group(1).decode('unicode-escape'), s)
s = whitespace_re.sub(r' ', s)
s = unescape_latex(s)
s = kill_double_dollars(s)
orig = html_cleaner.clean_html('<span>'+s+'</span>')
return orig[6:-7] # We cut the <span />
def kill_html(s):
"""
Removes every tag except <div> (but there are no
<div> in titles as sanitize_html removes them)
>>> kill_html('My title<sub>is</sub><a href="http://dissem.in"><sup>nice</sup> </a>')
u'My titleisnice'
"""
orig = html_killer.clean_html('<div>'+s+'</div>')
return orig[5:-6].strip() # We cut the <div />
latex_double_dollar_re = re.compile(r'\$\$([^\$]*?)\$\$')
def kill_double_dollars(s):
"""
Removes double dollars (they generate line breaks with MathJax)
This is included in the sanitize_html function.
>>> kill_double_dollars('This equation $$\\\\mathrm{P} = \\\\mathrm{NP}$$ breaks my design')
u'This equation $\\\\mathrm{P} = \\\\mathrm{NP}$ breaks my design'
"""
s = latex_double_dollar_re.sub(r'$\1$', s)
return s
def urlize(val):
"""
Ensures a would-be URL actually starts with "http://" or "https://".
:param val: the URL
:returns: the cleaned URL
>>> urlize(u'gnu.org')
u'http://gnu.org'
>>> urlize(None) is None
True
>>> urlize(u'https://gnu.org')
u'https://gnu.org'
"""
if val and not val.startswith('http://') and not val.startswith('https://'):
val = 'http://'+val
return val
#### JSON utilities !
def jpath(path, js, default=None):
"""
XPath for JSON!
:param path: a list of keys to follow in the tree of dicts, written in a string,
separated by forward slashes
:param default: the default value to return when the key is not found
>>> jpath(u'message/items', {u'message':{u'items':u'hello'}})
u'hello'
"""
def _walk(lst, js):
if js is None:
return default
if lst == []:
return js
else:
return _walk(lst[1:], js.get(lst[0],{} if len(lst) > 1 else default))
r = _walk(path.split('/'), js)
return r
def remove_nones(dct):
"""
Return a dict, without the None values
>>> remove_nones({u'orcid':None,u'wtf':u'pl'})
{u'wtf': u'pl'}
>>> remove_nones({u'orcid':u'blah',u'hey':u'you'})
{u'orcid': u'blah', u'hey': u'you'}
>>> remove_nones({None:1})
{None: 1}
"""
return dict(filter(lambda (k,v): v is not None, dct.items()))
### Partial date representation
def try_date(year, month, day):
try:
return datetime.date(year=year, month=month, day=day)
except ValueError:
return None
def parse_int(val, default):
"""
Returns an int or a default value if parsing the int failed.
>>> parse_int(90, None)
90
>>> parse_int(None, 90)
90
>>> parse_int('est', 8)
8
"""
try:
return int(val)
except ValueError:
return default
except TypeError:
return default
def date_from_dateparts(dateparts):
"""
Constructs a date from a list of at most 3 integers.
>>> date_from_dateparts([])
datetime.date(1970, 1, 1)
>>> date_from_dateparts([2015])
datetime.date(2015, 1, 1)
>>> date_from_dateparts([2015,02])
datetime.date(2015, 2, 1)
>>> date_from_dateparts([2015,02,16])
datetime.date(2015, 2, 16)
>>> date_from_dateparts([2015,02,35])
Traceback (most recent call last):
...
ValueError: day is out of range for month
"""
year = 1970 if len(dateparts) < 1 else parse_int(dateparts[0], 1970)
month = 01 if len(dateparts) < 2 else parse_int(dateparts[1], 01)
day = 01 if len(dateparts) < 3 else parse_int(dateparts[2], 01)
return datetime.date(year=year, month=month, day=day)
def tolerant_datestamp_to_datetime(datestamp):
"""A datestamp to datetime that's more tolerant of diverse inputs.
Taken from pyoai.
>>> tolerant_datestamp_to_datetime('2016-02-11T18:34:12Z')
datetime.datetime(2016, 2, 11, 18, 34, 12)
>>> tolerant_datestamp_to_datetime('2016-02-11')
datetime.datetime(2016, 2, 11, 0, 0)
>>> tolerant_datestamp_to_datetime('2016-02')
datetime.datetime(2016, 2, 1, 0, 0)
>>> tolerant_datestamp_to_datetime('2016')
datetime.datetime(2016, 1, 1, 0, 0)
>>> tolerant_datestamp_to_datetime('2016-02-11T18:34:12') # Z needed
Traceback (most recent call last):
...
ValueError: Invalid datestamp: 2016-02-11T18:34:12
>>> tolerant_datestamp_to_datetime('2016-02-11-3') # too many numbers
Traceback (most recent call last):
...
ValueError: Invalid datestamp: 2016-02-11-3
>>> tolerant_datestamp_to_datetime('2016-02-11T18:37:09:38') # too many numbers
Traceback (most recent call last):
...
ValueError: Invalid datestamp: 2016-02-11T18:37:09:38
"""
splitted = datestamp.split('T')
if len(splitted) == 2:
d, t = splitted
# if no Z is present, raise error
if t[-1] != 'Z':
raise ValueError("Invalid datestamp: "+str(datestamp))
# split off Z at the end
t = t[:-1]
else:
d = splitted[0]
t = '00:00:00'
d_splitted = d.split('-')
if len(d_splitted) == 3:
YYYY, MM, DD = d_splitted
elif len(d_splitted) == 2:
YYYY, MM = d_splitted
DD = '01'
elif len(d_splitted) == 1:
YYYY = d_splitted[0]
MM = '01'
DD = '01'
else:
raise ValueError("Invalid datestamp: "+str(datestamp))
t_splitted = t.split(':')
if len(t_splitted) == 3:
hh, mm, ss = t_splitted
else:
raise ValueError("Invalid datestamp: "+str(datestamp))
return datetime.datetime(
int(YYYY), int(MM), int(DD), int(hh), int(mm), int(ss))
def datetime_to_date(dt):
"""
Converts a datetime or date object to a date object.
"""
if type(dt) == datetime.datetime:
return dt.date()
elif type(dt) == datetime.date:
return dt
raise ValueError("Invalid date or datetime")
### ORCiD utilities ###
orcid_re = re.compile(r'^(http://orcid.org/)?([0-9]{4}-[0-9]{4}-[0-9]{4}-[0-9]{3}[X0-9])$')
def validate_orcid(orcid):
"""
:returns: a cleaned ORCiD if the argument represents a valid ORCiD, None otherwise
This does not check that the id actually exists on orcid.org,
only checks that it is syntactically valid (including the checksum).
See http://support.orcid.org/knowledgebase/articles/116780-structure-of-the-orcid-identifier
See the test suite for a more complete set of examples
>>> validate_orcid(u' 0000-0001-8633-6098\\n')
u'0000-0001-8633-6098'
"""
if not orcid:
return
try:
orcid = unicode(orcid).strip()
    except (ValueError, TypeError):
return
match = orcid_re.match(orcid)
if not match:
return
orcid = match.group(2)
nums = orcid.replace('-','')
total = 0
for i in range(15):
total = (total + int(nums[i])) * 2
checkdigit = (12 - (total % 11)) % 11
checkchar = str(checkdigit) if checkdigit != 10 else 'X'
if nums[-1] == checkchar:
return orcid
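# Worked example of the mod 11-2 checksum above (the ISO 7064 scheme ORCID
# documents), for the valid identifier 0000-0001-8633-6098: folding its
# first 15 digits through total = (total + digit) * 2 yields total = 1874,
# so checkdigit = (12 - 1874 % 11) % 11 = (12 - 4) % 11 = 8, matching the
# final character of the identifier.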
def affiliation_is_greater(a, b):
"""
Compares to affiliation values. Returns True
when the first contains more information than
the second
>>> affiliation_is_greater(None, None)
False
>>> affiliation_is_greater(None, 'UPenn')
False
>>> affiliation_is_greater('UPenn', None)
True
>>> affiliation_is_greater('0000-0001-8633-6098', 'Ecole normale superieure, Paris')
True
>>> affiliation_is_greater('Ecole normale superieure', 'Upenn')
True
"""
if a is None:
return False
if b is None:
return True
oa, ob = validate_orcid(a), validate_orcid(b)
if oa and not ob:
return True
if ob and not oa:
return False
return len(a) > len(b)
# List utilities
def index_of(elem, choices):
"""
Returns the index of elem (understood as a code) in the list of choices,
where choices are expected to be pairs of (code,verbose_description).
>>> index_of(42, [])
0
>>> index_of('ok', [('ok','This is ok'),('nok','This is definitely not OK')])
0
>>> index_of('nok', [('ok','This is ok'),('nok','This is definitely not OK')])
1
"""
for idx, (code, lbl) in enumerate(choices):
if code == elem:
return idx
else:
return 0
| agpl-3.0 | 3,605,507,531,110,764,000 | 29.530726 | 101 | 0.604636 | false |
jeremiahyan/odoo | addons/payment_payulatam/tests/test_payulatam.py | 1 | 6895 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
from freezegun import freeze_time
from odoo.exceptions import ValidationError
from odoo.fields import Command
from odoo.tests import tagged
from odoo.tools import mute_logger
from .common import PayULatamCommon
from ..controllers.main import PayuLatamController
from ..models.payment_acquirer import SUPPORTED_CURRENCIES
@tagged('post_install', '-at_install')
class PayULatamTest(PayULatamCommon):
def test_compatibility_with_supported_currencies(self):
""" Test that the PayULatam acquirer is compatible with all supported currencies. """
for supported_currency_code in SUPPORTED_CURRENCIES:
supported_currency = self._prepare_currency(supported_currency_code)
compatible_acquirers = self.env['payment.acquirer']._get_compatible_acquirers(
self.company.id, self.partner.id, currency_id=supported_currency.id,
)
self.assertIn(self.payulatam, compatible_acquirers)
def test_incompatibility_with_unsupported_currency(self):
""" Test that the PayULatam acquirer is not compatible with an unsupported currency. """
compatible_acquirers = self.env['payment.acquirer']._get_compatible_acquirers(
self.company.id, self.partner.id, currency_id=self.currency_euro.id,
)
self.assertNotIn(self.payulatam, compatible_acquirers)
@freeze_time('2011-11-02 12:00:21') # Freeze time for consistent singularization behavior
def test_reference_is_singularized(self):
""" Test singularization of reference prefixes. """
reference = self.env['payment.transaction']._compute_reference(self.payulatam.provider)
self.assertEqual(
reference, 'tx-20111102120021', "transaction reference was not correctly singularized"
)
@freeze_time('2011-11-02 12:00:21') # Freeze time for consistent singularization behavior
def test_reference_is_computed_based_on_document_name(self):
""" Test computation of reference prefixes based on the provided invoice. """
invoice = self.env['account.move'].create({})
reference = self.env['payment.transaction']._compute_reference(
self.payulatam.provider, invoice_ids=[Command.set([invoice.id])]
)
self.assertEqual(reference, 'MISC/2011/11/0001-20111102120021')
def test_redirect_form_values(self):
""" Test the values of the redirect form inputs. """
tx = self.create_transaction(flow='redirect')
with mute_logger('odoo.addons.payment.models.payment_transaction'):
processing_values = tx._get_processing_values()
form_info = self._extract_values_from_html_form(processing_values['redirect_form_html'])
expected_values = {
'merchantId': 'dummy',
'accountId': 'dummy',
'description': self.reference,
'referenceCode': self.reference,
'amount': str(self.amount),
'currency': self.currency.name,
'tax': str(0),
'taxReturnBase': str(0),
'buyerEmail': self.partner.email,
'buyerFullName': self.partner.name,
'responseUrl': self._build_url(PayuLatamController._return_url),
'test': str(1), # testing is always performed in test mode
}
expected_values['signature'] = self.payulatam._payulatam_generate_sign(
expected_values, incoming=False
)
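        # The expected signature is recomputed through the acquirer's own
        # signing helper (incoming=False, i.e. the variant used for outgoing
        # redirect requests), so the assertion below checks that the rendered
        # form is self-consistent rather than comparing a hard-coded hash.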
self.assertEqual(
form_info['action'], 'https://sandbox.checkout.payulatam.com/ppp-web-gateway-payu/'
)
self.assertDictEqual(form_info['inputs'], expected_values)
def test_feedback_processing(self):
# typical data posted by payulatam after client has successfully paid
payulatam_post_data = {
'installmentsNumber': '1',
'lapPaymentMethod': 'VISA',
'description': self.reference,
'currency': self.currency.name,
'extra2': '',
'lng': 'es',
'transactionState': '7',
'polPaymentMethod': '211',
'pseCycle': '',
'pseBank': '',
'referenceCode': self.reference,
'reference_pol': '844164756',
'signature': 'f3ea3a7414a56d8153c425ab7e2f69d7', # Update me
'pseReference3': '',
'buyerEmail': '[email protected]',
'lapResponseCode': 'PENDING_TRANSACTION_CONFIRMATION',
'pseReference2': '',
'cus': '',
'orderLanguage': 'es',
'TX_VALUE': str(self.amount),
'risk': '',
'trazabilityCode': '',
'extra3': '',
'pseReference1': '',
'polTransactionState': '14',
'polResponseCode': '25',
'merchant_name': 'Test PayU Test comercio',
'merchant_url': 'http://pruebaslapv.xtrweb.com',
'extra1': '/shop/payment/validate',
'message': 'PENDING',
'lapPaymentMethodType': 'CARD',
'polPaymentMethodType': '7',
'telephone': '7512354',
'merchantId': 'dummy',
'transactionId': 'b232989a-4aa8-42d1-bace-153236eee791',
'authorizationCode': '',
'lapTransactionState': 'PENDING',
'TX_TAX': '.00',
'merchant_address': 'Av 123 Calle 12'
}
# should raise error about unknown tx
with self.assertRaises(ValidationError):
self.env['payment.transaction']._handle_feedback_data('payulatam', payulatam_post_data)
tx = self.create_transaction(flow='redirect')
# Validate the transaction ('pending' state)
self.env['payment.transaction']._handle_feedback_data('payulatam', payulatam_post_data)
self.assertEqual(tx.state, 'pending', 'Payulatam: wrong state after receiving a valid pending notification')
self.assertEqual(tx.state_message, payulatam_post_data['message'], 'Payulatam: wrong state message after receiving a valid pending notification')
self.assertEqual(tx.acquirer_reference, 'b232989a-4aa8-42d1-bace-153236eee791', 'Payulatam: wrong txn_id after receiving a valid pending notification')
# Reset the transaction
tx.write({
'state': 'draft',
'acquirer_reference': False})
# Validate the transaction ('approved' state)
payulatam_post_data['lapTransactionState'] = 'APPROVED'
self.env['payment.transaction']._handle_feedback_data('payulatam', payulatam_post_data)
self.assertEqual(tx.state, 'done', 'Payulatam: wrong state after receiving a valid pending notification')
self.assertEqual(tx.acquirer_reference, 'b232989a-4aa8-42d1-bace-153236eee791', 'Payulatam: wrong txn_id after receiving a valid pending notification')
| gpl-3.0 | -6,300,182,012,533,313,000 | 46.226027 | 159 | 0.634808 | false |
marcodebe/dicomecg_convert | setup.py | 1 | 1141 | #!/usr/bin/env python
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
with open("VERSION", "r") as fh:
version = fh.read().strip('\n')
setuptools.setup(
name='dicom-ecg-plot',
version=version,
description='Plot Dicom ECG Waveforms',
long_description=long_description,
long_description_content_type="text/markdown",
author='Marco De Benedetto',
author_email='[email protected]',
url='https://github.com/marcodebe/dicomecg_convert',
packages=setuptools.find_packages(),
scripts=['dicom-ecg-plot'],
install_requires=[
'pydicom>=1.0.1',
'numpy',
'matplotlib',
'scipy',
'docopt',
'requests',
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Medical Science Apps.',
'Intended Audience :: Healthcare Industry',
],
)
| mit | -8,328,571,697,448,325,000 | 29.026316 | 71 | 0.546012 | false |
Rhoana/membrane_cnn | assess_thresh_smooth.py | 1 | 2798 | import mahotas
import scipy.ndimage
import scipy.misc
import numpy as np
import gzip
import cPickle
import glob
import os
import h5py
#param_path = 'D:/dev/Rhoana/membrane_cnn/results/good3/'
param_path = 'D:/dev/Rhoana/membrane_cnn/results/stumpin/'
param_files = glob.glob(param_path + "*.h5")
target_boundaries = mahotas.imread(param_path + 'boundaries.png') > 0
offset_max = 32
target_boundaries = target_boundaries[offset_max:-offset_max,offset_max:-offset_max]
for param_file in param_files:
if param_file.find('.ot.h5') != -1:
continue
print param_file
#net_output_file = param_file.replace('.h5','\\0005_classify_output_layer6_0.tif')
net_output_file = param_file.replace('.h5','\\0100_classify_output_layer6_0.tif')
net_output = mahotas.imread(net_output_file)
net_output = np.float32(net_output) / np.max(net_output)
offset_file = param_file.replace('.h5', '.ot.h5')
h5off = h5py.File(offset_file, 'r')
best_offset = h5off['/best_offset'][...]
h5off.close()
xoffset, yoffset = best_offset
best_score = 0
best_thresh = 0
best_sigma = 0
best_result = None
offset_output = np.roll(net_output, xoffset, axis=0)
offset_output = np.roll(offset_output, yoffset, axis=1)
#Crop
offset_output = offset_output[offset_max:-offset_max,offset_max:-offset_max]
    for smooth_sigma in np.arange(0, 3, 0.1):
        smooth_output = scipy.ndimage.filters.gaussian_filter(offset_output, smooth_sigma)
        for thresh in np.arange(0.1, 1, 0.1):
result = smooth_output > thresh
if np.sum(result) == 0:
continue
true_positives = np.sum(np.logical_and(result == 0, target_boundaries == 0))
false_positives = np.sum(np.logical_and(result == 0, target_boundaries > 0))
true_negatives = np.sum(np.logical_and(result > 0, target_boundaries > 0))
false_negatives = np.sum(np.logical_and(result > 0, target_boundaries == 0))
precision = float(true_positives) / float(true_positives + false_positives)
recall = float(true_positives) / float(true_positives + false_negatives)
Fscore = 2 * precision * recall / (precision + recall)
if Fscore > best_score:
best_score = Fscore
best_thresh = thresh
best_sigma = smooth_sigma
best_result = result
print 'Best score of {0} for sigma {1}, thresh {2}.'.format(best_score, best_sigma, best_thresh)
output_file = param_file.replace('.h5', '.sm.ot.h5')
h5out = h5py.File(output_file, 'w')
h5out['/best_score'] = best_score
h5out['/best_offset'] = best_offset
h5out['/best_thresh'] = best_thresh
h5out['/best_sigma'] = best_sigma
h5out.close()
| bsd-3-clause | 1,006,724,249,235,405,300 | 31.534884 | 100 | 0.632595 | false |
teknolab/django.org.tr | apps/events/migrations/0001_initial.py | 1 | 5772 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Event'
db.create_table('events_event', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=250)),
('teaser', self.gf('django.db.models.fields.CharField')(max_length=250)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('start', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('end', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('locations', self.gf('django.db.models.fields.TextField')(max_length=250, blank=True)),
('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, db_index=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('events', ['Event'])
def backwards(self, orm):
# Deleting model 'Event'
db.delete_table('events_event')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'ordering': "('-start',)", 'object_name': 'Event'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.TextField', [], {'max_length': '250', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['events']
| bsd-3-clause | -6,493,933,952,741,872,000 | 65.344828 | 182 | 0.560464 | false |
Epoptes/epoptes | epoptes/ui/common.py | 1 | 1684 | # This file is part of Epoptes, http://epoptes.org
# Copyright 2018 the Epoptes team, see AUTHORS.
# SPDX-License-Identifier: GPL-3.0-or-later
"""
Define required gi package versions in a common place, and install gettext.
Rationale:
gi requires something like:
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
This conflicts with https://www.python.org/dev/peps/pep-0008/#imports
and triggers pycodestyle's "E402 module level import not at top of file".
The following is a bit better:
import sys # Import standard library modules
import twisted # Import third party modules
from epoptes.ui.common import gettext as _ # Import local modules
from gi.repository import Gtk, Gdk
That last line "only" triggers pylint's "wrong-import-position" once.
"""
import errno
import gettext
import locale
import os
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
gi.require_version('Notify', '0.7')
gettext.textdomain('epoptes')
locale.textdomain('epoptes')
gettext = gettext.gettext
def locate_resource(filename, absolute=True):
"""Search for filename in some known paths."""
# Use recursion for absolute instead of multiple ifs:
if absolute:
return os.path.abspath(locate_resource(filename, False))
test = filename
if os.path.isfile(test):
return test
test = "/usr/share/epoptes/" + os.path.basename(filename)
if os.path.isfile(test):
return test
test = "/usr/share/epoptes/images/" + os.path.basename(filename)
if os.path.isfile(test):
return test
raise FileNotFoundError(
errno.ENOENT, os.strerror(errno.ENOENT), filename)
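# Hypothetical usage (the file name is illustrative):
#
#     icon = locate_resource('images/epoptes.svg')
#
# tries ./images/epoptes.svg first, then falls back by basename to the
# /usr/share/epoptes locations, and raises FileNotFoundError if no
# candidate exists.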
| gpl-3.0 | -5,073,299,151,335,349,000 | 30.773585 | 75 | 0.712589 | false |
mago1chi/cTPR | calc_raw_lda_result.py | 1 | 7204 | import psycopg2
import os, sys
TOPIC_NUM_LIST = [30, 100, 200, 500]
if len(sys.argv) == 1:
  print("Enter the number of topics.")
  exit()
topic_num = int(sys.argv[1])
if topic_num not in TOPIC_NUM_LIST:
  print("The allowed numbers of topics are: ", end="")
  for each in TOPIC_NUM_LIST:
    print("{0} ".format(each), end="")
  print(".")
exit()
DBPATH = "dbname=image_tagging host=localhost user=postgres"
con = psycopg2.connect(DBPATH)
concur = con.cursor()
concur.execute('''select distinct a.tweet_id from answer as a, answer_all as b
where a.tweet_id=b.tweet_id''')
tweet_id_list = [x for x in map(lambda y: y[0], concur.fetchall())]
lda_score = {}
except_score = {}
histgram_dic = {}
query = "select distinct tag from exp_rawlda{0} where tweet_id=%s".format(topic_num)
for each_tweet_id in tweet_id_list:
concur.execute(query, (each_tweet_id,))
tag_set = { x for x in map(lambda y: y[0], concur.fetchall()) }
concur.execute('''select distinct tag from answer where tweet_id=%s''', (each_tweet_id,))
except_tag_set = { x for x in map(lambda y: y[0], concur.fetchall()) } - tag_set
good_num = 0
bad_num = 0
for each_tag in tag_set:
concur.execute('''select score from answer
where tweet_id=%s and tag=%s''', (each_tweet_id, each_tag))
score = concur.fetchone()[0]
    if score == 1:
good_num += 1
else:
bad_num += 1
if not bad_num in histgram_dic.keys():
histgram_dic[bad_num] = 1
else:
histgram_dic[bad_num] += 1
except_good_num = 0
except_bad_num = 0
for each_tag in except_tag_set:
concur.execute('''select score from answer
where tweet_id=%s and tag=%s''', (each_tweet_id, each_tag))
score = concur.fetchone()[0]
    if score == 1:
except_good_num += 1
else:
except_bad_num += 1
lda_score[each_tweet_id] = {'good_num': good_num, 'bad_num': bad_num}
except_score[each_tweet_id] = {'good_num': except_good_num, 'bad_num': except_bad_num}
good_rate_sum = 0
good_only_num = 0
bad_only_num = 0
good_sum = 0
bad_sum = 0
zero_num = 0
for each_tweet_id, value in lda_score.items():
each_good_num = value['good_num']
each_bad_num = value['bad_num']
good_sum += each_good_num
bad_sum += each_bad_num
  if each_good_num > 0 and each_bad_num == 0:
    good_only_num += 1
  if each_good_num == 0 and each_bad_num > 0:
bad_only_num += 1
if each_good_num + each_bad_num == 0:
zero_num += 1
else:
good_rate_sum += each_good_num / (each_good_num + each_bad_num)
good_rate = round(good_rate_sum / (len(lda_score) - zero_num), 3)
total_good_rate = round(good_sum / (good_sum + bad_sum), 3)
except_good_sum = 0
except_bad_sum = 0
except_bad_rate_sum = 0
zero_num = 0
for each_tweet_id, value in except_score.items():
each_good_num = value['good_num']
each_bad_num = value['bad_num']
except_good_sum += each_good_num
except_bad_sum += each_bad_num
if each_good_num + each_bad_num is 0:
zero_num += 1
else:
except_bad_rate_sum += each_bad_num / (each_good_num + each_bad_num)
except_bad_rate = round(except_bad_rate_sum / (len(except_score)-zero_num), 3)
remain_bad_rate = round(bad_sum / (bad_sum + except_bad_sum), 3)
total_tag_num = good_sum + bad_sum + except_good_sum + except_bad_sum
good_only_rate = round(good_only_num / len(lda_score), 3)
good_and_bad_rate = round((len(lda_score) - bad_only_num - good_only_num) / len(lda_score), 3)
bad_only_rate = 1.0 - good_only_rate - good_and_bad_rate
print('''Ratio of images with only correct tags: {0} (of {1} images)
Ratio of images with both correct and noise tags: {2}
Ratio of images with only noise tags: {3}
Average per-image correct-tag ratio: {4}
Assigned tags that were correct: {5} / {6} = {7}
Noise tags that could not be removed: {8} / {9} = {10}
Total number of tags: {11}
'''.format(good_only_rate, len(lda_score), good_and_bad_rate, bad_only_rate, good_rate, good_sum, good_sum+bad_sum, \
total_good_rate, bad_sum, bad_sum+except_bad_sum, remain_bad_rate, total_tag_num))
good_recall_rate_sum = 0
fmeasure_sum = 0
zero_num = 0
for each_tweet_id in tweet_id_list:
each_good_num = lda_score[each_tweet_id]['good_num']
each_bad_num = lda_score[each_tweet_id]['bad_num']
each_except_good_num = except_score[each_tweet_id]['good_num']
  if each_good_num + each_except_good_num == 0:
zero_num += 1
else:
if each_good_num + each_bad_num != 0:
precision = each_good_num / (each_good_num + each_bad_num)
else:
precision = 0
if each_good_num + each_except_good_num != 0:
recall = each_good_num / (each_good_num + each_except_good_num)
else:
recall = 0
good_recall_rate_sum += recall
if precision + recall != 0:
fmeasure_sum += 2*precision*recall / (precision + recall)
ave_recall_rate = round(good_recall_rate_sum / (len(lda_score)-zero_num), 3)
total_recall = round(good_sum / (good_sum+except_good_sum), 3)
good_fmeasure = round(2*total_good_rate*total_recall / (total_good_rate + total_recall), 3)
ave_good_fmeasure = round(fmeasure_sum / (len(tweet_id_list)-zero_num), 3)
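# The scores above follow the standard definitions:
#   precision = TP / (TP + FP),  recall = TP / (TP + FN),
#   F = 2 * precision * recall / (precision + recall)
# e.g. precision 0.5 with recall 0.25 gives F = 0.25 / 0.75 = 1/3.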
print('''Correct tags
  Overall precision: {0}
  Overall recall: {1}
  F-measure: {2}
  Average precision: {3}
  Average recall: {4}
  F-measure (average): {5}
'''.format(total_good_rate, total_recall, good_fmeasure, good_rate, ave_recall_rate, ave_good_fmeasure))
except_bad_recall_rate_sum = 0
removed_fmeasure_sum = 0
zero_num = 0
for each_tweet_id in tweet_id_list:
each_bad_num = lda_score[each_tweet_id]['bad_num']
each_except_good_num = except_score[each_tweet_id]['good_num']
each_except_bad_num = except_score[each_tweet_id]['bad_num']
  if each_bad_num + each_except_bad_num == 0:
zero_num += 1
else:
if each_except_good_num + each_except_bad_num != 0:
precision = each_except_bad_num / (each_except_good_num + each_except_bad_num)
else:
precision = 0
if each_bad_num + each_except_bad_num != 0:
recall = each_except_bad_num / (each_bad_num + each_except_bad_num)
else:
recall = 0
except_bad_recall_rate_sum += recall
if precision + recall != 0:
removed_fmeasure_sum += 2*precision*recall / (precision + recall)
ave_bad_recall_rate = round(except_bad_recall_rate_sum / (len(lda_score)-zero_num), 3)
removed_bad_precision = round(except_bad_sum / (except_good_sum + except_bad_sum), 3)
removed_bad_recall = round(except_bad_sum / (bad_sum + except_bad_sum), 3)
removed_bad_fmeasure = round(2*removed_bad_precision*removed_bad_recall / (removed_bad_precision + removed_bad_recall), 3)
ave_removed_bad_fmeasure = round(removed_fmeasure_sum / (len(tweet_id_list)-zero_num), 3)
print('''Removed noise tags
  Overall precision: {0}
  Overall recall: {1}
  F-measure: {2}
  Average precision: {3}
  Average recall: {4}
  F-measure (average): {5}
'''.format(removed_bad_precision, removed_bad_recall, removed_bad_fmeasure, except_bad_rate, ave_bad_recall_rate, ave_removed_bad_fmeasure))
print("提案手法適用後のノイズ数分布(トピック数:{0})".format(topic_num))
print("ノイズ数,画像数")
for k, v in histgram_dic.items():
print("{0},{1}".format(k, v))
| gpl-2.0 | 9,060,584,416,003,994,000 | 28.017094 | 140 | 0.644772 | false |
mfherbst/spack | lib/spack/llnl/util/filesystem.py | 1 | 37492 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import collections
import errno
import hashlib
import fileinput
import glob
import grp
import numbers
import os
import pwd
import re
import shutil
import stat
import sys
import tempfile
from contextlib import contextmanager
import six
from llnl.util import tty
from llnl.util.lang import dedupe
from spack.util.executable import Executable
__all__ = [
'FileFilter',
'FileList',
'HeaderList',
'LibraryList',
'ancestor',
'can_access',
'change_sed_delimiter',
'copy_mode',
'filter_file',
'find',
'find_headers',
'find_libraries',
'find_system_libraries',
'fix_darwin_install_name',
'force_remove',
'force_symlink',
'copy',
'install',
'copy_tree',
'install_tree',
'is_exe',
'join_path',
'mkdirp',
'remove_dead_links',
'remove_if_dead_link',
'remove_linked_tree',
'set_executable',
'set_install_permissions',
'touch',
'touchp',
'traverse_tree',
'unset_executable_mode',
'working_dir'
]
def path_contains_subdirectory(path, root):
norm_root = os.path.abspath(root).rstrip(os.path.sep) + os.path.sep
norm_path = os.path.abspath(path).rstrip(os.path.sep) + os.path.sep
return norm_path.startswith(norm_root)
def same_path(path1, path2):
norm1 = os.path.abspath(path1).rstrip(os.path.sep)
norm2 = os.path.abspath(path2).rstrip(os.path.sep)
return norm1 == norm2
def filter_file(regex, repl, *filenames, **kwargs):
r"""Like sed, but uses python regular expressions.
Filters every line of each file through regex and replaces the file
with a filtered version. Preserves mode of filtered files.
As with re.sub, ``repl`` can be either a string or a callable.
If it is a callable, it is passed the match object and should
return a suitable replacement string. If it is a string, it
can contain ``\1``, ``\2``, etc. to represent back-substitution
as sed would allow.
Parameters:
regex (str): The regular expression to search for
repl (str): The string to replace matches with
*filenames: One or more files to search and replace
Keyword Arguments:
        string (bool): Treat regex as a plain string. Default is False
backup (bool): Make backup file(s) suffixed with ``~``. Default is True
ignore_absent (bool): Ignore any files that don't exist.
Default is False
"""
string = kwargs.get('string', False)
backup = kwargs.get('backup', True)
ignore_absent = kwargs.get('ignore_absent', False)
# Allow strings to use \1, \2, etc. for replacement, like sed
if not callable(repl):
unescaped = repl.replace(r'\\', '\\')
def replace_groups_with_groupid(m):
def groupid_to_group(x):
return m.group(int(x.group(1)))
return re.sub(r'\\([1-9])', groupid_to_group, unescaped)
repl = replace_groups_with_groupid
if string:
regex = re.escape(regex)
for filename in filenames:
msg = 'FILTER FILE: {0} [replacing "{1}"]'
tty.debug(msg.format(filename, regex))
backup_filename = filename + "~"
if ignore_absent and not os.path.exists(filename):
msg = 'FILTER FILE: file "{0}" not found. Skipping to next file.'
tty.debug(msg.format(filename))
continue
# Create backup file. Don't overwrite an existing backup
# file in case this file is being filtered multiple times.
if not os.path.exists(backup_filename):
shutil.copy(filename, backup_filename)
try:
for line in fileinput.input(filename, inplace=True):
print(re.sub(regex, repl, line.rstrip('\n')))
except BaseException:
# clean up the original file on failure.
shutil.move(backup_filename, filename)
raise
finally:
if not backup and os.path.exists(backup_filename):
os.remove(backup_filename)
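# A minimal usage sketch (editor's addition; file names are hypothetical):
def _example_filter_file():
    # Rewrite a compiler assignment using a regular expression.
    filter_file(r'CC\s*=\s*gcc', 'CC = cc', 'Makefile')
    # Treat the pattern as a literal string and skip the ``~`` backup.
    filter_file('@PREFIX@', '/opt/prefix', 'pkg.pc', string=True, backup=False)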
class FileFilter(object):
"""Convenience class for calling ``filter_file`` a lot."""
def __init__(self, *filenames):
self.filenames = filenames
def filter(self, regex, repl, **kwargs):
return filter_file(regex, repl, *self.filenames, **kwargs)
def change_sed_delimiter(old_delim, new_delim, *filenames):
"""Find all sed search/replace commands and change the delimiter.
e.g., if the file contains seds that look like ``'s///'``, you can
call ``change_sed_delimiter('/', '@', file)`` to change the
delimiter to ``'@'``.
Note that this routine will fail if the delimiter is ``'`` or ``"``.
Handling those is left for future work.
Parameters:
old_delim (str): The delimiter to search for
new_delim (str): The delimiter to replace with
*filenames: One or more files to search and replace
"""
assert(len(old_delim) == 1)
assert(len(new_delim) == 1)
# TODO: handle these cases one day?
assert(old_delim != '"')
assert(old_delim != "'")
assert(new_delim != '"')
assert(new_delim != "'")
whole_lines = "^s@([^@]*)@(.*)@[gIp]$"
whole_lines = whole_lines.replace('@', old_delim)
single_quoted = r"'s@((?:\\'|[^@'])*)@((?:\\'|[^'])*)@[gIp]?'"
single_quoted = single_quoted.replace('@', old_delim)
double_quoted = r'"s@((?:\\"|[^@"])*)@((?:\\"|[^"])*)@[gIp]?"'
double_quoted = double_quoted.replace('@', old_delim)
repl = r's@\1@\2@g'
repl = repl.replace('@', new_delim)
for f in filenames:
filter_file(whole_lines, repl, f)
filter_file(single_quoted, "'%s'" % repl, f)
filter_file(double_quoted, '"%s"' % repl, f)
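# A minimal usage sketch (editor's addition; the file name is hypothetical):
def _example_change_sed_delimiter():
    # Rewrite sed commands such as ``s/old/new/`` into ``s|old|new|``.
    change_sed_delimiter('/', '|', 'configure')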
def set_install_permissions(path):
"""Set appropriate permissions on the installed file."""
# If this points to a file maintained in a Spack prefix, it is assumed that
# this function will be invoked on the target. If the file is outside a
# Spack-maintained prefix, the permissions should not be modified.
if os.path.islink(path):
return
if os.path.isdir(path):
os.chmod(path, 0o755)
else:
os.chmod(path, 0o644)
def group_ids(uid=None):
"""Get group ids that a uid is a member of.
Arguments:
uid (int): id of user, or None for current user
Returns:
(list of int): gids of groups the user is a member of
"""
if uid is None:
uid = os.getuid()
user = pwd.getpwuid(uid).pw_name
return [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
def copy_mode(src, dest):
"""Set the mode of dest to that of src unless it is a link.
"""
if os.path.islink(dest):
return
src_mode = os.stat(src).st_mode
dest_mode = os.stat(dest).st_mode
if src_mode & stat.S_IXUSR:
dest_mode |= stat.S_IXUSR
if src_mode & stat.S_IXGRP:
dest_mode |= stat.S_IXGRP
if src_mode & stat.S_IXOTH:
dest_mode |= stat.S_IXOTH
os.chmod(dest, dest_mode)
def unset_executable_mode(path):
mode = os.stat(path).st_mode
mode &= ~stat.S_IXUSR
mode &= ~stat.S_IXGRP
mode &= ~stat.S_IXOTH
os.chmod(path, mode)
def copy(src, dest, _permissions=False):
"""Copies the file *src* to the file or directory *dest*.
If *dest* specifies a directory, the file will be copied into *dest*
using the base filename from *src*.
Parameters:
src (str): the file to copy
dest (str): the destination file or directory
_permissions (bool): for internal use only
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
# Expand dest to its eventual full path if it is a directory.
if os.path.isdir(dest):
dest = join_path(dest, os.path.basename(src))
shutil.copy(src, dest)
if _permissions:
set_install_permissions(dest)
copy_mode(src, dest)
def install(src, dest):
"""Installs the file *src* to the file or directory *dest*.
Same as :py:func:`copy` with the addition of setting proper
permissions on the installed file.
Parameters:
src (str): the file to install
dest (str): the destination file or directory
"""
copy(src, dest, _permissions=True)
def copy_tree(src, dest, symlinks=True, _permissions=False):
"""Recursively copy an entire directory tree rooted at *src*.
If the destination directory *dest* does not already exist, it will
be created as well as missing parent directories.
If *symlinks* is true, symbolic links in the source tree are represented
as symbolic links in the new tree and the metadata of the original links
will be copied as far as the platform allows; if false, the contents and
metadata of the linked files are copied to the new tree.
Parameters:
src (str): the directory to copy
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
_permissions (bool): for internal use only
"""
if _permissions:
tty.debug('Installing {0} to {1}'.format(src, dest))
else:
tty.debug('Copying {0} to {1}'.format(src, dest))
mkdirp(dest)
for s, d in traverse_tree(src, dest, order='pre', follow_nonexisting=True):
if symlinks and os.path.islink(s):
# Note that this won't rewrite absolute links into the old
# root to point at the new root. Should we handle that case?
target = os.readlink(s)
os.symlink(os.path.abspath(target), d)
elif os.path.isdir(s):
mkdirp(d)
else:
shutil.copyfile(s, d)
if _permissions:
set_install_permissions(d)
copy_mode(s, d)
def install_tree(src, dest, symlinks=True):
"""Recursively install an entire directory tree rooted at *src*.
Same as :py:func:`copy_tree` with the addition of setting proper
permissions on the installed files and directories.
Parameters:
src (str): the directory to install
dest (str): the destination directory
symlinks (bool): whether or not to preserve symlinks
"""
copy_tree(src, dest, symlinks, _permissions=True)
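# A minimal usage sketch (editor's addition; all paths are hypothetical):
def _example_install_tree():
    # Mirror a staged tree, keeping symlinks as symlinks ...
    copy_tree('/tmp/stage/share', '/tmp/mirror/share')
    # ... then install it with standard Spack permissions applied.
    install_tree('/tmp/stage', '/opt/pkg', symlinks=True)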
def is_exe(path):
"""True if path is an executable file."""
return os.path.isfile(path) and os.access(path, os.X_OK)
def get_filetype(path_name):
"""
Return the output of file path_name as a string to identify file type.
"""
file = Executable('file')
file.add_default_env('LC_ALL', 'C')
output = file('-b', '-h', '%s' % path_name,
output=str, error=str)
return output.strip()
def mkdirp(*paths):
"""Creates a directory, as well as parent directories if needed."""
for path in paths:
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise e
elif not os.path.isdir(path):
raise OSError(errno.EEXIST, "File already exists", path)
def force_remove(*paths):
"""Remove files without printing errors. Like ``rm -f``, does NOT
remove directories."""
for path in paths:
try:
os.remove(path)
except OSError:
pass
@contextmanager
def working_dir(dirname, **kwargs):
if kwargs.get('create', False):
mkdirp(dirname)
orig_dir = os.getcwd()
os.chdir(dirname)
yield
os.chdir(orig_dir)
@contextmanager
def replace_directory_transaction(directory_name, tmp_root=None):
"""Moves a directory to a temporary space. If the operations executed
within the context manager don't raise an exception, the directory is
deleted. If there is an exception, the move is undone.
Args:
directory_name (path): absolute path of the directory name
tmp_root (path): absolute path of the parent directory where to create
the temporary
Returns:
temporary directory where ``directory_name`` has been moved
"""
# Check the input is indeed a directory with absolute path.
# Raise before anything is done to avoid moving the wrong directory
assert os.path.isdir(directory_name), \
'"directory_name" must be a valid directory'
assert os.path.isabs(directory_name), \
'"directory_name" must contain an absolute path'
directory_basename = os.path.basename(directory_name)
if tmp_root is not None:
assert os.path.isabs(tmp_root)
tmp_dir = tempfile.mkdtemp(dir=tmp_root)
tty.debug('TEMPORARY DIRECTORY CREATED [{0}]'.format(tmp_dir))
shutil.move(src=directory_name, dst=tmp_dir)
tty.debug('DIRECTORY MOVED [src={0}, dest={1}]'.format(
directory_name, tmp_dir
))
try:
yield tmp_dir
except (Exception, KeyboardInterrupt, SystemExit):
# Delete what was there, before copying back the original content
if os.path.exists(directory_name):
shutil.rmtree(directory_name)
shutil.move(
src=os.path.join(tmp_dir, directory_basename),
dst=os.path.dirname(directory_name)
)
tty.debug('DIRECTORY RECOVERED [{0}]'.format(directory_name))
msg = 'the transactional move of "{0}" failed.'
raise RuntimeError(msg.format(directory_name))
else:
# Otherwise delete the temporary directory
shutil.rmtree(tmp_dir)
tty.debug('TEMPORARY DIRECTORY DELETED [{0}]'.format(tmp_dir))
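# A minimal usage sketch (editor's addition; the path is hypothetical). If
# the block raises, the original directory contents are restored:
def _example_replace_directory_transaction():
    with replace_directory_transaction('/opt/pkg'):
        # The old contents are parked in a temporary directory here; build
        # the replacement in place.
        mkdirp('/opt/pkg')
        touch(os.path.join('/opt/pkg', 'marker'))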
def hash_directory(directory):
"""Hashes recursively the content of a directory.
Args:
directory (path): path to a directory to be hashed
Returns:
hash of the directory content
"""
assert os.path.isdir(directory), '"directory" must be a directory!'
md5_hash = hashlib.md5()
# Adapted from https://stackoverflow.com/a/3431835/771663
for root, dirs, files in os.walk(directory):
for name in sorted(files):
filename = os.path.join(root, name)
# TODO: if caching big files becomes an issue, convert this to
# TODO: read in chunks. Currently it's used only for testing
# TODO: purposes.
with open(filename, 'rb') as f:
md5_hash.update(f.read())
return md5_hash.hexdigest()
def touch(path):
"""Creates an empty file at the specified path."""
perms = (os.O_WRONLY | os.O_CREAT | os.O_NONBLOCK | os.O_NOCTTY)
fd = None
try:
fd = os.open(path, perms)
os.utime(path, None)
finally:
if fd is not None:
os.close(fd)
def touchp(path):
"""Like ``touch``, but creates any parent directories needed for the file.
"""
mkdirp(os.path.dirname(path))
touch(path)
def force_symlink(src, dest):
try:
os.symlink(src, dest)
except OSError:
os.remove(dest)
os.symlink(src, dest)
def join_path(prefix, *args):
path = str(prefix)
for elt in args:
path = os.path.join(path, str(elt))
return path
def ancestor(dir, n=1):
"""Get the nth ancestor of a directory."""
parent = os.path.abspath(dir)
for i in range(n):
parent = os.path.dirname(parent)
return parent
def can_access(file_name):
"""True if we have read/write access to the file."""
return os.access(file_name, os.R_OK | os.W_OK)
def traverse_tree(source_root, dest_root, rel_path='', **kwargs):
"""Traverse two filesystem trees simultaneously.
Walks the LinkTree directory in pre or post order. Yields each
file in the source directory with a matching path from the dest
directory, along with whether the file is a directory.
e.g., for this tree::
root/
a/
file1
file2
b/
file3
When called on dest, this yields::
('root', 'dest')
('root/a', 'dest/a')
('root/a/file1', 'dest/a/file1')
('root/a/file2', 'dest/a/file2')
('root/b', 'dest/b')
('root/b/file3', 'dest/b/file3')
Keyword Arguments:
order (str): Whether to do pre- or post-order traversal. Accepted
values are 'pre' and 'post'
        ignore (callable): Predicate indicating which files to ignore
follow_nonexisting (bool): Whether to descend into directories in
            ``src`` that do not exist in ``dest``. Default is True
follow_links (bool): Whether to descend into symlinks in ``src``
"""
follow_nonexisting = kwargs.get('follow_nonexisting', True)
    follow_links = kwargs.get('follow_links', False)
# Yield in pre or post order?
order = kwargs.get('order', 'pre')
if order not in ('pre', 'post'):
raise ValueError("Order must be 'pre' or 'post'.")
# List of relative paths to ignore under the src root.
ignore = kwargs.get('ignore', lambda filename: False)
# Don't descend into ignored directories
if ignore(rel_path):
return
source_path = os.path.join(source_root, rel_path)
dest_path = os.path.join(dest_root, rel_path)
# preorder yields directories before children
if order == 'pre':
yield (source_path, dest_path)
for f in os.listdir(source_path):
source_child = os.path.join(source_path, f)
dest_child = os.path.join(dest_path, f)
rel_child = os.path.join(rel_path, f)
# Treat as a directory
if os.path.isdir(source_child) and (
follow_links or not os.path.islink(source_child)):
# When follow_nonexisting isn't set, don't descend into dirs
# in source that do not exist in dest
if follow_nonexisting or os.path.exists(dest_child):
tuples = traverse_tree(
source_root, dest_root, rel_child, **kwargs)
for t in tuples:
yield t
# Treat as a file.
elif not ignore(os.path.join(rel_path, f)):
yield (source_child, dest_child)
if order == 'post':
yield (source_path, dest_path)
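# A minimal usage sketch (editor's addition; paths are hypothetical): mirror
# a source tree under a destination root, skipping hidden entries.
def _example_traverse_tree():
    def hidden(rel_path):
        return os.path.basename(rel_path).startswith('.')
    for src, dest in traverse_tree('/tmp/src', '/tmp/dest', ignore=hidden):
        if os.path.isdir(src):
            mkdirp(dest)
        else:
            shutil.copyfile(src, dest)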
def set_executable(path):
mode = os.stat(path).st_mode
if mode & stat.S_IRUSR:
mode |= stat.S_IXUSR
if mode & stat.S_IRGRP:
mode |= stat.S_IXGRP
if mode & stat.S_IROTH:
mode |= stat.S_IXOTH
os.chmod(path, mode)
def remove_dead_links(root):
"""Removes any dead link that is present in root.
Parameters:
root (str): path where to search for dead links
"""
for file in os.listdir(root):
path = join_path(root, file)
remove_if_dead_link(path)
def remove_if_dead_link(path):
"""Removes the argument if it is a dead link.
Parameters:
path (str): The potential dead link
"""
if os.path.islink(path):
real_path = os.path.realpath(path)
if not os.path.exists(real_path):
os.unlink(path)
def remove_linked_tree(path):
"""Removes a directory and its contents.
If the directory is a symlink, follows the link and removes the real
directory before removing the link.
Parameters:
path (str): Directory to be removed
"""
if os.path.exists(path):
if os.path.islink(path):
shutil.rmtree(os.path.realpath(path), True)
os.unlink(path)
else:
shutil.rmtree(path, True)
def fix_darwin_install_name(path):
"""Fix install name of dynamic libraries on Darwin to have full path.
There are two parts of this task:
1. Use ``install_name('-id', ...)`` to change install name of a single lib
2. Use ``install_name('-change', ...)`` to change the cross linking between
libs. The function assumes that all libraries are in one folder and
currently won't follow subfolders.
Parameters:
path (str): directory in which .dylib files are located
"""
libs = glob.glob(join_path(path, "*.dylib"))
for lib in libs:
# fix install name first:
install_name_tool = Executable('install_name_tool')
install_name_tool('-id', lib, lib)
otool = Executable('otool')
long_deps = otool('-L', lib, output=str).split('\n')
deps = [dep.partition(' ')[0][1::] for dep in long_deps[2:-1]]
# fix all dependencies:
for dep in deps:
for loc in libs:
# We really want to check for either
# dep == os.path.basename(loc) or
# dep == join_path(builddir, os.path.basename(loc)),
# but we don't know builddir (nor how symbolic links look
# in builddir). We thus only compare the basenames.
if os.path.basename(dep) == os.path.basename(loc):
install_name_tool('-change', dep, loc, lib)
break
def find(root, files, recursive=True):
"""Search for ``files`` starting from the ``root`` directory.
Like GNU/BSD find but written entirely in Python.
Examples:
.. code-block:: console
$ find /usr -name python
is equivalent to:
>>> find('/usr', 'python')
.. code-block:: console
$ find /usr/local/bin -maxdepth 1 -name python
is equivalent to:
>>> find('/usr/local/bin', 'python', recursive=False)
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
root (str): The root directory to start searching from
        files (str or collections.Sequence): File name(s) to search for
        recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to True.
Returns:
list of strings: The files that have been found
"""
if isinstance(files, six.string_types):
files = [files]
if recursive:
return _find_recursive(root, files)
else:
return _find_non_recursive(root, files)
def _find_recursive(root, search_files):
# The variable here is **on purpose** a defaultdict. The idea is that
# we want to poke the filesystem as little as possible, but still maintain
# stability in the order of the answer. Thus we are recording each library
# found in a key, and reconstructing the stable order later.
found_files = collections.defaultdict(list)
# Make the path absolute to have os.walk also return an absolute path
root = os.path.abspath(root)
for path, _, list_files in os.walk(root):
for search_file in search_files:
matches = glob.glob(os.path.join(path, search_file))
matches = [os.path.join(path, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
def _find_non_recursive(root, search_files):
    # The variable here is **on purpose** a defaultdict as os.listdir
# can return files in any order (does not preserve stability)
found_files = collections.defaultdict(list)
# Make the path absolute to have absolute path returned
root = os.path.abspath(root)
for search_file in search_files:
matches = glob.glob(os.path.join(root, search_file))
matches = [os.path.join(root, x) for x in matches]
found_files[search_file].extend(matches)
answer = []
for search_file in search_files:
answer.extend(found_files[search_file])
return answer
# Utilities for libraries and headers
class FileList(collections.Sequence):
"""Sequence of absolute paths to files.
Provides a few convenience methods to manipulate file paths.
"""
def __init__(self, files):
if isinstance(files, six.string_types):
files = [files]
self.files = list(dedupe(files))
@property
def directories(self):
"""Stable de-duplication of the directories where the files reside.
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/libc.a'])
>>> l.directories
['/dir1', '/dir2']
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.directories
['/dir1', '/dir2']
Returns:
list of strings: A list of directories
"""
return list(dedupe(
os.path.dirname(x) for x in self.files if os.path.dirname(x)
))
@property
def basenames(self):
"""Stable de-duplication of the base-names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.a'])
>>> l.basenames
['liba.a', 'libb.a']
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.basenames
['a.h', 'b.h']
Returns:
list of strings: A list of base-names
"""
return list(dedupe(os.path.basename(x) for x in self.files))
def __getitem__(self, item):
cls = type(self)
if isinstance(item, numbers.Integral):
return self.files[item]
return cls(self.files[item])
def __add__(self, other):
return self.__class__(dedupe(self.files + list(other)))
def __radd__(self, other):
return self.__add__(other)
def __eq__(self, other):
return self.files == other.files
def __len__(self):
return len(self.files)
def joined(self, separator=' '):
return separator.join(self.files)
def __repr__(self):
return self.__class__.__name__ + '(' + repr(self.files) + ')'
def __str__(self):
return self.joined()
class HeaderList(FileList):
"""Sequence of absolute paths to headers.
Provides a few convenience methods to manipulate header paths and get
commonly used compiler flags or names.
"""
def __init__(self, files):
super(HeaderList, self).__init__(files)
self._macro_definitions = []
@property
def headers(self):
"""Stable de-duplication of the headers.
Returns:
list of strings: A list of header files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of header names in the list without extensions
>>> h = HeaderList(['/dir1/a.h', '/dir2/b.h', '/dir3/a.h'])
>>> h.names
['a', 'b']
Returns:
list of strings: A list of files without extensions
"""
names = []
for x in self.basenames:
name = x
# Valid extensions include: ['.cuh', '.hpp', '.hh', '.h']
for ext in ['.cuh', '.hpp', '.hh', '.h']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def include_flags(self):
"""Include flags
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.include_flags
'-I/dir1 -I/dir2'
Returns:
str: A joined list of include flags
"""
return ' '.join(['-I' + x for x in self.directories])
@property
def macro_definitions(self):
"""Macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.add_macro('-DBOOST_LIB_NAME=boost_regex')
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.macro_definitions
'-DBOOST_LIB_NAME=boost_regex -DBOOST_DYN_LINK'
Returns:
str: A joined list of macro definitions
"""
return ' '.join(self._macro_definitions)
@property
def cpp_flags(self):
"""Include flags + macro definitions
>>> h = HeaderList(['/dir1/a.h', '/dir1/b.h', '/dir2/c.h'])
>>> h.cpp_flags
'-I/dir1 -I/dir2'
>>> h.add_macro('-DBOOST_DYN_LINK')
>>> h.cpp_flags
'-I/dir1 -I/dir2 -DBOOST_DYN_LINK'
Returns:
str: A joined list of include flags and macro definitions
"""
cpp_flags = self.include_flags
if self.macro_definitions:
cpp_flags += ' ' + self.macro_definitions
return cpp_flags
def add_macro(self, macro):
"""Add a macro definition
Parameters:
macro (str): The macro to add
"""
self._macro_definitions.append(macro)
def find_headers(headers, root, recursive=False):
"""Returns an iterable object containing a list of full paths to
headers if found.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
headers (str or list of str): Header name(s) to search for
root (str): The root directory to start searching from
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
HeaderList: The headers that have been found
"""
if isinstance(headers, six.string_types):
headers = [headers]
elif not isinstance(headers, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_headers.__name__, type(headers))
raise TypeError(message)
# Construct the right suffix for the headers
suffix = 'h'
# List of headers we are searching with suffixes
headers = ['{0}.{1}'.format(header, suffix) for header in headers]
return HeaderList(find(root, headers, recursive))
class LibraryList(FileList):
"""Sequence of absolute paths to libraries
Provides a few convenience methods to manipulate library paths and get
commonly used compiler flags or names
"""
@property
def libraries(self):
"""Stable de-duplication of library files.
Returns:
list of strings: A list of library files
"""
return self.files
@property
def names(self):
"""Stable de-duplication of library names in the list
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir3/liba.so'])
>>> l.names
['a', 'b']
Returns:
list of strings: A list of library names
"""
names = []
for x in self.basenames:
name = x
if x.startswith('lib'):
name = x[3:]
# Valid extensions include: ['.dylib', '.so', '.a']
for ext in ['.dylib', '.so', '.a']:
i = name.rfind(ext)
if i != -1:
names.append(name[:i])
break
else:
# No valid extension, should we still include it?
names.append(name)
return list(dedupe(names))
@property
def search_flags(self):
"""Search flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.search_flags
'-L/dir1 -L/dir2'
Returns:
str: A joined list of search flags
"""
return ' '.join(['-L' + x for x in self.directories])
@property
def link_flags(self):
"""Link flags for the libraries
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.link_flags
'-la -lb'
Returns:
str: A joined list of link flags
"""
return ' '.join(['-l' + name for name in self.names])
@property
def ld_flags(self):
"""Search flags + link flags
>>> l = LibraryList(['/dir1/liba.a', '/dir2/libb.a', '/dir1/liba.so'])
>>> l.ld_flags
'-L/dir1 -L/dir2 -la -lb'
Returns:
str: A joined list of search flags and link flags
"""
return self.search_flags + ' ' + self.link_flags
def find_system_libraries(libraries, shared=True):
"""Searches the usual system library locations for ``libraries``.
Search order is as follows:
1. ``/lib64``
2. ``/lib``
3. ``/usr/lib64``
4. ``/usr/lib``
5. ``/usr/local/lib64``
6. ``/usr/local/lib``
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_system_libraries.__name__,
type(libraries))
raise TypeError(message)
libraries_found = []
search_locations = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib',
]
for library in libraries:
for root in search_locations:
result = find_libraries(library, root, shared, recursive=True)
if result:
libraries_found += result
break
return libraries_found
def find_libraries(libraries, root, shared=True, recursive=False):
"""Returns an iterable of full paths to libraries found in a root dir.
Accepts any glob characters accepted by fnmatch:
======= ====================================
Pattern Meaning
======= ====================================
* matches everything
? matches any single character
[seq] matches any character in ``seq``
[!seq] matches any character not in ``seq``
======= ====================================
Parameters:
libraries (str or list of str): Library name(s) to search for
root (str): The root directory to start searching from
shared (bool, optional): if True searches for shared libraries,
otherwise for static. Defaults to True.
recursive (bool, optional): if False search only root folder,
if True descends top-down from the root. Defaults to False.
Returns:
LibraryList: The libraries that have been found
"""
if isinstance(libraries, six.string_types):
libraries = [libraries]
elif not isinstance(libraries, collections.Sequence):
message = '{0} expects a string or sequence of strings as the '
message += 'first argument [got {1} instead]'
message = message.format(find_libraries.__name__, type(libraries))
raise TypeError(message)
# Construct the right suffix for the library
if shared is True:
suffix = 'dylib' if sys.platform == 'darwin' else 'so'
else:
suffix = 'a'
# List of libraries we are searching with suffixes
libraries = ['{0}.{1}'.format(lib, suffix) for lib in libraries]
return LibraryList(find(root, libraries, recursive))
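# A minimal usage sketch (editor's addition; the prefix and library names
# are hypothetical):
def _example_find_libraries():
    libs = find_libraries(['libfoo', 'libbar'], '/opt/pkg', shared=True,
                          recursive=True)
    # e.g. '-L/opt/pkg/lib -lfoo -lbar'
    return libs.ld_flags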
| lgpl-2.1 | 536,191,456,906,617,700 | 29.985124 | 79 | 0.591219 | false |
matejcik/weblate | weblate/trans/mixins.py | 1 | 5486 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2016 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
from django.core.urlresolvers import reverse
from weblate.logger import LOGGER
class PercentMixin(object):
"""
    Defines an API for getting the percentage status of translations.
"""
_percents = None
def get_percents(self):
"""
Returns percentages of translation status.
"""
if self._percents is None:
self._percents = self._get_percents()
return self._percents
def _get_percents(self):
"""
Returns percentages of translation status.
"""
raise NotImplementedError()
def get_translated_percent(self):
"""
Returns percent of translated strings.
"""
return self.get_percents()[0]
def get_untranslated_percent(self):
"""
Returns percent of untranslated strings.
"""
return 100 - self.get_percents()[0]
def get_fuzzy_percent(self):
"""
Returns percent of fuzzy strings.
"""
return self.get_percents()[1]
def get_failing_checks_percent(self):
"""
Returns percentage of failed checks.
"""
return self.get_percents()[2]
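# Editor's sketch (not part of the original module): the only contract a
# subclass has to satisfy is ``_get_percents``, returning the
# (translated, fuzzy, failing-checks) percentages in that order.
class _ExamplePercents(PercentMixin):
    def _get_percents(self):
        return (75.0, 10.0, 5.0)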
class URLMixin(object):
"""
    Mixin providing a standard shortcut API for a few standard URLs.
"""
def _reverse_url_name(self):
"""
Returns base name for URL reversing.
"""
raise NotImplementedError()
def _reverse_url_kwargs(self):
"""
Returns kwargs for URL reversing.
"""
raise NotImplementedError()
def reverse_url(self, name=None):
"""
Generic reverser for URL.
"""
if name is None:
urlname = self._reverse_url_name()
else:
urlname = '%s_%s' % (
name,
self._reverse_url_name()
)
return reverse(
urlname,
kwargs=self._reverse_url_kwargs()
)
def get_absolute_url(self):
return self.reverse_url()
def get_commit_url(self):
return self.reverse_url('commit')
def get_update_url(self):
return self.reverse_url('update')
def get_push_url(self):
return self.reverse_url('push')
def get_reset_url(self):
return self.reverse_url('reset')
def get_lock_url(self):
return self.reverse_url('lock')
def get_unlock_url(self):
return self.reverse_url('unlock')
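# Editor's sketch (not part of the original module): a subclass supplies a
# URL pattern name and its kwargs; the mixin derives the action URLs from
# them. The URL names below are hypothetical.
class _ExampleURLs(URLMixin):
    def _reverse_url_name(self):
        return 'project'
    def _reverse_url_kwargs(self):
        return {'project': 'demo'}
    # reverse_url() resolves 'project'; get_commit_url() resolves
    # 'commit_project', both with kwargs={'project': 'demo'}.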
class LoggerMixin(object):
"""
Mixin with logging.
"""
@property
def log_prefix(self):
return 'default: '
def log_debug(self, msg, *args):
return LOGGER.debug(
self.log_prefix + msg, *args
)
def log_info(self, msg, *args):
return LOGGER.info(
self.log_prefix + msg, *args
)
def log_warning(self, msg, *args):
return LOGGER.warning(
self.log_prefix + msg, *args
)
def log_error(self, msg, *args):
return LOGGER.error(
self.log_prefix + msg, *args
)
class PathMixin(LoggerMixin):
"""
Mixin for path manipulations.
"""
_dir_path = None
_linked_subproject = None
def _get_path(self):
"""
Actual calculation of path.
"""
raise NotImplementedError()
def get_path(self):
"""
Return path to directory.
        Caching is necessary for linked projects; otherwise
        we would end up fetching the linked subproject again and again.
"""
if self._dir_path is None:
self._dir_path = self._get_path()
return self._dir_path
def check_rename(self, old):
"""
        Detects slug changes and renames the underlying directory if needed.
"""
# No moving for links
if getattr(self, 'is_repo_link', False):
return
old_path = old.get_path()
# Invalidate path cache (otherwise we would still get old path)
self._dir_path = None
new_path = self.get_path()
if old_path != new_path:
self.log_info(
'path changed from %s to %s', old_path, new_path
)
if os.path.exists(old_path) and not os.path.exists(new_path):
self.log_info(
'renaming "%s" to "%s"', old_path, new_path
)
os.rename(old_path, new_path)
# Clean subproject cache on rename
self._linked_subproject = None
def create_path(self):
"""
Create filesystem directory for storing data
"""
path = self.get_path()
if not os.path.exists(path):
os.makedirs(path)
| gpl-3.0 | -2,377,593,369,573,738,500 | 24.621495 | 73 | 0.572132 | false |
Yelp/paasta | paasta_tools/cassandracluster_tools.py | 1 | 7272 | # Copyright 2015-2019 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
from typing import Mapping
from typing import Optional
import service_configuration_lib
from paasta_tools.kubernetes_tools import sanitise_kubernetes_name
from paasta_tools.kubernetes_tools import sanitised_cr_name
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
KUBERNETES_NAMESPACE = "paasta-cassandraclusters"
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class CassandraClusterDeploymentConfigDict(LongRunningServiceConfigDict, total=False):
bounce_margin_factor: float
replicas: int
class CassandraClusterDeploymentConfig(LongRunningServiceConfig):
config_dict: CassandraClusterDeploymentConfigDict
config_filename_prefix = "cassandracluster"
def __init__(
self,
service: str,
cluster: str,
instance: str,
config_dict: CassandraClusterDeploymentConfigDict,
branch_dict: Optional[BranchDictV2],
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
soa_dir=soa_dir,
config_dict=config_dict,
branch_dict=branch_dict,
)
def get_service_name_smartstack(self) -> str:
"""
To support apollo we always register in
cassandra_<cluster>.main
"""
return "cassandra_" + self.get_instance()
def get_nerve_namespace(self) -> str:
"""
To support apollo we always register in
cassandra_<cluster>.main
"""
return "main"
def get_registrations(self) -> List[str]:
"""
To support apollo we always register in
cassandra_<cluster>.main
"""
registrations = self.config_dict.get("registrations", [])
for registration in registrations:
try:
decompose_job_id(registration)
except InvalidJobNameError:
log.error(
"Provided registration {} for service "
"{} is invalid".format(registration, self.service)
)
return registrations or [
compose_job_id(self.get_service_name_smartstack(), "main")
]
def get_kubernetes_namespace(self) -> str:
return KUBERNETES_NAMESPACE
def get_instances(self, with_limit: bool = True) -> int:
return self.config_dict.get("replicas", 1)
def get_bounce_method(self) -> str:
"""
        This isn't really true, since we use the StatefulSet RollingUpdate
        strategy. However, the paasta-api needs a paasta bounce method to
        map to, and crossover is the closest match.
"""
return "crossover"
def get_bounce_margin_factor(self) -> float:
return self.config_dict.get("bounce_margin_factor", 1.0)
def get_sanitised_service_name(self) -> str:
return sanitise_kubernetes_name(self.get_service())
def get_sanitised_instance_name(self) -> str:
return sanitise_kubernetes_name(self.get_instance())
def get_sanitised_deployment_name(self) -> str:
return self.get_sanitised_instance_name()
def validate(
self,
params: List[str] = [
"cpus",
"security",
"dependencies_reference",
"deploy_group",
],
) -> List[str]:
# Use InstanceConfig to validate shared config keys like cpus and mem
# TODO: add mem back to this list once we fix PAASTA-15582 and
# move to using the same units as flink/marathon etc.
error_msgs = super().validate(params=params)
if error_msgs:
name = self.get_instance()
return [f"{name}: {msg}" for msg in error_msgs]
else:
return []
def load_cassandracluster_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> CassandraClusterDeploymentConfig:
"""Read a service instance's configuration for CassandraCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
    :returns: A CassandraClusterDeploymentConfig for the service instance"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "cassandracluster", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = CassandraClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return CassandraClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
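# Editor's sketch (hypothetical service/instance/cluster names; skipping
# deployments.json so the call stays self-contained):
def _example_load_config():
    config = load_cassandracluster_instance_config(
        service='myservice', instance='main', cluster='norcal-devc',
        load_deployments=False,
    )
    # Registrations default to cassandra_<instance>.main, as defined above.
    return config.get_registrations(), config.get_instances()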
# TODO: read this from CRD in service configs
def cr_id(service: str, instance: str) -> Mapping[str, str]:
return dict(
group="yelp.com",
version="v1alpha1",
namespace="paasta-cassandraclusters",
plural="cassandraclusters",
name=sanitised_cr_name(service, instance),
)
| apache-2.0 | -2,582,898,150,545,894,000 | 33.628571 | 104 | 0.662404 | false |
bsipocz/astropy | astropy/_erfa/erfa_generator.py | 1 | 27369 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module's main purpose is to act as a script to create new versions
of ufunc.c when ERFA is updated (or this generator is enhanced).
`Jinja2 <http://jinja.pocoo.org/>`_ must be installed for this
module/script to function.
Note that this does *not* currently automate the process of creating structs
or dtypes for those structs. They should be added manually in the template file.
"""
import re
import os.path
from collections import OrderedDict
from distutils.version import LooseVersion
import numpy
# Note: once we support only numpy >=1.16, all things related to "d3_fix"
# can be removed, here and in the templates (core.py.templ
# NOTE: we define this variable here instead of importing from astropy to
# ensure that running this script does not require importing astropy.
NUMPY_LT_1_16 = LooseVersion(numpy.__version__) < '1.16'
DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0],
'../../cextern/erfa')
DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0]
NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')'))
class FunctionDoc:
def __init__(self, doc):
self.doc = doc.replace("**", " ").replace("/*\n", "").replace("*/", "")
self.__input = None
self.__output = None
self.__ret_info = None
def _get_arg_doc_list(self, doc_lines):
"""Parse input/output doc section lines, getting arguments from them.
Ensure all elements of eraASTROM and eraLDBODY are left out, as those
are not input or output arguments themselves. Also remove the nb
argument in from of eraLDBODY, as we infer nb from the python array.
"""
doc_list = []
skip = []
for d in doc_lines:
arg_doc = ArgumentDoc(d)
if arg_doc.name is not None:
if skip:
if skip[0] == arg_doc.name:
skip.pop(0)
continue
else:
raise RuntimeError("We whould be skipping {} "
"but {} encountered."
.format(skip[0], arg_doc.name))
if arg_doc.type.startswith('eraLDBODY'):
# Special-case LDBODY: for those, the previous argument
# is always the number of bodies, but we don't need it
# as an input argument for the ufunc since we're going
# to determine this from the array itself. Also skip
# the description of its contents; those are not arguments.
doc_list.pop()
skip = ['bm', 'dl', 'pv']
elif arg_doc.type.startswith('eraASTROM'):
# Special-case ASTROM: need to skip the description
# of its contents; those are not arguments.
skip = ['pmt', 'eb', 'eh', 'em', 'v', 'bm1',
'bpn', 'along', 'xpl', 'ypl', 'sphi',
'cphi', 'diurab', 'eral', 'refa', 'refb']
doc_list.append(arg_doc)
return doc_list
@property
def input(self):
if self.__input is None:
self.__input = []
for regex in ("Given([^\n]*):\n(.+?) \n",
"Given and returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__input += self._get_arg_doc_list(doc_lines)
return self.__input
@property
def output(self):
if self.__output is None:
self.__output = []
for regex in ("Given and returned([^\n]*):\n(.+?) \n",
"Returned([^\n]*):\n(.+?) \n"):
result = re.search(regex, self.doc, re.DOTALL)
if result is not None:
doc_lines = result.group(2).split("\n")
self.__output += self._get_arg_doc_list(doc_lines)
return self.__output
@property
def ret_info(self):
if self.__ret_info is None:
ret_info = []
result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?) \n", self.doc, re.DOTALL)
if result is not None:
ret_info.append(ReturnDoc(result.group(2)))
if len(ret_info) == 0:
self.__ret_info = ''
elif len(ret_info) == 1:
self.__ret_info = ret_info[0]
else:
raise ValueError("Multiple C return sections found in this doc:\n" + self.doc)
return self.__ret_info
def __repr__(self):
return self.doc.replace(" \n", "\n")
class ArgumentDoc:
def __init__(self, doc):
match = re.search("^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)", doc)
if match is not None:
self.name = match.group(1)
self.type = match.group(2)
self.doc = match.group(3)
else:
self.name = None
self.type = None
self.doc = None
def __repr__(self):
return f" {self.name:15} {self.type:15} {self.doc}"
class Variable:
"""Properties shared by Argument and Return."""
@property
def npy_type(self):
"""Predefined type used by numpy ufuncs to indicate a given ctype.
Eg., NPY_DOUBLE for double.
"""
return "NPY_" + self.ctype.upper()
@property
def dtype(self):
"""Name of dtype corresponding to the ctype.
Specifically,
double : dt_double
int : dt_int
        double[3] : dt_double
        double[2][3] : dt_pv
        double[2] : dt_pvdpv
        double[3][3] : dt_double
        int[4] : dt_ymdf | dt_hmsf | dt_dmsf, depending on name
eraASTROM: dt_eraASTROM
eraLDBODY: dt_eraLDBODY
char : dt_sign
char[] : dt_type
The corresponding dtypes are defined in ufunc.c, where they are
used for the loop definitions. In core.py, they are also used
to view-cast regular arrays to these structured dtypes.
"""
if self.ctype == 'const char':
return 'dt_type'
elif self.ctype == 'char':
return 'dt_sign'
elif self.ctype == 'int' and self.shape == (4,):
return 'dt_' + self.name[1:]
elif self.ctype == 'double' and self.shape == (3,):
return 'dt_double'
elif self.ctype == 'double' and self.shape == (2, 3):
return 'dt_pv'
elif self.ctype == 'double' and self.shape == (2,):
return 'dt_pvdpv'
elif self.ctype == 'double' and self.shape == (3, 3):
return 'dt_double'
elif not self.shape:
return 'dt_' + self.ctype
else:
raise ValueError("ctype {} with shape {} not recognized."
.format(self.ctype, self.shape))
@property
def view_dtype(self):
"""Name of dtype corresponding to the ctype for viewing back as array.
E.g., dt_double for double, dt_double33 for double[3][3].
The types are defined in core.py, where they are used for view-casts
of structured results as regular arrays.
"""
if self.ctype == 'const char':
return 'dt_bytes12'
elif self.ctype == 'char':
return 'dt_bytes1'
else:
raise ValueError('Only char ctype should need view back!')
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
size = 1
for s in self.shape:
size *= s
return size
@property
def cshape(self):
return ''.join([f'[{s}]' for s in self.shape])
@property
def signature_shape(self):
if self.ctype == 'eraLDBODY':
return '(n)'
elif self.ctype == 'double' and self.shape == (3,):
return '(d3)' if NUMPY_LT_1_16 else '(3)'
elif self.ctype == 'double' and self.shape == (3, 3):
return '(d3, d3)' if NUMPY_LT_1_16 else '(3, 3)'
else:
return '()'
class Argument(Variable):
def __init__(self, definition, doc):
self.definition = definition
self.doc = doc
self.__inout_state = None
self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1)
if "*" == ptr_name_arr[0]:
self.is_ptr = True
name_arr = ptr_name_arr[1:]
else:
self.is_ptr = False
name_arr = ptr_name_arr
if "[]" in ptr_name_arr:
self.is_ptr = True
name_arr = name_arr[:-2]
if "[" in name_arr:
self.name, arr = name_arr.split("[", 1)
self.shape = tuple([int(size) for size in arr[:-1].split("][")])
else:
self.name = name_arr
self.shape = ()
@property
def inout_state(self):
if self.__inout_state is None:
self.__inout_state = ''
for i in self.doc.input:
if self.name in i.name.split(','):
self.__inout_state = 'in'
for o in self.doc.output:
if self.name in o.name.split(','):
if self.__inout_state == 'in':
self.__inout_state = 'inout'
else:
self.__inout_state = 'out'
return self.__inout_state
@property
def name_for_call(self):
"""How the argument should be used in the call to the ERFA function.
This takes care of ensuring that inputs are passed by value,
as well as adding back the number of bodies for any LDBODY argument.
The latter presumes that in the ufunc inner loops, that number is
called 'nb'.
"""
if self.ctype == 'eraLDBODY':
assert self.name == 'b'
return 'nb, _' + self.name
elif self.is_ptr:
return '_'+self.name
else:
return '*_'+self.name
def __repr__(self):
return f"Argument('{self.definition}', name='{self.name}', ctype='{self.ctype}', inout_state='{self.inout_state}')"
class ReturnDoc:
def __init__(self, doc):
self.doc = doc
self.infoline = doc.split('\n')[0].strip()
self.type = self.infoline.split()[0]
self.descr = self.infoline.split()[1]
if self.descr.startswith('status'):
self.statuscodes = statuscodes = {}
code = None
for line in doc[doc.index(':')+1:].split('\n'):
ls = line.strip()
if ls != '':
if ' = ' in ls:
code, msg = ls.split(' = ')
if code != 'else':
code = int(code)
statuscodes[code] = msg
elif code is not None:
statuscodes[code] += ls
else:
self.statuscodes = None
def __repr__(self):
return f"Return value, type={self.type:15}, {self.descr}, {self.doc}"
class Return(Variable):
def __init__(self, ctype, doc):
self.name = 'c_retval'
self.inout_state = 'stat' if ctype == 'int' else 'ret'
self.ctype = ctype
self.shape = ()
self.doc = doc
def __repr__(self):
return f"Return(name='{self.name}', ctype='{self.ctype}', inout_state='{self.inout_state}')"
@property
def doc_info(self):
return self.doc.ret_info
class Function:
"""
A class representing a C function.
Parameters
----------
name : str
The name of the function
source_path : str
Either a directory, which means look for the function in a
stand-alone file (like for the standard ERFA distribution), or a
file, which means look for the function in that file (as for the
astropy-packaged single-file erfa.c).
match_line : str, optional
If given, searching of the source file will skip until it finds
a line matching this string, and start from there.
"""
def __init__(self, name, source_path, match_line=None):
self.name = name
self.pyname = name.split('era')[-1].lower()
self.filename = self.pyname+".c"
if os.path.isdir(source_path):
self.filepath = os.path.join(os.path.normpath(source_path), self.filename)
else:
self.filepath = source_path
with open(self.filepath) as f:
if match_line:
line = f.readline()
while line != '':
if line.startswith(match_line):
filecontents = '\n' + line + f.read()
break
line = f.readline()
else:
msg = ('Could not find the match_line "{0}" in '
'the source file "{1}"')
raise ValueError(msg.format(match_line, self.filepath))
else:
filecontents = f.read()
pattern = fr"\n([^\n]+{name} ?\([^)]+\)).+?(/\*.+?\*/)"
p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE)
search = p.search(filecontents)
self.cfunc = " ".join(search.group(1).split())
self.doc = FunctionDoc(search.group(2))
self.args = []
for arg in re.search(r"\(([^)]+)\)", self.cfunc).group(1).split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.search(f"^(.*){name}", self.cfunc).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def args_by_inout(self, inout_filter, prop=None, join=None):
"""
Gives all of the arguments and/or returned values, depending on whether
they are inputs, outputs, etc.
The value for `inout_filter` should be a string containing anything
that arguments' `inout_state` attribute produces. Currently, that can be:
* "in" : input
* "out" : output
* "inout" : something that's could be input or output (e.g. a struct)
* "ret" : the return value of the C function
* "stat" : the return value of the C function if it is a status code
It can also be a "|"-separated string giving inout states to OR
together.
"""
result = []
for arg in self.args:
if arg.inout_state in inout_filter.split('|'):
if prop is None:
result.append(arg)
else:
result.append(getattr(arg, prop))
if join is not None:
return join.join(result)
else:
return result
@property
def user_dtype(self):
"""The non-standard dtype, if any, needed by this function's ufunc.
This would be any structured array for any input or output, but
we give preference to LDBODY, since that also decides that the ufunc
should be a generalized ufunc.
"""
user_dtype = None
for arg in self.args_by_inout('in|inout|out'):
if arg.ctype == 'eraLDBODY':
return arg.dtype
elif user_dtype is None and arg.dtype not in ('dt_double',
'dt_int'):
user_dtype = arg.dtype
return user_dtype
@property
def signature(self):
"""Possible signature, if this function should be a gufunc."""
if all(arg.signature_shape == '()'
for arg in self.args_by_inout('in|inout|out')):
return None
return '->'.join(
[','.join([arg.signature_shape for arg in args])
for args in (self.args_by_inout('in|inout'),
self.args_by_inout('inout|out|ret|stat'))])
def _d3_fix_arg_and_index(self):
if not any('d3' in arg.signature_shape
for arg in self.args_by_inout('in|inout')):
for j, arg in enumerate(self.args_by_inout('out')):
if 'd3' in arg.signature_shape:
return j, arg
return None, None
@property
def d3_fix_op_index(self):
"""Whether only output arguments have a d3 dimension."""
index = self._d3_fix_arg_and_index()[0]
if index is not None:
len_in = len(list(self.args_by_inout('in')))
len_inout = len(list(self.args_by_inout('inout')))
            index += len_in + 2 * len_inout
return index
@property
def d3_fix_arg(self):
"""Whether only output arguments have a d3 dimension."""
return self._d3_fix_arg_and_index()[1]
@property
def python_call(self):
outnames = [arg.name for arg in self.args_by_inout('inout|out|stat|ret')]
argnames = [arg.name for arg in self.args_by_inout('in|inout')]
argnames += [arg.name for arg in self.args_by_inout('inout')]
d3fix_index = self._d3_fix_arg_and_index()[0]
if d3fix_index is not None:
argnames += ['None'] * d3fix_index + [self.d3_fix_arg.name]
return '{out} = {func}({args})'.format(out=', '.join(outnames),
func='ufunc.' + self.pyname,
args=', '.join(argnames))
def __repr__(self):
return f"Function(name='{self.name}', pyname='{self.pyname}', filename='{self.filename}', filepath='{self.filepath}')"
class Constant:
def __init__(self, name, value, doc):
self.name = name.replace("ERFA_", "")
self.value = value.replace("ERFA_", "")
self.doc = doc
class ExtraFunction(Function):
"""
An "extra" function - e.g. one not following the SOFA/ERFA standard format.
Parameters
----------
cname : str
The name of the function in C
prototype : str
The prototype for the function (usually derived from the header)
pathfordoc : str
The path to a file that contains the prototype, with the documentation
as a multiline string *before* it.
"""
def __init__(self, cname, prototype, pathfordoc):
self.name = cname
self.pyname = cname.split('era')[-1].lower()
self.filepath, self.filename = os.path.split(pathfordoc)
self.prototype = prototype.strip()
if prototype.endswith('{') or prototype.endswith(';'):
self.prototype = prototype[:-1].strip()
incomment = False
lastcomment = None
with open(pathfordoc, 'r') as f:
for l in f:
if incomment:
if l.lstrip().startswith('*/'):
incomment = False
lastcomment = ''.join(lastcomment)
else:
if l.startswith('**'):
l = l[2:]
lastcomment.append(l)
else:
if l.lstrip().startswith('/*'):
incomment = True
lastcomment = []
if l.startswith(self.prototype):
self.doc = lastcomment
break
else:
raise ValueError('Did not find prototype {} in file '
'{}'.format(self.prototype, pathfordoc))
self.args = []
argset = re.search(fr"{self.name}\(([^)]+)?\)",
self.prototype).group(1)
if argset is not None:
for arg in argset.split(', '):
self.args.append(Argument(arg, self.doc))
self.ret = re.match(f"^(.*){self.name}",
self.prototype).group(1).strip()
if self.ret != 'void':
self.args.append(Return(self.ret, self.doc))
def __repr__(self):
r = super().__repr__()
if r.startswith('Function'):
r = 'Extra' + r
return r
def main(srcdir=DEFAULT_ERFA_LOC, outfn='core.py', ufuncfn='ufunc.c',
templateloc=DEFAULT_TEMPLATE_LOC, extra='erfa_additions.h',
verbose=True):
from jinja2 import Environment, FileSystemLoader
if verbose:
print_ = lambda *args, **kwargs: print(*args, **kwargs)
else:
print_ = lambda *args, **kwargs: None
# Prepare the jinja2 templating environment
env = Environment(loader=FileSystemLoader(templateloc))
def prefix(a_list, pre):
return [pre+f'{an_element}' for an_element in a_list]
def postfix(a_list, post):
return [f'{an_element}'+post for an_element in a_list]
def surround(a_list, pre, post):
return [pre+f'{an_element}'+post for an_element in a_list]
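    # Illustrative behaviour of the template filters defined above:
    #   prefix(['a', 'b'], 'x_')   -> ['x_a', 'x_b']
    #   postfix(['a', 'b'], '_t')  -> ['a_t', 'b_t']
    #   surround(['a'], '<', '>')  -> ['<a>']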
env.filters['prefix'] = prefix
env.filters['postfix'] = postfix
env.filters['surround'] = surround
erfa_c_in = env.get_template(ufuncfn + '.templ')
erfa_py_in = env.get_template(outfn + '.templ')
# Extract all the ERFA function names from erfa.h
if os.path.isdir(srcdir):
erfahfn = os.path.join(srcdir, 'erfa.h')
        multifilesrc = True
else:
erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h')
        multifilesrc = False
with open(erfahfn, "r") as f:
erfa_h = f.read()
print_("read erfa header")
if extra:
with open(os.path.join(templateloc or '.', extra), "r") as f:
erfa_h += f.read()
print_("read extra header")
funcs = OrderedDict()
section_subsection_functions = re.findall(
r'/\* (\w*)/(\w*) \*/\n(.*?)\n\n', erfa_h,
flags=re.DOTALL | re.MULTILINE)
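    # The pattern above matches ERFA header blocks of the form
    # (illustrative):
    #   /* Astronomy/Calendars */
    #   int eraCal2jd(int iy, int im, int id, double *djm0, double *djm);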
for section, subsection, functions in section_subsection_functions:
print_(f"{section}.{subsection}")
# Right now, we compile everything, but one could be more selective.
# In particular, at the time of writing (2018-06-11), what was
        # actually required for astropy was not quite everything, but:
# ((section == 'Extra')
# or (section == "Astronomy")
# or (subsection == "AngleOps")
# or (subsection == "SphericalCartesian")
# or (subsection == "MatrixVectorProducts")
# or (subsection == 'VectorOps'))
if True:
func_names = re.findall(r' (\w+)\(.*?\);', functions,
flags=re.DOTALL)
for name in func_names:
print_(f"{section}.{subsection}.{name}...")
                if multifilesrc:
# easy because it just looks in the file itself
cdir = (srcdir if section != 'Extra' else
templateloc or '.')
funcs[name] = Function(name, cdir)
else:
# Have to tell it to look for a declaration matching
# the start of the header declaration, otherwise it
# might find a *call* of the function instead of the
# definition
                    for line in functions.split('\n'):
if name in line:
# [:-1] is to remove trailing semicolon, and
# splitting on '(' is because the header and
# C files don't necessarily have to match
# argument names and line-breaking or
# whitespace
match_line = line[:-1].split('(')[0]
                            funcs[name] = Function(name, srcdir, match_line)
break
else:
raise ValueError("A name for a C file wasn't "
"found in the string that "
"spawned it. This should be "
"impossible!")
funcs = funcs.values()
# Extract all the ERFA constants from erfam.h
erfamhfn = os.path.join(srcdir, 'erfam.h')
with open(erfamhfn, 'r') as f:
erfa_m_h = f.read()
constants = []
for chunk in erfa_m_h.split("\n\n"):
result = re.findall(r"#define (ERFA_\w+?) (.+?)$", chunk,
flags=re.DOTALL | re.MULTILINE)
if result:
doc = re.findall(r"/\* (.+?) \*/\n", chunk, flags=re.DOTALL)
for (name, value) in result:
constants.append(Constant(name, value, doc))
# TODO: re-enable this when const char* return values and
# non-status code integer rets are possible
# #Add in any "extra" functions from erfaextra.h
# erfaextrahfn = os.path.join(srcdir, 'erfaextra.h')
# with open(erfaextrahfn, 'r') as f:
# for l in f:
# ls = l.strip()
# match = re.match('.* (era.*)\(', ls)
# if match:
# print_("Extra: {0} ...".format(match.group(1)))
# funcs.append(ExtraFunction(match.group(1), ls, erfaextrahfn))
print_("Rendering template")
erfa_c = erfa_c_in.render(funcs=funcs, NUMPY_LT_1_16=NUMPY_LT_1_16)
erfa_py = erfa_py_in.render(funcs=funcs, constants=constants,
NUMPY_LT_1_16=NUMPY_LT_1_16)
if outfn is not None:
print_("Saving to", outfn, 'and', ufuncfn)
with open(os.path.join(templateloc, outfn), "w") as f:
f.write(erfa_py)
with open(os.path.join(templateloc, ufuncfn), "w") as f:
f.write(erfa_c)
print_("Done!")
return erfa_c, erfa_py, funcs
if __name__ == '__main__':
from argparse import ArgumentParser
ap = ArgumentParser()
ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?',
help='Directory where the ERFA c and header files '
'can be found or to a single erfa.c file '
'(which must be in the same directory as '
'erfa.h). Defaults to the builtin astropy '
'erfa: "{}"'.format(DEFAULT_ERFA_LOC))
ap.add_argument('-o', '--output', default='core.py',
help='The output filename for the pure-python output.')
ap.add_argument('-u', '--ufunc', default='ufunc.c',
help='The output filename for the ufunc .c output')
ap.add_argument('-t', '--template-loc',
default=DEFAULT_TEMPLATE_LOC,
help='the location where the "core.py.templ" and '
'"ufunc.c.templ templates can be found.')
ap.add_argument('-x', '--extra',
default='erfa_additions.h',
help='header file for any extra files in the template '
'location that should be included.')
ap.add_argument('-q', '--quiet', action='store_false', dest='verbose',
help='Suppress output normally printed to stdout.')
args = ap.parse_args()
    main(args.srcdir, args.output, args.ufunc, args.template_loc,
         args.extra, args.verbose)
| bsd-3-clause | -3,515,980,912,529,225,000 | 36.135685 | 136 | 0.517995 | false |
ufukdogan92/is-teklif-sistemi | teklif/models.py | 1 | 1928 | from django.db import models
from ilan.models import Ilan
from kullanici.models import IsArayan
from register.models import Register
class Teklif(models.Model):  # "Teklif" = a bid placed on a job listing (Ilan)
ilan = models.ForeignKey(Ilan,blank=True,null=True,related_name="odeme_ilanı")
teklif_veren = models.OneToOneField(IsArayan,related_name="is_arayan")
butce = models.IntegerField()
sure = models.IntegerField()
onay_durumu = models.BooleanField(default=False)
teklif_tarihi = models.DateTimeField(auto_now_add=True)
duzenlenme_tarihi = models.DateField(auto_now=True)
def __str__(self):
return self.ilan.ilan_basligi+ " ilanına "+ self.teklif_veren.kullanici.username + " kullanıcısının Teklifi"
class Meta:
verbose_name ="Teklifler"
verbose_name_plural="Teklif"
def save(self, *args, **kwargs):
from register.models import Register
self.ilan = Register.teklifVermeBaslat(self.ilan.pk)
self.teklif_veren = Register.getIsArayan(self.teklif_veren.pk)
super(Teklif, self).save(*args, **kwargs)
class TeklifOnay(models.Model):  # "TeklifOnay" = bid approval; save() creates an Odeme (payment)
teklif = models.OneToOneField(Teklif,related_name="teklif_onay")
onay_durumu = models.BooleanField(default=True)
onay_tarihi = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.teklif.ilan.ilan_basligi+ " ilanına verilen teklifin onayı"
class Meta:
verbose_name ="Teklif Onayı"
verbose_name_plural="Teklif Onayları"
def save(self, *args, **kwargs):
if not self.pk:
from odeme.models import Odeme
teklif = Teklif.objects.get(pk=self.teklif.pk)
self.onay_durumu = True
self.tarihi = self.onay_tarihi
odeme = Odeme(odeme_basligi=teklif.ilan.ilan_basligi,ucret=teklif.butce,sure=teklif.sure,teklif=teklif)
odeme.save()
super(TeklifOnay, self).save(*args, **kwargs) | gpl-3.0 | -4,214,891,582,358,801,000 | 36.627451 | 116 | 0.678832 | false |
kg-bot/SupyBot | plugins/Misc1/__init__.py | 1 | 2791 | ###
# Copyright (c) 2014, KG-Bot
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = '' # 'http://supybot.com/Members/yourname/Misc1/download'
from . import config
from . import plugin
from imp import reload
# In case we're being reloaded.
reload(config)
reload(plugin)
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| gpl-3.0 | 5,609,393,713,654,504,000 | 38.449275 | 79 | 0.741311 | false |
cnheitman/barf-project | tests/core/smt/test_smtfunction.py | 1 | 3060 | # Copyright (c) 2014, Fundacion Dr. Manuel Sadosky
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from barf.core.smt.smtsymbol import BitVec
from barf.core.smt.smtsymbol import Bool
from barf.core.smt.smtfunction import concat
from barf.core.smt.smtfunction import extract
from barf.core.smt.smtfunction import ite
from barf.core.smt.smtfunction import sign_extend
from barf.core.smt.smtfunction import zero_extend
class SmtFunctionTests(unittest.TestCase):
def test_zero_extend(self):
x = BitVec(32, "x")
y = zero_extend(x, 64)
self.assertEqual(y.value, "((_ zero_extend 32) x)")
def test_sign_extend(self):
x = BitVec(32, "x")
y = sign_extend(x, 64)
self.assertEqual(y.value, "((_ sign_extend 32) x)")
def test_extract(self):
x = BitVec(32, "x")
x0 = extract(x, 0, 8)
x1 = extract(x, 8, 8)
x2 = extract(x, 16, 8)
x3 = extract(x, 24, 8)
self.assertEqual(x0.value, "((_ extract 7 0) x)")
self.assertEqual(x1.value, "((_ extract 15 8) x)")
self.assertEqual(x2.value, "((_ extract 23 16) x)")
self.assertEqual(x3.value, "((_ extract 31 24) x)")
def test_ite(self):
b = Bool("b")
x = BitVec(32, "x")
y = BitVec(32, "y")
z = BitVec(32, "z")
v = ite(32, x == 0, y, z)
w = ite(32, b, y, z)
self.assertEqual(v.value, "(ite (= x #x00000000) y z)")
self.assertEqual(w.value, "(ite b y z)")
def test_concat(self):
x = BitVec(32, "x")
y = BitVec(32, "y")
z = concat(32, x, y)
v = concat(32, x)
self.assertEqual(z.value, "(concat x y)")
self.assertEqual(v.value, "x")
def main():
unittest.main()
if __name__ == '__main__':
main()
| bsd-2-clause | -6,082,208,564,606,116,000 | 33.382022 | 80 | 0.662418 | false |
jeremiah-c-leary/vhdl-style-guide | vsg/tests/case/test_rule_003.py | 1 | 1130 |
import os
import unittest
from vsg.rules import case
from vsg import vhdlFile
from vsg.tests import utils
sTestDir = os.path.dirname(__file__)
lFile, eError = vhdlFile.utils.read_vhdlfile(os.path.join(sTestDir, 'rule_003_test_input.vhd'))
lExpected = []
lExpected.append('')
utils.read_file(os.path.join(sTestDir, 'rule_003_test_input.fixed.vhd'), lExpected)
class test_case_rule(unittest.TestCase):
def setUp(self):
self.oFile = vhdlFile.vhdlFile(lFile)
self.assertIsNone(eError)
def test_rule_003(self):
oRule = case.rule_003()
self.assertTrue(oRule)
self.assertEqual(oRule.name, 'case')
self.assertEqual(oRule.identifier, '003')
lExpected = [24]
oRule.analyze(self.oFile)
self.assertEqual(lExpected, utils.extract_violation_lines_from_violation_object(oRule.violations))
def test_fix_rule_003(self):
oRule = case.rule_003()
oRule.fix(self.oFile)
lActual = self.oFile.get_lines()
self.assertEqual(lExpected, lActual)
oRule.analyze(self.oFile)
self.assertEqual(oRule.violations, [])
| gpl-3.0 | -605,416,265,127,274,600 | 24.111111 | 106 | 0.673451 | false |
TakeshiTseng/SDN-Work | mininet/bgp-3as/as.py | 1 | 2401 | #!/usr/bin/env python
from mininet.net import Mininet
from mininet.cli import CLI
from mininet.log import setLogLevel
'''
h1 -- r1 -- r2 -- r3 -- h3
|
h2
h1 - r1 : 10.0.1.0/24
h2 - r2 : 10.0.2.0/24
h3 - r3 : 10.0.3.0/24
r1 - r2 : 192.168.1.0/24
r2 - r3 : 192.168.2.0/24
'''
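# Hypothetical sketch of the quagga BGP config this script expects in
# r1.conf (the real files live next to this script and may differ; the AS
# numbers and router IDs below are made up):
#
#   router bgp 65001
#    bgp router-id 192.168.1.1
#    network 10.0.1.0/24
#    neighbor 192.168.1.2 remote-as 65002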
if '__main__' == __name__:
setLogLevel('debug')
net = Mininet(controller=None)
h1 = net.addHost('h1', ip="10.0.1.1/24")
h2 = net.addHost('h2', ip="10.0.2.1/24")
h3 = net.addHost('h3', ip="10.0.3.1/24")
r1 = net.addHost('r1')
r2 = net.addHost('r2')
r3 = net.addHost('r3')
net.addLink(r1, r2)
net.addLink(r2, r3)
net.addLink(h1, r1)
net.addLink(h2, r2)
net.addLink(h3, r3)
net.build()
# default route for hosts
h1.cmd('ip r add 0.0.0.0/0 via 10.0.1.254')
h2.cmd('ip r add 0.0.0.0/0 via 10.0.2.254')
h3.cmd('ip r add 0.0.0.0/0 via 10.0.3.254')
# remove default ip address
r1.cmd('ip a del 10.0.0.4/8 dev r1-eth0')
r2.cmd('ip a del 10.0.0.5/8 dev r2-eth0')
r3.cmd('ip a del 10.0.0.6/8 dev r3-eth0')
# ip for router facing hosts
r1.cmd('ip a add 10.0.1.254/24 dev r1-eth1')
r2.cmd('ip a add 10.0.2.254/24 dev r2-eth2')
r3.cmd('ip a add 10.0.3.254/24 dev r3-eth1')
# subnet between r1 and r2
r1.cmd('ip a add 192.168.1.1/24 dev r1-eth0')
r2.cmd('ip a add 192.168.1.2/24 dev r2-eth0')
# subnet between r2 and r3
r2.cmd('ip a add 192.168.2.1/24 dev r2-eth1')
r3.cmd('ip a add 192.168.2.2/24 dev r3-eth0')
# quagga
r1.cmd('/usr/lib/quagga/zebra -d -f zebra-r1.conf -z /var/run/quagga/zebra-r1.api -i /var/run/quagga/zebra-r1.pid')
r1.cmd('/usr/lib/quagga/bgpd -d -f r1.conf -z /var/run/quagga/zebra-r1.api -i /var/run/quagga/bgpd-r1.pid')
r2.cmd('/usr/lib/quagga/zebra -d -f zebra-r2.conf -z /var/run/quagga/zebra-r2.api -i /var/run/quagga/zebra-r2.pid')
r2.cmd('/usr/lib/quagga/bgpd -d -f r2.conf -z /var/run/quagga/zebra-r2.api -i /var/run/quagga/bgpd-r2.pid')
r3.cmd('/usr/lib/quagga/zebra -d -f zebra-r3.conf -z /var/run/quagga/zebra-r3.api -i /var/run/quagga/zebra-r3.pid')
r3.cmd('/usr/lib/quagga/bgpd -d -f r3.conf -z /var/run/quagga/zebra-r3.api -i /var/run/quagga/bgpd-r3.pid')
CLI(net)
# kill bgpd and zebra
r1.cmd('killall bgpd zebra')
r2.cmd('killall bgpd zebra')
r3.cmd('killall bgpd zebra')
net.stop()
| mit | -5,056,204,420,631,575,000 | 30.592105 | 119 | 0.598917 | false |
jrydberg/guild | guild/actor.py | 1 | 26454 | # Copyright (c) 2012 Johan Rydberg
# Copyright (c) 2009 Donovan Preston
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import uuid
import weakref
try:
import simplejson as json
except ImportError:
import json
from gevent import Greenlet, Timeout, local, core
from gevent.event import Event
from gevent.hub import GreenletExit
import gevent
from guild import exc, shape
class ActorError(RuntimeError):
"""Base class for actor exceptions.
"""
class LinkBroken(ActorError):
""":"""
class Killed(ActorError):
"""Exception which is raised when an Actor is killed.
"""
class DeadActor(ActorError):
"""Exception which is raised when a message is sent to an Address which
refers to an Actor which is no longer running.
"""
class ReceiveTimeout(ActorError):
"""Internal exception used to signal receive timeouts.
"""
class InvalidCallMessage(ActorError):
"""Message doesn't match call message shape.
"""
class RemoteAttributeError(ActorError, AttributeError):
pass
class RemoteException(ActorError):
pass
def build_call_pattern(method, message=object):
call_pat = CALL_PATTERN.copy()
call_pat['method'] = method
call_pat['message'] = message
return call_pat
def lazy_property(property_name, property_factory, doc=None):
def get(self):
if not hasattr(self, property_name):
setattr(self, property_name, property_factory(self))
return getattr(self, property_name)
return property(get)
_curactor = local.local()
def curactor():
"""Return the current actor."""
return _curactor.current
def _setcurrent(actor):
_curactor.current = actor
def curaddr():
"""Return address of current actor."""
return curactor().address
def curmesh():
return curactor().mesh
def curnode():
return curactor().node
def register(name, address):
"""Associates the name C{name} with the address C{address}."""
curnode().register(name, address)
def whereis(name):
"""Returns the address registered under C{name}, or C{None} if the
name is not registered.
"""
return curnode().whereis(name)
def is_actor_type(obj):
"""Return True if obj is a subclass of Actor, False if not.
"""
try:
return issubclass(obj, Actor)
except TypeError:
return False
def spawn(spawnable, *args, **kw):
"""Start a new Actor. If spawnable is a subclass of Actor,
instantiate it with no arguments and call the Actor's "main"
method with *args and **kw.
If spawnable is a callable, call it inside a new Actor with the first
argument being the "receive" method to use to retrieve messages out
of the Actor's mailbox, followed by the given *args and **kw.
Return the Address of the new Actor.
"""
return curnode().spawn(spawnable, *args, **kw)
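# Minimal illustrative use of spawn() with a function-style actor (the
# names below are invented for the example):
#
#   def logger(receive):
#       while True:
#           _, message = receive()
#           print message
#
#   log_addr = spawn(logger)
#   log_addr.cast({'text': 'hello'})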
def spawn_link(spawnable, *args, **kw):
"""Just like spawn, but the currently running Actor will be linked
to the new actor. If an exception occurs or the Actor finishes
execution, a message will be sent to the Actor which called
spawn_link with details.
When an exception occurs, the message will have a pattern like:
{'address': eventlet.actor.Address, 'exception': dict}
The "exception" dict will have information from the stack trace extracted
into a tree of simple Python objects.
On a normal return from the Actor, the actor's return value is given
in a message like:
{'address': eventlet.actor.Address, 'exit': object}
"""
return curnode().spawn_link(spawnable, *args, **kw)
def handle_custom(obj):
if isinstance(obj, Address):
return obj.to_json()
if isinstance(obj, Ref):
return obj.to_json()
raise TypeError(obj)
def generate_custom(obj):
address = Address.from_json(obj)
if address:
return address
ref = Ref.from_json(obj)
if ref:
return ref
return obj
class Ref(object):
"""A reference."""
def __init__(self, node_id, ref_id):
self._node_id = node_id
self._ref_id = ref_id
ref_id = property(lambda self: self._ref_id)
node_id = property(lambda self: self._node_id)
def to_json(self):
return {'_pyact_ref_id': self._ref_id,
'_pyact_node_id': self._node_id}
@classmethod
def from_json(cls, obj):
        # Keys must be listed in sorted order, else this never matches.
        if sorted(obj.keys()) == ['_pyact_node_id', '_pyact_ref_id']:
return Ref(obj['_pyact_node_id'], obj['_pyact_ref_id'])
return None
def __eq__(self, other):
return (isinstance(other, Ref)
and other.node_id == self.node_id
and other.ref_id == self.ref_id)
def __hash__(self):
return hash((self.node_id, self._ref_id))
class MonitorRef(object):
def __init__(self, address, ref):
self.address = address
self.ref = ref
def demonitor(self):
curmesh().demonitor(self.address, self.ref)
class Address(object):
"""An Address is a reference to another Actor.
Any Actor which has an Address can asynchronously put a message in
that Actor's mailbox. This is called a "cast". To send a message
to another Actor and wait for a response, use "call" instead.
Note that an Address instance itself is rather useless. You need
    a node or a mesh to actually send a message.
"""
def __init__(self, node_id, actor_id):
self._node_id = node_id
self._actor_id = actor_id
actor_id = property(lambda self: self._actor_id)
node_id = property(lambda self: self._node_id)
def to_json(self):
return {'_pyact_actor_id': self._actor_id,
'_pyact_node_id': self._node_id}
@classmethod
def from_json(cls, obj):
if sorted(obj.keys()) == ['_pyact_actor_id', '_pyact_node_id']:
return Address(obj['_pyact_node_id'], obj['_pyact_actor_id'])
return None
def __eq__(self, other):
return (isinstance(other, Address)
and other.node_id == self.node_id
and other.actor_id == self.actor_id)
def __hash__(self):
return hash((self.node_id, self._actor_id))
def cast(self, message):
"""Send a message to the Actor this object addresses."""
curnode().send(self, message)
def __repr__(self):
return "<%s %s/%s>" % (self.__class__.__name__,
self._node_id, self._actor_id)
def __str__(self):
return "<actor %s/%s>" % (self._node_id, self._actor_id)
def __or__(self, message):
"""Use Erlang-y syntax (| instead of !) to send messages.
addr | msg
is equivalent to:
addr.cast(msg)
"""
self.cast(message)
def monitor(self):
"""Monitor the Actor this object addresses.
When the actor dies, a exit message will be sent to the
current actor.
This call returns a reference that can be used to cancel the
monitor with the C{demonitor} function.
"""
ref = curnode().make_ref()
curmesh().monitor(self, curaddr(), ref)
return MonitorRef(self, ref)
def demonitor(self, ref):
"""Cancel a monitor."""
curmesh().demonitor(self, ref)
def link(self):
"""Link the current actor to the actor this object addresses.
"""
print "addr.link curr %s to %s" % (curaddr(), self)
curactor().link(self)
#curmesh().link(self, curaddr())
def call(self, method, message=None, timeout=None):
"""Send a message to the Actor this object addresses. Wait
for a result. If a timeout in seconds is passed, raise
C{gevent.Timeout} if no result is returned in less than the
timeout.
This could have nicer syntax somehow to make it look like an
actual method call.
"""
message_id = str(uuid.uuid4())
my_address = curaddr()
self.cast(
{'call': message_id, 'method': method,
'address': my_address, 'message': message})
if timeout is None:
cancel = None
else:
# Raise any TimeoutError to the caller so they can handle
# it.
cancel = gevent.Timeout(timeout)
cancel.start()
RSP = {'response': message_id, 'message': object}
EXC = {'response': message_id, 'exception': object}
INV = {'response': message_id, 'invalid_method': str}
pattern, response = curactor().receive(RSP, EXC, INV)
if cancel is not None:
cancel.cancel()
if pattern is INV:
raise RemoteAttributeError(method)
elif pattern is EXC:
raise RemoteException(response)
return response['message']
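    # Illustrative round trip for call() above (values invented):
    #   addr.call('ping', {'n': 1}, timeout=5.0) casts
    #   {'call': <uuid>, 'method': 'ping', 'address': <caller>, ...} and
    #   blocks until the matching {'response': <uuid>, 'message': ...}
    #   arrives, returning the 'message' payload.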
def __getattr__(self, method):
"""Support address.<method>(message, timout) call pattern.
For example:
addr.call('test') could be written as addr.test()
"""
f = lambda message=None, timeout=None: self.call(
method, message, timeout)
return f
class _Greenlet(Greenlet):
"""Private version of the greenlet that doesn't dump a stacktrace
to stderr when a greenlet dies.
"""
def _report_error(self, exc_info):
self._exc_info = exc_info
exception = exc_info[1]
if isinstance(exception, GreenletExit):
self._report_result(exception)
return
self._exception = exception
if self._links and self._notifier is None:
self._notifier = core.active_event(self._notify_links)
CALL_PATTERN = {'call': str, 'method': str, 'address': Address,
'message': object}
REMOTE_CALL_PATTERN = {'remotecall':str,
'method':str,
'message':object,
'timeout':object}
RESPONSE_PATTERN = {'response': str, 'message': object}
INVALID_METHOD_PATTERN = {'response': str, 'invalid_method': str}
EXCEPTION_PATTERN = {'response': str, 'exception':object}
class Monitor(object):
def __init__(self, actor, ref, to_addr):
self.actor = actor
self.ref = ref
self.to_addr = to_addr
def _send_exit(self, *args):
self.actor._send_exit(self.to_addr, self.ref)
class Actor(object):
"""An Actor is a Greenlet which has a mailbox. Any other Actor
which has the Address can asynchronously put messages in this
mailbox.
The Actor extracts messages from this mailbox using a technique
called selective receive. To receive a message, the Actor calls
self.receive, passing in any number of "shapes" to match against
messages in the mailbox.
A shape describes which messages will be extracted from the
mailbox. For example, if the message ('credit', 250.0) is in the
mailbox, it could be extracted by calling self.receive(('credit',
int)). Shapes are Python object graphs containing only simple
Python types such as tuple, list, dictionary, integer, and string,
or type object constants for these types.
Since multiple patterns may be passed to receive, the return value
is (matched_pattern, message). To receive any message which is in
the mailbox, simply call receive with no patterns.
"""
_wevent = None
_args = (), {}
actor_id = property(lambda self: self._actor_id)
dead = property(lambda self: self.greenlet.ready())
def __init__(self, run=None, node=None, mesh=None):
if run is None:
self._to_run = self.main
else:
self._to_run = lambda *args, **kw: run(self.receive, *args, **kw)
self._actor_id = str(uuid.uuid4())
print "created actor", self._actor_id
self.greenlet = _Greenlet(self._run)
self.start = self.greenlet.start
self.start_later = self.greenlet.start_later
self.node = node
self.mesh = mesh
self._mailbox = []
self.address = Address(node.id, self._actor_id)
self.trap_exit = False
self.monitors = {}
def _run(self):
"""Run the actor."""
args, kw = self._args
del self._args
to_run = self._to_run
del self._to_run
_setcurrent(self)
return to_run(*args, **kw)
    def _match_patterns(self, patterns):
        """Internal method to match a list of patterns against
        the mailbox. If a message matches any of the patterns,
        that message is removed from the mailbox and returned
        along with the pattern it matched. If no message matches
        any pattern, (None, None) is returned.
"""
for i, message in enumerate(self._mailbox):
for pattern in patterns:
if shape.is_shaped(message, pattern):
del self._mailbox[i]
return pattern, message
return None,None
def receive(self, *patterns, **kw):
"""Select a message out of this Actor's mailbox. If patterns
are given, only select messages which match these shapes.
Otherwise, select the next message.
"""
timeout = kw.get('timeout', None)
        if timeout == 0:
if not patterns:
if self._mailbox:
return {object: object}, self._mailbox.pop(0)
else:
return None,None
return self._match_patterns(patterns)
if timeout is not None:
timer = gevent.Timeout(timeout, ReceiveTimeout)
timer.start()
else:
timer = None
try:
while True:
if patterns:
matched_pat, matched_msg = self._match_patterns(patterns)
elif self._mailbox:
matched_pat, matched_msg = ({object:object},
self._mailbox.pop(0))
else:
matched_pat = None
if matched_pat is not None:
if timer:
timer.cancel()
return matched_pat, matched_msg
self._wevent = Event()
try:
# wait until at least one message or timeout
self._wevent.wait()
finally:
self._wevent = None
except ReceiveTimeout:
return (None,None)
def link(self, to_addr):
"""Link this actor to a remote address."""
self._link(to_addr)
self.mesh.link(to_addr, self.address)
def _send_exit(self, to_addr, ref=None):
"""Send an exit message to the remote address."""
if self.greenlet.exception:
message = {'exit': self.address, 'exception': exc.format_exc(
self.greenlet._exc_info)}
else:
message = {'exit': self.address, 'value': self.greenlet.value}
if ref:
message['ref'] = ref
message = json.dumps(message, default=handle_custom)
self.mesh.exit(self.address, to_addr, message)
def _link(self, to_addr):
"""For internal use.
Link the Actor at the given Address to this Actor.
If this Actor has an unhandled exception, cast a message
containing details about the exception to the Address.
"""
print "we link %s to %s" % (self.address, to_addr)
self.greenlet.link(lambda g: self._send_exit(to_addr))
def _monitor(self, to_addr, ref):
"""For internal use.
        Register a monitor: when this actor exits, an exit message
        tagged with C{ref} is sent to C{to_addr}.
"""
if self.greenlet.ready():
self._send_exit(to_addr, ref)
else:
monitor = Monitor(self, ref, to_addr)
self.greenlet.link(monitor._send_exit)
self.monitors[ref] = monitor
def _demonitor(self, to_addr, ref):
if ref in self.monitors:
monitor = self.monitors.pop(ref)
self.greenlet.unlink(monitor._send_exit)
def _cast(self, message):
"""For internal use.
Nodes uses this to insert a message into this Actor's mailbox.
"""
self._mailbox.append(json.loads(message, object_hook=generate_custom))
if self._wevent and not self._wevent.is_set():
self._wevent.set()
def _exit(self, from_addr, message):
"""For internal use.
Handle a received exit signal.
"""
if self.trap_exit:
self._cast(message)
else:
# The actor do not trap the exit, which means we should
# terminate it. But only if it was an abnormal
# termination.
message = json.loads(message, object_hook=generate_custom)
if not message.has_key('value'):
self.greenlet.kill(LinkBroken(from_addr, message),
block=False)
def _get(self, timeout=None):
"""For internal use.
Wait until the actor finishes.
"""
return self.greenlet.get(timeout=timeout)
def main(self, *args, **kw):
"""If subclassing Actor, override this method to implement the Actor's
main loop.
"""
raise NotImplementedError("Implement in subclass.")
def sleep(self, amount):
gevent.sleep(amount)
def cast(self, address, message):
"""Send a message to the given address."""
self.mesh.cast(address, json.dumps(message, default=handle_custom))
class Server(Actor):
"""An actor which responds to the call protocol by looking for the
specified method and calling it.
Also, Server provides start and stop methods which can be overridden
to customize setup.
"""
def respond(self, orig_message, response=None):
if not shape.is_shaped(orig_message, CALL_PATTERN):
raise InvalidCallMessage(str(orig_message))
orig_message['address'].cast({'response':orig_message['call'],
'message':response})
def respond_invalid_method(self, orig_message, method):
if not shape.is_shaped(orig_message, CALL_PATTERN):
raise InvalidCallMessage(str(orig_message))
orig_message['address'].cast({'response':orig_message['call'],
'invalid_method':method})
def respond_exception(self, orig_message, exception):
if not shape.is_shaped(orig_message, CALL_PATTERN):
raise InvalidCallMessage(str(orig_message))
orig_message['address'].cast({'response':orig_message['call'],
'exception':exception})
def start(self, *args, **kw):
"""Override to be notified when the server starts.
"""
pass
def stop(self, *args, **kw):
"""Override to be notified when the server stops.
"""
pass
def main(self, *args, **kw):
"""Implement the actor main loop by waiting forever for messages.
Do not override.
"""
self.start(*args, **kw)
try:
while True:
pattern, message = self.receive(CALL_PATTERN)
method = getattr(self, message['method'], None)
if method is None:
self.respond_invalid_method(message, message['method'])
continue
try:
self.respond(message, method(message['message']))
except Exception:
formatted = exc.format_exc()
self.respond_exception(message, formatted)
finally:
self.stop(*args, **kw)
class Mesh(object):
"""A mesh of nodes.
    Nodes are registered using C{add} when they arrive in the mesh.
It is up to an external coordinator to detect when new nodes
arrive.
"""
def __init__(self):
self._nodes = {}
def add(self, node):
"""Add a reachable node to the mesh."""
self._nodes[node.id] = node
def remove(self, id):
"""Remove a node from the mesh."""
del self._nodes[id]
def _forward(self, address, fn, *args):
"""For internal use."""
node = self._nodes.get(address.node_id)
return getattr(node, fn)(*args)
def exit(self, from_addr, to_addr, message):
"""Send an exit signal from Actor C{form_addr}."""
print "exit", from_addr, "to", to_addr, "message", message
self._forward(to_addr, '_exit', to_addr, from_addr, message)
def cast(self, address, message):
"""Send a message to a node in the mesh designated by the given
address.
The message may be silently dropped if the remote node do not
exit or if the actor is dead.
"""
self._forward(address, '_cast', address, message)
def link(self, address, to_addr):
"""Link actor C{pid1} to actor with address C{pid2}.
"""
self._forward(address, '_link', address, to_addr)
def monitor(self, address, to_addr, ref):
"""Monitor C{address}."""
self._forward(address, '_monitor', address, to_addr, ref)
def demonitor(self, address, ref):
"""."""
self._forward(address, '_demonitor', address, ref)
class Node(object):
"""Representation of a node in a mesh of nodes."""
id = property(lambda self: self._id)
def __init__(self, mesh, id):
"""Create a new node."""
self._id = id
self._mesh = mesh
self.actors = weakref.WeakValueDictionary()
mesh.add(self)
self.registry = {}
def make_ref(self):
"""Return a new reference."""
return Ref(self._id, str(uuid.uuid4()))
def register(self, name, address):
"""Associates the name C{name} with the process C{address}."""
assert address.node_id == self.id
if address.actor_id not in self.actors:
raise DeadActor()
if name in self.registry:
raise Exception("Conflicting name")
actor = self.actors[address.actor_id]
self.registry[name] = actor
actor._link(lambda _: self.registry.pop(name))
def whereis(self, name):
"""Return address of registered name C{name} or C{None} if
there's no address with that name.
"""
if name in self.registry:
return self.registry[name].address
def wait(self, address, timeout=None):
"""Wait for actor designated by address to finish."""
assert address.node_id == self._id
if address.actor_id not in self.actors:
raise DeadActor()
return self.actors[address.actor_id]._get(timeout=timeout)
def spawn(self, spawnable, *args, **kw):
"""Start a new actor.
If spawnable is a subclass of Actor, instantiate it with no
arguments and call the Actor's "main" method with *args and
**kw.
If spawnable is a callable, call it inside a new Actor with
the first argument being the "receive" method to use to
retrieve messages out of the Actor's mailbox, followed by the
given *args and **kw.
Return the Address of the new Actor.
"""
if is_actor_type(spawnable):
spawnable = spawnable(node=self, mesh=self._mesh)
else:
spawnable = Actor(spawnable, node=self, mesh=self._mesh)
# Add the actor to the registry, and have it removed when the
# actor dies.
self.actors[spawnable.actor_id] = spawnable
# FIXME (jrydberg): We could pass this as to the ctor.
spawnable._args = (args, kw)
spawnable.start()
return spawnable.address
def spawn_link(self, spawnable, *args, **kw):
"""."""
address = self.spawn(spawnable, *args, **kw)
print "spawned", address
address.link()
return address
def send(self, address, message):
"""Send a message to an actor on this node or another one.
"""
self._mesh.cast(address, json.dumps(message, default=handle_custom))
def _cast(self, address, message):
"""For internal use.
Send a message to an actor on this node.
"""
_actor = self.actors.get(address.actor_id)
if _actor is None or _actor.dead:
# Silently drop the message.
return
_actor._cast(message)
def _exit(self, address, from_addr, message):
try:
_actor = self.actors[address.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._exit(from_addr, message)
def _link(self, from_addr, to_addr):
"""For internal use."""
try:
_actor = self.actors[from_addr.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._link(to_addr)
def _monitor(self, address, to_addr, ref):
try:
_actor = self.actors[address.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._monitor(to_addr, ref)
def _demonitor(self, address, ref):
try:
_actor = self.actors[address.actor_id]
except KeyError:
# FIXME: Send an exit message.
pass
else:
_actor._demonitor(address, ref)
| mit | 2,611,560,143,730,872,300 | 30.605735 | 78 | 0.592878 | false |
timothyjamesbecker/FusorSV | fusorsv/mantel_test.py | 1 | 7498 | # MantelTest v1.2.10
# http://jwcarr.github.io/MantelTest/
#
# Copyright (c) 2014-2016 Jon W. Carr
# Licensed under the terms of the MIT License
import numpy as np
from itertools import permutations
from scipy import spatial, stats
def test(X, Y, perms=10000, method='pearson', tail='two-tail'):
"""
Takes two distance matrices (either redundant matrices or condensed vectors)
and performs a Mantel test. The Mantel test is a significance test of the
correlation between two distance matrices.
Parameters
----------
X : array_like
First distance matrix (condensed or redundant).
Y : array_like
Second distance matrix (condensed or redundant), where the order of
elements corresponds to the order of elements in the first matrix.
perms : int, optional
The number of permutations to perform (default: 10000). A larger number
gives more reliable results but takes longer to run. If the actual number
of possible permutations is smaller, the program will enumerate all
permutations. Enumeration can be forced by setting this argument to 0.
method : str, optional
Type of correlation coefficient to use; either 'pearson' or 'spearman'
(default: 'pearson').
tail : str, optional
Which tail to test in the calculation of the empirical p-value; either
'upper', 'lower', or 'two-tail' (default: 'two-tail').
Returns
-------
r : float
Veridical correlation
p : float
Empirical p-value
z : float
Standard score (z-score)
"""
# Ensure that X and Y are formatted as Numpy arrays.
X, Y = np.asarray(X, dtype=float), np.asarray(Y, dtype=float)
# Check that X and Y are valid distance matrices.
if spatial.distance.is_valid_dm(X) == False and spatial.distance.is_valid_y(X) == False:
raise ValueError('X is not a valid condensed or redundant distance matrix')
if spatial.distance.is_valid_dm(Y) == False and spatial.distance.is_valid_y(Y) == False:
raise ValueError('Y is not a valid condensed or redundant distance matrix')
# If X or Y is a redundant distance matrix, reduce it to a condensed distance matrix.
if len(X.shape) == 2:
X = spatial.distance.squareform(X, force='tovector', checks=False)
if len(Y.shape) == 2:
Y = spatial.distance.squareform(Y, force='tovector', checks=False)
# Check for size equality.
if X.shape[0] != Y.shape[0]:
raise ValueError('X and Y are not of equal size')
# Check for minimum size.
if X.shape[0] < 3:
raise ValueError('X and Y should represent at least 3 objects')
# If Spearman correlation is requested, convert X and Y to ranks.
if method == 'spearman':
X, Y = stats.rankdata(X), stats.rankdata(Y)
# Check for valid method parameter.
elif method != 'pearson':
raise ValueError('The method should be set to "pearson" or "spearman"')
# Check for valid tail parameter.
if tail != 'upper' and tail != 'lower' and tail != 'two-tail':
raise ValueError('The tail should be set to "upper", "lower", or "two-tail"')
# Now we're ready to start the Mantel test using a number of optimizations:
#
# 1. We don't need to recalculate the pairwise distances between the objects
# on every permutation. They've already been calculated, so we can use a
# simple matrix shuffling technique to avoid recomputing them. This works
# like memoization.
#
# 2. Rather than compute correlation coefficients, we'll just compute the
# covariances. This works because the denominator in the equation for the
# correlation coefficient will yield the same result however the objects
# are permuted, making it redundant. Removing the denominator leaves us
# with the covariance.
#
# 3. Rather than permute the Y distances and derive the residuals to calculate
# the covariance with the X distances, we'll represent the Y residuals in
# the matrix and shuffle those directly.
#
# 4. If the number of possible permutations is less than the number of
# permutations that were requested, we'll run a deterministic test where
# we try all possible permutations rather than sample the permutation
# space. This gives a faster, deterministic result.
# Calculate the X and Y residuals, which will be used to compute the
# covariance under each permutation.
X_residuals, Y_residuals = X - X.mean(), Y - Y.mean()
# Expand the Y residuals to a redundant matrix.
Y_residuals_as_matrix = spatial.distance.squareform(Y_residuals, force='tomatrix', checks=False)
# Get the number of objects.
m = Y_residuals_as_matrix.shape[0]
# Calculate the number of possible matrix permutations.
n = np.math.factorial(m)
# Initialize an empty array to store temporary permutations of Y_residuals.
Y_residuals_permuted = np.zeros(Y_residuals.shape[0], dtype=float)
  # If the number of requested permutations is greater than or equal to the
  # number of possible permutations (m!) or the perms parameter is set to 0,
  # then run a deterministic Mantel test ...
if perms >= n or perms == 0:
# Initialize an empty array to store the covariances.
covariances = np.zeros(n, dtype=float)
# Enumerate all permutations of row/column orders and iterate over them.
for i, order in enumerate(permutations(range(m))):
# Take a permutation of the matrix.
Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]
# Condense the permuted version of the matrix. Rather than use
# distance.squareform(), we call directly into the C wrapper for speed.
spatial.distance._distance_wrap.to_vector_from_squareform_wrap(Y_residuals_as_matrix_permuted, Y_residuals_permuted)
# Compute and store the covariance.
covariances[i] = (X_residuals * Y_residuals_permuted).sum()
# ... otherwise run a stochastic Mantel test.
else:
# Initialize an empty array to store the covariances.
covariances = np.zeros(perms, dtype=float)
# Initialize an array to store the permutation order.
order = np.arange(m)
# Store the veridical covariance in 0th position...
covariances[0] = (X_residuals * Y_residuals).sum()
# ...and then run the random permutations.
for i in range(1, perms):
# Choose a random order in which to permute the rows and columns.
np.random.shuffle(order)
# Take a permutation of the matrix.
Y_residuals_as_matrix_permuted = Y_residuals_as_matrix[order, :][:, order]
# Condense the permuted version of the matrix. Rather than use
# distance.squareform(), we call directly into the C wrapper for speed.
spatial.distance._distance_wrap.to_vector_from_squareform_wrap(Y_residuals_as_matrix_permuted, Y_residuals_permuted)
# Compute and store the covariance.
covariances[i] = (X_residuals * Y_residuals_permuted).sum()
# Calculate the veridical correlation coefficient from the veridical covariance.
r = covariances[0] / np.sqrt((X_residuals ** 2).sum() * (Y_residuals ** 2).sum())
# Calculate the empirical p-value for the upper or lower tail.
if tail == 'upper':
p = (covariances >= covariances[0]).sum() / float(covariances.shape[0])
elif tail == 'lower':
p = (covariances <= covariances[0]).sum() / float(covariances.shape[0])
elif tail == 'two-tail':
p = (abs(covariances) >= abs(covariances[0])).sum() / float(covariances.shape[0])
# Calculate the standard score.
z = (covariances[0] - covariances.mean()) / covariances.std()
return r, p, z | gpl-3.0 | -2,579,909,192,261,438,000 | 40.661111 | 122 | 0.70112 | false |
SKIRT/PTS | magic/dist_ellipse.py | 1 | 2347 | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
# Import standard modules
import numpy as np
# -----------------------------------------------------------------
def distance_ellipse(shape, center, ratio, angle):
"""
:return:
"""
return dist_ellipse(shape, center.x, center.y, ratio, angle.to("deg").value)
# -----------------------------------------------------------------
def dist_ellipse(n, xc, yc, ratio, pa=0): # original implementation (like DIST_ELLIPSE IDL function)
"""
N = either a scalar specifying the size of the N x N square output
array, or a 2 element vector specifying the size of the
M x N rectangular output array.
XC,YC - Scalars giving the position of the ellipse center. This does
not necessarily have to be within the image
RATIO - Scalar giving the ratio of the major to minor axis. This
should be greater than 1 for position angle to have its
standard meaning.
OPTIONAL INPUTS:
POS_ANG - Position angle of the major axis in degrees, measured counter-clockwise
from the Y axis. For an image in standard orientation
(North up, East left) this is the astronomical position angle.
Default is 0 degrees.
OUTPUT:
         IM - REAL*4 elliptical mask array, of size M x N. The value of each
pixel is equal to the semi-major axis of the ellipse of center
XC,YC, axial ratio RATIO, and position angle POS_ANG, which
passes through the pixel.
"""
ang = np.radians(pa + 90.)
cosang = np.cos(ang)
sinang = np.sin(ang)
nx = n[1]
ny = n[0]
x = np.arange(-xc,nx-xc)
y = np.arange(-yc,ny-yc)
im = np.empty(n)
xcosang = x*cosang
xsinang = x*sinang
for i in range(0, ny):
xtemp = xcosang + y[i]*sinang
ytemp = -xsinang + y[i]*cosang
im[i,:] = np.sqrt((xtemp*ratio)**2 + ytemp**2)
return im
# -----------------------------------------------------------------
| agpl-3.0 | -1,271,247,990,375,277,300 | 34.545455 | 100 | 0.511083 | false |
Xeralux/tensorflow | tensorflow/python/data/ops/iterator_ops.py | 1 | 22481 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for Iterators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import warnings
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util.tf_export import tf_export
# NOTE(mrry): It is legitimate to call `Iterator.get_next()` multiple
# times, e.g. when you are distributing different elements to multiple
# devices in a single step. However, a common pitfall arises when
# users call `Iterator.get_next()` in each iteration of their training
# loop. `Iterator.get_next()` adds ops to the graph, and executing
# each op allocates resources (including threads); as a consequence,
# invoking it in every iteration of a training loop causes slowdown
# and eventual resource exhaustion. To guard against this outcome, we
# log a warning when the number of uses crosses a threshold of suspicion.
GET_NEXT_CALL_WARNING_THRESHOLD = 32
GET_NEXT_CALL_WARNING_MESSAGE = (
"An unusually high number of `Iterator.get_next()` calls was detected. "
"This often indicates that `Iterator.get_next()` is being called inside "
"a training loop, which will cause gradual slowdown and eventual resource "
"exhaustion. If this is the case, restructure your code to call "
"`next_element = iterator.get_next()` once outside the loop, and use "
"`next_element` as the input to some computation that is invoked inside "
"the loop.")
@tf_export("data.Iterator")
class Iterator(object):
"""Represents the state of iterating through a `Dataset`."""
def __init__(self, iterator_resource, initializer, output_types,
output_shapes, output_classes):
"""Creates a new iterator from the given iterator resource.
Note: Most users will not call this initializer directly, and will
instead use `Dataset.make_initializable_iterator()` or
`Dataset.make_one_shot_iterator()`.
Args:
iterator_resource: A `tf.resource` scalar `tf.Tensor` representing the
iterator.
initializer: A `tf.Operation` that should be run to initialize this
iterator.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset.
      output_classes: A nested structure of Python `type` objects
        corresponding to each component of an element of this iterator.
"""
self._iterator_resource = iterator_resource
self._initializer = initializer
self._output_classes = output_classes
self._output_types = output_types
self._output_shapes = output_shapes
self._string_handle = gen_dataset_ops.iterator_to_string_handle(
self._iterator_resource)
self._get_next_call_count = 0
@staticmethod
def from_structure(output_types,
output_shapes=None,
shared_name=None,
output_classes=None):
"""Creates a new, uninitialized `Iterator` with the given structure.
This iterator-constructing method can be used to create an iterator that
is reusable with many different datasets.
The returned iterator is not bound to a particular dataset, and it has
no `initializer`. To initialize the iterator, run the operation returned by
`Iterator.make_initializer(dataset)`.
The following is an example
```python
iterator = Iterator.from_structure(tf.int64, tf.TensorShape([]))
dataset_range = Dataset.range(10)
range_initializer = iterator.make_initializer(dataset_range)
dataset_evens = dataset_range.filter(lambda x: x % 2 == 0)
evens_initializer = iterator.make_initializer(dataset_evens)
# Define a model based on the iterator; in this example, the model_fn
# is expected to take scalar tf.int64 Tensors as input (see
# the definition of 'iterator' above).
prediction, loss = model_fn(iterator.get_next())
# Train for `num_epochs`, where for each epoch, we first iterate over
# dataset_range, and then iterate over dataset_evens.
for _ in range(num_epochs):
# Initialize the iterator to `dataset_range`
sess.run(range_initializer)
while True:
try:
pred, loss_val = sess.run([prediction, loss])
except tf.errors.OutOfRangeError:
break
# Initialize the iterator to `dataset_evens`
sess.run(evens_initializer)
while True:
try:
pred, loss_val = sess.run([prediction, loss])
except tf.errors.OutOfRangeError:
break
```
Args:
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
shared_name: (Optional.) If non-empty, this iterator will be shared under
the given name across multiple sessions that share the same devices
(e.g. when using a remote server).
output_classes: (Optional.) A nested structure of Python `type` objects
corresponding to each component of an element of this iterator. If
omitted, each component is assumed to be of type `tf.Tensor`.
Returns:
An `Iterator`.
Raises:
TypeError: If the structures of `output_shapes` and `output_types` are
not the same.
"""
output_types = nest.map_structure(dtypes.as_dtype, output_types)
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
if shared_name is None:
shared_name = ""
iterator_resource = gen_dataset_ops.iterator(
container="",
shared_name=shared_name,
output_types=nest.flatten(
sparse.as_dense_types(output_types, output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_shapes(output_shapes, output_classes)))
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@staticmethod
def from_string_handle(string_handle,
output_types,
output_shapes=None,
output_classes=None):
"""Creates a new, uninitialized `Iterator` based on the given handle.
This method allows you to define a "feedable" iterator where you can choose
between concrete iterators by feeding a value in a @{tf.Session.run} call.
    In that case, `string_handle` would be a @{tf.placeholder}, and you would feed
it with the value of @{tf.data.Iterator.string_handle} in each step.
For example, if you had two iterators that marked the current position in
a training dataset and a test dataset, you could choose which to use in
each step as follows:
```python
train_iterator = tf.data.Dataset(...).make_one_shot_iterator()
train_iterator_handle = sess.run(train_iterator.string_handle())
test_iterator = tf.data.Dataset(...).make_one_shot_iterator()
test_iterator_handle = sess.run(test_iterator.string_handle())
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
handle, train_iterator.output_types)
next_element = iterator.get_next()
loss = f(next_element)
train_loss = sess.run(loss, feed_dict={handle: train_iterator_handle})
test_loss = sess.run(loss, feed_dict={handle: test_iterator_handle})
```
Args:
string_handle: A scalar `tf.Tensor` of type `tf.string` that evaluates
to a handle produced by the `Iterator.string_handle()` method.
output_types: A nested structure of `tf.DType` objects corresponding to
each component of an element of this dataset.
output_shapes: (Optional.) A nested structure of `tf.TensorShape` objects
corresponding to each component of an element of this dataset. If
        omitted, each component will have an unconstrained shape.
output_classes: (Optional.) A nested structure of Python `type` objects
corresponding to each component of an element of this iterator. If
omitted, each component is assumed to be of type `tf.Tensor`.
Returns:
An `Iterator`.
"""
output_types = nest.map_structure(dtypes.as_dtype, output_types)
if output_shapes is None:
output_shapes = nest.map_structure(
lambda _: tensor_shape.TensorShape(None), output_types)
else:
output_shapes = nest.map_structure_up_to(
output_types, tensor_shape.as_shape, output_shapes)
if output_classes is None:
output_classes = nest.map_structure(lambda _: ops.Tensor, output_types)
nest.assert_same_structure(output_types, output_shapes)
string_handle = ops.convert_to_tensor(string_handle, dtype=dtypes.string)
iterator_resource = gen_dataset_ops.iterator_from_string_handle(
string_handle,
output_types=nest.flatten(
sparse.as_dense_types(output_types, output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_shapes(output_shapes, output_classes)))
return Iterator(iterator_resource, None, output_types, output_shapes,
output_classes)
@property
def initializer(self):
"""A `tf.Operation` that should be run to initialize this iterator.
Returns:
      A `tf.Operation` that should be run to initialize this iterator.
Raises:
ValueError: If this iterator initializes itself automatically.
"""
if self._initializer is not None:
return self._initializer
else:
# TODO(mrry): Consider whether one-shot iterators should have
# initializers that simply reset their state to the beginning.
raise ValueError("Iterator does not have an initializer.")
def make_initializer(self, dataset, name=None):
"""Returns a `tf.Operation` that initializes this iterator on `dataset`.
Args:
dataset: A `Dataset` with compatible structure to this iterator.
name: (Optional.) A name for the created operation.
Returns:
A `tf.Operation` that can be run to initialize this iterator on the given
`dataset`.
Raises:
TypeError: If `dataset` and this iterator do not have a compatible
element structure.
"""
with ops.name_scope(name, "make_initializer") as name:
nest.assert_same_structure(self._output_types, dataset.output_types)
nest.assert_same_structure(self._output_shapes, dataset.output_shapes)
for iterator_class, dataset_class in zip(
nest.flatten(self._output_classes),
nest.flatten(dataset.output_classes)):
if iterator_class is not dataset_class:
raise TypeError(
"Expected output classes %r but got dataset with output class %r."
% (self._output_classes, dataset.output_classes))
for iterator_dtype, dataset_dtype in zip(
nest.flatten(self._output_types), nest.flatten(dataset.output_types)):
if iterator_dtype != dataset_dtype:
raise TypeError(
"Expected output types %r but got dataset with output types %r." %
(self._output_types, dataset.output_types))
for iterator_shape, dataset_shape in zip(
nest.flatten(self._output_shapes), nest.flatten(
dataset.output_shapes)):
if not iterator_shape.is_compatible_with(dataset_shape):
raise TypeError("Expected output shapes compatible with %r but got "
"dataset with output shapes %r." %
(self._output_shapes, dataset.output_shapes))
with ops.colocate_with(self._iterator_resource):
return gen_dataset_ops.make_iterator(
dataset._as_variant_tensor(), self._iterator_resource, name=name) # pylint: disable=protected-access
def get_next(self, name=None):
"""Returns a nested structure of `tf.Tensor`s representing the next element.
In graph mode, you should typically call this method *once* and use its
result as the input to another computation. A typical loop will then call
@{tf.Session.run} on the result of that computation. The loop will terminate
when the `Iterator.get_next()` operation raises
@{tf.errors.OutOfRangeError}. The following skeleton shows how to use
this method when building a training loop:
```python
dataset = ... # A `tf.data.Dataset` object.
iterator = dataset.make_initializable_iterator()
next_element = iterator.get_next()
# Build a TensorFlow graph that does something with each element.
loss = model_function(next_element)
optimizer = ... # A `tf.train.Optimizer` object.
train_op = optimizer.minimize(loss)
with tf.Session() as sess:
try:
while True:
sess.run(train_op)
except tf.errors.OutOfRangeError:
pass
```
NOTE: It is legitimate to call `Iterator.get_next()` multiple times, e.g.
when you are distributing different elements to multiple devices in a single
step. However, a common pitfall arises when users call `Iterator.get_next()`
in each iteration of their training loop. `Iterator.get_next()` adds ops to
the graph, and executing each op allocates resources (including threads); as
a consequence, invoking it in every iteration of a training loop causes
slowdown and eventual resource exhaustion. To guard against this outcome, we
log a warning when the number of uses crosses a fixed threshold of
suspiciousness.
Args:
name: (Optional.) A name for the created operation.
Returns:
A nested structure of `tf.Tensor` objects.
"""
self._get_next_call_count += 1
if self._get_next_call_count > GET_NEXT_CALL_WARNING_THRESHOLD:
warnings.warn(GET_NEXT_CALL_WARNING_MESSAGE)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types,
gen_dataset_ops.iterator_get_next(
self._iterator_resource,
output_types=nest.flatten(
sparse.as_dense_types(
self._output_types,
self._output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_shapes(
self._output_shapes,
self._output_classes)),
name=name)), self._output_types,
self._output_shapes, self._output_classes)
def string_handle(self, name=None):
"""Returns a string-valued `tf.Tensor` that represents this iterator.
Args:
name: (Optional.) A name for the created operation.
Returns:
A scalar `tf.Tensor` of type `tf.string`.
"""
if name is None:
return self._string_handle
else:
return gen_dataset_ops.iterator_to_string_handle(
self._iterator_resource, name=name)
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._output_classes
@property
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return self._output_shapes
@property
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return self._output_types
_uid_counter = 0
_uid_lock = threading.Lock()
def _generate_shared_name(prefix):
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "{}{}".format(prefix, uid)
class EagerIterator(object):
"""An iterator producing tf.Tensor objects from a tf.data.Dataset."""
def __init__(self, dataset):
"""Creates a new iterator over the given dataset.
For example:
```python
dataset = tf.data.Dataset.range(4)
for x in Iterator(dataset):
print(x)
```
Tensors produced will be placed on the device on which this iterator object
was created.
Args:
dataset: A `tf.data.Dataset` object.
Raises:
RuntimeError: When invoked without eager execution enabled.
"""
if not context.executing_eagerly():
raise RuntimeError(
"{} objects can only be used when eager execution is enabled, use "
"tf.data.Dataset.make_initializable_iterator or "
"tf.data.Dataset.make_one_shot_iterator for graph construction".
format(type(self)))
with ops.device("/device:CPU:0"):
ds_variant = dataset._as_variant_tensor() # pylint: disable=protected-access
self._output_classes = dataset.output_classes
self._output_types = dataset.output_types
self._output_shapes = dataset.output_shapes
self._flat_output_types = nest.flatten(
sparse.as_dense_types(self._output_types, self._output_classes))
self._flat_output_shapes = nest.flatten(
sparse.as_dense_shapes(self._output_shapes, self._output_classes))
self._resource = gen_dataset_ops.iterator(
shared_name="",
container=_generate_shared_name("eageriterator"),
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
gen_dataset_ops.make_iterator(ds_variant, self._resource)
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._resource, handle_device="/device:CPU:0")
self._device = context.context().device_name
def __iter__(self):
return self
def __next__(self): # For Python 3 compatibility
return self.next()
def _next_internal(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
with ops.device(self._device):
# TODO(ashankar): Consider removing this ops.device() contextmanager
# and instead mimic ops placement in graphs: Operations on resource
# handles execute on the same device as where the resource is placed.
# NOTE(mrry): Here we use the "_sync" variant of `iterator_get_next`
# because in eager mode this code will run synchronously on the calling
# thread. Therefore we do not need to make a defensive context switch
# to a background thread, and can achieve a small constant performance
# boost by invoking the iterator synchronously.
ret = gen_dataset_ops.iterator_get_next_sync(
self._resource,
output_types=self._flat_output_types,
output_shapes=self._flat_output_shapes)
return sparse.deserialize_sparse_tensors(
nest.pack_sequence_as(self._output_types, ret), self._output_types,
self._output_shapes, self._output_classes)
def next(self):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
"""
try:
return self._next_internal()
except errors.OutOfRangeError:
raise StopIteration
@property
def output_classes(self):
"""Returns the class of each component of an element of this iterator.
The expected values are `tf.Tensor` and `tf.SparseTensor`.
Returns:
A nested structure of Python `type` objects corresponding to each
component of an element of this dataset.
"""
return self._output_classes
@property
def output_shapes(self):
"""Returns the shape of each component of an element of this iterator.
Returns:
A nested structure of `tf.TensorShape` objects corresponding to each
component of an element of this dataset.
"""
return self._output_shapes
@property
def output_types(self):
"""Returns the type of each component of an element of this iterator.
Returns:
A nested structure of `tf.DType` objects corresponding to each component
of an element of this dataset.
"""
return self._output_types
def get_next(self, name=None):
"""Returns a nested structure of `tf.Tensor`s containing the next element.
Args:
name: (Optional.) A name for the created operation. Currently unused.
Returns:
A nested structure of `tf.Tensor` objects.
Raises:
`tf.errors.OutOfRangeError`: If the end of the dataset has been reached.
"""
del name
return self._next_internal()
| apache-2.0 | -7,858,735,161,948,046,000 | 39.001779 | 111 | 0.6711 | false |
theresaswayne/imagej-plugins | Demos and Tests/misc scripts/Crop_Confocal_Series_corrected.py | 1 | 1332 | # @OpService ops
# @Dataset data
# @UIService ui
# @OUTPUT ImgPlus c0
# @OUTPUT ImgPlus z12
# @OUTPUT ImgPlus c0z12
# @OUTPUT ImgPlus roiC0z12
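# (The '# @' lines above are SciJava script parameters: ImageJ injects the
# requested services and inputs, and displays each @OUTPUT automatically.)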
# To run this tutorial, run 'File->Open Samples->Confocal Series' and make sure
# that confocal-series.tif is the active image.
from net.imglib2.util import Intervals
from net.imagej.axis import Axes
# first take a look at the size and type of each dimension
for d in range(data.numDimensions()):
print "axis d: type: "+str(data.axis(d).type())+" length: "+str(data.dimension(d))
img = data.getImgPlus()
xLen = data.dimension(data.dimensionIndex(Axes.X))
yLen = data.dimension(data.dimensionIndex(Axes.Y))
zLen = data.dimension(data.dimensionIndex(Axes.Z))
cLen = data.dimension(data.dimensionIndex(Axes.CHANNEL))
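# Note: Intervals.createMinMax takes the minimum coordinate of every dimension
# first, then the maximum coordinates, in this image's dimension order
# (presumably X, Y, C, Z for the Confocal Series sample).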
# crop a channel
c0 = ops.run("transform.crop", img, Intervals.createMinMax(0, 0, 0, 0, xLen-1, yLen-1, 0, zLen-1))
c0.setName("c0")
# crop both channels at z=12
z12 = ops.run("transform.crop", img, Intervals.createMinMax(0, 0, 0, 12, xLen-1, yLen-1, cLen-1, 12))
z12.setName("z12")
# crop channel 0 at z=12
c0z12 = ops.run("transform.crop", img, Intervals.createMinMax(0, 0, 0, 12, xLen-1, yLen-1, 0, 12))
c0z12.setName("c0z12")
# crop an ROI at channel 0, z=12
roiC0z12 = ops.run("transform.crop", img, Intervals.createMinMax(150, 150, 0, 12, 200, 200, 0, 12))
roiC0z12.setName("roiC0z12")
| gpl-3.0 | -3,989,707,301,982,040,000 | 32.3 | 95 | 0.731231 | false |
severin-lemaignan/dialogs | src/dialogs/verbalization/verbalization_test.py | 1 | 179763 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Chouayakh Mahdi
08/07/2010
The package contains functions to perform tests.
It is mainly used for testing purposes.
Functions:
unit_tests : to perform unit tests
"""
import unittest
import logging
logger = logging.getLogger("dialogs")
from dialogs.dialog_core import Dialog
from dialogs.parsing.parser import Parser
from dialogs.sentence import *
from dialogs.sentence_types import *
from dialogs.verbalization import utterance_rebuilding
class TestVerbalization(unittest.TestCase):
"""
    Unit tests checking that utterances are correctly rebuilt (verbalised)
    from hand-built Sentence structures.
"""
def test_01(self):
logger.info('\n######################## test 1.1 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle is on the table. The bottle is blue. The bottle is Blue."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['Blue'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_02(self):
logger.info('\n######################## test 1.2 ##############################')
logger.info('#################################################################\n')
original_utterance = "Jido's blue bottle is on the table. I'll play a guitar, a piano and a violon."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [['blue', []]],
[NominalGroup([], ['Jido'], [], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_03(self):
logger.info('\n######################## test 1.3 ##############################')
logger.info('#################################################################\n')
original_utterance = "It's on the table. I give it to you. Give me the bottle. I don't give the bottle to you."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['it'], [], [], [])],
[IndirectComplement(['to'], [NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement(['to'], [NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_04(self):
logger.info('\n######################## test 1.4 ##############################')
logger.info('#################################################################\n')
original_utterance = "You aren't preparing the car and my father's moto at the same time. Is my brother's bottle in your right?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['prepare'], [], 'present progressive',
[NominalGroup(['the'], ['car'], [], [], []),
NominalGroup(['the'], ['moto'], [],
[NominalGroup(['my'], ['father'], [], [], [])], [])],
[IndirectComplement(['at'], [
NominalGroup(['the'], ['time'], [['same', []]], [], [])])],
[], [], VerbalGroup.negative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup(['the'], ['bottle'], [], [NominalGroup(['my'], ['brother'], [], [], [])],
[])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['your'], ['right'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_05(self):
logger.info('\n######################## test 1.5 ##############################')
logger.info('#################################################################\n')
original_utterance = "You shouldn't drive his poorest uncle's wife's big new car. Should I give you the bottle? Shall I go?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['should+drive'], [], 'present conditional',
[NominalGroup(['the'], ['car'], [['big', []], ['new', []]],
[NominalGroup(['the'], ['wife'], [],
[NominalGroup(['his'], ['uncle'],
[['poorest', []]], [], [])],
[])], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['should+give'], [], 'present conditional',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['shall+go'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_06(self):
logger.info('\n######################## test 1.6 ##############################')
logger.info('#################################################################\n')
original_utterance = "Isn't he doing his homework and his game now? Can't he take this bottle? Hello."
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['do'], [], 'present progressive',
[NominalGroup(['his'], ['homework'], [], [], []),
NominalGroup(['his'], ['game'], [], [], [])],
[],
[], ['now'], VerbalGroup.negative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['can+take'], [], 'present simple',
[NominalGroup(['this'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(START, '', [], [])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_07(self):
logger.info('\n######################## test 1.7 ##############################')
logger.info('#################################################################\n')
original_utterance = "Don't quickly give me the blue bottle. I want to play with my guitar. I'd like to go to the cinema."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [['blue', []]], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
['quickly'], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['want'], [VerbalGroup(['play'],
[], '',
[],
[IndirectComplement(['with'], [
NominalGroup(['my'], ['guitar'], [], [],
[])])],
[], [], VerbalGroup.affirmative, [])],
'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['like'], [VerbalGroup(['go'],
[], '',
[],
[IndirectComplement(['to'], [
NominalGroup(['the'], ['cinema'], [], [],
[])])],
[], [], VerbalGroup.affirmative, [])],
'present conditional',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_08(self):
logger.info('\n######################## test 1.8 ##############################')
logger.info('#################################################################\n')
original_utterance = "The man who talks, has a new car. I play the guitar that I bought yesterday."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['man'], [], [], [Sentence(RELATIVE, 'who',
[],
[VerbalGroup(['talk'], [],
'present simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])],
[VerbalGroup(['have'], [], 'present simple',
[NominalGroup(['a'], ['car'], [['new', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'present simple',
[NominalGroup(['the'], ['guitar'], [], [], [Sentence(RELATIVE, 'that',
[NominalGroup([],
['I'], [], [],
[])],
[VerbalGroup(['buy'],
[], 'past simple',
[],
[],
[], ['yesterday'],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_09(self):
logger.info('\n######################## test 1.9 ##############################')
logger.info('#################################################################\n')
original_utterance = "Don't quickly give me the bottle which is on the table, and the glass which I cleaned yesterday, at my left."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [Sentence(RELATIVE, 'which',
[],
[VerbalGroup(['be'],
[],
'present simple',
[],
[
IndirectComplement(
[
'on'],
[
NominalGroup(
[
'the'],
[
'table'],
[],
[],
[])])],
[], [],
VerbalGroup.affirmative,
[])])]),
NominalGroup(['the'], ['glass'], [], [], [Sentence(RELATIVE, 'which',
[NominalGroup([],
['I'], [], [],
[])],
[VerbalGroup(
['clean'], [],
'past simple',
[],
[],
[], ['yesterday'],
VerbalGroup.affirmative,
[])])])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])]),
IndirectComplement(['at'],
[NominalGroup(['my'], ['left'], [], [], [])])],
['quickly'], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_10(self):
logger.info('\n######################## test 1.10 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle that I bought from the store which is in the shopping center, is yours."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [Sentence(RELATIVE, 'that',
[NominalGroup([], ['I'], [], [],
[])],
[VerbalGroup(['buy'], [],
'past simple',
[],
[IndirectComplement(
['from'], [
NominalGroup(
['the'],
['store'],
[], [], [
Sentence(
RELATIVE,
'which',
[],
[
VerbalGroup(
[
'be'],
[],
'present simple',
[],
[
IndirectComplement(
[
'in'],
[
NominalGroup(
[
'the'],
[
'center'],
[
[
'shopping',
[]]],
[],
[])])],
[],
[],
VerbalGroup.affirmative,
[])])])])],
[], [],
VerbalGroup.affirmative,
[])])])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['yours'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_11(self):
logger.info('\n######################## test 1.11 ##############################')
logger.info('#################################################################\n')
original_utterance = "When won't the planning session take place? When must you take the bus?"
sentences = [Sentence(W_QUESTION, 'date',
[NominalGroup(['the'], ['session'], [['planning', []]], [], [])],
[VerbalGroup(['take+place'], [], 'future simple',
[],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(W_QUESTION, 'date',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['must+take'], [], 'present simple',
[NominalGroup(['the'], ['bus'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_12(self):
logger.info('\n######################## test 1.12 ##############################')
logger.info('#################################################################\n')
original_utterance = "Where is Broyen? Where are you going? Where must Jido and you be from?"
sentences = [Sentence(W_QUESTION, 'place',
[NominalGroup([], ['Broyen'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'place',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['go'], [], 'present progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'origin',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['must+be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_13(self):
logger.info('\n######################## test 1.13 ##############################')
logger.info('#################################################################\n')
original_utterance = "What time is the news on TV? What size do you wear? The code is written by me. Is Mahdi going to the Laas?"
sentences = [Sentence(W_QUESTION, 'time',
[NominalGroup(['the'], ['news'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'], [NominalGroup([], ['TV'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'size',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['wear'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['code'], [], [], [])],
[VerbalGroup(['write'], [], 'present passive',
[],
[IndirectComplement(['by'], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['Mahdi'], [], [], [])],
[VerbalGroup(['go'], [], 'present progressive',
[],
[IndirectComplement(['to'],
[NominalGroup(['the'], ['Laas'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_14(self):
logger.info('\n######################## test 1.14 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's the weather like in the winter here? What were you doing? What isn't Jido going to do tomorrow?"
sentences = [Sentence(W_QUESTION, 'description',
[NominalGroup(['the'], ['weather'], [], [], [])],
[VerbalGroup(['like'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['winter'], [], [], [])])],
[], ['here'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['do'], [], 'past progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['go'], [VerbalGroup(['do'],
[], '',
[],
[],
[], ['tomorrow'], VerbalGroup.affirmative, [])],
'present progressive',
[],
[],
[], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_15(self):
logger.info('\n######################## test 1.15 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's happening? What must happen in the company today? What didn't happen here? No, sorry."
sentences = [Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['happen'], [], 'present progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['must+happen'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['company'], [], [], [])])],
[], ['today'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['happen'], [], 'past simple',
[],
[],
[], ['here'], VerbalGroup.negative, [])]),
Sentence('disagree', '', [], [])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_16(self):
logger.info('\n######################## test 1.16 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's the biggest bottle's color on your left? What does your brother do for a living?"
sentences = [Sentence(W_QUESTION, 'thing',
[NominalGroup(['the'], ['color'], [],
[NominalGroup(['the'], ['bottle'], [['biggest', []]], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['your'], ['left'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'explication',
[NominalGroup(['your'], ['brother'], [], [], [])],
[VerbalGroup(['do'], [], 'present simple',
[],
[IndirectComplement(['for'],
[NominalGroup(['a'], [], [['living', []]], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_17(self):
logger.info('\n######################## test 1.17 ##############################')
logger.info('#################################################################\n')
original_utterance = "What kind of people don't read this magazine? What kind of music must he listen to everyday?"
sentences = [Sentence(W_QUESTION, 'classification+people',
[],
[VerbalGroup(['read'], [], 'present simple',
[NominalGroup(['this'], ['magazine'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(W_QUESTION, 'classification+music',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['must+listen+to'], [], 'present simple',
[],
[],
[], ['everyday'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_18(self):
logger.info('\n######################## test 1.18 ##############################')
logger.info('#################################################################\n')
original_utterance = "What kind of sport is your favorite? What's the problem with him? What's the matter with this person?"
sentences = [Sentence(W_QUESTION, 'classification+sport',
[NominalGroup(['your'], [], [['favorite', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup(['the'], ['problem'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['with'], [NominalGroup([], ['him'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'thing',
[NominalGroup(['the'], ['matter'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['this'], ['person'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_19(self):
logger.info('\n######################## test 1.19 ##############################')
logger.info('#################################################################\n')
original_utterance = "How old are you? How long is your uncle's store opened tonight? How long is your uncle's store open tonight?"
sentences = [Sentence(W_QUESTION, 'old',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'long',
[NominalGroup(['the'], ['store'], [], [NominalGroup(['your'], ['uncle'], [], [], [])],
[])],
[VerbalGroup(['open'], [], 'present passive',
[],
[],
[], ['tonight'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'long',
[NominalGroup(['the'], ['store'], [], [NominalGroup(['your'], ['uncle'], [], [], [])],
[])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['open', []]], [], [])],
[],
[], ['tonight'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_20(self):
logger.info('\n######################## test 1.20 ##############################')
logger.info('#################################################################\n')
original_utterance = "How far is it from the hotel to the restaurant? How soon can you be here? How often does Jido go skiing?"
sentences = [Sentence(W_QUESTION, 'far',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['from'],
[NominalGroup(['the'], ['hotel'], [], [], [])]),
IndirectComplement(['to'],
[NominalGroup(['the'], ['restaurant'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'soon',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['can+be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'often',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['go+skiing'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_21(self):
logger.info('\n######################## test 1.21 ##############################')
logger.info('#################################################################\n')
original_utterance = "How much water should they transport? How much guests weren't at the party? How much does the motocycle cost?"
sentences = [Sentence(W_QUESTION, 'quantity',
[NominalGroup([], ['they'], [], [], [])],
[VerbalGroup(['should+transport'], [], 'present conditional',
[NominalGroup(['a'], ['water'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'quantity',
[NominalGroup(['a'], ['guests'], [], [], [])],
[VerbalGroup(['be'], [], 'past simple',
[],
[IndirectComplement(['at'],
[NominalGroup(['the'], ['party'], [], [], [])])],
[], [], VerbalGroup.negative, [])]),
Sentence(W_QUESTION, 'quantity',
[NominalGroup(['the'], ['motocycle'], [], [], [])],
[VerbalGroup(['cost'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_22(self):
logger.info('\n######################## test 1.22 ##############################')
logger.info('#################################################################\n')
original_utterance = "How about going to the cinema? How haven't they gotten a loan for their business? OK."
sentences = [Sentence(W_QUESTION, 'invitation',
[],
[VerbalGroup(['go'], [], 'present progressive',
[],
[IndirectComplement(['to'],
[NominalGroup(['the'], ['cinema'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'manner',
[NominalGroup([], ['they'], [], [], [])],
[VerbalGroup(['get'], [], 'present perfect',
[NominalGroup(['a'], ['loan'], [], [], [])],
[IndirectComplement(['for'],
[NominalGroup(['their'], ['business'], [], [], [])])],
[], [], VerbalGroup.negative, [])]),
Sentence(AGREEMENT, '', [], [])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_23(self):
logger.info('\n######################## test 1.23 ##############################')
logger.info('#################################################################\n')
original_utterance = "What did you think of Steven Spilburg's new movie? How could I get to the restaurant from here?"
sentences = [Sentence(W_QUESTION, 'opinion',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['like'], [], 'past simple',
[NominalGroup(['the'], ['movie'], [['new', []]],
[NominalGroup([], ['Steven', 'Spilburg'], [], [], [])],
[])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'manner',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['could+get+to'], [], 'present conditional',
[NominalGroup(['the'], ['restaurant'], [], [], [])],
[IndirectComplement(['from'], [NominalGroup([], ['here'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_24(self):
logger.info('\n######################## test 1.24 ##############################')
logger.info('#################################################################\n')
original_utterance = "Why should she go to Toulouse? Who could you talk to on the phone? Whose blue bottle and red glass are these?"
sentences = [Sentence(W_QUESTION, 'reason',
[NominalGroup([], ['she'], [], [], [])],
[VerbalGroup(['should+go'], [], 'present conditional',
[],
[IndirectComplement(['to'],
[NominalGroup([], ['Toulouse'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'people',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['could+talk+to'], [], 'present conditional',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['phone'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'owner',
[NominalGroup([], ['bottle'], [['blue', []]], [], []),
NominalGroup([], ['glass'], [['red', []]], [], [])],
[VerbalGroup(['be'], [], '',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_25(self):
logger.info('\n######################## test 1.25 ##############################')
logger.info('#################################################################\n')
original_utterance = "What are you thinking about the idea that I present you? What color is the bottle which you bought?"
sentences = [Sentence(W_QUESTION, 'opinion',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['think+about'], [], 'present progressive',
[NominalGroup(['the'], ['idea'], [], [], [Sentence(RELATIVE, 'that',
[NominalGroup([],
['I'], [], [], [])],
[VerbalGroup(
['present'], [],
'present simple',
[],
[
IndirectComplement(
[], [
NominalGroup(
[],
[
'you'],
[],
[],
[])])],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'color',
[NominalGroup(['the'], ['bottle'], [], [], [Sentence(RELATIVE, 'which',
[NominalGroup([], ['you'], [], [],
[])],
[VerbalGroup(['buy'], [],
'past simple',
[],
[],
[], [],
VerbalGroup.affirmative,
[])])])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_26(self):
logger.info('\n######################## test 1.26 ##############################')
logger.info('#################################################################\n')
original_utterance = "Which salesperson's competition won the award which we won in the last years?"
sentences = [Sentence(W_QUESTION, 'choice',
[NominalGroup(['the'], ['competition'], [],
[NominalGroup(['the'], ['salesperson'], [], [], [])], [])],
[VerbalGroup(['win'], [], 'past simple',
[NominalGroup(['the'], ['award'], [], [], [Sentence(RELATIVE, 'which',
[NominalGroup([],
['we'], [], [],
[])],
[VerbalGroup(['win'],
[], 'past simple',
[],
[
IndirectComplement(
[
'in'],
[
NominalGroup(
[
'the'],
[
'year'],
[
[
'last',
[]]],
[],
[])])],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[0].relative[0].sv[0].i_cmpl[0].gn[0]._quantifier = "ALL"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_27(self):
logger.info('\n######################## test 1.27 ##############################')
logger.info('#################################################################\n')
original_utterance = "What will your house look like? What do you think of the latest novel which Jido wrote?"
sentences = [Sentence(W_QUESTION, 'description',
[NominalGroup(['your'], ['house'], [], [], [])],
[VerbalGroup(['look+like'], [], 'future simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'opinion',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['think+of'], [], 'present simple',
[NominalGroup(['the'], ['novel'], [['latest', []]], [],
[Sentence(RELATIVE, 'which',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['write'], [], 'past simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_28(self):
logger.info('\n######################## test 1.28 ##############################')
logger.info('#################################################################\n')
original_utterance = "Learn that I want you to give me the blue bottle. You'll be happy, if you do your job."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['learn'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'that',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['want'], [
VerbalGroup(['give'], [], '',
[NominalGroup(['the'], [
'bottle'], [['blue',
[]]],
[], [])],
[IndirectComplement([],
[NominalGroup([],
['me'], [], [],
[])])],
[], [], VerbalGroup.affirmative,
[])], 'present simple',
[NominalGroup([], ['you'],
[], [], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['be'], [], 'future simple',
[NominalGroup([], [], [['happy', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'if',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['do'], [],
'present simple',
[NominalGroup(['your'],
['job'], [],
[], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_29(self):
logger.info('\n######################## test 1.29 ##############################')
logger.info('#################################################################\n')
original_utterance = "You'll be happy, if you do your job. Do you want the blue or green bottle?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['be'], [], 'future simple',
[NominalGroup([], [], [['happy', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'if',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['do'], [],
'present simple',
[NominalGroup(['your'],
['job'], [],
[], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['want'], [],
'present simple',
[NominalGroup(['the'], [], [['blue', []]], [], []),
NominalGroup([], ['bottle'], [['green', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].d_obj[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_30(self):
logger.info('\n######################## test 1.30 ##############################')
logger.info('#################################################################\n')
original_utterance = "What's wrong with him? I'll play a guitar or a piano and a violon. I played a guitar a year ago."
sentences = [Sentence(W_QUESTION, 'thing',
[NominalGroup([], [], [['wrong', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['with'], [NominalGroup([], ['him'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'past simple',
[NominalGroup(['a'], ['guitar'], [], [], [])],
[IndirectComplement(['ago'],
[NominalGroup(['a'], ['year'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].d_obj[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_31(self):
logger.info('\n######################## test 1.31 ##############################')
logger.info('#################################################################\n')
original_utterance = "Who are you talking to? You should have the bottle. Would you've played a guitar? You'd have played a guitar."
sentences = [Sentence(W_QUESTION, 'people',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['talk+to'], [], 'present progressive',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['should+have'], [], 'present conditional',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['play'], [], 'past conditional',
[NominalGroup(['a'], ['guitar'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['play'], [], 'past conditional',
[NominalGroup(['a'], ['guitar'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_32(self):
logger.info('\n######################## test 1.32 ##############################')
logger.info('#################################################################\n')
original_utterance = "What do you do for a living in this building? What does your brother do for a living here?"
sentences = [Sentence(W_QUESTION, 'explication',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['do'], [], 'present simple',
[],
[IndirectComplement(['for'],
[NominalGroup(['a'], [], [['living', []]], [], [])]),
IndirectComplement(['in'],
[NominalGroup(['this'], ['building'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'explication',
[NominalGroup(['your'], ['brother'], [], [], [])],
[VerbalGroup(['do'], [], 'present simple',
[],
[IndirectComplement(['for'],
[NominalGroup(['a'], [], [['living', []]], [], [])])],
[], ['here'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_33(self):
logger.info('\n######################## test 1.33 ##############################')
logger.info('#################################################################\n')
original_utterance = "This is a bottle. There is a bottle on the table."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['this'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['a'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['there'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['a'], ['bottle'], [], [], [])],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_34(self):
logger.info('\n######################## test 1.34 ##############################')
logger.info('#################################################################\n')
original_utterance = "Is it on the table or the shelf?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])]),
IndirectComplement([], [NominalGroup(['the'], ['shelf'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_35(self):
logger.info('\n######################## test 1.35 ##############################')
logger.info('#################################################################\n')
original_utterance = "Where is it? On the table or on the shelf?"
sentences = [Sentence(W_QUESTION, 'place',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[],
[VerbalGroup([], [], '',
[],
[IndirectComplement(['on'], [
NominalGroup(['the'], ['table'], [], [], [])]),
IndirectComplement(['on'], [
NominalGroup(['the'], ['shelf'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_36(self):
logger.info('\n######################## test 1.36 ##############################')
logger.info('#################################################################\n')
original_utterance = "Is it on your left or in front of you?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['your'], ['left'], [], [], [])]),
IndirectComplement(['in+front+of'],
[NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_37(self):
logger.info('\n######################## test 1.37 ##############################')
logger.info('#################################################################\n')
original_utterance = "Where is it? On your left or in front of you?"
sentences = [Sentence(W_QUESTION, 'place',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], [], [], [], [])],
[VerbalGroup([], [], '',
[],
[IndirectComplement(['on'], [
NominalGroup(['your'], ['left'], [], [], [])]),
IndirectComplement(['in+front+of'],
[NominalGroup([], ['you'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].i_cmpl[1].gn[0]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_38(self):
logger.info('\n######################## test 1.38 ##############################')
logger.info('#################################################################\n')
original_utterance = "The blue bottle? What do you mean?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup(['the'], ['bottle'], [['blue', []]], [], [])],
[]),
Sentence(W_QUESTION, 'thing',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['mean'], [], 'present simple', [], [], [], [], VerbalGroup.affirmative,
[])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_39(self):
logger.info('\n######################## test 1.39 ##############################')
logger.info('#################################################################\n')
original_utterance = "Would you like the blue bottle or the glass? The green or blue bottle is on the table. Is the green or blue glass mine?"
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['like'], [], 'present conditional',
[NominalGroup(['the'], ['bottle'], [['blue', []]], [], []),
NominalGroup(['the'], ['glass'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], [], [['green', []]], [], []),
NominalGroup([], ['bottle'], [['blue', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup(['the'], [], [['green', []]], [], []),
NominalGroup([], ['glass'], [['blue', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['mine'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[1]._conjunction = "OR"
sentences[1].sn[1]._conjunction = "OR"
sentences[2].sn[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_40(self):
logger.info('\n######################## test 1.40 ##############################')
logger.info('#################################################################\n')
original_utterance = "Learn that I want you to give me the blue bottle that's blue."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['learn'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'that',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['want'], [
VerbalGroup(['give'], [], '',
[NominalGroup(['the'], [
'bottle'], [['blue',
[]]],
[], [Sentence(
RELATIVE, 'that',
[],
[VerbalGroup(
['be'], [],
'present simple',
[
NominalGroup(
[],
[], [
[
'blue',
[]]],
[],
[])],
[],
[], [],
VerbalGroup.affirmative,
[])])])],
[IndirectComplement([],
[NominalGroup([],
['me'], [], [],
[])])],
[], [], VerbalGroup.affirmative,
[])], 'present simple',
[NominalGroup([], ['you'],
[], [], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_41(self):
logger.info('\n######################## test 1.41 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle is behind to me. The bottle is next to the table in front of the kitchen."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['behind+to'],
[NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['next+to'],
[NominalGroup(['the'], ['table'], [], [], [])]),
IndirectComplement(['in+front+of'],
[NominalGroup(['the'], ['kitchen'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_42(self):
logger.info('\n######################## test 1.42 ##############################')
logger.info('#################################################################\n')
original_utterance = "Carefully take the bottle. I take that bottle that I drink in. I take 22 bottles."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['take'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[],
['carefully'], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['take'], [], 'present simple',
[NominalGroup(['that'], ['bottle'], [], [], [Sentence(RELATIVE, 'that',
[NominalGroup([],
['I'], [], [],
[])],
[VerbalGroup(
['drink'], [],
'present simple',
[],
[
IndirectComplement(
['in'],
[])],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['take'], [], 'present simple',
[NominalGroup(['22'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
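        # The DIGIT quantifier makes the verbalizer render the determiner as
        # a numeral ("22 bottles").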
sentences[2].sv[0].d_obj[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_43(self):
logger.info('\n######################## test 1.43 ##############################')
logger.info('#################################################################\n')
original_utterance = "I'll play Jido's guitar, a saxophone, my oncle's wife's piano and Patrick's violon."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['the'], ['guitar'], [],
[NominalGroup([], ['Jido'], [], [], [])], []),
NominalGroup(['a'], ['saxophone'], [], [], []),
NominalGroup(['a'], ['piano'], [], [NominalGroup(['the'], ['wife'], [], [
NominalGroup(['my'], ['oncle'], [], [], [])], [])], []),
NominalGroup(['the'], ['violon'], [],
[NominalGroup([], ['Patrick'], [], [], [])], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_44(self):
logger.info('\n######################## test 1.44 ##############################')
logger.info('#################################################################\n')
original_utterance = "Give me 2 or 3 bottles. The bottle is blue big funny. Give me the bottle which is on the table."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['2'], [], [], [], []),
NominalGroup(['3'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []], ['big', []], ['funny', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [Sentence(RELATIVE, 'which',
[],
[VerbalGroup(['be'],
[],
'present simple',
[],
[
IndirectComplement(
[
'on'],
[
NominalGroup(
[
'the'],
[
'table'],
[],
[],
[])])],
[], [],
VerbalGroup.affirmative,
[])])])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[1]._conjunction = "OR"
sentences[0].sv[0].d_obj[0]._quantifier = "DIGIT"
sentences[0].sv[0].d_obj[1]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_45(self):
logger.info('\n######################## test 1.45 ##############################')
logger.info('#################################################################\n')
original_utterance = "The boys' ball is blue. He asks me to do something. Is any person courageous on the laboratory?"
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['ball'], [], [NominalGroup(['the'], ['boy'], [], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['ask'], [VerbalGroup(['do'], [], '',
[NominalGroup([], ['something'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])], 'present simple',
[NominalGroup([], ['me'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup(['any'], ['person'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['courageous', []]], [], [])],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['laboratory'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
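        # ALL on the noun complement yields the plural possessive "the boys'"
        # rather than "the boy's".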
sentences[0].sn[0].noun_cmpl[0]._quantifier = "ALL"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_46(self):
logger.info('\n######################## test 1.46 ##############################')
logger.info('#################################################################\n')
original_utterance = "What must be happened in the company today? The building shouldn't fastly be built. You can be here."
sentences = [Sentence(W_QUESTION, 'situation',
[],
[VerbalGroup(['must+happen'], [], 'present passive',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['company'], [], [], [])])],
[], ['today'], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['building'], [], [], [])],
[VerbalGroup(['should+build'], [], 'passive conditional',
[],
[],
['fastly'], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['can+be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_47(self):
logger.info('\n######################## test 1.47 ##############################')
logger.info('#################################################################\n')
original_utterance = "What size is the best one? What object is blue? How good is this?"
sentences = [Sentence(W_QUESTION, 'size',
[NominalGroup(['the'], ['one'], [['best', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'object',
[],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'good',
[NominalGroup(['this'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_48(self):
logger.info('\n######################## test 1.48 ##############################')
logger.info('#################################################################\n')
original_utterance = "Patrick, the bottle is on the table. Give it to me."
sentences = [Sentence('interjection', '',
[NominalGroup([], ['Patrick'], [], [], [])],
[]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['table'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Patrick'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['it'], [], [], [])],
[IndirectComplement(['to'], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_49(self):
logger.info('\n######################## test 1.49 ##############################')
logger.info('#################################################################\n')
original_utterance = "Jido, give me the bottle. Jido, Patrick and you will go to the cinema. Jido, Patrick and you, give me the bottle."
sentences = [Sentence('interjection', '',
[NominalGroup([], ['Jido'], [], [], [])],
[]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['Patrick'], [], [], []),
NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['go'], [], 'future simple',
[],
[IndirectComplement(['to'],
[NominalGroup(['the'], ['cinema'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence('interjection', '',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['Patrick'], [], [], []),
NominalGroup([], ['you'], [], [], [])],
[]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Jido'], [], [], []), NominalGroup([], ['Patrick'], [], [], []),
NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_50(self):
logger.info('\n######################## test 1.50 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle isn't blue but it's red. It isn't the glass but the bottle. It's blue or red."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.negative, [Sentence('subsentence', 'but',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [],
[['red', []]], [], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'], ['glass'], [], [], []),
NominalGroup(['the'], ['bottle'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], []),
NominalGroup([], [], [['red', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[1].sv[0].d_obj[1]._conjunction = "BUT"
sentences[2].sv[0].d_obj[1]._conjunction = "OR"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_51(self):
logger.info('\n######################## test 1.51 ##############################')
logger.info('#################################################################\n')
original_utterance = "It isn't red but blue. This is my banana. Bananas are fruits."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], [], [['red', []]], [], []),
NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.negative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['this'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['my'], ['banana'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['banana'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['fruit'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[1]._conjunction = "BUT"
sentences[2].sn[0]._quantifier = "ALL"
sentences[2].sv[0].d_obj[0]._quantifier = "ALL"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_52(self):
logger.info('\n######################## test 1.52 ##############################')
logger.info('#################################################################\n')
original_utterance = "There are no bananas. All bananas are here. Give me more information which are about the bottle."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['there'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['no'], ['banana'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['all'], ['banana'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['more'], ['information'], [], [],
[Sentence(RELATIVE, 'which',
[],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['about'], [
NominalGroup(['the'],
['bottle'], [], [],
[])])],
[], [], VerbalGroup.affirmative, [])])])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
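        # The quantifiers drive the determiners here: ANY on the object gives
        # "no bananas" and ALL gives "all bananas".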
sentences[0].sn[0]._quantifier = "SOME"
sentences[0].sv[0].d_obj[0]._quantifier = "ANY"
sentences[1].sn[0]._quantifier = "ALL"
sentences[2].sv[0].d_obj[0]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_53(self):
logger.info('\n######################## test 1.53 ##############################')
logger.info('#################################################################\n')
original_utterance = "Jido, tell me where you go. Goodbye. There is nothing. It's another one."
sentences = [Sentence('interjection', '',
[NominalGroup([], ['Jido'], [], [], [])],
[]),
Sentence(IMPERATIVE, '',
[NominalGroup([], ['Jido'], [], [], [])],
[VerbalGroup(['tell'], [], 'present simple',
[],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'where',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['go'], [],
'present simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
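                     # The bare END sentence verbalizes as "Goodbye." in the
                     # expected utterance.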
Sentence(END, '', [], []),
Sentence(STATEMENT, '',
[NominalGroup(['there'], [], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['nothing'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['another'], ['one'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_54(self):
logger.info('\n######################## test 1.54 ##############################')
logger.info('#################################################################\n')
original_utterance = "The bottle becomes blue. 1 piece could become 2, if you smoldered it."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['bottle'], [], [], [])],
[VerbalGroup(['become'], [], 'present simple',
[NominalGroup([], [], [['blue', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['1'], ['piece'], [], [], [])],
[VerbalGroup(['could+become'], [], 'present conditional',
[NominalGroup(['2'], [], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'if',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['smolder'], [],
'past simple',
[NominalGroup([], ['it'],
[], [], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_55(self):
logger.info('\n######################## test 1.55 ##############################')
logger.info('#################################################################\n')
original_utterance = "This one isn't my uncle's bottle but it's my brother's bottle. It isn't on the table but on the shelf."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['this'], ['one'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'], ['bottle'], [],
[NominalGroup(['my'], ['uncle'], [], [], [])], [])],
[],
[], [], VerbalGroup.negative, [Sentence('subsentence', 'but',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'],
['bottle'], [],
[NominalGroup(
['my'],
['brother'],
[], [],
[])], [])],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'], [NominalGroup(['the'], ['table'], [], [], []),
NominalGroup(['the'], ['shelf'], [], [],
[])])],
[], [], VerbalGroup.negative, [])])]
sentences[1].sv[0].i_cmpl[0].gn[1]._conjunction = "BUT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_56(self):
logger.info('\n######################## test 1.56 ##############################')
logger.info('#################################################################\n')
original_utterance = "Give me the fourth and seventh bottle. Give me the one thousand ninth and the thirty thousand twenty eighth bottle."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], [], [['fourth', []]], [], []),
NominalGroup([], ['bottle'], [['seventh', []]], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['the'], [], [['one+thousand+ninth', []]], [], []),
NominalGroup(['the'], ['bottle'], [['thirty+thousand+twenty+eighth', []]],
[], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_57(self):
logger.info('\n######################## test 1.57 ##############################')
logger.info('#################################################################\n')
original_utterance = "The evil tyran is in the laboratory. I don't know what you're talking about."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['tyran'], [['evil', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['in'],
[NominalGroup(['the'], ['laboratory'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['know'], [], 'present simple',
[],
[],
[], [], VerbalGroup.negative, [Sentence('subsentence', 'what',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['talk'], [],
'present progressive',
[],
[IndirectComplement(['about'],
[])],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_58(self):
logger.info('\n######################## test 1.58 ##############################')
logger.info('#################################################################\n')
original_utterance = "I go to the place where I was born. I study where you studied. I study where you build your house where you put the bottle."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['go'], [], 'present simple',
[],
[IndirectComplement(['to'], [
NominalGroup(['the'], ['place'], [], [], [Sentence(RELATIVE, 'where',
[NominalGroup([],
['I'], [], [],
[])],
[VerbalGroup(
['be'], [],
'past simple',
[NominalGroup(
[], [], [[
'born',
[]]],
[], [])],
[],
[], [],
VerbalGroup.affirmative,
[])])])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['study'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'where',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['study'], [],
'past simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['study'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'where',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['build'], [],
'present simple',
[NominalGroup(['your'],
['house'],
[], [], [Sentence(
RELATIVE, 'where',
[NominalGroup([],
['you'], [], [],
[])],
[VerbalGroup(
['put'], [],
'present simple',
[NominalGroup(
['the'],
['bottle'],
[], [],
[])],
[],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_59(self):
logger.info('\n######################## test 1.59 ##############################')
logger.info('#################################################################\n')
original_utterance = "Apples grow on trees and plants. Give me 3 apples."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['apple'], [], [], [])],
[VerbalGroup(['grow'], [], 'present simple',
[],
[IndirectComplement(['on'], [NominalGroup([], ['tree'], [], [], []),
NominalGroup([], ['plant'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup(['3'], ['apple'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[1]._quantifier = "ALL"
sentences[1].sv[0].d_obj[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_60(self):
        logger.info('\n######################## test 1.60 ##############################')
logger.info('#################################################################\n')
original_utterance = "We were preparing the dinner when your father came. He made a sandwich which is with bacon, while I phoned."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['we'], [], [], [])],
[VerbalGroup(['prepare'], [], 'past progressive',
[NominalGroup(['the'], ['dinner'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'when',
[NominalGroup(['your'], ['father'], [],
[], [])],
[VerbalGroup(['come'], [], 'past simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['make'], [], 'past simple',
[NominalGroup(['a'], ['sandwich'], [], [], [Sentence(RELATIVE, 'which',
[],
[VerbalGroup(['be'],
[],
'present simple',
[],
[
IndirectComplement(
[
'with'],
[
NominalGroup(
[],
[
'bacon'],
[],
[],
[])])],
[], [],
VerbalGroup.affirmative,
[])])])],
[],
[], [], VerbalGroup.affirmative, [Sentence('subsentence', 'while',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['phone'], [],
'past simple',
[],
[],
[], [], VerbalGroup.affirmative,
[])])])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_61(self):
        logger.info('\n######################## test 1.61 ##############################')
logger.info('#################################################################\n')
original_utterance = "The big very strong man is on the corner. The too big very strong man is on the corner."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['man'], [['big', []], ['strong', ['very']]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['corner'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['man'], [['big', ['too']], ['strong', ['very']]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['the'], ['corner'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_62(self):
logger.info('\n######################## test 1.55 ##############################')
logger.info('#################################################################\n')
original_utterance = "Red apples grow on green trees and plants. A kind of thing. It can be played by 30028 players."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['apple'], [['red', []]], [], [])],
[VerbalGroup(['grow'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup([], ['tree'], [['green', []]], [], []),
NominalGroup([], ['plant'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['a'], ['kind'], [], [NominalGroup(['a'], ['thing'], [], [], [])], [])],
[]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['can+play'], [], 'present passive',
[],
[IndirectComplement(['by'],
[NominalGroup(['30028'], ['player'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[0]._quantifier = "ALL"
sentences[0].sv[0].i_cmpl[0].gn[1]._quantifier = "ALL"
sentences[1].sn[0]._quantifier = "SOME"
sentences[1].sn[0].noun_cmpl[0]._quantifier = "SOME"
sentences[2].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_63(self):
        logger.info('\n######################## test 1.63 ##############################')
logger.info('#################################################################\n')
original_utterance = "Let the man go to the cinema. Is it the time to let you go? Where is the other tape?"
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['let'], [VerbalGroup(['go'],
[], '',
[],
[IndirectComplement(['to'], [
NominalGroup(['the'], ['cinema'], [], [],
[])])],
[], [], VerbalGroup.affirmative, [])], 'present simple',
[NominalGroup(['the'], ['man'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [VerbalGroup(['let'],
[VerbalGroup(['go'],
[], '',
[],
[],
[], [], VerbalGroup.affirmative, [])], '',
[NominalGroup([], ['you'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])], 'present simple',
[NominalGroup(['the'], ['time'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(W_QUESTION, 'place',
[NominalGroup(['the'], ['tape'], [['other', []]], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
logger.info("The original utterance is : " + original_utterance)
logger.info("The result obtained is : " + utterance)
self.assertEqual(original_utterance, utterance)
def test_64(self):
        logger.info('\n######################## test 1.64 ##############################')
        logger.info('#################################################################\n')
original_utterance = "And now, can you reach the tape. it could have been them. It is just me at the door. A strong clause can stand on its own."
sentences = [Sentence(YES_NO_QUESTION, '',
[NominalGroup([], ['you'], [], [], [])],
[VerbalGroup(['can+reach'], [], 'present simple',
[NominalGroup(['the'], ['tape'], [], [], [])],
[],
[], ['now'], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['could+be'], [], 'passive conditional',
[NominalGroup([], ['them'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['it'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup([], ['me'], [], [], [])],
[IndirectComplement(['at'],
[NominalGroup(['the'], ['door'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['a'], ['clause'], [['strong', []]], [], [])],
[VerbalGroup(['can+stand'], [], 'present simple',
[],
[IndirectComplement(['on'],
[NominalGroup(['its'], ['own'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_65(self):
        logger.info('\n######################## test 1.65 ##############################')
        logger.info('#################################################################\n')
original_utterance = "Tell me what to do. No, I can not reach it."
sentences = [Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['tell'], [], 'present simple',
[],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])]),
IndirectComplement([],
[NominalGroup(['the'], ['thing'], [], [], [Sentence(RELATIVE, 'that',
[],
[VerbalGroup(
['be'], [
VerbalGroup(
[
'do'],
[],
'',
[],
[],
[],
[],
VerbalGroup.affirmative,
[])],
'present simple',
[],
[],
[], [],
VerbalGroup.affirmative,
[])])])])],
[], [], VerbalGroup.affirmative, [])]),
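                     # DISAGREEMENT contributes the leading "No" of
                     # "No, I can not reach it.".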
Sentence(DISAGREEMENT, '', [], []),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['can+reach'], [], 'present simple',
[NominalGroup([], ['it'], [], [], [])],
[],
[], [], VerbalGroup.negative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_66(self):
        logger.info('\n######################## test 1.66 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll come back on Monday. I'll play with a guitar. I'll play football."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come+back'], [], 'future simple',
[],
[IndirectComplement(['on'], [NominalGroup([], ['Monday'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['a'], ['guitar'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup([], ['football'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])])]
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_67(self):
        logger.info('\n######################## test 1.67 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll play a guitar, a piano and a violon. I'll play with a guitar, a piano and a violon. Give me everything."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(IMPERATIVE, '',
[],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['everything'], [], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[0]._quantifier = "SOME"
sentences[0].sv[0].d_obj[1]._quantifier = "SOME"
sentences[0].sv[0].d_obj[2]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[1]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[2]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_68(self):
        logger.info('\n######################## test 1.68 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll come back at 7 o'clock tomorrow. He finishes the project 10 minutes before."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come+back'], [], 'future simple',
[],
[IndirectComplement(['at'],
[NominalGroup(['7'], ["o'clock"], [], [], [])])],
[], ['tomorrow'], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['he'], [], [], [])],
[VerbalGroup(['finish'], [], 'present simple',
[NominalGroup(['the'], ['project'], [], [], [])],
[IndirectComplement(['before'],
[NominalGroup(['10'], ['minute'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_69(self):
        logger.info('\n######################## test 1.69 ##############################')
        logger.info('#################################################################\n')
original_utterance = "I'll play a guitar, a piano and a violon. I'll play with a guitar, a piano and a violon. The boss, you and me are here."
sentences = [Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['play'], [], 'future simple',
[],
[IndirectComplement(['with'],
[NominalGroup(['a'], ['guitar'], [], [], []),
NominalGroup(['a'], ['piano'], [], [], []),
NominalGroup(['a'], ['violon'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup(['the'], ['boss'], [], [], []), NominalGroup([], ['you'], [], [], []),
NominalGroup([], ['me'], [], [], [])],
[VerbalGroup(['be'], [], 'present simple',
[],
[],
[], ['here'], VerbalGroup.affirmative, [])])]
sentences[0].sv[0].d_obj[0]._quantifier = "SOME"
sentences[0].sv[0].d_obj[1]._quantifier = "SOME"
sentences[0].sv[0].d_obj[2]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[1]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[2]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
def test_70(self):
        logger.info('\n######################## test 1.70 ##############################')
        logger.info('#################################################################\n')
original_utterance = "A speaking sentence's time is the best. I come at 10 pm. I'll come an evening tomorrow."
sentences = [Sentence(STATEMENT, '',
[NominalGroup(['the'], ['time'], [],
[NominalGroup(['a'], ['sentence'], [['speaking', []]], [], [])], [])],
[VerbalGroup(['be'], [], 'present simple',
[NominalGroup(['the'], [], [['best', []]], [], [])],
[],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come'], [], 'present simple',
[],
[IndirectComplement(['at'], [NominalGroup(['10'], ['pm'], [], [], [])])],
[], [], VerbalGroup.affirmative, [])]),
Sentence(STATEMENT, '',
[NominalGroup([], ['I'], [], [], [])],
[VerbalGroup(['come'], [], 'future simple',
[],
[IndirectComplement([], [NominalGroup(['an'], ['evening'], [], [], [])])],
[], ['tomorrow'], VerbalGroup.affirmative, [])])]
        sentences[0].sn[0].noun_cmpl[0]._quantifier = "SOME"
sentences[1].sv[0].i_cmpl[0].gn[0]._quantifier = "DIGIT"
sentences[2].sv[0].i_cmpl[0].gn[0]._quantifier = "SOME"
utterance = utterance_rebuilding.verbalising(sentences)
print(("The original utterance is : ", original_utterance))
print(("The result obtained is : ", utterance))
self.assertEqual(original_utterance, utterance)
class TestVerbalizationCompleteLoop(unittest.TestCase):
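    """Round-trip checks: parse an utterance with the Parser, verbalize the
    resulting sentence tree, and expect the exact input string back."""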
def setUp(self):
self.dialog = Dialog()
self.dialog.start()
def tearDown(self):
self.dialog.stop()
self.dialog.join()
def test_verbalize1(self):
logger.info("\n##################### test_verbalize1: simple statements ########################\n")
myP = Parser()
stmt = "The cup is on the desk."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "The green bottle is next to Joe."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize2(self):
logger.info("\n##################### test_verbalize2: yes/no questions ########################\n")
myP = Parser()
stmt = "Are you a robot?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize3(self):
logger.info("\n##################### test_verbalize3: orders ########################\n")
myP = Parser()
stmt = "Put the yellow banana on the shelf."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Give me the green banana."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Give the green banana to me."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Get the box which is on the table."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "Get the box which is in the trashbin."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize4(self):
logger.info("\n##################### test_verbalize4: W questions ########################\n")
myP = Parser()
stmt = "Where is the box?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
logger.info("\n####\n")
stmt = "What are you doing now?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('input: ' + stmt)
logger.info('output:' + res)
self.assertEqual(stmt, res)
def test_verbalize5(self):
logger.info("\n##################### test_verbalize5 ########################\n")
myP = Parser()
stmt = "Jido, tell me where you go."
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_verbalize6(self):
logger.info("\n##################### test_verbalize 6 ########################\n")
myP = Parser()
stmt = "What blue object do you know?"
sentence = myP.parse(stmt)
res = self.dialog._verbalizer.verbalize(sentence)
logger.info('>> input: ' + stmt)
logger.info('<< output: ' + res)
self.assertEqual(stmt, res)
def test_suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestVerbalization)
suite.addTests(unittest.TestLoader().loadTestsFromTestCase(TestVerbalizationCompleteLoop))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(test_suite())
| bsd-3-clause | -5,733,380,990,628,415,000 | 62.141201 | 161 | 0.281649 | false |
ryanpdwyer/sigutils | sigutils/fdls.py | 1 | 1223 | """
Frequency Domain Least Squares
==============================
This algorithm tries to approximate an analytic transfer function :math:`H(s)`.
For background, see the FDLS reference cited in the notes below.
- Pick an analytic transfer function H(s)
- Select the numerator order N and denominator order D
- Define M separate input u_m cosine sequences, each of length N + 1
- Compute M output y_m cosine sequences, each of length D
- X = ( y(-1)...y(-D) u(0)...u(-N) )
- Y = A_m cos(phi_m)
- Compute the pseudo-inverse
"""
import numpy as np
def butter_lp(f, f0):
return 1/(1+f*1j/f0)
# Let's approximate this with a filter having a first-order numerator and denominator
# def fdls(N, D, M):
# k = np.arange(-N, 0.5)
# np.arange()
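# Below is a minimal, self-contained sketch of the FDLS fit outlined in the
# module docstring. The function name and signature are assumptions for
# illustration, not an established API: amp/phase are the desired magnitude
# and phase response (e.g. np.abs(H), np.angle(H)) sampled at M normalized
# frequencies (cycles/sample); it returns numerator b (order N) and
# denominator a (order D) coefficients from a least-squares solve.
def fdls_sketch(freqs, amp, phase, N, D):
    M = len(freqs)
    X = np.zeros((M, D + N + 1))
    Y = amp * np.cos(phase)  # y_m(0), the sample each regression row predicts
    for i, f in enumerate(freqs):
        w = 2 * np.pi * f
        k_y = -np.arange(1, D + 1)  # output history at times -1 ... -D
        k_u = -np.arange(0, N + 1)  # input history at times 0 ... -N
        # Difference equation with a0 = 1:
        # y(0) = -a1*y(-1) - ... - aD*y(-D) + b0*u(0) + ... + bN*u(-N)
        X[i, :D] = -amp[i] * np.cos(w * k_y + phase[i])
        X[i, D:] = np.cos(w * k_u)
    theta = np.linalg.lstsq(X, Y, rcond=None)[0]
    a = np.concatenate(([1.0], theta[:D]))  # prepend a0 = 1 by convention
    b = theta[D:]
    return b, a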
# A few lines on the frequency domain least squares algorithm
# See http://dx.doi.org/10.1109/MSP.2007.273077
# import numpy.linalg
# fs = 1000
# f0 = 10
# m = 8192
# n = 513
# d = 0
# f = np.linspace(-0.5, 0.5, m) // All frequencies
# tm = np.arange(-n,0.5,1) // All times
# zf = butter_lp(f, f0/fs)
# af = np.abs(zf)
# pf = -1 * np.angle(zf)
# np.cos(2*np.pi*f[0]*tm)
# f2d, t2d = np.meshgrid(f, tm)
# u = np.cos(2*np.pi*f2d*t2d)
# X = u
# Y = af*np.cos(pf)
# X1 = np.linalg.pinv(X)
# out = np.dot(Y, X1) | mit | 7,364,699,537,436,800,000 | 24.5 | 79 | 0.621423 | false |
redhat-openstack/glance | glance/tests/unit/v2/test_registry_client.py | 1 | 24550 | # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Glance Registry's client.
These tests are temporary and will be removed once
the registry's driver tests are added.
"""
import copy
import datetime
import os
import uuid
from mock import patch
from glance.common import config
from glance.common import exception
from glance import context
from glance.db.sqlalchemy import api as db_api
from glance.openstack.common import timeutils
from glance.registry.api import v2 as rserver
import glance.registry.client.v2.api as rapi
from glance.registry.client.v2.api import client as rclient
from glance.tests.unit import base
from glance.tests import utils as test_utils
_gen_uuid = lambda: str(uuid.uuid4())
UUID1 = str(uuid.uuid4())
UUID2 = str(uuid.uuid4())
# NOTE(bcwaldon): needed to init config_dir cli opt
config.parse_args(args=[])
class TestRegistryV2Client(base.IsolatedUnitTest,
test_utils.RegistryAPIMixIn):
"""
Test proper actions made for both valid and invalid requests
against a Registry service
"""
    # Registry server to use
    # in the stub.
registry = rserver
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2Client, self).setUp()
db_api.get_engine()
self.context = context.RequestContext(is_admin=True)
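        # Stagger created_at on the fixtures so tests that sort or page by
        # creation time below see a deterministic order.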
uuid1_time = timeutils.utcnow()
uuid2_time = uuid1_time + datetime.timedelta(seconds=5)
self.FIXTURES = [
self.get_extra_fixture(
id=UUID1, name='fake image #1', is_public=False,
disk_format='ami', container_format='ami', size=13,
virtual_size=26, properties={'type': 'kernel'},
location="swift://user:passwd@acct/container/obj.tar.0",
created_at=uuid1_time),
self.get_extra_fixture(id=UUID2, name='fake image #2',
properties={}, size=19, virtual_size=38,
location="file:///tmp/glance-tests/2",
created_at=uuid2_time)]
self.destroy_fixtures()
self.create_fixtures()
self.client = rclient.RegistryClient("0.0.0.0")
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2Client, self).tearDown()
self.destroy_fixtures()
def test_image_get_index(self):
"""Test correct set of public image returned"""
images = self.client.image_get_all()
self.assertEqual(len(images), 2)
def test_create_image_with_null_min_disk_min_ram(self):
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf', min_disk=None,
min_ram=None)
db_api.image_create(self.context, extra_fixture)
image = self.client.image_get(image_id=UUID3)
self.assertEqual(0, image["min_ram"])
self.assertEqual(0, image["min_disk"])
def test_get_index_sort_name_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by name in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='name', sort_dir='asc')
self.assertEqualImages(images, (UUID3, UUID1, UUID2, UUID4),
unjsonify=False)
def test_get_index_sort_status_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by status in
descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
status='queued')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='status', sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_get_index_sort_disk_format_asc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by disk_format in
ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='vdi')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='disk_format',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID3, UUID4, UUID2),
unjsonify=False)
def test_get_index_sort_container_format_desc(self):
"""
Tests that the registry API returns list of
public images sorted alphabetically by container_format in
descending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami')
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='xyz',
disk_format='iso',
container_format='bare')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='container_format',
sort_dir='desc')
self.assertEqualImages(images, (UUID2, UUID4, UUID3, UUID1),
unjsonify=False)
def test_get_index_sort_size_asc(self):
"""
Tests that the registry API returns list of
public images sorted by size in ascending order.
"""
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='asdf',
disk_format='ami',
container_format='ami',
size=100, virtual_size=200)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='asdf',
disk_format='iso',
container_format='bare',
size=2, virtual_size=4)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='size', sort_dir='asc')
self.assertEqualImages(images, (UUID4, UUID1, UUID2, UUID3),
unjsonify=False)
def test_get_index_sort_created_at_asc(self):
"""
Tests that the registry API returns list of
public images sorted by created_at in ascending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='created_at',
sort_dir='asc')
self.assertEqualImages(images, (UUID1, UUID2, UUID4, UUID3),
unjsonify=False)
def test_get_index_sort_updated_at_desc(self):
"""
Tests that the registry API returns list of
public images sorted by updated_at in descending order.
"""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, created_at=None,
updated_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, created_at=None,
updated_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(sort_key='updated_at',
sort_dir='desc')
self.assertEqualImages(images, (UUID3, UUID4, UUID2, UUID1),
unjsonify=False)
def test_image_get_index_marker(self):
"""Test correct set of images returned with marker param."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID3)
self.assertEqualImages(images, (UUID4, UUID2, UUID1), unjsonify=False)
def test_image_get_index_limit(self):
"""Test correct number of images returned with limit param."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=2)
self.assertEqual(len(images), 2)
def test_image_get_index_marker_limit(self):
"""Test correct set of images returned with marker/limit params."""
uuid4_time = timeutils.utcnow() + datetime.timedelta(seconds=10)
uuid3_time = uuid4_time + datetime.timedelta(seconds=5)
UUID3 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID3, name='new name! #123',
status='saving',
created_at=uuid3_time)
db_api.image_create(self.context, extra_fixture)
UUID4 = _gen_uuid()
extra_fixture = self.get_fixture(id=UUID4, name='new name! #125',
status='saving',
created_at=uuid4_time)
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(marker=UUID4, limit=1)
self.assertEqualImages(images, (UUID2,), unjsonify=False)
def test_image_get_index_limit_None(self):
"""Test correct set of images returned with limit param == None."""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123',
status='saving')
db_api.image_create(self.context, extra_fixture)
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #125',
status='saving')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(limit=None)
self.assertEqual(len(images), 4)
def test_image_get_index_by_name(self):
"""
        Test that the correct set of public, name-filtered images is returned.
        This is just a sanity check; we test the details call more in depth.
"""
extra_fixture = self.get_fixture(id=_gen_uuid(),
name='new name! #123')
db_api.image_create(self.context, extra_fixture)
images = self.client.image_get_all(filters={'name': 'new name! #123'})
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('new name! #123', image['name'])
def test_image_get_is_public_v2(self):
"""Tests that a detailed call can be filtered by a property"""
extra_fixture = self.get_fixture(id=_gen_uuid(), status='saving',
properties={'is_public': 'avalue'})
context = copy.copy(self.context)
db_api.image_create(context, extra_fixture)
filters = {'is_public': 'avalue'}
images = self.client.image_get_all(filters=filters)
self.assertEqual(len(images), 1)
for image in images:
self.assertEqual('avalue', image['properties'][0]['value'])
def test_image_get(self):
"""Tests that the detailed info about an image returned"""
fixture = self.get_fixture(id=UUID1, name='fake image #1',
is_public=False, size=13, virtual_size=26,
disk_format='ami', container_format='ami')
data = self.client.image_get(image_id=UUID1)
for k, v in fixture.items():
el = data[k]
self.assertEqual(v, data[k],
"Failed v != data[k] where v = %(v)s and "
"k = %(k)s and data[k] = %(el)s" %
dict(v=v, k=k, el=el))
def test_image_get_non_existing(self):
"""Tests that NotFound is raised when getting a non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_get,
image_id=_gen_uuid())
def test_image_create_basic(self):
"""Tests that we can add image metadata and returns the new id"""
fixture = self.get_fixture()
new_image = self.client.image_create(values=fixture)
# Test all other attributes set
data = self.client.image_get(image_id=new_image['id'])
for k, v in fixture.items():
self.assertEqual(v, data[k])
# Test status was updated properly
self.assertIn('status', data)
self.assertEqual('active', data['status'])
def test_image_create_with_properties(self):
"""Tests that we can add image metadata with properties"""
fixture = self.get_fixture(location="file:///tmp/glance-tests/2",
properties={'distro': 'Ubuntu 10.04 LTS'})
new_image = self.client.image_create(values=fixture)
self.assertIn('properties', new_image)
self.assertEqual(new_image['properties'][0]['value'],
fixture['properties']['distro'])
del fixture['location']
del fixture['properties']
for k, v in fixture.items():
self.assertEqual(v, new_image[k])
# Test status was updated properly
self.assertIn('status', new_image.keys())
self.assertEqual('active', new_image['status'])
def test_image_create_already_exists(self):
"""Tests proper exception is raised if image with ID already exists"""
fixture = self.get_fixture(id=UUID2,
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Duplicate,
self.client.image_create,
values=fixture)
def test_image_create_with_bad_status(self):
"""Tests proper exception is raised if a bad status is set"""
fixture = self.get_fixture(status='bad status',
location="file:///tmp/glance-tests/2")
self.assertRaises(exception.Invalid,
self.client.image_create,
values=fixture)
def test_image_update(self):
"""Tests that the registry API updates the image"""
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': 'saving'}
self.assertTrue(self.client.image_update(image_id=UUID2,
values=fixture))
# Test all other attributes set
data = self.client.image_get(image_id=UUID2)
for k, v in fixture.items():
self.assertEqual(v, data[k])
def test_image_update_conflict(self):
"""Tests that the registry API updates the image"""
next_state = 'saving'
fixture = {'name': 'fake public image #2',
'disk_format': 'vmdk',
'status': next_state}
image = self.client.image_get(image_id=UUID2)
current = image['status']
self.assertEqual(current, 'active')
# image is in 'active' state so this should cause a failure.
from_state = 'saving'
self.assertRaises(exception.Conflict, self.client.image_update,
image_id=UUID2, values=fixture,
from_state=from_state)
try:
self.client.image_update(image_id=UUID2, values=fixture,
from_state=from_state)
except exception.Conflict as exc:
msg = (_('cannot transition from %(current)s to '
'%(next)s in update (wanted '
'from_state=%(from)s)') %
{'current': current, 'next': next_state,
'from': from_state})
self.assertEqual(str(exc), msg)
def _test_image_update_not_existing(self):
"""Tests non existing image update doesn't work"""
fixture = self.get_fixture(status='bad status')
self.assertRaises(exception.NotFound,
self.client.image_update,
image_id=_gen_uuid(),
values=fixture)
def test_image_destroy(self):
"""Tests that image metadata is deleted properly"""
# Grab the original number of images
orig_num_images = len(self.client.image_get_all())
# Delete image #2
image = self.FIXTURES[1]
deleted_image = self.client.image_destroy(image_id=image['id'])
self.assertTrue(deleted_image)
self.assertEqual(image['id'], deleted_image['id'])
self.assertTrue(deleted_image['deleted'])
self.assertTrue(deleted_image['deleted_at'])
# Verify one less image
filters = {'deleted': False}
new_num_images = len(self.client.image_get_all(filters=filters))
self.assertEqual(new_num_images, orig_num_images - 1)
def test_image_destroy_not_existing(self):
"""Tests cannot delete non-existing image"""
self.assertRaises(exception.NotFound,
self.client.image_destroy,
image_id=_gen_uuid())
def test_image_get_members(self):
"""Tests getting image members"""
memb_list = self.client.image_member_find(image_id=UUID2)
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_image_get_members_not_existing(self):
"""Tests getting non-existent image members"""
self.assertRaises(exception.NotFound,
self.client.image_get_members,
image_id=_gen_uuid())
def test_image_member_find(self):
"""Tests getting member images"""
memb_list = self.client.image_member_find(member='pattieblack')
num_members = len(memb_list)
self.assertEqual(num_members, 0)
def test_add_update_members(self):
"""Tests updating image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.assertTrue(member)
values['member'] = 'pattieblack2'
self.assertTrue(self.client.image_member_update(memb_id=member['id'],
values=values))
def test_add_delete_member(self):
"""Tests deleting image members"""
values = dict(image_id=UUID2, member='pattieblack')
member = self.client.image_member_create(values=values)
self.client.image_member_delete(memb_id=member['id'])
memb_list = self.client.image_member_find(member='pattieblack')
self.assertEqual(len(memb_list), 0)
class TestRegistryV2ClientApi(base.IsolatedUnitTest):
"""
Test proper actions made for both valid and invalid requests
against a Registry service
"""
def setUp(self):
"""Establish a clean test environment"""
super(TestRegistryV2ClientApi, self).setUp()
reload(rapi)
def tearDown(self):
"""Clear the test environment"""
super(TestRegistryV2ClientApi, self).tearDown()
def test_configure_registry_client_not_using_use_user_token(self):
self.config(use_user_token=False)
with patch.object(rapi,
'configure_registry_admin_creds') as mock_rapi:
rapi.configure_registry_client()
mock_rapi.assert_called_once_with()
def _get_fake_config_creds(self, auth_url='auth_url', strategy='keystone'):
return {
'user': 'user',
'password': 'password',
'username': 'user',
'tenant': 'tenant',
'auth_url': auth_url,
'strategy': strategy,
'region': 'region'
}
def test_configure_registry_admin_creds(self):
expected = self._get_fake_config_creds(auth_url=None,
strategy='configured_strategy')
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_strategy=expected['strategy'])
self.config(auth_region=expected['region'])
self.stubs.Set(os, 'getenv', lambda x: None)
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(rapi._CLIENT_CREDS, expected)
def test_configure_registry_admin_creds_with_auth_url(self):
expected = self._get_fake_config_creds()
self.config(admin_user=expected['user'])
self.config(admin_password=expected['password'])
self.config(admin_tenant_name=expected['tenant'])
self.config(auth_url=expected['auth_url'])
self.config(auth_strategy='test_strategy')
self.config(auth_region=expected['region'])
self.assertIsNone(rapi._CLIENT_CREDS)
rapi.configure_registry_admin_creds()
self.assertEqual(rapi._CLIENT_CREDS, expected)
| apache-2.0 | 5,718,802,242,312,375,000 | 37.479624 | 79 | 0.565743 | false |
APTE/APTE | bench/data.py | 1 | 21149 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
DICO = {
'ref' : {
"name" : "Apte without POR (reference version)",
"call" : "apte",
"branch" : "",
"benchs": {
"TEST": {
"new" : False,
"file": "TEST.txt",
"res" : True,
"date" : "1263",
"time": "453",
"nbExplo" : "4674",
"fileFrom" : "BENCH.log"
}
}
},
'comp' : {
"name" : "Compression (+ killing improper)",
"call" : "apte -with_por compr improper",
"branch" : "",
"benchs": {}
},
'red' : {
"name" : "Reduction (+ killing improper + NoUse criterion)",
"call" : "apte -with_por red improper nouse",
"branch" : "",
"benchs": {}
},
}
TESTS = {
'Yahalom-stef-6' : {
'res' : True,
'name' : "TODO",
'file' : 'yahalom-s-6.txt',
'cat' : 9,
},
'Yahalom-stef-modif' : {
'res' : True,
'name' : "TODO",
'file' : 'yahalom-s-modified.txt',
'cat' : 9,
},
'Yahalom-3' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-3.txt',
'cat' : 9,
},
'Yahalom-4' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-4.txt',
'cat' : 9,
},
'Yahalom-5' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-5.txt',
'cat' : 9,
},
'Yahalom-6' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-6.txt',
'cat' : 9,
},
'Yahalom-7' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-7.txt',
'cat' : 9,
},
'Yahalom-8' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-8.txt',
'cat' : 9,
},
'Yahalom-9' : {
'res' : True,
'name' : "TODO",
'file' : 'Yahalom-shared-key-9.txt',
'cat' : 9,
},
'PrivateAuth-2' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-2.txt',
'cat' : 9,
},
'PrivateAuth-3' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-3.txt',
'cat' : 9,
},
'PrivateAuth-4' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-4.txt',
'cat' : 9,
},
'PrivateAuth-5' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-5.txt',
'cat' : 9,
},
'PrivateAuth-6' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-6.txt',
'cat' : 9,
},
'PrivateAuth-7' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-7.txt',
'cat' : 9,
},
'PrivateAuth-8' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-8.txt',
'cat' : 9,
},
'PrivateAuth-9' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-9.txt',
'cat' : 9,
},
'PrivateAuth-5+' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-5+.txt',
'cat' : 9,
},
'PrivateAuth-6-' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-6-.txt',
'cat' : 9,
},
'PrivateAuth-4-diff' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-4-diff.txt',
'cat' : 9,
},
'PrivateAuth-6-diff' : {
'res' : True,
'name' : "TODO",
'file' : 'PrivateAuth-pub-key-6-diff.txt',
'cat' : 9,
},
'DS-Shared-3' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-3.txt',
'cat' : 9,
},
'DS-Shared-4' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-4.txt',
'cat' : 9,
},
'DS-Shared-5' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-5.txt',
'cat' : 9,
},
'DS-Shared-6' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-6.txt',
'cat' : 9,
},
'DS-Shared-7' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-7.txt',
'cat' : 9,
},
'DS-Shared-8' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-8.txt',
'cat' : 9,
},
'DS-Shared-9' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-9.txt',
'cat' : 9,
},
'DS-Shared-10' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-10.txt',
'cat' : 9,
},
'DS-Shared-11' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-11.txt',
'cat' : 9,
},
'DS-Shared-12' : {
'res' : True,
'name' : "TODO",
'file' : 'DS-shared-key-12.txt',
'cat' : 9,
},
'NS-Shared_diff_4' : {
'res' : True,
'name' : "TODO",
'file' : 'NS-shared-key-4-diff.txt',
'cat' : 9,
},
'NS-Shared_diff_5' : {
'res' : True,
'name' : "TODO",
'file' : 'NS-shared-key-5-diff.txt',
'cat' : 9,
},
'NS-Shared_diff_6' : {
'res' : True,
'name' : "TODO",
'file' : 'NS-shared-key-6-diff.txt',
'cat' : 9,
},
'NS-Shared_diff_7' : {
'res' : True,
'name' : "TODO",
'file' : 'NS-shared-key-7-diff.txt',
'cat' : 9,
},
'NS-Shared_diff_8' : {
'res' : True,
'name' : "TODO",
'file' : 'NS-shared-key-8-diff.txt',
'cat' : 9,
},
'NS-Shared_diff_9' : {
'res' : True,
'name' : "TODO",
'file' : 'NS-shared-key-9-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_4' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-4-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_5' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-5-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_6' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-6-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_7' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-7-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_8' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-8-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_9' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-9-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_10' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-10-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_11' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-11-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_12' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-12-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_13' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-13-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_14' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-14-diff.txt',
'cat' : 9,
},
'WMF_SS_diff_15' : {
'res' : True,
'name' : "TODO",
'file' : 'WMF-shared-key-15-diff.txt',
'cat' : 9,
},
'WMF_SS_3' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 3 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-3.txt',
'cat' : 3,
},
'WMF_SS_4' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 4 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-4.txt',
'cat' : 3,
},
'WMF_SS_5' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 5 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-5.txt',
'cat' : 3,
},
'WMF_SS_6' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 6 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-6.txt',
'cat' : 3,
},
'WMF_SS_7' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 7 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-7.txt',
'cat' : 3,
},
'WMF_SS_8' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 8 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-8.txt',
'cat' : 3,
},
'WMF_SS_9' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 9 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-9.txt',
'cat' : 3,
},
'WMF_SS_10' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 10 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-10.txt',
'cat' : 3,
},
'WMF_SS_11' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 11 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-11.txt',
'cat' : 3,
},
'WMF_SS_12' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 12 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-12.txt',
'cat' : 3,
},
'WMF_SS_13' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 13 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-13.txt',
'cat' : 3,
},
'WMF_SS_14' : {
'res' : True,
'name' : "Wide Mouth Frog: Strong secrecy of the shared key kab - 14 agents among [Alice | Server | Bob]",
'file' : 'WMF-shared-key-14.txt',
'cat' : 3,
},
'NS_SharedK_3' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 3 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-3.txt',
'cat' : 3,
},
'NS_SharedK_4' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 4 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-4.txt',
'cat' : 3,
},
'NS_SharedK_5' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 5 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-5.txt',
'cat' : 3,
},
'NS_SharedK_6' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 6 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-6.txt',
'cat' : 3,
},
'NS_SharedK_7' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 7 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-7.txt',
'cat' : 3,
},
'NS_SharedK_8' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 8 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-8.txt',
'cat' : 3,
},
'NS_SharedK_9' : {
'res' : True,
'name' : "Needham Shroeder Symmetric: Strong Secrecy of shared key - 9 agents among [Alice | Server | Bob]",
'file' : 'NS-shared-key-9.txt',
'cat' : 3,
},
'AKA_3G_s__2' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 2 agent among [Mobile Station, Network]",
'file' : '3G-AKA-s-2.txt',
'cat' : 3,
},
'AKA_3G_s__4' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 4 agent among [Mobile Station, Network]",
'file' : '3G-AKA-s-4.txt',
'cat' : 3,
},
'AKA_3G_s__6' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 6 agent among [Mobile Station, Network]",
'file' : '3G-AKA-s-6.txt',
'cat' : 3,
},
'AKA_3G_s__8' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 8 agent among [Mobile Station, Network]",
'file' : '3G-AKA-s-8.txt',
'cat' : 3,
},
'AKA_3G_s__10' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 10 agent among [Mobile Station, Network]",
'file' : '3G-AKA-s-10.txt',
'cat' : 3,
},
'AKA_3G_s__12' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 12 agent among [Mobile Station, Network]",
'file' : '3G-AKA-s-12.txt',
'cat' : 3,
},
'AKA_3G_2' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 2 agent among [Mobile Station, Network]",
'file' : '3G_PPAuthentication_sec_2.txt',
'cat' : 3,
},
'AKA_3G_3' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 3 agent among [Mobile Station, Network]",
'file' : '3G_PPAuthentication_sec_3.txt',
'cat' : 3,
},
'AKA_3G_4' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 4 agent among [Mobile Station, Network]",
'file' : '3G_PPAuthentication_sec_4.txt',
'cat' : 3,
},
'AKA_3G_5' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 5 agent among [Mobile Station, Network]",
'file' : '3G_PPAuthentication_sec_5.txt',
'cat' : 3,
},
'AKA_3G_6' : {
'res' : True,
'name' : "3G AKA protocol: we test strong secrecy of the agreed key - 6 agent among [Mobile Station, Network]",
'file' : '3G_PPAuthentication_sec_6.txt',
'cat' : 3,
},
'Pass-PA_ano_2' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 2 agents among [Reader, Passport]',
'file' : 'PA_ano_2.txt',
'cat' : 2,
},
'Pass-PA_ano_3' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 3 agents among [Reader, Passport]',
'file' : 'PA_ano_3.txt',
'cat' : 2,
},
'Pass-PA_ano_4' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 4 agents among [Reader, Passport]',
'file' : 'PA_ano_4.txt',
'cat' : 2,
},
'Pass-PA_ano_5' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 5 agents among [Reader, Passport]',
'file' : 'PA_ano_5.txt',
'cat' : 2,
},
'Pass-PA_ano_6' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 6 agents among [Reader, Passport]',
'file' : 'PA_ano_6.txt',
'cat' : 2,
},
'Pass-PA_ano_7' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 7 agents among [Reader, Passport]',
'file' : 'PA_ano_7.txt',
'cat' : 2,
},
'Pass-PA_ano_8' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 8 agents among [Reader, Passport]',
'file' : 'PA_ano_8.txt',
'cat' : 2,
},
'Pass-PA_ano_9' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 9 agents among [Reader, Passport]',
'file' : 'PA_ano_9.txt',
'cat' : 2,
},
'Pass-PA_ano_10' : {
'res' : True,
        'name' : 'Passport: Passive Authentication - Anonymity of one Passport - 10 agents among [Reader, Passport]',
'file' : 'PA_ano_10.txt',
'cat' : 2,
},
'Bench_Graph_1' : {
'res' : True,
        'name' : 'Benchmarks for POR - 1 session of very simple processes',
'file' : 'Simple_1_par.txt',
'cat' : 4,
},
'Bench_Graph_2' : {
'res' : True,
'name' : 'Benchmarks for POR - 2 sessions of very simple processes',
'file' : 'Simple_2_par.txt',
'cat' : 4,
},
'Bench_Graph_3' : {
'res' : True,
'name' : 'Benchmarks for POR - 3 sessions of very simple processes',
'file' : 'Simple_3_par.txt',
'cat' : 4,
},
'Bench_Graph_4' : {
'res' : True,
'name' : 'Benchmarks for POR - 4 sessions of very simple processes',
'file' : 'Simple_4_par.txt',
'cat' : 4,
},
'Bench_Graph_5' : {
'res' : True,
'name' : 'Benchmarks for POR - 5 sessions of very simple processes',
'file' : 'Simple_5_par.txt',
'cat' : 4,
},
'Bench_Graph_6' : {
'res' : True,
'name' : 'Benchmarks for POR - 6 sessions of very simple processes',
'file' : 'Simple_6_par.txt',
'cat' : 4,
},
'Bench_Graph_7' : {
'res' : True,
'name' : 'Benchmarks for POR - 7 sessions of very simple processes',
'file' : 'Simple_7_par.txt',
'cat' : 4,
},
'Bench_Graph_8' : {
'res' : True,
'name' : 'Benchmarks for POR - 8 sessions of very simple processes',
'file' : 'Simple_8_par.txt',
'cat' : 4,
},
'Bench_Graph_9' : {
'res' : True,
'name' : 'Benchmarks for POR - 9 sessions of very simple processes',
'file' : 'Simple_9_par.txt',
'cat' : 4,
},
'Bench_Graph_10' : {
'res' : True,
'name' : 'Benchmarks for POR - 10 sessions of very simple processes',
'file' : 'Simple_10_par.txt',
'cat' : 4,
},
'Bench_Graph_11' : {
'res' : True,
'name' : 'Benchmarks for POR - 11 sessions of very simple processes',
'file' : 'Simple_11_par.txt',
'cat' : 4,
},
'Bench_Graph_12' : {
'res' : True,
'name' : 'Benchmarks for POR - 12 sessions of very simple processes',
'file' : 'Simple_12_par.txt',
'cat' : 4,
},
'Bench_Graph_13' : {
'res' : True,
'name' : 'Benchmarks for POR - 13 sessions of very simple processes',
'file' : 'Simple_13_par.txt',
'cat' : 4,
},
'Bench_Graph_14' : {
'res' : True,
'name' : 'Benchmarks for POR - 14 sessions of very simple processes',
'file' : 'Simple_14_par.txt',
'cat' : 4,
},
'Bench_Graph_15' : {
'res' : True,
'name' : 'Benchmarks for POR - 15 sessions of very simple processes',
'file' : 'Simple_15_par.txt',
'cat' : 4,
},
'Bench_Graph_16' : {
'res' : True,
'name' : 'Benchmarks for POR - 16 sessions of very simple processes',
'file' : 'Simple_16_par.txt',
'cat' : 4,
},
'Bench_Graph_17' : {
'res' : True,
'name' : 'Benchmarks for POR - 17 sessions of very simple processes',
'file' : 'Simple_17_par.txt',
'cat' : 4,
},
'Bench_Graph_18' : {
'res' : True,
'name' : 'Benchmarks for POR - 18 sessions of very simple processes',
'file' : 'Simple_18_par.txt',
'cat' : 4,
},
'Bench_Graph_19' : {
'res' : True,
'name' : 'Benchmarks for POR - 19 sessions of very simple processes',
'file' : 'Simple_19_par.txt',
'cat' : 4,
},
'Bench_Graph_20' : {
'res' : True,
'name' : 'Benchmarks for POR - 20 sessions of very simple processes',
'file' : 'Simple_20_par.txt',
'cat' : 4,
},
'Bench_Graph_21' : {
'res' : True,
'name' : 'Benchmarks for POR - 21 sessions of very simple processes',
'file' : 'Simple_21_par.txt',
'cat' : 4,
},
'Bench_Graph_22' : {
'res' : True,
'name' : 'Benchmarks for POR - 22 sessions of very simple processes',
'file' : 'Simple_22_par.txt',
'cat' : 4,
},
'Bench_Graph_23' : {
'res' : True,
'name' : 'Benchmarks for POR - 23 sessions of very simple processes',
'file' : 'Simple_23_par.txt',
'cat' : 4,
},
'Bench_Graph_24' : {
'res' : True,
'name' : 'Benchmarks for POR - 24 sessions of very simple processes',
'file' : 'Simple_24_par.txt',
'cat' : 4,
},
}
def get_versDico():
return(DICO)
def get_testsDico():
return(TESTS)
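# Illustrative usage sketch (an assumption, not part of the original file):
# the accessors above simply expose the module-level dicts, so a benchmark
# driver might select tests by category like this:
#
#   tests = get_testsDico()
#   category_3 = {name: t for name, t in tests.items() if t['cat'] == 3}
#   for name, t in category_3.items():
#       print(name, t['file'], t['res'])
#
# The keys 'res', 'name', 'file' and 'cat' are the ones defined in TESTS above.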
| gpl-3.0 | -2,781,676,103,520,612,000 | 28.171034 | 120 | 0.458509 | false |
Freso/listenbrainz-server | listenbrainz/labs_api/labs/api/recording_from_recording_mbid.py | 1 | 5867 | import psycopg2
import psycopg2.extras
from flask import current_app
from datasethoster import Query
psycopg2.extras.register_uuid()
class RecordingFromRecordingMBIDQuery(Query):
'''
Look up a musicbrainz data for a list of recordings, based on MBID.
'''
def names(self):
return ("recording-mbid-lookup", "MusicBrainz Recording by MBID Lookup")
def inputs(self):
return ['[recording_mbid]']
def introduction(self):
return """Look up recording and artist information given a recording MBID"""
def outputs(self):
return ['recording_mbid', 'recording_name', 'length', 'comment', 'artist_credit_id',
'artist_credit_name', '[artist_credit_mbids]', 'original_recording_mbid']
def fetch(self, params, offset=-1, count=-1):
mbids = [p['[recording_mbid]'] for p in params]
with psycopg2.connect(current_app.config['MB_DATABASE_URI']) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
                # First, look up any MBIDs that may have been redirected
query = '''SELECT rgr.gid::TEXT AS recording_mbid_old,
r.gid::TEXT AS recording_mbid_new
FROM recording_gid_redirect rgr
JOIN recording r
ON r.id = rgr.new_id
                           WHERE rgr.gid IN %s'''
args = [tuple([psycopg2.extensions.adapt(p) for p in mbids])]
curs.execute(query, tuple(args))
# Build an index with all redirected recordings
redirect_index = {}
inverse_redirect_index = {}
while True:
row = curs.fetchone()
if not row:
break
r = dict(row)
redirect_index[r['recording_mbid_old']] = r['recording_mbid_new']
inverse_redirect_index[r['recording_mbid_new']] = r['recording_mbid_old']
# Now start looking up actual recordings
for i, mbid in enumerate(mbids):
if mbid in redirect_index:
mbids[i] = redirect_index[mbid]
query = '''SELECT r.gid::TEXT AS recording_mbid,
r.name AS recording_name,
r.length,
r.comment,
ac.id AS artist_credit_id,
ac.name AS artist_credit_name,
array_agg(a.gid)::TEXT[] AS artist_credit_mbids
FROM recording r
JOIN artist_credit ac
ON r.artist_credit = ac.id
JOIN artist_credit_name acn
ON ac.id = acn.artist_credit
JOIN artist a
ON acn.artist = a.id
WHERE r.gid
IN %s
GROUP BY r.gid, r.id, r.name, r.length, r.comment, ac.id, ac.name
ORDER BY r.gid'''
args = [tuple([psycopg2.extensions.adapt(p) for p in mbids])]
curs.execute(query, tuple(args))
# Build an index of all the fetched recordings
recording_index = {}
while True:
row = curs.fetchone()
if not row:
break
recording_index[row['recording_mbid']] = dict(row)
# Finally collate all the results, ensuring that we have one entry with original_recording_mbid for each
# input argument
output = []
for p in params:
mbid = p['[recording_mbid]']
try:
r = dict(recording_index[mbid])
except KeyError:
try:
r = dict(recording_index[redirect_index[mbid]])
except KeyError:
output.append({'recording_mbid': None,
'recording_name': None,
'length': None,
'comment': None,
'artist_credit_id': None,
'artist_credit_name': None,
'[artist_credit_mbids]': None,
'original_recording_mbid': mbid})
continue
                r['[artist_credit_mbids]'] = r.pop('artist_credit_mbids')
r['original_recording_mbid'] = inverse_redirect_index.get(mbid, mbid)
output.append(r)
# Ideally offset and count should be handled by the postgres query itself, but the 1:1 relationship
# of what the user requests and what we need to fetch is no longer true, so we can't easily use LIMIT/OFFSET.
# We might be able to use a RIGHT JOIN to fix this, but for now I'm happy to leave this as it. We need to
# revisit this when we get closer to pushing recommendation tools to production.
if offset > 0 and count > 0:
return output[offset:offset+count]
if offset > 0 and count < 0:
return output[offset:]
if offset < 0 and count > 0:
return output[:count]
return output
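# Illustrative usage sketch (an assumption, not from the original source): the
# query is normally registered with and driven by the datasethoster framework,
# but a direct call inside a Flask app context with MB_DATABASE_URI configured
# might look like this (the MBID below is a placeholder, not a real recording):
#
#   query = RecordingFromRecordingMBIDQuery()
#   rows = query.fetch([{'[recording_mbid]': '00000000-0000-0000-0000-000000000000'}])
#   for row in rows:
#       print(row['original_recording_mbid'], row['recording_name'])
#
# Each row carries the columns listed in outputs(); unmatched MBIDs come back
# with None fields but keep their original_recording_mbid.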
| gpl-2.0 | 760,555,066,453,154,600 | 43.112782 | 125 | 0.46736 | false |
hydroshare/hydroshare | docs/conf.py | 1 | 4971 | # -*- coding: utf-8 -*-
#
# HydroShare documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 25 11:27:53 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_parsers = {
'.md': CommonMarkParser,
}
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HydroShare'
copyright = '2017, https://help.hydroshare.org/about-hydroshare/team/'
author = 'https://help.hydroshare.org/about-hydroshare/team/'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.10'
# The full version, including alpha/beta/rc tags.
release = '1.10.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'HydroSharedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'HydroShare.tex', 'HydroShare Documentation',
'https://help.hydroshare.org/about-hydroshare/team/', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hydroshare', 'HydroShare Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HydroShare', 'HydroShare Documentation',
author, 'HydroShare', 'One line description of project.',
'Miscellaneous'),
]
| bsd-3-clause | -3,372,806,723,805,227,000 | 29.875776 | 79 | 0.678334 | false |
MAECProject/maec-to-stix | docs/conf.py | 1 | 8864 | # -*- coding: utf-8 -*-
#
# maec-to-stix documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 30 14:08:10 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import maec_to_stix
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'maec-to-stix'
copyright = u'2014, The MITRE Corporation'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = maec_to_stix.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
'_build',
'_includes'
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'stix-maectostixdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'maec-to-stix.tex', u'maec-to-stix Documentation',
u'The MITRE Corporation', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'maec-to-stix', u'maec-to-stix Documentation',
[u'The MITRE Corporation'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'maec-to-stix', u'maec-to-stix Documentation',
u'The MITRE Corporation', 'maec-to-stix', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None} | bsd-3-clause | 1,624,199,354,319,191,800 | 30.43617 | 80 | 0.706115 | false |
VHAINNOVATIONS/GE-Pressure-Ulcer | python_gui_decision_support_webportal/python/experiment_dialog.py | 1 | 9688 | import add_modify_dialog
import util
import olv_dialog_controller
import wx
from ObjectListView import ObjectListView, Filter
from experiment import Experiment
from experiment_olv import OlvExperiment, OlvExperimentCols
from experiment_controller import ExperimentController
from add_modify_experiment_dialog import AddModifyExperimentDialog
from experiment_configuration_dialog import ExperimentConfigurationDialog
########################################################################
class ExperimentDialog(wx.Dialog):
"""
This class implements the generic select/add/update/delete dialog for a database object.
It constructs the list of objects and places them in an ObjectListView widget.
It then implements the button handlers for calling the add_modify_dialog to add or modify
the object. Selection and deletion are handled in this dialog by calling the
    ExperimentController.
Methods:
__init__(parent, db, obj, objOlv, objOlvCols, mode) - creates the widgets in the panel and performs initialization
getSelectedObject() - Gets the selected object in the ObjectListView
onAddRecord(event) - Button handler to add a record to the database
onEditRecord(event) - Button handler to edit a record
        onDelete(event) - Button handler to delete a record
        onConfiguration(event) - Button handler to open the experiment configuration dialog
onSearch(event) - Search field handler to search database based on the user's filter choice and keyword
onSelectRecord(event) - Button handler to select a record
onShowAllRecord(event) - Button handler to update the record list to show all of them
setResultsOlv() - Sets the columns and objects in the ObjectListView
showAllRecords() - Shows all records in the object list view control
"""
#----------------------------------------------------------------------
def __init__(self, parent, db, algorithmId=-1, mode="Add-Update-Delete"):
"""
Constructor which creates the modal dialog and its widgets, instantiates an
ObjectlistView and populates it with the results from a query containing all
database objects in a class.
Arguments:
parent - Parent window
db - Database connection object
algorithmId - algorithmId (-1 for all)
            mode - Dialog mode which can be either "Add-Update-Delete" or "Select-Only"
"""
self.algorithm_id = algorithmId
self.db = db
self.obj = Experiment
self.objOlv = OlvExperiment
self.objOlvCols = OlvExperimentCols()
width = self.objOlvCols.getTotalColumnWidth()
wx.Dialog.__init__(self, parent, size=wx.Size(width,500))
self.controller = ExperimentController(db, self.obj, self.objOlv, self.objOlvCols)
try:
self.results = self.controller.getAllForOLView(self.algorithm_id)
        except Exception:
self.results = []
font = wx.Font(12, wx.DEFAULT, wx.NORMAL, wx.BOLD)
lbl = wx.StaticText(self, label=self.obj.displayTableName)
lbl.SetFont(font)
mainSizer = wx.BoxSizer(wx.VERTICAL)
searchSizer = wx.BoxSizer(wx.HORIZONTAL)
btnSizer = wx.BoxSizer(wx.HORIZONTAL)
font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
# create the search related widgets
searchByLbl = wx.StaticText(self, label="Search By:")
searchByLbl.SetFont(font)
searchSizer.Add(searchByLbl, 0, wx.ALL, 5)
self.search = wx.SearchCtrl(self, style=wx.TE_PROCESS_ENTER)
self.search.Bind(wx.EVT_TEXT_ENTER, self.onSearch)
searchSizer.Add(self.search, 0, wx.ALL, 5)
self.resultsOlv = ObjectListView(self, style=wx.LC_REPORT
|wx.SUNKEN_BORDER)
self.resultsOlv.SetEmptyListMsg("No Records Found")
self.setResultsOlv()
# create the button row
if mode == "Select-Only":
selectRecordBtn = wx.Button(self, label="Select")
selectRecordBtn.Bind(wx.EVT_BUTTON, self.onSelectRecord)
btnSizer.Add(selectRecordBtn, 0, wx.ALL, 5)
if mode == "Add-Update-Delete":
addRecordBtn = wx.Button(self, label="Add")
addRecordBtn.Bind(wx.EVT_BUTTON, self.onAddRecord)
btnSizer.Add(addRecordBtn, 0, wx.ALL, 5)
editRecordBtn = wx.Button(self, label="Edit")
editRecordBtn.Bind(wx.EVT_BUTTON, self.onEditRecord)
btnSizer.Add(editRecordBtn, 0, wx.ALL, 5)
deleteRecordBtn = wx.Button(self, label="Delete")
deleteRecordBtn.Bind(wx.EVT_BUTTON, self.onDelete)
btnSizer.Add(deleteRecordBtn, 0, wx.ALL, 5)
showAllBtn = wx.Button(self, label="Show All")
showAllBtn.Bind(wx.EVT_BUTTON, self.onShowAllRecord)
btnSizer.Add(showAllBtn, 0, wx.ALL, 5)
configBtn = wx.Button(self, label="Configuration")
configBtn.Bind(wx.EVT_BUTTON, self.onConfiguration)
btnSizer.Add(configBtn, 0, wx.ALL, 5)
mainSizer.Add(lbl, 0, wx.CENTER)
mainSizer.Add(searchSizer)
mainSizer.Add(self.resultsOlv, 1, wx.ALL|wx.EXPAND, 5)
mainSizer.Add(btnSizer, 0, wx.CENTER)
self.SetSizer(mainSizer)
#----------------------------------------------------------------------
def getSelectedObject(self):
"""
Gets the selected object in the ObjectListView
"""
return self.selectedObject
#----------------------------------------------------------------------
def onAddRecord(self, event):
"""
Button handler to add a record to the database
"""
dlg = AddModifyExperimentDialog(self.controller, self.obj, self.db, title="Add", addRecord=True)
rc = dlg.ShowModal()
if rc == 0:
self.showAllRecords()
#----------------------------------------------------------------------
def onEditRecord(self, event):
"""
Button handler to edit a record
"""
selectedRow = self.resultsOlv.GetSelectedObject()
if selectedRow == None:
util.showMessageDialog("No row selected!", "Error")
return
dlg = AddModifyExperimentDialog(self.controller, self.obj, self.db, row=selectedRow, title="Modify",
addRecord=False)
rc = dlg.ShowModal()
if rc == 0:
self.showAllRecords()
#----------------------------------------------------------------------
def onDelete(self, event):
"""
Button handler to delete a record
"""
selectedRow = self.resultsOlv.GetSelectedObject()
if selectedRow == None:
util.showMessageDialog("No row selected!", "Error")
return
(rc, msg) = self.controller.deleteRecord(selectedRow.getKey())
# Check return code from above and put up appropriate message dialog
if rc == 0:
util.showMessageDialog("Record Deleted Successfully!", "Success!", wx.ICON_INFORMATION)
else:
util.showMessageDialog(msg, "Failure!", wx.ICON_INFORMATION)
self.showAllRecords()
#----------------------------------------------------------------------
def onConfiguration(self, event):
"""
Button handler to show configuration parameters
"""
selectedRow = self.resultsOlv.GetSelectedObject()
if selectedRow == None:
util.showMessageDialog("No row selected!", "Error")
return
algorithmId = selectedRow.getKey()
olvDialog = ExperimentConfigurationDialog(None, self.db, algorithmId)
rc = olvDialog.ShowModal()
olvDialog.Destroy()
self.Enable()
self.showAllRecords()
#----------------------------------------------------------------------
def onSearch(self, event):
"""
Search field handler to search database based on the user's filter choice and keyword
"""
keyword = self.search.GetValue()
Filter.TextSearch(self.resultsOlv,columns=(), text=keyword)
#----------------------------------------------------------------------
def onSelectRecord(self, event):
"""
Button handler to select a record
"""
selectedRow = self.resultsOlv.GetSelectedObject()
if selectedRow == None:
util.showMessageDialog("No row selected!", "Error")
return
key = selectedRow.getKey()
self.selectedObject = self.controller.getRecordByKey(key)
self.EndModal(0)
#----------------------------------------------------------------------
def onShowAllRecord(self, event):
"""
Button handler to update the record list to show all of them
"""
self.showAllRecords()
#----------------------------------------------------------------------
def setResultsOlv(self):
"""
Sets the columns and objects in the ObjectListView
"""
        cd = self.objOlvCols.getColumnDefinitions()
        self.resultsOlv.SetColumns(cd)
        self.resultsOlv.SetObjects(self.results)
#----------------------------------------------------------------------
def showAllRecords(self):
"""
Shows all records in the object list view control
"""
self.results = self.controller.getAllForOLView(self.algorithm_id)
self.setResultsOlv()
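# Illustrative usage sketch (an assumption, not part of the original file):
# the dialog is modal and follows the same pattern as the nested
# ExperimentConfigurationDialog call above, e.g.
#
#   dlg = ExperimentDialog(parent, db, algorithmId=-1, mode="Select-Only")
#   if dlg.ShowModal() == 0:
#       experiment = dlg.getSelectedObject()
#   dlg.Destroy()
#
# ShowModal() returns 0 when a record is selected (see onSelectRecord), and
# algorithmId=-1 lists experiments for all algorithms.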
| apache-2.0 | -8,946,937,582,413,827,000 | 42.25 | 122 | 0.569055 | false |
nitmir/django-cas-server | docs/conf.py | 1 | 10835 | # -*- coding: utf-8 -*-
#
# django-cas-server documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 5 12:11:50 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
import setup as mysetup
os.environ['DJANGO_SETTINGS_MODULE'] = 'cas_server.tests.settings'
import django
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'djangodocs',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-cas-server'
copyright = u'2016, Valentin Samir'
author = u'Valentin Samir'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = mysetup.VERSION
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'django-cas-server v5.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-cas-serverdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-cas-server.tex', u'django-cas-server Documentation',
u'Valentin Samir', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-cas-server', u'django-cas-server Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-cas-server', u'django-cas-server Documentation',
author, 'django-cas-server', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ('https://docs.python.org/', None),
"django": ('https://docs.djangoproject.com/en/3.2/', 'django.inv'),
}
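# The second element of the "django" entry points intersphinx at a local
# inventory file, fetched on demand by _download_django_inv() below.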
autodoc_member_order = 'bysource'
locale_dirs = ['../test_venv/lib/python2.7/site-packages/django/conf/locale/']
def _download_django_inv():
    # Fetch Django's intersphinx inventory and cache it next to this file.
    import requests
    r = requests.get("https://docs.djangoproject.com/en/3.2/_objects")
    r.raise_for_status()  # don't cache an empty/partial file on a failed request
    with open(_download_django_inv.path, 'wb') as f:
        f.write(r.content)
_download_django_inv.path = os.path.abspath(os.path.join(os.path.dirname(__file__), "django.inv"))
if not os.path.isfile(_download_django_inv.path):
_download_django_inv()
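# Delete the cached django.inv to force a re-download, e.g. after bumping the
# Django docs version referenced above.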
| gpl-3.0 | 3,197,410,253,486,718,000 | 28.363144 | 98 | 0.693032 | false |
sagarjauhari/BCIpy | eegml.py | 1 | 10074 | #!/usr/bin/env python
# Copyright 2013, 2014 Justis Grant Peters and Sagar Jauhari
# This file is part of BCIpy.
#
# BCIpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BCIpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BCIpy. If not, see <http://www.gnu.org/licenses/>.
import csv
import time
import re
from datetime import datetime
from decimal import Decimal
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
pylab.rcParams['figure.figsize'] = 15, 6
from os import listdir
from os.path import join, isfile
import numpy as np
import pandas as pd
import pickle
from scipy.stats.stats import pearsonr
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
try: # Import config params
import dev_settings as config
except ImportError:
print "Please create a dev_settings.py using dev_settings.py.example as an example"
def print_config():
print config.DATA_URL
print config.SAVE_URL
def format_time(ti):
    """
    Converts a timestamp like '2010-12-14 16:56:36.996' to a Unix-epoch
    string, preserving microsecond precision.
    """
    to = datetime.strptime(ti, '%Y-%m-%d %H:%M:%S.%f')
    # Decimal avoids the precision loss a float round-trip would introduce
    to = Decimal(to.strftime('%s.%f'))
    return str(to)
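# Example (the epoch value depends on the system timezone, since
# strftime('%s') is resolved against local time):
#   format_time('2010-12-14 16:56:36.996')  # -> '<epoch_seconds>.996000'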
def format_task_xls(indir, outdir):
path_task_xls = join(indir, "task.xls")
path_task_xls_labels = join(outdir, "task_xls_labels.csv")
with open(path_task_xls, 'rb') as fi,\
open(path_task_xls_labels, 'w') as fo:
fr = csv.reader(fi, delimiter='\t')
fw = csv.writer(fo, delimiter='\t')
h = fr.next()
fw.writerow(['taskid',h[0], h[1], h[2], h[3], h[-1]]) #header
for idx, row in enumerate(fr):
row[2] = format_time(row[2])
row[3] = format_time(row[3])
fw.writerow([idx, row[0], row[1], row[2], row[3], row[-1]])
def label_data(in_file, label_file, out_file, subj_t, time_t, dbg=False):
    """Join a subject's 1Hz combined EEG file with task difficulty labels.

    in_file    -- <datestamp>.<subject>.combined.csv (raw 1Hz readings)
    label_file -- task_xls_labels.csv (per-task start/end times and difficulty)
    out_file   -- labelled CSV written for this subject
    """
    if dbg: print "#" + subj_t + "--------"
    with open(in_file, 'rb') as fi,\
        open(label_file, 'rb') as fi2,\
        open(out_file, 'w') as fo:
        day = time_t[0:4]+"-"+time_t[4:6]+"-"+time_t[6:8]
        fr1 = csv.reader(fi, delimiter=',')  # combined 1Hz readings
        fr2 = csv.reader(fi2, delimiter='\t')  # task labels
        fw = csv.writer(fo, delimiter='\t')  # labelled output
if dbg: print "day: " + day
#headers
fw.writerow(next(fr1, None) + ['Difficulty', 'taskid'] )
next(fr2, None)
#forward till subject data starts
lab_row = fr2.next()
while subj_t != lab_row[2]:
lab_row = fr2.next()
if dbg: print "start: " + str(lab_row[0])
for idx, row in enumerate(fr1):
row[0] = datetime.strptime(day+' '+row[0]+'.0',\
'%Y-%m-%d %H:%M:%S.%f').strftime('%s.%f')
if Decimal(row[0]) < Decimal(lab_row[3]): # t < start_time
if dbg: print str(idx)+": t<start_time"
label = -1
fw.writerow(row + [label, lab_row[0]])
continue
if Decimal(row[0]) <= Decimal(lab_row[4]): # t <= end_time
if dbg: print str(idx)+": t <= end_time"
label = lab_row[5]
fw.writerow(row + [label, lab_row[0]])
continue
            while Decimal(row[0]) > Decimal(lab_row[4]): # t > end_time
                try:
                    lab_row = next(fr2)
                    label = lab_row[5]
                    if lab_row[2] != subj_t:
                        raise Exception("Reached end of data for subject " + subj_t)
except Exception as e: # reached end of file, or next subject
label = -1
if dbg: print e
break
fw.writerow(row + [label,lab_row[0]])
if dbg: print "end: "+str(lab_row[0])
return
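# Example call (hypothetical paths; label_sub_files below is the real driver):
#   label_data('data/20101214.1.combined.csv', 'out/task_xls_labels.csv',
#              'out/20101214.1.labelled.csv', subj_t='1', time_t='20101214')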
def plot_signal(x_ax, y_ax, label, ax=None):
    if ax is None:
        fig, ax = plt.subplots()
    ax.plot(x_ax, y_ax, label=label)
    ax.grid(True)
    ax.get_figure().tight_layout()  # safe whether or not an axis was passed in
    plt.legend(loc='upper left')
    plt.show()
    return ax
def create_sub_dict(indir):
""" Create dict of subject data [1Hz conmbined files]"""
onlyfiles = [ f for f in listdir(indir) if isfile(join(indir,f)) ]
pat = re.compile("[0-9]*\.[0-9]*\.combined\.csv")
temp_dat = [f.split('.')[0:2] for f in onlyfiles if pat.match(f)]
sub_dict = {i[1]: i[0] for i in temp_dat}
return sub_dict
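# Maps subject id -> recording date stamp, parsed from file names like
# '<datestamp>.<subject_id>.combined.csv' (e.g. {'1': '20101214'}; illustrative).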
def label_sub_files(indir, outdir):
""" Label each subject file [1Hz conmbined files]"""
sub_dict = create_sub_dict(indir)
for i in sub_dict:
label_data(indir + "/"+sub_dict[i] + "." +i+".combined.csv",
outdir + "/task_xls_labels.csv",
outdir + "/"+sub_dict[i] + "." +i+".labelled.csv",
i, sub_dict[i])
def get_subject_list(dir_url):
onlyfiles = [ f for f in listdir(dir_url) if isfile(join(dir_url,f)) ]
pat = re.compile("[0-9]*\.[0-9]*\.labelled\.csv")
temp_dat = [f.split('.')[0:2] for f in onlyfiles if pat.match(f)]
sub_dict = {i[1]: i[0] for i in temp_dat}
return sub_dict
def get_data(subj_list, dir_url):
subj_data = {}
for s_id in subj_list.keys():
s_time = subj_list[s_id]
s_file = s_time + "." + s_id + ".labelled.csv"
with open(join(dir_url,s_file), 'rb') as fi:
fr = csv.reader(fi,delimiter="\t")
next(fr) #header
s_data = list(fr)
subj_data[int(s_id)] = s_data
return subj_data
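# get_data returns {subject_id (int): rows}, where each labelled row is
# [timestamp, signal_quality, attention, meditation, difficulty, taskid]
# (column order inferred from plot_subject below).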
def plot_subject(s_comb, pdfpages, title=None):
"""
Plot each subject's data (1Hz)
"""
fig, ax = plt.subplots()
x_ax = [int(i[0].split('.')[0]) for i in s_comb]
sig_q = [int(i[1]) for i in s_comb]
atten = [int(i[2]) for i in s_comb]
medit = [int(i[3]) for i in s_comb]
diffi = [int(i[4])*50 for i in s_comb]
taskid= [int(i[5]) for i in s_comb]
taskid_set = list(set(taskid))
taskid_norm = [taskid_set.index(i) for i in taskid]
ax.plot(x_ax, sig_q, label='Quality')
ax.plot(x_ax, atten, label='Attention')
ax.plot(x_ax, medit, label='Meditation')
ax.plot(x_ax, diffi, label='Difficulty')
#ax.plot(x_ax, taskid_norm, label='taskid')
ax.grid(True)
fig.tight_layout()
plt.legend(loc='upper left')
plt.title(title)
pdfpages.savefig(fig)
return
def plot_subjects(subj_list, data, pdfpages, count=None):
for i in range(count if count else len(subj_list.keys())):
s1 = subj_list.keys()[i]
plot_subject(data[int(s1)], pdfpages, "Subject: "+s1)
return
def plot_avg_rows(targets, features, pdfpages, n, title):
"""
Given targets (difficulty) and features, plot the average of each features
grouped by the difficulty.
"""
print "Plotting Avg of dataframe"
avg_all = features.mean()
features['difficulty']=targets
grouped = features.groupby(by='difficulty')
fig, ax = plt.subplots()
ax.plot(avg_all, label='all')
for d in range(1, 5):
ax.plot(grouped.get_group(d).mean()[0:n-1],
label="difficulty: %d (%d tasks)" % (d,len(grouped.get_group(d))))
plt.legend(loc='upper right')
plt.title(title)
ax.grid(True)
pdfpages.savefig(fig)
def get_num_words(DATA_URL):
path_task_xls = DATA_URL + "/task.xls"
with open(path_task_xls, 'rb') as fi:
fr = csv.reader(fi, delimiter='\t')
next(fr)#header
data = list(fr)
data_cols = zip(*data)
l=len(data_cols[0])
num_words_stim = [float(len(i.split())) for i in data_cols[4]]
num_chars_stim = [float(len(i)) for i in data_cols[4]]
difficulty = [float(i) for i in data_cols[-1]]
time_diff = [float(Decimal(format_time(data_cols[3][i]))-\
Decimal(format_time(data_cols[2][i])))\
for i in xrange(l)]
time_per_word = [time_diff[i]/num_words_stim[i] for i in range(l)]
time_per_char = [time_diff[i]/num_chars_stim[i] for i in range(l)]
sentence_idx=[i for i in xrange(l) if num_words_stim[i] > 1]
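    # Pearson (r, p-value) between reading time per word/char and difficulty,
    # first over all stimuli, then restricted to multi-word sentences.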
print pearsonr(time_per_word, difficulty)
print pearsonr(time_per_char, difficulty)
print pearsonr([time_per_word[i] for i in sentence_idx],
[difficulty[i] for i in sentence_idx])
print pearsonr([time_per_char[i] for i in sentence_idx],
[difficulty[i] for i in sentence_idx])
    # distribution of difficulty ratings over multi-word (sentence) stimuli
    sentence_difficulty = [difficulty[i] for i in sentence_idx]
    plt.hist(sentence_difficulty)
def get_performance(x, y):
    """ Measures pairwise performance metrics for x (actual)
    and y (predicted) labelings.
    """
if len(x) != len(y):
print "Error: Lengths not same"
return
TP = FN = FP = TN = 0.0
for i in range(0,len(x)):
for j in range(0, len(x)):
if i == j:
continue
if x[i]==x[j] and y[i]==y[j]:
TP = TP + 1
elif x[i]!=x[j] and y[i]!=y[j]:
TN = TN + 1
elif x[i]==x[j] and y[i]!=y[j]:
FN = FN + 1
elif x[i]!=x[j] and y[i]==y[j]:
FP = FP + 1
TP = TP/2
TN = TN/2
FN = FN/2
FP = FP/2
accuracy = (TP + TN) / (TP + TN + FP + FN)
precision = TP/(TP + FP)
recall = TP/(TP + FN)
fscore = 2*precision*recall/(precision + recall)
print " Accuracy: \t" + str(round(accuracy, 3))
print " Precision: \t" + str(round(precision, 3))
print " Recall: \t" + str(round(recall, 3))
print " F-Score: \t" + str(round(fscore, 3))
| gpl-3.0 | 8,123,222,228,558,660,000 | 32.247525 | 87 | 0.562835 | false |