Dataset columns: repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool)

Each record below is shown as a metadata row (repo_name | path | copies | size | license) followed by the file content.

Rfam/rfam-production | scripts/support/mirnas/auto_commit.py | 1 | 5214 | apache-2.0

import os
import sys
import argparse
import subprocess
import json
import time
from datetime import date
from subprocess import Popen, PIPE
search_dirs = ["/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk1_searches",
"/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch1_chunk2_searches",
"/hps/nobackup/production/xfam/rfam/RELEASES/14.3/miRNA_relabelled/batch2/searches"]
# ---------------------------------------------------------------------------------------------
def check_desc_ga(DESC, cut_ga):
"""
"""
process = Popen(['grep', "GA", DESC], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = process.communicate()
if output.find("%.2f"%float(cut_ga)) == -1:
return False
return True
# ---------------------------------------------------------------------------------------------
def check_family_passes_qc(family_dir):
dir_elements = os.path.split(family_dir)
search_dir = dir_elements[0]
os.chdir(search_dir)
process = Popen(["rqc-all.pl", dir_elements[1]], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = process.communicate()[1]
if output.find("Family passed with no serious errors") == -1:
return False
return True
# ---------------------------------------------------------------------------------------------
def commit_family(family_dir, mirna_name):
dir_elements = os.path.split(family_dir)
os.chdir(dir_elements[0])
family_dir = dir_elements[1]
process = Popen(['rfnew.pl', '-m', "\"Adding new miRNA family %s \""% (mirna_name), family_dir], stdin=PIPE, stdout=PIPE, stderr=PIPE)
output = process.communicate()[1]
if output.find("This family has been assigned the accession") == -1:
return False
return True
# ---------------------------------------------------------------------------------------------
def calculate_progress(num_to_commit, num_processed):
return num_processed*100/num_to_commit
# ---------------------------------------------------------------------------------------------
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument("--mirna-ids", help="A .json file with miRNAs to commit", action="store")
parser.add_argument("--skip", help="A list of miRNA ids to skip", action="store", default=None)
parser.add_argument("--log-dir", help="Log file destination", action="store", default=os.getcwd())
parser.add_argument("--verbose", help="Display progress messages", action="store_true", default=False)
parser.add_argument("--no-qc", help="Skips QC step", action="store_true", default=False)
return parser
# ---------------------------------------------------------------------------------------------
if __name__=='__main__':
parser = parse_arguments()
args = parser.parse_args()
fp = open(args.mirna_ids, 'r')
miRNA_accessions = json.load(fp)
fp.close()
existing_fams = {}
if args.skip is not None:
fp = open("/hps/nobackup/production/xfam/rfam/RELEASES/14.3/input/existing_mirna_families.json", 'r')
existing_fams = json.load(fp)
fp.close()
committed = {}
num_to_commit = len(miRNA_accessions.keys())
count_processed = 0
#skip = ["MIPF0001496__mir-6012", "MIPF0001508__mir-4504", "MIPF0001511__mir-4803"]
#skip = []
#for miRNA in skip:
# del(miRNA_accessions[miRNA])
fp = open(os.path.join(args.log_dir, 'failed_mirna_commits_'+str(date.today())+'.log'), 'w')
for accession in miRNA_accessions.keys():
if accession not in committed:
dir_label = ''
if accession.find("_relabelled")==-1:
dir_label = accession+"_relabelled"
for search_dir in search_dirs:
family_dir_loc = os.path.join(search_dir, dir_label)
if os.path.exists(family_dir_loc):
desc_file = os.path.join(family_dir_loc, "DESC")
if check_desc_ga(desc_file, miRNA_accessions[accession]) is True:
check = False
if args.no_qc is True:
mirna_name = ""
if accession[0:2]=='MI':
mirna_name = accession.split("_")[2]
else:
mirna_name = accession.split("_")[0]
check = commit_family(family_dir_loc, mirna_name)
elif check_family_passes_qc(family_dir_loc) is True:
mirna_name = ""
if accession[0:2]=='MI':
mirna_name = accession.split("_")[2]
else:
mirna_name = accession.split("_")[0]
check = commit_family(family_dir_loc, mirna_name)
if check is True:
committed[accession] = ""
print ("Family %s committed" % (accession))
else:
fp.write(accession+'\n')
count_processed += 1
else:
continue
#if args.verbose:
# print ("%s%s families processed"%(calculate_progress(num_to_commit, count_processed)))
# close log file
fp.close()
# create a json dump with all successful family commits
print ("\nDumping committed family list...")
fp = open(os.path.join(args.log_dir,"committed_mirnas_"+str(date.today())+".json"), 'w')
json.dump(committed, fp)
fp.close()
print ("\nDone!\n")

mscuthbert/abjad | abjad/tools/documentationtools/ReSTAutosummaryDirective.py | 1 | 2421 | gpl-3.0

# -*- encoding: utf-8 -*-
from abjad.tools import datastructuretools
from abjad.tools.documentationtools.ReSTDirective import ReSTDirective
class ReSTAutosummaryDirective(ReSTDirective):
r'''A ReST Autosummary directive.
::
>>> toc = documentationtools.ReSTAutosummaryDirective()
>>> for item in ['foo.Foo', 'bar.Bar', 'baz.Baz']:
... toc.append(documentationtools.ReSTAutosummaryItem(text=item))
...
>>> toc
ReSTAutosummaryDirective(
children=(
ReSTAutosummaryItem(
text='foo.Foo'
),
ReSTAutosummaryItem(
text='bar.Bar'
),
ReSTAutosummaryItem(
text='baz.Baz'
),
),
directive='autosummary'
)
::
>>> print(toc.rest_format)
.. autosummary::
<BLANKLINE>
foo.Foo
bar.Bar
baz.Baz
'''
### CLASS VARIABLES ###
__documentation_section__ = 'reStructuredText'
### SPECIAL METHODS ###
def __setitem__(self, i, expr):
r'''Sets item `i` to `expr`.
Returns none.
'''
from abjad.tools import documentationtools
newexpr = []
for x in expr:
if isinstance(x, str):
newexpr.append(documentationtools.ReSTAutosummaryItem(text=x))
else:
newexpr.append(x)
datastructuretools.TreeContainer.__setitem__(self, i, newexpr)
### PRIVATE PROPERTIES ###
@property
def _children_rest_format_contributions(self):
result = ['']
for child in self.children:
contribution = child._rest_format_contributions
for x in contribution:
if x:
result.append(' ' + x)
else:
result.append(x)
return result
### PUBLIC PROPERTIES ###
@property
def directive(self):
r'''Directive of ReST autosummary directive.
Returns ``'autosummary'``.
'''
return 'autosummary'
@property
def node_class(self):
r'''Node class of ReST autosummary directive.
'''
from abjad.tools import documentationtools
return (
documentationtools.ReSTAutosummaryItem,
)

apdavison/elephant | elephant/test/test_spike_train_dissimilarity.py | 1 | 29313 | bsd-3-clause

# -*- coding: utf-8 -*-
"""
Tests for the spike train dissimilarity measures module.
:copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
import unittest
from neo import SpikeTrain
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
import scipy.integrate as spint
from quantities import ms, s, Hz
import elephant.kernels as kernels
import elephant.spike_train_generation as stg
import elephant.spike_train_dissimilarity as stds
class TimeScaleDependSpikeTrainDissimMeasures_TestCase(unittest.TestCase):
def setUp(self):
self.st00 = SpikeTrain([], units='ms', t_stop=1000.0)
self.st01 = SpikeTrain([1], units='ms', t_stop=1000.0)
self.st02 = SpikeTrain([2], units='ms', t_stop=1000.0)
self.st03 = SpikeTrain([2.9], units='ms', t_stop=1000.0)
self.st04 = SpikeTrain([3.1], units='ms', t_stop=1000.0)
self.st05 = SpikeTrain([5], units='ms', t_stop=1000.0)
self.st06 = SpikeTrain([500], units='ms', t_stop=1000.0)
self.st07 = SpikeTrain([12, 32], units='ms', t_stop=1000.0)
self.st08 = SpikeTrain([32, 52], units='ms', t_stop=1000.0)
self.st09 = SpikeTrain([42], units='ms', t_stop=1000.0)
self.st10 = SpikeTrain([18, 60], units='ms', t_stop=1000.0)
self.st11 = SpikeTrain([10, 20, 30, 40], units='ms', t_stop=1000.0)
self.st12 = SpikeTrain([40, 30, 20, 10], units='ms', t_stop=1000.0)
self.st13 = SpikeTrain([15, 25, 35, 45], units='ms', t_stop=1000.0)
self.st14 = SpikeTrain([10, 20, 30, 40, 50], units='ms', t_stop=1000.0)
self.st15 = SpikeTrain([0.01, 0.02, 0.03, 0.04, 0.05],
units='s', t_stop=1000.0)
self.st16 = SpikeTrain([12, 16, 28, 30, 42], units='ms', t_stop=1000.0)
self.st21 = stg.homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
self.st22 = stg.homogeneous_poisson_process(40*Hz, 0*ms, 1000*ms)
self.st23 = stg.homogeneous_poisson_process(30*Hz, 0*ms, 1000*ms)
self.rd_st_list = [self.st21, self.st22, self.st23]
self.st31 = SpikeTrain([12.0], units='ms', t_stop=1000.0)
self.st32 = SpikeTrain([12.0, 12.0], units='ms', t_stop=1000.0)
self.st33 = SpikeTrain([20.0], units='ms', t_stop=1000.0)
self.st34 = SpikeTrain([20.0, 20.0], units='ms', t_stop=1000.0)
self.array1 = np.arange(1, 10)
self.array2 = np.arange(1.2, 10)
self.qarray1 = self.array1 * Hz
self.qarray2 = self.array2 * Hz
self.tau0 = 0.0 * ms
self.q0 = np.inf / ms
self.tau1 = 0.000000001 * ms
self.q1 = 1.0 / self.tau1
self.tau2 = 1.0 * ms
self.q2 = 1.0 / self.tau2
self.tau3 = 10.0 * ms
self.q3 = 1.0 / self.tau3
self.tau4 = 100.0 * ms
self.q4 = 1.0 / self.tau4
self.tau5 = 1000000000.0 * ms
self.q5 = 1.0 / self.tau5
self.tau6 = np.inf * ms
self.q6 = 0.0 / ms
self.tau7 = 0.01 * s
self.q7 = 1.0 / self.tau7
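# The time constants tau0..tau7 (and the matching cost factors q_i = 1/tau_i)
# span the limiting regimes probed below: tau -> 0 treats every spike as
# distinct, while tau -> inf effectively compares only the spike counts of
# the two trains.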
self.t = np.linspace(0, 200, 20000001) * ms
def test_wrong_input(self):
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.array1, self.array2], self.q3)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], self.q3)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], 5.0 * ms)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.array1, self.array2], self.q3,
algorithm='intuitive')
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], self.q3,
algorithm='intuitive')
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.qarray1, self.qarray2], 5.0 * ms,
algorithm='intuitive')
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.array1, self.array2], self.tau3)
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.qarray1, self.qarray2], self.tau3)
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.qarray1, self.qarray2], 5.0 * Hz)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], self.tau2)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], 5.0)
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], self.tau2,
algorithm='intuitive')
self.assertRaises(TypeError, stds.victor_purpura_dist,
[self.st11, self.st13], 5.0,
algorithm='intuitive')
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.st11, self.st13], self.q4)
self.assertRaises(TypeError, stds.van_rossum_dist,
[self.st11, self.st13], 5.0)
self.assertRaises(NotImplementedError, stds.victor_purpura_dist,
[self.st01, self.st02], self.q3,
kernel=kernels.Kernel(2.0 / self.q3))
self.assertRaises(NotImplementedError, stds.victor_purpura_dist,
[self.st01, self.st02], self.q3,
kernel=kernels.SymmetricKernel(2.0 / self.q3))
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02], self.q1,
kernel=kernels.TriangularKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1],
stds.victor_purpura_dist(
[self.st01, self.st02], self.q3,
kernel=kernels.TriangularKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02],
kernel=kernels.TriangularKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0)
self.assertNotEqual(stds.victor_purpura_dist(
[self.st01, self.st02],
kernel=kernels.AlphaKernel(
2.0 / (np.sqrt(6.0) * self.q2)))[0, 1], 1.0)
self.assertRaises(NameError, stds.victor_purpura_dist,
[self.st11, self.st13], self.q2, algorithm='slow')
def test_victor_purpura_distance_fast(self):
# Tests of distances of simplest spike trains:
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st00], self.q2)[0, 1], 0.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st01], self.q2)[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st00], self.q2)[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st01], self.q2)[0, 1], 0.0)
# Tests of distances under elementary spike operations
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02], self.q2)[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st03], self.q2)[0, 1], 1.9)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st04], self.q2)[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st05], self.q2)[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st07], self.q2)[0, 1], 2.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st08], self.q4)[0, 1], 0.4)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st10], self.q3)[0, 1], 0.6 + 2)
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q2)[0, 1], 1)
# Tests on timescales
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q1)[0, 1],
stds.victor_purpura_dist(
[self.st11, self.st14], self.q5)[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q0)[0, 1], 6.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q1)[0, 1], 6.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q5)[0, 1], 2.0, 5)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q6)[0, 1], 2.0)
# Tests on unordered spiketrains
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4)[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4)[0, 1])
self.assertNotEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4,
sort=False)[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4,
sort=False)[0, 1])
# Tests on metric properties with random spiketrains
# (explicit calculation of second metric axiom in particular case,
# because from dist_matrix it is trivial)
dist_matrix = stds.victor_purpura_dist(
[self.st21, self.st22, self.st23], self.q3)
for i in range(3):
for j in range(3):
self.assertGreaterEqual(dist_matrix[i, j], 0)
if dist_matrix[i, j] == 0:
assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
assert_array_equal(stds.victor_purpura_dist(
[self.st21, self.st22], self.q3),
stds.victor_purpura_dist(
[self.st22, self.st21], self.q3))
self.assertLessEqual(dist_matrix[0, 1],
dist_matrix[0, 2] + dist_matrix[1, 2])
self.assertLessEqual(dist_matrix[0, 2],
dist_matrix[1, 2] + dist_matrix[0, 1])
self.assertLessEqual(dist_matrix[1, 2],
dist_matrix[0, 1] + dist_matrix[0, 2])
# Tests on proper unit conversion
self.assertAlmostEqual(
stds.victor_purpura_dist([self.st14, self.st16], self.q3)[0, 1],
stds.victor_purpura_dist([self.st15, self.st16], self.q3)[0, 1])
self.assertAlmostEqual(
stds.victor_purpura_dist([self.st16, self.st14], self.q3)[0, 1],
stds.victor_purpura_dist([self.st16, self.st15], self.q3)[0, 1])
self.assertEqual(
stds.victor_purpura_dist([self.st01, self.st05], self.q3)[0, 1],
stds.victor_purpura_dist([self.st01, self.st05], self.q7)[0, 1])
# Tests on algorithmic behaviour for equal spike times
self.assertEqual(
stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1],
0.8 + 1.0)
self.assertEqual(
stds.victor_purpura_dist([self.st31, self.st34], self.q3)[0, 1],
stds.victor_purpura_dist([self.st32, self.st33], self.q3)[0, 1])
self.assertEqual(
stds.victor_purpura_dist(
[self.st31, self.st33], self.q3)[0, 1] * 2.0,
stds.victor_purpura_dist(
[self.st32, self.st34], self.q3)[0, 1])
# Tests on spike train list lengths smaller than 2
self.assertEqual(stds.victor_purpura_dist(
[self.st21], self.q3)[0, 0], 0)
self.assertEqual(len(stds.victor_purpura_dist([], self.q3)), 0)
def test_victor_purpura_distance_intuitive(self):
# Tests of distances of simplest spike trains
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st00], self.q2,
algorithm='intuitive')[0, 1], 0.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st01], self.q2,
algorithm='intuitive')[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st00], self.q2,
algorithm='intuitive')[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st01], self.q2,
algorithm='intuitive')[0, 1], 0.0)
# Tests of distances under elementary spike operations
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st02], self.q2,
algorithm='intuitive')[0, 1], 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st03], self.q2,
algorithm='intuitive')[0, 1], 1.9)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st04], self.q2,
algorithm='intuitive')[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st05], self.q2,
algorithm='intuitive')[0, 1], 2.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st00, self.st07], self.q2,
algorithm='intuitive')[0, 1], 2.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st08], self.q4,
algorithm='intuitive')[0, 1], 0.4)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st10], self.q3,
algorithm='intuitive')[0, 1], 2.6)
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q2,
algorithm='intuitive')[0, 1], 1)
# Tests on timescales
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st14], self.q1,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st11, self.st14], self.q5,
algorithm='intuitive')[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q0,
algorithm='intuitive')[0, 1], 6.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q1,
algorithm='intuitive')[0, 1], 6.0)
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q5,
algorithm='intuitive')[0, 1], 2.0, 5)
self.assertEqual(stds.victor_purpura_dist(
[self.st07, self.st11], self.q6,
algorithm='intuitive')[0, 1], 2.0)
# Tests on unordered spiketrains
self.assertEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4,
algorithm='intuitive')[0, 1])
self.assertNotEqual(stds.victor_purpura_dist(
[self.st11, self.st13], self.q4,
sort=False, algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st12, self.st13], self.q4,
sort=False, algorithm='intuitive')[0, 1])
# Tests on metric properties with random spiketrains
# (explicit calculation of second metric axiom in particular case,
# because from dist_matrix it is trivial)
dist_matrix = stds.victor_purpura_dist(
[self.st21, self.st22, self.st23],
self.q3, algorithm='intuitive')
for i in range(3):
for j in range(3):
self.assertGreaterEqual(dist_matrix[i, j], 0)
if dist_matrix[i, j] == 0:
assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
assert_array_equal(stds.victor_purpura_dist(
[self.st21, self.st22], self.q3,
algorithm='intuitive'),
stds.victor_purpura_dist(
[self.st22, self.st21], self.q3,
algorithm='intuitive'))
self.assertLessEqual(dist_matrix[0, 1],
dist_matrix[0, 2] + dist_matrix[1, 2])
self.assertLessEqual(dist_matrix[0, 2],
dist_matrix[1, 2] + dist_matrix[0, 1])
self.assertLessEqual(dist_matrix[1, 2],
dist_matrix[0, 1] + dist_matrix[0, 2])
# Tests on proper unit conversion
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st14, self.st16], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st15, self.st16], self.q3,
algorithm='intuitive')[0, 1])
self.assertAlmostEqual(stds.victor_purpura_dist(
[self.st16, self.st14], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st16, self.st15], self.q3,
algorithm='intuitive')[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st01, self.st05], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st01, self.st05], self.q7,
algorithm='intuitive')[0, 1])
# Tests on algorithmic behaviour for equal spike times
self.assertEqual(stds.victor_purpura_dist(
[self.st31, self.st34], self.q3,
algorithm='intuitive')[0, 1],
0.8 + 1.0)
self.assertEqual(stds.victor_purpura_dist(
[self.st31, self.st34], self.q3,
algorithm='intuitive')[0, 1],
stds.victor_purpura_dist(
[self.st32, self.st33], self.q3,
algorithm='intuitive')[0, 1])
self.assertEqual(stds.victor_purpura_dist(
[self.st31, self.st33], self.q3,
algorithm='intuitive')[0, 1] * 2.0,
stds.victor_purpura_dist(
[self.st32, self.st34], self.q3,
algorithm='intuitive')[0, 1])
# Tests on spike train list lengths smaller than 2
self.assertEqual(stds.victor_purpura_dist(
[self.st21], self.q3,
algorithm='intuitive')[0, 0], 0)
self.assertEqual(len(stds.victor_purpura_dist(
[], self.q3, algorithm='intuitive')), 0)
def test_victor_purpura_algorithm_comparison(self):
assert_array_almost_equal(
stds.victor_purpura_dist([self.st21, self.st22, self.st23],
self.q3),
stds.victor_purpura_dist([self.st21, self.st22, self.st23],
self.q3, algorithm='intuitive'))
def test_van_rossum_distance(self):
# Tests of distances of simplest spike trains
self.assertEqual(stds.van_rossum_dist(
[self.st00, self.st00], self.tau2)[0, 1], 0.0)
self.assertEqual(stds.van_rossum_dist(
[self.st00, self.st01], self.tau2)[0, 1], 1.0)
self.assertEqual(stds.van_rossum_dist(
[self.st01, self.st00], self.tau2)[0, 1], 1.0)
self.assertEqual(stds.van_rossum_dist(
[self.st01, self.st01], self.tau2)[0, 1], 0.0)
# Tests of distances under elementary spike operations
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st02], self.tau2)[0, 1],
float(np.sqrt(2*(1.0-np.exp(-np.absolute(
((self.st01[0]-self.st02[0]) /
self.tau2).simplified))))))
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st05], self.tau2)[0, 1],
float(np.sqrt(2*(1.0-np.exp(-np.absolute(
((self.st01[0]-self.st05[0]) /
self.tau2).simplified))))))
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st05], self.tau2)[0, 1],
np.sqrt(2.0), 1)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st01, self.st06], self.tau2)[0, 1],
np.sqrt(2.0), 20)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st00, self.st07], self.tau1)[0, 1],
np.sqrt(0 + 2))
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st07, self.st08], self.tau4)[0, 1],
float(np.sqrt(2*(1.0-np.exp(-np.absolute(
((self.st07[0]-self.st08[-1]) /
self.tau4).simplified))))))
f_minus_g_squared = (
(self.t > self.st08[0]) * np.exp(
-((self.t-self.st08[0])/self.tau3).simplified) +
(self.t > self.st08[1]) * np.exp(
-((self.t-self.st08[1])/self.tau3).simplified) -
(self.t > self.st09[0]) * np.exp(
-((self.t-self.st09[0])/self.tau3).simplified))**2
distance = np.sqrt(2.0 * spint.cumtrapz(
y=f_minus_g_squared, x=self.t.magnitude)[-1] /
self.tau3.rescale(self.t.units).magnitude)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st08, self.st09], self.tau3)[0, 1], distance, 5)
self.assertAlmostEqual(stds.van_rossum_dist(
[self.st11, self.st14], self.tau2)[0, 1], 1)
# Tests on timescales
self.assertAlmostEqual(
stds.van_rossum_dist([self.st11, self.st14], self.tau1)[0, 1],
stds.van_rossum_dist([self.st11, self.st14], self.tau5)[0, 1])
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau0)[0, 1],
np.sqrt(len(self.st07) + len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau0)[0, 1],
np.sqrt(len(self.st07) + len(self.st14)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau1)[0, 1],
np.sqrt(len(self.st07) + len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau1)[0, 1],
np.sqrt(len(self.st07) + len(self.st14)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau5)[0, 1],
np.absolute(len(self.st07) - len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau5)[0, 1],
np.absolute(len(self.st07) - len(self.st14)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st11], self.tau6)[0, 1],
np.absolute(len(self.st07) - len(self.st11)))
self.assertAlmostEqual(
stds.van_rossum_dist([self.st07, self.st14], self.tau6)[0, 1],
np.absolute(len(self.st07) - len(self.st14)))
# Tests on unordered spiketrains
self.assertEqual(
stds.van_rossum_dist([self.st11, self.st13], self.tau4)[0, 1],
stds.van_rossum_dist([self.st12, self.st13], self.tau4)[0, 1])
self.assertNotEqual(
stds.van_rossum_dist([self.st11, self.st13],
self.tau4, sort=False)[0, 1],
stds.van_rossum_dist([self.st12, self.st13],
self.tau4, sort=False)[0, 1])
# Tests on metric properties with random spiketrains
# (explicit calculation of second metric axiom in particular case,
# because from dist_matrix it is trivial)
dist_matrix = stds.van_rossum_dist(
[self.st21, self.st22, self.st23], self.tau3)
for i in range(3):
for j in range(3):
self.assertGreaterEqual(dist_matrix[i, j], 0)
if dist_matrix[i, j] == 0:
assert_array_equal(self.rd_st_list[i], self.rd_st_list[j])
assert_array_equal(
stds.van_rossum_dist([self.st21, self.st22], self.tau3),
stds.van_rossum_dist([self.st22, self.st21], self.tau3))
self.assertLessEqual(dist_matrix[0, 1],
dist_matrix[0, 2] + dist_matrix[1, 2])
self.assertLessEqual(dist_matrix[0, 2],
dist_matrix[1, 2] + dist_matrix[0, 1])
self.assertLessEqual(dist_matrix[1, 2],
dist_matrix[0, 1] + dist_matrix[0, 2])
# Tests on proper unit conversion
self.assertAlmostEqual(
stds.van_rossum_dist([self.st14, self.st16], self.tau3)[0, 1],
stds.van_rossum_dist([self.st15, self.st16], self.tau3)[0, 1])
self.assertAlmostEqual(
stds.van_rossum_dist([self.st16, self.st14], self.tau3)[0, 1],
stds.van_rossum_dist([self.st16, self.st15], self.tau3)[0, 1])
self.assertEqual(
stds.van_rossum_dist([self.st01, self.st05], self.tau3)[0, 1],
stds.van_rossum_dist([self.st01, self.st05], self.tau7)[0, 1])
# Tests on algorithmic behaviour for equal spike times
f_minus_g_squared = (
(self.t > self.st31[0]) * np.exp(
-((self.t-self.st31[0])/self.tau3).simplified) -
(self.t > self.st34[0]) * np.exp(
-((self.t-self.st34[0])/self.tau3).simplified) -
(self.t > self.st34[1]) * np.exp(
-((self.t-self.st34[1])/self.tau3).simplified))**2
distance = np.sqrt(2.0 * spint.cumtrapz(
y=f_minus_g_squared, x=self.t.magnitude)[-1] /
self.tau3.rescale(self.t.units).magnitude)
self.assertAlmostEqual(stds.van_rossum_dist([self.st31, self.st34],
self.tau3)[0, 1],
distance, 5)
self.assertEqual(stds.van_rossum_dist([self.st31, self.st34],
self.tau3)[0, 1],
stds.van_rossum_dist([self.st32, self.st33],
self.tau3)[0, 1])
self.assertEqual(stds.van_rossum_dist([self.st31, self.st33],
self.tau3)[0, 1] * 2.0,
stds.van_rossum_dist([self.st32, self.st34],
self.tau3)[0, 1])
# Tests on spike train list lengths smaller than 2
self.assertEqual(stds.van_rossum_dist([self.st21], self.tau3)[0, 0], 0)
self.assertEqual(len(stds.van_rossum_dist([], self.tau3)), 0)
if __name__ == '__main__':
unittest.main()

icsnju/nap-core | nap_rest/orchestration/test/createFromTable.py | 1 | 1102 | apache-2.0

from orchestration.nap_api.project_create import create_project_from_table
table = []
t = {}
t['name'] = 'master'
t['cpu_shares'] = '1024'
t['mem_limit'] = '32m'
t['command'] = '/usr/sbin/sshd -D'
t['image'] = 'docker.iwanna.xyz:5000/hmonkey/mpi:v1'
t['volumes'] = [{'container_path': '/data', 'host_path': '/va', 'mode': 'rw'}, {'container_path': '/datass', 'host_path': '/vagr', 'mode': 'ro'}]
t['ports'] = [{'container_port': '3200', 'host_port': '32400', 'protocol': 'tcp'}, {'container_port': '3300', 'host_port': '32401', 'protocol': 'udp'}]
table.append(t)
t = {}
t['name'] = 'slave'
t['cpu_shares'] = '1024'
t['mem_limit'] = '32m'
t['command'] = '/usr/sbin/sshd -D'
t['image'] = 'docker.iwanna.xyz:5000/hmonkey/mpi:v1'
t['volumes'] = [{'container_path': '/data', 'host_path': '/va', 'mode': 'rw'}, {'container_path': '/datass', 'host_path': '/vagr', 'mode': 'ro'}]
t['ports'] = [{'container_port': '3200', 'host_port': '32400', 'protocol': 'tcp'}, {'container_port': '3300', 'host_port': '32401', 'protocol': 'udp'}]
table.append(t)
print create_project_from_table('bana', 'tabless', table)

simphony/simphony-common | simphony/io/tests/test_h5_cuds.py | 1 | 9192 | bsd-2-clause

import unittest
import os
from contextlib import closing
import shutil
import tempfile
import tables
from simphony.core import CUBA
from simphony.core.data_container import DataContainer
from simphony.io.h5_cuds import H5CUDS
from simphony.io.h5_mesh import H5Mesh
from simphony.io.h5_particles import H5Particles
from simphony.io.h5_lattice import H5Lattice
from simphony.cuds import Mesh, Particles
from simphony.cuds.mesh_items import Edge, Face, Cell, Point
from simphony.cuds.lattice import make_cubic_lattice
from simphony.testing.abc_check_engine import (
ParticlesEngineCheck, MeshEngineCheck,
LatticeEngineCheck)
class TestH5CUDS(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_open_with_append_mode(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'a')) as handle:
self.assertTrue(handle.valid())
def test_open_with_write_mode(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w')) as handle:
self.assertTrue(handle.valid())
def test_open_with_read_only_mode(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w')) as handle:
self.assertTrue(handle.valid())
with closing(H5CUDS.open(filename, 'r')) as handle:
self.assertTrue(handle.valid())
def test_open_with_compression_off(self):
filters = tables.Filters(complevel=0)
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w', filters=filters)) as handle:
self.assertTrue(handle.valid())
with closing(H5CUDS.open(filename, 'r', filters=filters)) as handle:
self.assertTrue(handle.valid())
with closing(H5CUDS.open(filename, 'a', filters=filters)) as handle:
self.assertTrue(handle.valid())
def test_init_with_non_file(self):
with self.assertRaises(Exception):
H5CUDS(None)
def test_valid(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename, 'w')) as handle:
self.assertTrue(handle.valid())
self.assertFalse(handle.valid())
with closing(H5CUDS.open(filename, 'a')) as handle:
self.assertTrue(handle.valid())
self.assertFalse(handle.valid())
def test_closed_file_not_usable(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
with closing(H5CUDS.open(filename)) as handle:
handle.add_dataset(Mesh(name="test_1"))
handle.add_dataset(Particles(name="test_2"))
lattice = make_cubic_lattice("test_3", 1.0, (2, 3, 4))
handle.add_dataset(lattice)
test_h1 = handle.get_dataset("test_1")
test_h2 = handle.get_dataset("test_2")
test_h3 = handle.get_dataset("test_3")
with self.assertRaises(Exception):
handle.get_dataset('test_h1')
with self.assertRaises(Exception):
test_h1.name = 'foo'
with self.assertRaises(Exception):
test_h2.name = 'foo'
with self.assertRaises(Exception):
test_h3.name = 'foo'
class TestH5CUDSVersions(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.existing_filename = os.path.join(self.temp_dir, 'test.cuds')
handle = H5CUDS.open(self.existing_filename)
handle.close()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_version(self):
with closing(tables.open_file(
self.existing_filename, mode="r")) as h5file:
self.assertTrue(isinstance(h5file.root._v_attrs.cuds_version, int))
def test_incorrect_version(self):
with closing(tables.open_file(
self.existing_filename, mode="a")) as h5file:
h5file.root._v_attrs.cuds_version = -1
with self.assertRaises(ValueError):
H5CUDS.open(self.existing_filename)
class TestParticlesCudsOperations(ParticlesEngineCheck, unittest.TestCase):
def setUp(self):
ParticlesEngineCheck.setUp(self)
self.temp_dir = tempfile.mkdtemp()
self.engines = []
def engine_factory(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
engine = H5CUDS.open(filename)
self.engines.append(engine)
return engine
def check_instance_of_dataset(self, ds):
""" Check if a dataset is instance of a class
"""
self.assertTrue(isinstance(ds, H5Particles))
def test_add_get_dataset_with_cuba_keys_argument(self):
engine = self.engine_factory()
items = self.create_dataset_items()
reference = self.create_dataset(name='test')
expected = self.create_dataset(name='test')
# Add some CUBA data
for particle in items:
particle.data = DataContainer({CUBA.VELOCITY: [1, 0, 0]})
expected.add([particle])
particle.data = DataContainer(
{CUBA.VELOCITY: [1, 0, 0], CUBA.MASS: 1})
reference.add([particle])
# Store reference dataset along with its data
engine.add_dataset(reference, {CUBA.PARTICLE: [CUBA.VELOCITY]})
# Closing and reopening the file
engine.close()
engine = self.engine_factory()
ds = engine.get_dataset('test')
self.compare_dataset(ds, expected)
def tearDown(self):
for engine in self.engines:
engine.close()
class TestMeshCudsOperations(MeshEngineCheck, unittest.TestCase):
def setUp(self):
MeshEngineCheck.setUp(self)
self.temp_dir = tempfile.mkdtemp()
self.engines = []
def engine_factory(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
engine = H5CUDS.open(filename)
self.engines.append(engine)
return engine
def check_instance_of_dataset(self, ds):
""" Check if a dataset is instance of a class
"""
self.assertTrue(isinstance(ds, H5Mesh))
def test_add_get_dataset_with_cuba_keys_argument(self):
engine = self.engine_factory()
items = self.create_dataset_items()
reference = self.create_dataset(name='test')
expected = self.create_dataset(name='test')
# Add some CUBA data
for point in [p for p in items if isinstance(p, Point)]:
point.data = DataContainer({CUBA.VELOCITY: [1, 0, 0]})
expected.add([point])
point.data = DataContainer(
{CUBA.VELOCITY: [1, 0, 0], CUBA.MASS: 1})
reference.add([point])
for edge in [e for e in items if isinstance(e, Edge)]:
expected.add([edge])
reference.add([edge])
for face in [f for f in items if isinstance(f, Face)]:
expected.add([face])
reference.add([face])
for cell in [c for c in items if isinstance(c, Cell)]:
expected.add([cell])
reference.add([cell])
# Store reference dataset along with its data
engine.add_dataset(reference, {CUBA.POINT: [CUBA.VELOCITY]})
# Closing and reopening the file
engine.close()
engine = self.engine_factory()
ds = engine.get_dataset('test')
self.compare_dataset(ds, expected)
def tearDown(self):
for engine in self.engines:
engine.close()
class TestLatticeCudsOperations(LatticeEngineCheck, unittest.TestCase):
def setUp(self):
LatticeEngineCheck.setUp(self)
self.temp_dir = tempfile.mkdtemp()
self.engines = []
def engine_factory(self):
filename = os.path.join(self.temp_dir, 'test.cuds')
engine = H5CUDS.open(filename)
self.engines.append(engine)
return engine
def check_instance_of_dataset(self, ds):
""" Check if a dataset is instance of a class
"""
self.assertTrue(isinstance(ds, H5Lattice))
def test_add_get_dataset_with_cuba_keys_argument(self):
engine = self.engine_factory()
reference = self.create_dataset(name='test')
expected = self.create_dataset(name='test')
# Add some CUBA data
for node in reference.iter(item_type=CUBA.NODE):
node.data = DataContainer({CUBA.NAME: 'test_container'})
expected.update([node])
node.data = DataContainer({CUBA.NAME: 'test_container',
CUBA.DENSITY: 2})
reference.update([node])
# Store reference dataset along with its data
engine.add_dataset(reference, {CUBA.NODE: [CUBA.NAME]})
# Closing and reopening the file
engine.close()
engine = self.engine_factory()
ds = engine.get_dataset('test')
self.compare_dataset(ds, expected)
def tearDown(self):
for engine in self.engines:
engine.close()
if __name__ == '__main__':
unittest.main()

lindenb/bedtools2 | docs/conf.py | 2 | 8228 | gpl-2.0

# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../'))
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('pyplots'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.ifconfig', 'sphinx.ext.viewcode',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bedtools'
copyright = u'2009 - 2017, Aaron R. Quinlan and Neil Kindlon'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.27.0'
# The full version, including alpha/beta/rc tags.
release = '2.27.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'rtd'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = project + " v" + release
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'bedtools.swiss.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'bedtools.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'labibi.css'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebar-intro.html', 'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bedtools-docs'
# Google analytics
#googleanalytics_id = "UA-24167610-15"
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bedtools.tex', u'Bedtools Documentation',
u'Quinlan lab @ Univ. of Utah', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bedtools', u'Bedtools Documentation', [u'UU'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'bedtools': ('http://bedtools.readthedocs.org/en/latest/', None)}
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
return type(name, (), {})
else:
return Mock()
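# The Mock class above stands in for heavy optional dependencies so that Sphinx
# (and the matplotlib plot directive) can import the documented modules on
# builders where numpy/matplotlib are not installed.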
MOCK_MODULES = ['numpy', 'matplotlib', 'matplotlib.pyplot',
'matplotlib.sphinxext', 'matplotlib.sphinxext.plot_directive']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()

jbudynk/sherpa | sherpa/logposterior.py | 1 | 3090 | gpl-3.0

#
# Copyright (C) 2009 Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from sherpa.stats import Likelihood, truncation_value, Stat
from sherpa.models import Parameter
from sherpa.utils import NoNewAttributesAfterInit
from sherpa.utils.err import StatErr
from itertools import izip
import numpy
class Prior(Likelihood):
# Provide a Model-like parameter interface
def __getattr__(self, name):
par = self.__dict__.get(name.lower())
if (par is not None) and isinstance(par, Parameter):
return par
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __setattr__(self, name, val):
par = getattr(self, name.lower(), None)
if (par is not None) and isinstance(par, Parameter):
par.val = val
else:
NoNewAttributesAfterInit.__setattr__(self, name, val)
def __init__(self, statfunc=None, hyperpars={}, pars={}, name='prior'):
# Posterior hyper-parameters
self.hyperpars = []
for key in hyperpars.keys():
val = hyperpars[key]
param = Parameter(name, key, val, alwaysfrozen=True)
self.__dict__[key] = param
self.hyperpars.append(param)
# References to parameters in source model
self.pars = []
for key in pars.keys():
self.__dict__[key] = pars[key]
self.pars.append(pars[key])
self._statfuncset = False
self.statfunc = (lambda x: None)
if statfunc is not None:
self.statfunc = statfunc
self._statfuncset = True
Likelihood.__init__(self, name)
def __str__(self):
s = self.name
hfmt = '\n %-15s %-6s %12s'
s += hfmt % ('Param', 'Type', 'Value')
s += hfmt % ('-'*5, '-'*4, '-'*5)
for p in self.hyperpars:
s += ('\n %-15s %-6s %12g' %
(p.fullname,'frozen', p.val))
return s
def set_statfunc(self, func):
self.statfunc = func
self._statfuncset = True
def calc_stat(self, data, model, staterror=None, syserror=None,
weight=None):
if not self._statfuncset:
raise StatErr('nostat', self.name, 'calc_stat()')
return self.statfunc(self, data, model, staterror, syserror, weight)
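# Illustrative usage sketch only -- the statistic function and parameter names
# below are assumptions, not part of the module above:
#
#   def flat_prior(prior, data, model, staterror=None, syserror=None, weight=None):
#       return 0.0  # contributes nothing to the log-posterior
#
#   prior = Prior(statfunc=flat_prior,
#                 hyperpars={'scale': 1.0},      # becomes a frozen Parameter
#                 pars={'pos': src_model.pos},   # reference to a source-model parameter
#                 name='flatprior')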

jimstorch/python-read-filepro | read_filepro/fpdatabase.py | 1 | 4921 | apache-2.0

#------------------------------------------------------------------------------
# read_filepro/database.py
# Copyright 2010 Jim Storch
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#------------------------------------------------------------------------------
"""
Open and interpret a Filepro database directory.
Not intended for use on files under control of an active Filepro session.
In other words; halt Filepro, copy the data files, use on the copies.
"""
from read_filepro.fpmap import FPMap
from read_filepro.fpkey import FPKey
from read_filepro.fpdata import FPData
class FPDatabase(object):
def __init__(self, folder):
self.fpmap = FPMap(folder)
self.fpkey = FPKey(folder, self.fpmap)
self.fpdata = FPData(folder, self.fpmap, self.fpkey)
def is_deleted(self, index):
"""
Given a record number, returns True if that record is marked as
deleted.
"""
return self.fpkey.deletes[index]
def get_total_record_count(self):
"""
Return the total number of records, including deleted ones.
"""
return self.fpkey.total_records
def get_active_record_count(self):
"""
Return the number of active records; i.e. total - deleted.
"""
return self.fpkey.active_records
def get_deleted_record_count(self):
"""
Return the number of deleted records.
"""
return self.fpkey.deleted_records
def get_field_count(self):
"""
Return the number of fields/columns in the database.
Omits dummy/placeholder fields with zero length.
"""
return len(self.get_field_names())
def get_field_names(self):
"""
Return the name of all fields/columns in the database.
Merges key file and data file field names.
Omits dummy/placeholder fields with zero length.
"""
key_fields = [ d[0] for d in self.fpkey.fields ]
data_fields = [ d[0] for d in self.fpdata.fields ]
return key_fields + data_fields
def get_all_records(self):
"""
Return a list of all records, including deleted.
"""
records = []
for x in range(self.fpkey.total_records):
row = self.fpkey.records[x] + self.fpdata.records[x]
records.append(row)
return records
def get_active_records(self):
"""
Return a list of active records, omitting deleted ones.
"""
records = []
for x in range(self.fpkey.total_records):
if not self.is_deleted(x):
row = self.fpkey.records[x] + self.fpdata.records[x]
records.append(row)
return records
def get_deleted_records(self):
"""
Return a list of deleted records, omitting active ones.
"""
records = []
for x in range(self.fpkey.total_records):
if self.is_deleted(x):
row = self.fpkey.records[x] + self.fpdata.records[x]
records.append(row)
return records
def get_record(self, index):
"""
Given an integer value, returns the corresponding record merges from
the key and data files.
"""
return self.fpkey.records[index] + self.fpdata.records[index]
def get_record_dict(self, index):
"""
Given an integer value, returns a dictionary of field names mapped
to record values.
"""
fields = self.get_field_names()
columns = self.get_record(index)
combo = zip(fields, columns)
return dict(combo)
def get_field_types(self):
"""
Scans all values in each database column to see if they are numeric
or string values.
Returns a table containing 'number' or 'string' for each column.
The purpose of this is determining whether to quote when
exporting to CSV files.
"""
column_types = []
for i in range(self.get_field_count()):
this_type = 'number'
for record in self.get_all_records():
entry = record[i]
if entry:
try:
foo = float(entry)
except ValueError:
this_type = 'string'
column_types.append(this_type)
return column_types
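# Usage sketch (the folder path is hypothetical; as the module docstring notes,
# it should point at a copy of the filePro directory, not a live database):
#
#   db = FPDatabase('/path/to/copied/filepro/folder')
#   print db.get_field_names()
#   for record in db.get_active_records():
#       pass  # each record is a list of key-file values + data-file values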

abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/mail/tests/test_notificationrecipients.py | 1 | 1215 | agpl-3.0

# Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
__metaclass__ = type
from lp.registry.interfaces.person import PersonVisibility
from lp.services.mail.notificationrecipientset import NotificationRecipientSet
from lp.testing import (
celebrity_logged_in,
person_logged_in,
TestCaseWithFactory,
)
from lp.testing.layers import DatabaseFunctionalLayer
class TestNotificationRecipientSet(TestCaseWithFactory):
layer = DatabaseFunctionalLayer
def test_add_doesnt_break_on_private_teams(self):
# Since notifications are not exposed to UI, they should handle
# protected preferred emails fine.
email = self.factory.getUniqueEmailAddress()
notified_team = self.factory.makeTeam(
email=email, visibility=PersonVisibility.PRIVATE)
recipients = NotificationRecipientSet()
notifier = self.factory.makePerson()
with person_logged_in(notifier):
recipients.add([notified_team], 'some reason', 'some header')
with celebrity_logged_in("admin"):
self.assertEqual([notified_team], recipients.getRecipients())

sunu/oppia-test-2 | apps/image/tests.py | 1 | 1768 | apache-2.0

# coding: utf-8
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Jeremy Emerson'
from oppia.apps.image.models import Image
from django.utils import unittest
from django.core.exceptions import ValidationError
from django.core.files import File
class ImageUnitTests(unittest.TestCase):
"""Test image models."""
def test_image_class(self):
"""Test the Image class."""
# An Image must have the 'raw' property set.
image = Image(id='The hash id')
with self.assertRaises(ValidationError):
image.put()
# TODO: image format validation.
# The 'raw' property must be a valid image.
# with self.assertRaises(AssertionError):
# image.raw = 'The image'
# Set the 'raw' property to be a valid image, then do a put().
with open("oppia/tests/data/img.png") as f:
image.raw = File(f)
image_file_content = image.raw.read()
image.put()
# Retrieve the image.
retrieved_image = Image.objects.get(id='The hash id')
# Read its content
retrieved_content = retrieved_image.raw.read()
self.assertEqual(retrieved_content, image_file_content)

JohnReid/biopsy | Python/gapped_pssms/score_pssms.py | 1 | 5645 | mit

#
# Copyright John Reid 2008
#
"""
Code to rank PSSMs by "interesting-ness".
Information content.
Low-order predictability.
Number of sequences with sites.
"""
from gapped_pssms.parse_gapped_format import parse_models
from itertools import imap
from gapped_pssms.pssm_score import *
from cookbook import DictOfLists
import glob, logging, shutil, os, re
import hmm.pssm.logo as logo
def calculate_emissions(model):
emissions = numpy.zeros((model.N, model.M))
for i in xrange(model.N):
assert model.emissions[i][0] == i
emissions[i] = model.emissions[i][1]
return emissions
def calculate_gap_probs(model):
gap_probs = numpy.ones((model.N))
for f, t, p in model.transitions:
gap_probs[t] = p
return gap_probs
class Log(object):
"""
Parses log files.
"""
log_file_name_re = re.compile('(.*).log')
pssm_num_re = re.compile('PSSM ([0-9]+)')
sites_re = re.compile('[Ff]ound ([0-9]+) sites. ([0-9]+)/([0-9]+) sequences have at least one site')
def __init__(self, log_file):
"""
Parses log files:
************** PSSM 4 **************
Seed ctgctgtg with gap at 3 had 79 hits in 72/601 sequences
Seed score: 2528.810289
Found 238 sites. 145/601 sequences have at least one site
Entropy/base : 0.923442
Information content : 10.238500
"""
logging.info('Parsing log file %s', log_file)
self.log_file = log_file
self.site_numbers = dict()
re_match = Log.log_file_name_re.match(os.path.basename(log_file))
self.tag = re_match.group(1)
logging.info('%s: %s', self.log_file, self.tag)
for line in open(log_file):
m = Log.pssm_num_re.search(line)
if m:
pssm_num = int(m.group(1))
# logging.info('PSSM: %d', pssm_num)
m = Log.sites_re.search(line)
if m and -1 == line.find('Trained model'):
num_sites = int(m.group(1))
num_seqs_with_site = int(m.group(2))
num_seqs = int(m.group(3))
# logging.info('# sites: %d; # seqs with sites: %d; # seqs: %d', num_sites, num_seqs_with_site, num_seqs)
self.site_numbers[pssm_num] = (num_sites, num_seqs_with_site, num_seqs)
class Pssm(object):
pssm_file_name_re = re.compile('(.*)-([0-9]+).pssm')
def __init__(self, pssm_file, log):
self.pssm_file = pssm_file
self.png_file = pssm_file.replace('.pssm', '.png')
self.eps_file = pssm_file.replace('.pssm', '.eps')
re_match = Pssm.pssm_file_name_re.match(os.path.basename(pssm_file))
self.tag = re_match.group(1)
self.pssm_idx = int(re_match.group(2))
self.num_sites, self.num_seqs_with_site, self.num_seqs = log.site_numbers[self.pssm_idx]
# logging.info('%s: %s %d %d', self.pssm_file, self.fragment, self.cross_fold, self.pssm_idx)
self.model = parse_models(open(self.pssm_file)).next()
self.emissions = calculate_emissions(self.model)
self.gap_probs = calculate_gap_probs(self.model)
self.first_order_entropy_score = calculate_first_order_entropy_score(self.emissions)
self.information_content_score = calculate_information_content_score(self.emissions)
self.num_seqs_with_site_score = float(self.num_seqs_with_site) / float(self.num_seqs)
self.overall_score = weighted_geometric_mean(
(self.first_order_entropy_score, self.information_content_score, self.num_seqs_with_site_score),
[1.5 , 1. , 1.]
)
logging.info(
'%s; %8g; %8g; %8g; %8g',
self.pssm_file,
self.first_order_entropy_score,
self.information_content_score,
self.num_seqs_with_site_score,
self.overall_score
)
def write_image(self):
image = logo.pssm_as_image(
self.emissions,
transparencies=self.gap_probs
)
image.save(self.png_file, "PNG")
image.save(self.eps_file, "EPS")
def montage(input_files, output_file):
montage_cmd = 'montage -tile 1x -geometry x240 %s %s' % (' '.join(input_files), output_file)
os.system(montage_cmd)
class PssmSet(object):
def __init__(self, basename):
self.basename = basename
self.tag = os.path.basename(basename)
self.log = Log('%s.log' % self.basename)
self.pssms = dict(
(num, Pssm('%s-%03d.pssm' % (self.basename, num), self.log))
for num in self.log.site_numbers.keys()
)
def sorted_by_score(self):
"""
Returns a list of pssms sorted by score.
"""
sorted_pssms = self.pssms.values()
sorted_pssms.sort(key=lambda p: p.overall_score, reverse=True)
logging.info(' '.join(imap(str, (p.pssm_idx for p in sorted_pssms))))
return sorted_pssms
def montage_by_score(self):
sorted_pssms = self.sorted_by_score()
ranked_files = [p.png_file for p in sorted_pssms]
ranked_file = '%s-ranked.png' % self.basename
montage(ranked_files, ranked_file)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
import sys
root_dir = sys.argv[1]
tag_re = re.compile('(T.*).log')
tags = map(lambda m: m.group(1), filter(None, imap(tag_re.search, glob.glob(os.path.join(root_dir, 'T*.log')))))
# tags = ['T00140-3']
for tag in tags:
logging.info(tag)
pssm_set = PssmSet(os.path.join(root_dir, tag))
pssm_set.montage_by_score()
| mit | -1,598,437,059,165,579,800 | 33.631902 | 121 | 0.586005 | false |
ilogue/scrolls | test/configuration_tests.py | 1 | 3494 | from test.ditestcase import DITestCase
from mock import Mock, patch
from os.path import expanduser
class ConfigurationTests(DITestCase):
def setUp(self):
super(ConfigurationTests, self).setUp()
self.patchers = {
'configparser': patch('scrolls.configuration.configparser'),
'os': patch('scrolls.configuration.os'),
}
configparser = self.patchers['configparser'].start()
self.os = self.patchers['os'].start()
self.os.path.isfile.return_value = False # default: no conf file
self.os.path.expanduser = expanduser
self.parser = Mock()
self.parser.sections.return_value = ['scrolls']
configparser.ConfigParser.return_value = self.parser
def tearDown(self):
super(ConfigurationTests, self).tearDown()
for patcher in self.patchers.values():
patcher.stop()
def test_defaults(self):
from scrolls.configuration import Configuration
config = Configuration(self.dependencies)
self.assertEqual(config.server, '0.0.0.0')
def test_useCommandlineArgs_overrides_defaults_and_config_file(self):
from scrolls.configuration import Configuration
config = Configuration(self.dependencies)
self.assertEqual(config.server, '0.0.0.0')
self.assertEqual(config.dry_run, False)
self.parser.get.side_effect = lambda s, k: 'somewhere.else'
args = Mock()
args.server = 'remote.com'
args.dry_run = True
config.useCommandlineArgs(args)
self.assertEqual(config.server, 'remote.com')
self.assertEqual(config.dry_run, True)
def test_If_config_file_reads_it(self):
from scrolls.configuration import Configuration
self.os.path.isfile.return_value = True
Configuration(self.dependencies)
self.parser.read.assert_called_with(expanduser('~/scrolls.conf'))
def test_Uses_values_in_file(self):
from scrolls.configuration import Configuration
self.os.path.isfile.return_value = True
self.parser.getboolean.side_effect = lambda s, k: True
self.parser.get.side_effect = lambda s, k: 'mothership'
config = Configuration(self.dependencies)
self.assertEqual('mothership', config.ticket_secret)
def test_selectApplications(self):
from scrolls.configuration import Configuration
config = Configuration(self.dependencies)
pkgs = {'nginx': False, 'mongodb': True}
self.filesys.hasPackage.side_effect = lambda p: pkgs[p]
apps = config.selectApplications()
self.assertEqual(apps, {
'mongodb': '/var/log/mongodb/mongodb.log'
})
self.log.selectedApplication.assert_any_call(
name='mongodb',
logfile='/var/log/mongodb/mongodb.log'
)
pkgs = {'nginx': True, 'mongodb': False}
self.filesys.hasPackage.side_effect = lambda p: pkgs[p]
config = Configuration(self.dependencies)
apps = config.selectApplications()
self.assertEqual(apps, {
'nginx-access': '/var/log/nginx/access.log',
'nginx-error': '/var/log/nginx/error.log',
})
self.log.selectedApplication.assert_any_call(
name='nginx-access',
logfile='/var/log/nginx/access.log'
)
self.log.selectedApplication.assert_any_call(
name='nginx-error',
logfile='/var/log/nginx/error.log'
)
| mit | 3,694,330,506,573,593,000 | 39.16092 | 73 | 0.64024 | false |
lutianming/leetcode | reverse_nodes_in_k_group.py | 1 | 1097 | from leetcode import ListNode
class Solution:
# @param head, a ListNode
# @param k, an integer
# @return a ListNode
def reverseKGroup(self, head, k):
if not head or not head.next or k==1:
return head
newhead = None
tail = head
prev = None
while True:
count = 1
while tail and tail.next and count < k:
count += 1
tail = tail.next
if count != k:
break
node = head
next = node.next
for i in range(k-1):
tmp = next.next
next.next = node
node = next
next = tmp
if not prev:
newhead = tail
else:
prev.next = tail
prev = head
head.next = tmp
head = tmp
tail = head
if not newhead:
newhead = head
return newhead
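# Worked example (not part of the original solution): with k=2 the list
# 1->2->3->4->5 becomes 2->1->4->3->5; a trailing group shorter than k (the
# lone node 5) is left untouched because count != k breaks the loop.
# For the test below, reverseKGroup([1,2,3,4,5,6], 2) yields 2->1->4->3->6->5.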
a = ListNode.from_list([1,2,3,4,5,6])
s = Solution()
print(s.reverseKGroup(a, 2))
| mit | -468,447,601,640,617,300 | 22.340426 | 51 | 0.44485 | false |
ChampionZP/DeepLearningImplementations | Colorful/src/utils/general_utils.py | 1 | 4257 | import os
import numpy as np
from skimage import color
import matplotlib.pylab as plt
def remove_files(files):
"""
Remove files from disk
args: files (str or list) remove all files in 'files'
"""
if isinstance(files, (list, tuple)):
for f in files:
if os.path.isfile(os.path.expanduser(f)):
os.remove(f)
elif isinstance(files, str):
if os.path.isfile(os.path.expanduser(files)):
os.remove(files)
def create_dir(dirs):
"""
Create directory
args: dirs (str or list) create all dirs in 'dirs'
"""
if isinstance(dirs, (list, tuple)):
for d in dirs:
if not os.path.exists(os.path.expanduser(d)):
os.makedirs(d)
elif isinstance(dirs, str):
if not os.path.exists(os.path.expanduser(dirs)):
os.makedirs(dirs)
def setup_logging(model_name):
model_dir = "../../models"
# Output path where we store experiment log and weights
model_dir = os.path.join(model_dir, model_name)
fig_dir = "../../figures"
# Create if it does not exist
create_dir([model_dir, fig_dir])
def plot_batch(color_model, q_ab, X_batch_black, X_batch_color, batch_size, h, w, nb_q, epoch):
# Format X_colorized
X_colorized = color_model.predict(X_batch_black / 100.)[:, :, :, :-1]
X_colorized = X_colorized.reshape((batch_size * h * w, nb_q))
X_colorized = q_ab[np.argmax(X_colorized, 1)]
X_a = X_colorized[:, 0].reshape((batch_size, 1, h, w))
X_b = X_colorized[:, 1].reshape((batch_size, 1, h, w))
X_colorized = np.concatenate((X_batch_black, X_a, X_b), axis=1).transpose(0, 2, 3, 1)
X_colorized = [np.expand_dims(color.lab2rgb(im), 0) for im in X_colorized]
X_colorized = np.concatenate(X_colorized, 0).transpose(0, 3, 1, 2)
X_batch_color = [np.expand_dims(color.lab2rgb(im.transpose(1, 2, 0)), 0) for im in X_batch_color]
X_batch_color = np.concatenate(X_batch_color, 0).transpose(0, 3, 1, 2)
list_img = []
for i, img in enumerate(X_colorized[:min(32, batch_size)]):
arr = np.concatenate([X_batch_color[i], np.repeat(X_batch_black[i] / 100., 3, axis=0), img], axis=2)
list_img.append(arr)
plt.figure(figsize=(20,20))
list_img = [np.concatenate(list_img[4 * i: 4 * (i + 1)], axis=2) for i in range(len(list_img) / 4)]
arr = np.concatenate(list_img, axis=1)
plt.imshow(arr.transpose(1,2,0))
ax = plt.gca()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.tight_layout()
plt.savefig("../../figures/fig_epoch%s.png" % epoch)
plt.clf()
plt.close()
def plot_batch_eval(color_model, q_ab, X_batch_black, X_batch_color, batch_size, h, w, nb_q, T):
# Format X_colorized
X_colorized = color_model.predict(X_batch_black / 100.)[:, :, :, :-1]
X_colorized = X_colorized.reshape((batch_size * h * w, nb_q))
# Reweight probas
X_colorized = np.exp(np.log(X_colorized) / T)
X_colorized = X_colorized / np.sum(X_colorized, 1)[:, np.newaxis]
# Reweighted
q_a = q_ab[:, 0].reshape((1, 313))
q_b = q_ab[:, 1].reshape((1, 313))
X_a = np.sum(X_colorized * q_a, 1).reshape((batch_size, 1, h, w))
X_b = np.sum(X_colorized * q_b, 1).reshape((batch_size, 1, h, w))
X_colorized = np.concatenate((X_batch_black, X_a, X_b), axis=1).transpose(0, 2, 3, 1)
X_colorized = [np.expand_dims(color.lab2rgb(im), 0) for im in X_colorized]
X_colorized = np.concatenate(X_colorized, 0).transpose(0, 3, 1, 2)
X_batch_color = [np.expand_dims(color.lab2rgb(im.transpose(1, 2, 0)), 0) for im in X_batch_color]
X_batch_color = np.concatenate(X_batch_color, 0).transpose(0, 3, 1, 2)
list_img = []
for i, img in enumerate(X_colorized[:min(32, batch_size)]):
arr = np.concatenate([X_batch_color[i], np.repeat(X_batch_black[i] / 100., 3, axis=0), img], axis=2)
list_img.append(arr)
plt.figure(figsize=(20,20))
list_img = [np.concatenate(list_img[4 * i: 4 * (i + 1)], axis=2) for i in range(len(list_img) / 4)]
arr = np.concatenate(list_img, axis=1)
plt.imshow(arr.transpose(1,2,0))
ax = plt.gca()
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.tight_layout()
plt.show()
| mit | -6,095,970,284,944,061,000 | 34.181818 | 108 | 0.603477 | false |
MMTObservatory/camsrv | setup.py | 1 | 1955 | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# NOTE: The configuration for the package, including the name, version, and
# other information are set in the setup.cfg file.
import os
import sys
from setuptools import setup
# First provide helpful messages if contributors try and run legacy commands
# for tests or docs.
TEST_HELP = """
Note: running tests is no longer done using 'python setup.py test'. Instead
you will need to run:
tox -e test
If you don't already have tox installed, you can install it with:
pip install tox
If you only want to run part of the test suite, you can also use pytest
directly with::
pip install -e .[test]
pytest
For more information, see:
http://docs.astropy.org/en/latest/development/testguide.html#running-tests
"""
if 'test' in sys.argv:
print(TEST_HELP)
sys.exit(1)
DOCS_HELP = """
Note: building the documentation is no longer done using
'python setup.py build_docs'. Instead you will need to run:
tox -e build_docs
If you don't already have tox installed, you can install it with:
pip install tox
You can also build the documentation with Sphinx directly using::
pip install -e .[docs]
cd docs
make html
For more information, see:
http://docs.astropy.org/en/latest/install.html#builddocs
"""
if 'build_docs' in sys.argv or 'build_sphinx' in sys.argv:
print(DOCS_HELP)
sys.exit(1)
VERSION_TEMPLATE = """
# Note that we need to fall back to the hard-coded version if either
# setuptools_scm can't be imported or setuptools_scm can't determine the
# version, so we catch the generic 'Exception'.
try:
from setuptools_scm import get_version
version = get_version(root='..', relative_to=__file__)
except Exception:
version = '{version}'
""".lstrip()
setup(use_scm_version={'write_to': os.path.join('camsrv', 'version.py'),
'write_to_template': VERSION_TEMPLATE})
| bsd-3-clause | -7,323,620,810,202,405,000 | 24.064103 | 76 | 0.706394 | false |
visionegg/visionegg | VisionEgg/win32_maxpriority.py | 1 | 2142 | # This file was created automatically by SWIG.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _win32_maxpriority
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "this"):
if isinstance(value, class_type):
self.__dict__[name] = value.this
if hasattr(value,"thisown"): self.__dict__["thisown"] = value.thisown
del value.thisown
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static) or hasattr(self,name) or (name == "thisown"):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError,name
import types
try:
_object = types.ObjectType
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
del types
IDLE_PRIORITY_CLASS = _win32_maxpriority.IDLE_PRIORITY_CLASS
NORMAL_PRIORITY_CLASS = _win32_maxpriority.NORMAL_PRIORITY_CLASS
HIGH_PRIORITY_CLASS = _win32_maxpriority.HIGH_PRIORITY_CLASS
REALTIME_PRIORITY_CLASS = _win32_maxpriority.REALTIME_PRIORITY_CLASS
THREAD_PRIORITY_IDLE = _win32_maxpriority.THREAD_PRIORITY_IDLE
THREAD_PRIORITY_LOWEST = _win32_maxpriority.THREAD_PRIORITY_LOWEST
THREAD_PRIORITY_BELOW_NORMAL = _win32_maxpriority.THREAD_PRIORITY_BELOW_NORMAL
THREAD_PRIORITY_NORMAL = _win32_maxpriority.THREAD_PRIORITY_NORMAL
THREAD_PRIORITY_ABOVE_NORMAL = _win32_maxpriority.THREAD_PRIORITY_ABOVE_NORMAL
THREAD_PRIORITY_HIGHEST = _win32_maxpriority.THREAD_PRIORITY_HIGHEST
THREAD_PRIORITY_TIME_CRITICAL = _win32_maxpriority.THREAD_PRIORITY_TIME_CRITICAL
set_self_process_priority_class = _win32_maxpriority.set_self_process_priority_class
set_self_thread_priority = _win32_maxpriority.set_self_thread_priority
| lgpl-2.1 | 1,918,292,327,711,323,000 | 38.666667 | 84 | 0.737162 | false |
OpenAgInitiative/gro-api | gro_api/actuators/migrations/0008_auto_20150812_1550.py | 1 | 1250 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('actuators', '0007_auto_20150812_1520'),
]
operations = [
migrations.AlterField(
model_name='actuatoreffect',
name='control_profile',
field=models.ForeignKey(to='actuators.ControlProfile', related_name='effects'),
),
migrations.AlterField(
model_name='actuatoreffect',
name='effect_on_active',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='actuatoreffect',
name='property',
field=models.ForeignKey(to='resources.ResourceProperty', related_name='+'),
),
migrations.AlterField(
model_name='actuatoreffect',
name='threshold',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='controlprofile',
name='properties',
field=models.ManyToManyField(through='actuators.ActuatorEffect', editable=False, to='resources.ResourceProperty', related_name='+'),
),
]
| gpl-2.0 | -4,914,615,802,447,475,000 | 31.051282 | 144 | 0.5896 | false |
LumaPictures/rez | src/rez/resolved_context.py | 1 | 63181 | from rez import __version__, module_root_path
from rez.package_repository import package_repository_manager
from rez.solver import SolverCallbackReturn
from rez.resolver import Resolver, ResolverStatus
from rez.system import system
from rez.config import config
from rez.util import shlex_join, dedup
from rez.utils.sourcecode import SourceCodeError
from rez.utils.colorize import critical, heading, local, implicit, Printer
from rez.utils.formatting import columnise, PackageRequest
from rez.utils.filesystem import TempDirs
from rez.utils.memcached import pool_memcached_connections
from rez.backport.shutilwhich import which
from rez.rex import RexExecutor, Python, OutputStyle
from rez.rex_bindings import VersionBinding, VariantBinding, \
VariantsBinding, RequirementsBinding
from rez import package_order
from rez.packages_ import get_variant, iter_packages
from rez.package_filter import PackageFilterList
from rez.shells import create_shell
from rez.exceptions import ResolvedContextError, PackageCommandError, RezError
from rez.utils.graph_utils import write_dot, write_compacted, read_graph_from_string
from rez.vendor.version.version import VersionRange
from rez.vendor.enum import Enum
from rez.vendor import yaml
from rez.utils.yaml import dump_yaml
from tempfile import mkdtemp
from functools import wraps
import getpass
import traceback
import inspect
import time
import sys
import os
import os.path
# specifically so that str's are not converted to unicode on load
from rez.vendor import simplejson
class RezToolsVisibility(Enum):
"""Determines if/how rez cli tools are added back to PATH within a
resolved environment."""
never = 0 # Don't expose rez in resolved env
append = 1 # Append to PATH in resolved env
prepend = 2 # Prepend to PATH in resolved env
class SuiteVisibility(Enum):
"""Defines what suites on $PATH stay visible when a new rez environment is
resolved."""
never = 0 # Don't attempt to keep any suites visible in a new env
always = 1 # Keep suites visible in any new env
parent = 2 # Keep only the parent suite of a tool visible
parent_priority = 3 # Keep all suites visible and the parent takes precedence
class PatchLock(Enum):
""" Enum to represent the 'lock type' used when patching context objects.
"""
no_lock = ("No locking", -1)
lock_2 = ("Minor version updates only (X.*)", 1)
lock_3 = ("Patch version updates only (X.X.*)", 2)
lock_4 = ("Build version updates only (X.X.X.*)", 3)
lock = ("Exact version", -1)
__order__ = "no_lock,lock_2,lock_3,lock_4,lock"
def __init__(self, description, rank):
self.description = description
self.rank = rank
def get_lock_request(name, version, patch_lock, weak=True):
"""Given a package and patch lock, return the equivalent request.
For example, for object 'foo-1.2.1' and lock type 'lock_3', the equivalent
request is '~foo-1.2'. This restricts updates to foo to patch-or-lower
version changes only.
For objects not versioned down to a given lock level, the closest possible
lock is applied. So 'lock_3' applied to 'foo-1' would give '~foo-1'.
Args:
name (str): Package name.
version (Version): Package version.
patch_lock (PatchLock): Lock type to apply.
Returns:
`PackageRequest` object, or None if there is no equivalent request.
"""
ch = '~' if weak else ''
if patch_lock == PatchLock.lock:
s = "%s%s==%s" % (ch, name, str(version))
return PackageRequest(s)
elif (patch_lock == PatchLock.no_lock) or (not version):
return None
version_ = version.trim(patch_lock.rank)
s = "%s%s-%s" % (ch, name, str(version_))
return PackageRequest(s)
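# A minimal usage sketch (the package name and versions are made up):
#
#   from rez.vendor.version.version import Version
#
#   req = get_lock_request("foo", Version("1.2.1"), PatchLock.lock_3)
#   str(req)    # -> '~foo-1.2' (weak request, allows patch updates only)
#
#   req = get_lock_request("foo", Version("1.2.1"), PatchLock.lock)
#   str(req)    # -> '~foo==1.2.1' (exact version)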
class ResolvedContext(object):
"""A class that resolves, stores and spawns Rez environments.
The main Rez entry point for creating, saving, loading and executing
resolved environments. A ResolvedContext object can be saved to file and
loaded at a later date, and it can reconstruct the equivalent environment
at that time. It can spawn interactive and non-interactive shells, in any
supported shell plugin type, such as bash and tcsh. It can also run a
command within a configured python namespace, without spawning a child
shell.
"""
serialize_version = (4, 3)
tmpdir_manager = TempDirs(config.context_tmpdir, prefix="rez_context_")
class Callback(object):
def __init__(self, max_fails, time_limit, callback, buf=None):
self.max_fails = max_fails
self.time_limit = time_limit
self.callback = callback
self.start_time = time.time()
self.buf = buf or sys.stdout
def __call__(self, state):
if self.max_fails != -1 and state.num_fails >= self.max_fails:
reason = ("fail limit reached: aborted after %d failures"
% state.num_fails)
return SolverCallbackReturn.fail, reason
if self.time_limit != -1:
secs = time.time() - self.start_time
if secs > self.time_limit:
return SolverCallbackReturn.abort, "time limit exceeded"
if self.callback:
return self.callback(state)
return SolverCallbackReturn.keep_going, ''
def __init__(self, package_requests, verbosity=0, timestamp=None,
building=False, caching=None, package_paths=None,
package_filter=None, package_orderers=None, max_fails=-1,
add_implicit_packages=True, time_limit=-1, callback=None,
package_load_callback=None, buf=None):
"""Perform a package resolve, and store the result.
Args:
package_requests: List of strings or PackageRequest objects
representing the request.
verbosity: Verbosity level. One of [0,1,2].
timestamp: Ignore packages released after this epoch time. Packages
released at exactly this time will not be ignored.
building: True if we're resolving for a build.
caching: If True, cache(s) may be used to speed the resolve. If
False, caches will not be used. If None, config.resolve_caching
is used.
package_paths: List of paths to search for pkgs, defaults to
config.packages_path.
package_filter (`PackageFilterBase`): Filter used to exclude certain
packages. Defaults to settings from config.package_filter. Use
`package_filter.no_filter` to remove all filtering.
package_orderers (list of `PackageOrder`): Custom package ordering.
add_implicit_packages: If True, the implicit package list defined
by config.implicit_packages is appended to the request.
max_fails (int): Abort the resolve if the number of failed steps is
greater or equal to this number. If -1, does not abort.
time_limit (int): Abort the resolve if it takes longer than this
many seconds. If -1, there is no time limit.
callback: See `Solver`.
package_load_callback: If not None, this callable will be called
prior to each package being loaded. It is passed a single
`Package` object.
buf (file-like object): Where to print verbose output to, defaults
to stdout.
"""
self.load_path = None
# resolving settings
self.requested_timestamp = timestamp
self.timestamp = self.requested_timestamp or int(time.time())
self.building = building
self.implicit_packages = []
self.caching = config.resolve_caching if caching is None else caching
self.verbosity = verbosity
self._package_requests = []
for req in package_requests:
if isinstance(req, basestring):
req = PackageRequest(req)
self._package_requests.append(req)
if add_implicit_packages:
self.implicit_packages = [PackageRequest(x)
for x in config.implicit_packages]
self.package_paths = (config.packages_path if package_paths is None
else package_paths)
self.package_paths = list(dedup(self.package_paths))
self.package_filter = (PackageFilterList.singleton if package_filter is None
else package_filter)
self.package_orderers = package_orderers or config.package_orderers
# patch settings
self.default_patch_lock = PatchLock.no_lock
self.patch_locks = {}
# info about env the resolve occurred in
self.rez_version = __version__
self.rez_path = module_root_path
self.user = getpass.getuser()
self.host = system.fqdn
self.platform = system.platform
self.arch = system.arch
self.os = system.os
self.created = int(time.time())
# resolve results
self.status_ = ResolverStatus.pending
self._resolved_packages = None
self.failure_description = None
self.graph_string = None
self.graph_ = None
self.from_cache = None
# stats
self.solve_time = 0.0 # total solve time, inclusive of load time
self.load_time = 0.0 # total time loading packages (disk or memcache)
self.num_loaded_packages = 0 # num packages loaded (disk or memcache)
# the pre-resolve bindings. We store these because @late package.py
# functions need them, and we cache them to avoid cost
self.pre_resolve_bindings = None
# suite information
self.parent_suite_path = None
self.suite_context_name = None
# perform the solve
callback_ = self.Callback(buf=buf,
max_fails=max_fails,
time_limit=time_limit,
callback=callback)
def _package_load_callback(package):
if package_load_callback:
package_load_callback(package)
self.num_loaded_packages += 1
request = self.requested_packages(include_implicit=True)
resolver = Resolver(context=self,
package_requests=request,
package_paths=self.package_paths,
package_filter=self.package_filter,
package_orderers=self.package_orderers,
timestamp=self.requested_timestamp,
building=self.building,
caching=self.caching,
callback=callback_,
package_load_callback=_package_load_callback,
verbosity=verbosity,
buf=buf)
resolver.solve()
# convert the results
self.status_ = resolver.status
self.solve_time = resolver.solve_time
self.load_time = resolver.load_time
self.failure_description = resolver.failure_description
self.graph_ = resolver.graph
self.from_cache = resolver.from_cache
if self.status_ == ResolverStatus.solved:
self._resolved_packages = []
for variant in resolver.resolved_packages:
variant.set_context(self)
self._resolved_packages.append(variant)
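# A minimal usage sketch (package names are placeholders):
#
#   context = ResolvedContext(["python-2.7", "my_tool-1"])
#   if context.success:
#       context.print_info()
#       p = context.execute_shell(command=["my_tool", "--help"], block=False)
#       p.wait()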
def __str__(self):
request = self.requested_packages(include_implicit=True)
req_str = " ".join(str(x) for x in request)
if self.status == ResolverStatus.solved:
res_str = " ".join(x.qualified_name for x in self._resolved_packages)
return "%s(%s ==> %s)" % (self.status.name, req_str, res_str)
else:
return "%s:%s(%s)" % (self.__class__.__name__,
self.status.name, req_str)
@property
def success(self):
"""True if the context has been solved, False otherwise."""
return (self.status_ == ResolverStatus.solved)
@property
def status(self):
"""Return the current status of the context.
Returns:
ResolverStatus.
"""
return self.status_
def requested_packages(self, include_implicit=False):
"""Get packages in the request.
Args:
include_implicit (bool): If True, implicit packages are appended
to the result.
Returns:
List of `PackageRequest` objects.
"""
if include_implicit:
return self._package_requests + self.implicit_packages
else:
return self._package_requests
@property
def resolved_packages(self):
"""Get packages in the resolve.
Returns:
List of `Variant` objects, or None if the resolve failed.
"""
return self._resolved_packages
def set_load_path(self, path):
"""Set the path that this context was reportedly loaded from.
You may want to use this method in cases where a context is saved to
disk, but you need to associate this new path with the context while it
is still in use.
"""
self.load_path = path
def __eq__(self, other):
"""Equality test.
Two contexts are considered equal if they have an equivalent request,
and an equivalent resolve. Other details, such as timestamp, are not
considered.
"""
return (isinstance(other, ResolvedContext)
and other.requested_packages(True) == self.requested_packages(True)
and other.resolved_packages == self.resolved_packages)
def __hash__(self):
list_ = []
req = self.requested_packages(True)
list_.append(tuple(req))
res = self.resolved_packages
if res is None:
list_.append(None)
else:
list_.append(tuple(res))
value = tuple(list_)
return hash(value)
@property
def has_graph(self):
"""Return True if the resolve has a graph."""
return bool((self.graph_ is not None) or self.graph_string)
def get_resolved_package(self, name):
"""Returns a `Variant` object or None if the package is not in the
resolve.
"""
pkgs = [x for x in self._resolved_packages if x.name == name]
return pkgs[0] if pkgs else None
def copy(self):
"""Returns a shallow copy of the context."""
import copy
return copy.copy(self)
# TODO: deprecate in favor of patch() method
def get_patched_request(self, package_requests=None,
package_subtractions=None, strict=False, rank=0):
"""Get a 'patched' request.
A patched request is a copy of this context's request, but with some
changes applied. This can then be used to create a new, 'patched'
context.
New package requests override original requests based on the type -
normal, conflict or weak. So 'foo-2' overrides 'foo-1', '!foo-2'
overrides '!foo-1' and '~foo-2' overrides '~foo-1', but a request such
as '!foo-2' would not replace 'foo-1' - it would be added instead.
Note that requests in `package_requests` can have the form '^foo'. This
is another way of supplying package subtractions.
Any new requests that don't override original requests are appended,
in the order that they appear in `package_requests`.
Args:
package_requests (list of str or list of `PackageRequest`):
Overriding requests.
package_subtractions (list of str): Any original request with a
package name in this list is removed, before the new requests
are added.
strict (bool): If True, the current context's resolve is used as the
original request list, rather than the request.
rank (int): If > 1, package versions can only increase in this rank
and further - for example, rank=3 means that only version patch
numbers are allowed to increase, major and minor versions will
not change. This is only applied to packages that have not been
explicitly overridden in `package_requests`. If rank <= 1, or
`strict` is True, rank is ignored.
Returns:
List of `PackageRequest` objects that can be used to construct a
new `ResolvedContext` object.
"""
# assemble source request
if strict:
request = []
for variant in self.resolved_packages:
req = PackageRequest(variant.qualified_package_name)
request.append(req)
else:
request = self.requested_packages()[:]
# convert '^foo'-style requests to subtractions
if package_requests:
package_subtractions = package_subtractions or []
indexes = []
for i, req in enumerate(package_requests):
name = str(req)
if name.startswith('^'):
package_subtractions.append(name[1:])
indexes.append(i)
for i in reversed(indexes):
del package_requests[i]
# apply subtractions
if package_subtractions:
request = [x for x in request if x.name not in package_subtractions]
# apply overrides
if package_requests:
request_dict = dict((x.name, (i, x)) for i, x in enumerate(request))
request_ = []
for req in package_requests:
if isinstance(req, basestring):
req = PackageRequest(req)
if req.name in request_dict:
i, req_ = request_dict[req.name]
if (req_ is not None) and (req_.conflict == req.conflict) \
and (req_.weak == req.weak):
request[i] = req
del request_dict[req.name]
else:
request_.append(req)
else:
request_.append(req)
request += request_
# add rank limiters
if not strict and rank > 1:
overrides = set(x.name for x in package_requests if not x.conflict)
rank_limiters = []
for variant in self.resolved_packages:
if variant.name not in overrides:
if len(variant.version) >= rank:
version = variant.version.trim(rank - 1)
version = version.next()
req = "~%s<%s" % (variant.name, str(version))
rank_limiters.append(req)
request += rank_limiters
return request
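# Illustrative example of the override semantics described above (requests are
# placeholders). Given an original request of ["foo-1", "~bah-2", "eek"]:
#
#   context.get_patched_request(["foo-2", "^eek"])
#   # -> requests equivalent to ["foo-2", "~bah-2"]
#   # 'foo-2' replaces 'foo-1' (same request type); '^eek' subtracts 'eek'.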
def graph(self, as_dot=False):
"""Get the resolve graph.
Args:
as_dot: If True, get the graph as a dot-language string. Otherwise,
a pygraph.digraph object is returned.
Returns:
A string or `pygraph.digraph` object, or None if there is no graph
associated with the resolve.
"""
if not self.has_graph:
return None
if not as_dot:
if self.graph_ is None:
# reads either dot format or our compact format
self.graph_ = read_graph_from_string(self.graph_string)
return self.graph_
if self.graph_string:
if self.graph_string.startswith('{'): # compact format
self.graph_ = read_graph_from_string(self.graph_string)
else:
# already in dot format. Note that this will only happen in
# old rez contexts where the graph is not stored in the newer
# compact format.
return self.graph_string
return write_dot(self.graph_)
def save(self, path):
"""Save the resolved context to file."""
with open(path, 'w') as f:
self.write_to_buffer(f)
def write_to_buffer(self, buf):
"""Save the context to a buffer."""
doc = self.to_dict()
if config.rxt_as_yaml:
content = dump_yaml(doc)
else:
content = simplejson.dumps(doc, indent=4, separators=(",", ": "))
buf.write(content)
@classmethod
def get_current(cls):
"""Get the context for the current env, if there is one.
Returns:
`ResolvedContext`: Current context, or None if not in a resolved env.
"""
filepath = os.getenv("REZ_RXT_FILE")
if not filepath or not os.path.exists(filepath):
return None
return cls.load(filepath)
@classmethod
def load(cls, path):
"""Load a resolved context from file."""
with open(path) as f:
context = cls.read_from_buffer(f, path)
context.set_load_path(path)
return context
@classmethod
def read_from_buffer(cls, buf, identifier_str=None):
"""Load the context from a buffer."""
try:
return cls._read_from_buffer(buf, identifier_str)
except Exception as e:
cls._load_error(e, identifier_str)
def get_resolve_diff(self, other):
"""Get the difference between the resolve in this context and another.
The difference is described from the point of view of the current context
- a newer package means that the package in `other` is newer than the
package in `self`.
Diffs can only be compared if their package search paths match; an error
is raised otherwise.
The diff is expressed in packages, not variants - the specific variant
of a package is ignored.
Returns:
A dict containing:
- 'newer_packages': A dict containing items:
- package name (str);
- List of `Package` objects. These are the packages up to and
including the newer package in `other`, in ascending order.
- 'older_packages': A dict containing:
- package name (str);
- List of `Package` objects. These are the packages down to and
including the older package in `other`, in descending order.
- 'added_packages': Set of `Package` objects present in `other` but
not in `self`;
- 'removed_packages': Set of `Package` objects present in `self`,
but not in `other`.
If any item ('added_packages' etc) is empty, it is not added to the
resulting dict. Thus, an empty dict is returned if there is no
difference between contexts.
"""
if self.package_paths != other.package_paths:
from difflib import ndiff
diff = ndiff(self.package_paths, other.package_paths)
raise ResolvedContextError("Cannot diff resolves, package search "
"paths differ:\n%s" % '\n'.join(diff))
d = {}
self_pkgs_ = set(x.parent for x in self._resolved_packages)
other_pkgs_ = set(x.parent for x in other._resolved_packages)
self_pkgs = self_pkgs_ - other_pkgs_
other_pkgs = other_pkgs_ - self_pkgs_
if not (self_pkgs or other_pkgs):
return d
self_fams = dict((x.name, x) for x in self_pkgs)
other_fams = dict((x.name, x) for x in other_pkgs)
newer_packages = {}
older_packages = {}
added_packages = set()
removed_packages = set()
for pkg in self_pkgs:
if pkg.name not in other_fams:
removed_packages.add(pkg)
else:
other_pkg = other_fams[pkg.name]
if other_pkg.version > pkg.version:
r = VersionRange.as_span(lower_version=pkg.version,
upper_version=other_pkg.version)
it = iter_packages(pkg.name, range_=r)
pkgs = sorted(it, key=lambda x: x.version)
newer_packages[pkg.name] = pkgs
elif other_pkg.version < pkg.version:
r = VersionRange.as_span(lower_version=other_pkg.version,
upper_version=pkg.version)
it = iter_packages(pkg.name, range_=r)
pkgs = sorted(it, key=lambda x: x.version, reverse=True)
older_packages[pkg.name] = pkgs
for pkg in other_pkgs:
if pkg.name not in self_fams:
added_packages.add(pkg)
if newer_packages:
d["newer_packages"] = newer_packages
if older_packages:
d["older_packages"] = older_packages
if added_packages:
d["added_packages"] = added_packages
if removed_packages:
d["removed_packages"] = removed_packages
return d
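# Sketch of the returned structure (contexts and packages are placeholders):
#
#   diff = ctx_a.get_resolve_diff(ctx_b)
#   # If ctx_b resolved a newer 'foo' and no longer contains 'bah', the result
#   # looks like:
#   # {'newer_packages': {'foo': [<Package foo-1.1>, ..., <Package foo-1.3>]},
#   #  'removed_packages': set([<Package bah-2>])}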
@pool_memcached_connections
def print_info(self, buf=sys.stdout, verbosity=0, source_order=False,
show_resolved_uris=False):
"""Prints a message summarising the contents of the resolved context.
Args:
buf (file-like object): Where to print this info to.
verbosity (int): Verbose mode. One of [0,1,2].
source_order (bool): If True, print resolved packages in the order
they are sourced, rather than alphabetical order.
show_resolved_uris (bool): By default, resolved packages have their
'root' property listed, or their 'uri' if 'root' is None. Use
this option to list 'uri' regardless.
"""
_pr = Printer(buf)
def _rt(t):
if verbosity:
s = time.strftime("%a %b %d %H:%M:%S %Z %Y", time.localtime(t))
return s + " (%d)" % int(t)
else:
return time.strftime("%a %b %d %H:%M:%S %Y", time.localtime(t))
if self.status_ in (ResolverStatus.failed, ResolverStatus.aborted):
_pr("The context failed to resolve:\n%s"
% self.failure_description, critical)
return
t_str = _rt(self.created)
_pr("resolved by %s@%s, on %s, using Rez v%s"
% (self.user, self.host, t_str, self.rez_version))
if self.requested_timestamp:
t_str = _rt(self.requested_timestamp)
_pr("packages released after %s were ignored" % t_str)
_pr()
if verbosity:
_pr("search paths:", heading)
rows = []
colors = []
for path in self.package_paths:
if package_repository_manager.are_same(path, config.local_packages_path):
label = "(local)"
col = local
else:
label = ""
col = None
rows.append((path, label))
colors.append(col)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
_pr()
if self.package_filter:
data = self.package_filter.to_pod()
txt = dump_yaml(data)
_pr("package filters:", heading)
_pr(txt)
_pr()
_pr("requested packages:", heading)
rows = []
colors = []
for request in self._package_requests:
rows.append((str(request), ""))
colors.append(None)
for request in self.implicit_packages:
rows.append((str(request), "(implicit)"))
colors.append(implicit)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
_pr()
_pr("resolved packages:", heading)
rows = []
colors = []
resolved_packages = self.resolved_packages or []
if not source_order:
resolved_packages = sorted(resolved_packages, key=lambda x: x.name)
for pkg in resolved_packages:
t = []
col = None
location = None
# print root/uri
if show_resolved_uris or not pkg.root:
location = pkg.uri
else:
location = pkg.root
if not os.path.exists(pkg.root):
t.append('NOT FOUND')
col = critical
if pkg.is_local:
t.append('local')
col = local
t = '(%s)' % ', '.join(t) if t else ''
rows.append((pkg.qualified_package_name, location, t))
colors.append(col)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
if verbosity:
_pr()
actual_solve_time = self.solve_time - self.load_time
_pr("resolve details:", heading)
_pr("load time: %.02f secs" % self.load_time)
_pr("solve time: %.02f secs" % actual_solve_time)
_pr("packages queried: %d" % self.num_loaded_packages)
_pr("from cache: %s" % self.from_cache)
if self.load_path:
_pr("rxt file: %s" % self.load_path)
if verbosity >= 2:
_pr()
_pr("tools:", heading)
self.print_tools(buf=buf)
def print_tools(self, buf=sys.stdout):
data = self.get_tools()
if not data:
return
_pr = Printer(buf)
conflicts = set(self.get_conflicting_tools().keys())
rows = [["TOOL", "PACKAGE", ""],
["----", "-------", ""]]
colors = [None, None]
for _, (variant, tools) in sorted(data.items()):
pkg_str = variant.qualified_package_name
for tool in sorted(tools):
col = None
row = [tool, pkg_str, ""]
if tool in conflicts:
col = critical
row[-1] = "(in conflict)"
rows.append(row)
colors.append(col)
for col, line in zip(colors, columnise(rows)):
_pr(line, col)
def print_resolve_diff(self, other, heading=None):
"""Print the difference between the resolve of two contexts.
Args:
other (`ResolvedContext`): Context to compare to.
heading: One of:
- None: Do not display a heading;
- True: Display the filename of each context as a heading, if
both contexts have a filepath;
- 2-tuple: Use the given two strings as headings - the first is
the heading for `self`, the second for `other`.
"""
d = self.get_resolve_diff(other)
if not d:
return
rows = []
if heading is True and self.load_path and other.load_path:
a = os.path.basename(self.load_path)
b = os.path.basename(other.load_path)
heading = (a, b)
if isinstance(heading, tuple):
rows.append(list(heading) + [""])
rows.append(('-' * len(heading[0]), '-' * len(heading[1]), ""))
newer_packages = d.get("newer_packages", {})
older_packages = d.get("older_packages", {})
added_packages = d.get("added_packages", set())
removed_packages = d.get("removed_packages", set())
if newer_packages:
for name, pkgs in newer_packages.iteritems():
this_pkg = pkgs[0]
other_pkg = pkgs[-1]
diff_str = "(+%d versions)" % (len(pkgs) - 1)
rows.append((this_pkg.qualified_name,
other_pkg.qualified_name,
diff_str))
if older_packages:
for name, pkgs in older_packages.iteritems():
this_pkg = pkgs[0]
other_pkg = pkgs[-1]
diff_str = "(-%d versions)" % (len(pkgs) - 1)
rows.append((this_pkg.qualified_name,
other_pkg.qualified_name,
diff_str))
if added_packages:
for pkg in sorted(added_packages, key=lambda x: x.name):
rows.append(("-", pkg.qualified_name, ""))
if removed_packages:
for pkg in sorted(removed_packages, key=lambda x: x.name):
rows.append((pkg.qualified_name, "-", ""))
print '\n'.join(columnise(rows))
def _on_success(fn):
@wraps(fn)
def _check(self, *nargs, **kwargs):
if self.status_ == ResolverStatus.solved:
return fn(self, *nargs, **kwargs)
else:
raise ResolvedContextError(
"Cannot perform operation in a failed context")
return _check
@_on_success
def get_dependency_graph(self):
"""Generate the dependency graph.
The dependency graph is a simpler subset of the resolve graph. It
contains package name nodes connected directly to their dependencies.
Weak references and conflict requests are not included in the graph.
The dependency graph does not show conflicts.
Returns:
`pygraph.digraph` object.
"""
from rez.vendor.pygraph.classes.digraph import digraph
nodes = {}
edges = set()
for variant in self._resolved_packages:
nodes[variant.name] = variant.qualified_package_name
for request in variant.get_requires():
if not request.conflict:
edges.add((variant.name, request.name))
g = digraph()
node_color = "#AAFFAA"
node_fontsize = 10
attrs = [("fontsize", node_fontsize),
("fillcolor", node_color),
("style", "filled")]
for name, qname in nodes.iteritems():
g.add_node(name, attrs=attrs + [("label", qname)])
for edge in edges:
g.add_edge(edge)
return g
@_on_success
def validate(self):
"""Validate the context."""
try:
for pkg in self.resolved_packages:
pkg.validate_data()
except RezError as e:
raise ResolvedContextError("%s: %s" % (e.__class__.__name__, str(e)))
@_on_success
def get_environ(self, parent_environ=None):
"""Get the environ dict resulting from interpreting this context.
@param parent_environ Environment to interpret the context within,
defaults to os.environ if None.
@returns The environment dict generated by this context, when
interpreted in a python rex interpreter.
"""
interp = Python(target_environ={}, passive=True)
executor = self._create_executor(interp, parent_environ)
self._execute(executor)
return executor.get_output()
@_on_success
def get_key(self, key, request_only=False):
"""Get a data key value for each resolved package.
Args:
key (str): String key of property, eg 'tools'.
request_only (bool): If True, only return the key from resolved
packages that were also present in the request.
Returns:
Dict of {pkg-name: (variant, value)}.
"""
values = {}
requested_names = [x.name for x in self._package_requests
if not x.conflict]
for pkg in self.resolved_packages:
if (not request_only) or (pkg.name in requested_names):
value = getattr(pkg, key)
if value is not None:
values[pkg.name] = (pkg, value)
return values
@_on_success
def get_tools(self, request_only=False):
"""Returns the commandline tools available in the context.
Args:
request_only: If True, only return the tools from resolved packages
that were also present in the request.
Returns:
Dict of {pkg-name: (variant, [tools])}.
"""
return self.get_key("tools", request_only=request_only)
@_on_success
def get_tool_variants(self, tool_name):
"""Get the variant(s) that provide the named tool.
If there are more than one variants, the tool is in conflict, and Rez
does not know which variant's tool is actually exposed.
Args:
tool_name(str): Name of the tool to search for.
Returns:
Set of `Variant` objects. If no variant provides the tool, an
empty set is returned.
"""
variants = set()
tools_dict = self.get_tools(request_only=False)
for variant, tools in tools_dict.itervalues():
if tool_name in tools:
variants.add(variant)
return variants
@_on_success
def get_conflicting_tools(self, request_only=False):
"""Returns tools of the same name provided by more than one package.
Args:
request_only: If True, only return the key from resolved packages
that were also present in the request.
Returns:
Dict of {tool-name: set([Variant])}.
"""
from collections import defaultdict
tool_sets = defaultdict(set)
tools_dict = self.get_tools(request_only=request_only)
for variant, tools in tools_dict.itervalues():
for tool in tools:
tool_sets[tool].add(variant)
conflicts = dict((k, v) for k, v in tool_sets.iteritems() if len(v) > 1)
return conflicts
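# Usage sketch (tool and package names are invented):
#
#   conflicts = context.get_conflicting_tools()
#   # e.g. {'convert': set([<Variant imagemagick-7>, <Variant gmagick-1>])}
#   # means two resolved packages both expose a tool named 'convert'.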
@_on_success
def get_shell_code(self, shell=None, parent_environ=None, style=OutputStyle.file):
"""Get the shell code resulting from intepreting this context.
Args:
shell (str): Shell type, for eg 'bash'. If None, the current shell
type is used.
parent_environ (dict): Environment to interpret the context within,
defaults to os.environ if None.
style (): Style to format shell code in.
"""
executor = self._create_executor(interpreter=create_shell(shell),
parent_environ=parent_environ)
if self.load_path and os.path.isfile(self.load_path):
executor.env.REZ_RXT_FILE = self.load_path
self._execute(executor)
return executor.get_output(style)
@_on_success
def get_actions(self, parent_environ=None):
"""Get the list of rex.Action objects resulting from interpreting this
context. This is provided mainly for testing purposes.
Args:
parent_environ Environment to interpret the context within,
defaults to os.environ if None.
Returns:
A list of rex.Action subclass instances.
"""
interp = Python(target_environ={}, passive=True)
executor = self._create_executor(interp, parent_environ)
self._execute(executor)
return executor.actions
@_on_success
def apply(self, parent_environ=None):
"""Apply the context to the current python session.
Note that this updates os.environ and possibly sys.path.
@param parent_environ Environment to interpret the context within, defaults to
os.environ if None.
"""
interpreter = Python(target_environ=os.environ)
executor = self._create_executor(interpreter, parent_environ)
self._execute(executor)
@_on_success
def which(self, cmd, parent_environ=None, fallback=False):
"""Find a program in the resolved environment.
Args:
cmd: String name of the program to find.
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
fallback: If True, and the program is not found in the context,
the current environment will then be searched.
Returns:
Path to the program, or None if the program was not found.
"""
env = self.get_environ(parent_environ=parent_environ)
path = which(cmd, env=env)
if fallback and path is None:
path = which(cmd)
return path
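# Usage sketch (the program name is a placeholder):
#
#   exe = context.which("my_tool", fallback=True)
#   if exe is None:
#       raise RuntimeError("my_tool is not visible in the resolved environment")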
@_on_success
def execute_command(self, args, parent_environ=None, **subprocess_kwargs):
"""Run a command within a resolved context.
This applies the context to a python environ dict, then runs a
subprocess in that namespace. This is not a fully configured subshell -
shell-specific commands such as aliases will not be applied. To execute
a command within a subshell instead, use execute_shell().
Warning:
This runs a command in a configured environ dict only, not in a true
shell. To do that, call `execute_shell` using the `command` keyword
argument.
Args:
args: Command arguments, can be a string.
parent_environ: Environment to interpret the context within,
defaults to os.environ if None.
subprocess_kwargs: Args to pass to subprocess.Popen.
Returns:
A subprocess.Popen object.
Note:
This does not alter the current python session.
"""
interpreter = Python(target_environ={})
executor = self._create_executor(interpreter, parent_environ)
self._execute(executor)
return interpreter.subprocess(args, **subprocess_kwargs)
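# Usage sketch, per the warning above - this configures an environ dict and
# runs a subprocess in it, without spawning a shell (command is a placeholder):
#
#   p = context.execute_command(["my_tool", "--version"])
#   p.wait()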
@_on_success
def execute_rex_code(self, code, filename=None, shell=None,
parent_environ=None, **Popen_args):
"""Run some rex code in the context.
Note:
This is just a convenience form of `execute_shell`.
Args:
code (str): Rex code to execute.
filename (str): Filename to report if there are syntax errors.
shell: Shell type, for eg 'bash'. If None, the current shell type
is used.
parent_environ: Environment to run the shell process in, if None
then the current environment is used.
Popen_args: args to pass to the shell process object constructor.
Returns:
`subprocess.Popen` object for the shell process.
"""
def _actions_callback(executor):
executor.execute_code(code, filename=filename)
return self.execute_shell(shell=shell,
parent_environ=parent_environ,
command='', # don't run any command
block=False,
actions_callback=_actions_callback,
**Popen_args)
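# Usage sketch - the rex snippet is illustrative only:
#
#   p = context.execute_rex_code('env.MY_VAR = "1"', filename="<example>")
#   p.wait()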
@_on_success
def execute_shell(self, shell=None, parent_environ=None, rcfile=None,
norc=False, stdin=False, command=None, quiet=False,
block=None, actions_callback=None, post_actions_callback=None,
context_filepath=None, start_new_session=False, detached=False,
pre_command=None, **Popen_args):
"""Spawn a possibly-interactive shell.
Args:
shell: Shell type, for eg 'bash'. If None, the current shell type
is used.
parent_environ: Environment to run the shell process in, if None
then the current environment is used.
rcfile: Specify a file to source instead of shell startup files.
norc: If True, skip shell startup files, if possible.
stdin: If True, read commands from stdin, in a non-interactive
shell.
command: If not None, execute this command in a non-interactive shell.
If an empty string or list, don't run a command, but don't open
an interactive shell either. Can be a list of args.
quiet: If True, skip the welcome message in interactive shells.
block: If True, block until the shell is terminated. If False,
return immediately. If None, will default to blocking if the
shell is interactive.
actions_callback: Callback with signature (RexExecutor). This lets
the user append custom actions to the context, such as setting
extra environment variables. Callback is run prior to context Rex
execution.
post_actions_callback: Callback with signature (RexExecutor). This lets
the user append custom actions to the context, such as setting
extra environment variables. Callback is run after context Rex
execution.
context_filepath: If provided, the context file will be written
here, rather than to the default location (which is in a
tempdir). If you use this arg, you are responsible for cleaning
up the file.
start_new_session: If True, change the process group of the target
process. Note that this may override the Popen_args keyword
'preexec_fn'.
detached: If True, open a separate terminal. Note that this may
override the `pre_command` argument.
pre_command: Command to inject before the shell command itself. This
is for internal use.
Popen_args: args to pass to the shell process object constructor.
Returns:
If blocking: A 3-tuple of (returncode, stdout, stderr);
If non-blocking - A subprocess.Popen object for the shell process.
"""
sh = create_shell(shell)
if hasattr(command, "__iter__"):
command = sh.join(command)
# start a new session if specified
if start_new_session:
Popen_args.update(config.new_session_popen_args)
# open a separate terminal if specified
if detached:
term_cmd = config.terminal_emulator_command
if term_cmd:
pre_command = term_cmd.strip().split()
# block if the shell is likely to be interactive
if block is None:
block = not (command or stdin)
# context and rxt files. If running detached, don't cleanup files, because
# rez-env returns too early and deletes the tmp files before the detached
# process can use them
tmpdir = self.tmpdir_manager.mkdtemp(cleanup=not detached)
if self.load_path and os.path.isfile(self.load_path):
rxt_file = self.load_path
else:
rxt_file = os.path.join(tmpdir, "context.rxt")
self.save(rxt_file)
context_file = context_filepath or \
os.path.join(tmpdir, "context.%s" % sh.file_extension())
# interpret this context and write out the native context file
executor = self._create_executor(sh, parent_environ)
executor.env.REZ_RXT_FILE = rxt_file
executor.env.REZ_CONTEXT_FILE = context_file
if actions_callback:
actions_callback(executor)
self._execute(executor)
if post_actions_callback:
post_actions_callback(executor)
context_code = executor.get_output()
with open(context_file, 'w') as f:
f.write(context_code)
quiet = quiet or (RezToolsVisibility[config.rez_tools_visibility]
== RezToolsVisibility.never)
# spawn the shell subprocess
p = sh.spawn_shell(context_file,
tmpdir,
rcfile=rcfile,
norc=norc,
stdin=stdin,
command=command,
env=parent_environ,
quiet=quiet,
pre_command=pre_command,
**Popen_args)
if block:
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
else:
return p
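# Blocking usage sketch (assumes 'import subprocess'; the command is a
# placeholder):
#
#   returncode, out, err = context.execute_shell(
#       command="echo hello",
#       stdout=subprocess.PIPE,
#       stderr=subprocess.PIPE,
#       block=True)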
def to_dict(self):
resolved_packages = []
for pkg in (self._resolved_packages or []):
resolved_packages.append(pkg.handle.to_dict())
serialize_version = '.'.join(str(x) for x in ResolvedContext.serialize_version)
patch_locks = dict((k, v.name) for k, v in self.patch_locks.iteritems())
if self.package_orderers:
package_orderers_list = self.package_orderers.to_pod()
else:
package_orderers_list = None
if self.graph_string and self.graph_string.startswith('{'):
graph_str = self.graph_string # already in compact format
else:
g = self.graph()
graph_str = write_compacted(g)
return dict(
serialize_version=serialize_version,
timestamp=self.timestamp,
requested_timestamp=self.requested_timestamp,
building=self.building,
caching=self.caching,
implicit_packages=[str(x) for x in self.implicit_packages],
package_requests=[str(x) for x in self._package_requests],
package_paths=self.package_paths,
package_filter=self.package_filter.to_pod(),
package_orderers=package_orderers_list or None,
default_patch_lock=self.default_patch_lock.name,
patch_locks=patch_locks,
rez_version=self.rez_version,
rez_path=self.rez_path,
user=self.user,
host=self.host,
platform=self.platform,
arch=self.arch,
os=self.os,
created=self.created,
parent_suite_path=self.parent_suite_path,
suite_context_name=self.suite_context_name,
status=self.status_.name,
resolved_packages=resolved_packages,
failure_description=self.failure_description,
graph=graph_str,
from_cache=self.from_cache,
solve_time=self.solve_time,
load_time=self.load_time,
num_loaded_packages=self.num_loaded_packages)
@classmethod
def from_dict(cls, d, identifier_str=None):
"""Load a `ResolvedContext` from a dict.
Args:
d (dict): Dict containing context data.
identifier_str (str): String identifying the context, this is only
used to display in an error string if a serialization version
mismatch is detected.
Returns:
`ResolvedContext` object.
"""
# check serialization version
def _print_version(value):
return '.'.join(str(x) for x in value)
toks = str(d["serialize_version"]).split('.')
load_ver = tuple(int(x) for x in toks)
curr_ver = ResolvedContext.serialize_version
if load_ver[0] > curr_ver[0]:
msg = ["The context"]
if identifier_str:
msg.append("in %s" % identifier_str)
msg.append("was written by a newer version of Rez. The load may "
"fail (serialize version %d > %d)"
% (_print_version(load_ver), _print_version(curr_ver)))
print >> sys.stderr, ' '.join(msg)
# create and init the context
r = ResolvedContext.__new__(ResolvedContext)
r.load_path = None
r.pre_resolve_bindings = None
r.timestamp = d["timestamp"]
r.building = d["building"]
r.caching = d["caching"]
r.implicit_packages = [PackageRequest(x) for x in d["implicit_packages"]]
r._package_requests = [PackageRequest(x) for x in d["package_requests"]]
r.package_paths = d["package_paths"]
r.rez_version = d["rez_version"]
r.rez_path = d["rez_path"]
r.user = d["user"]
r.host = d["host"]
r.platform = d["platform"]
r.arch = d["arch"]
r.os = d["os"]
r.created = d["created"]
r.verbosity = d.get("verbosity", 0)
r.status_ = ResolverStatus[d["status"]]
r.failure_description = d["failure_description"]
r.solve_time = d["solve_time"]
r.load_time = d["load_time"]
r.graph_string = d["graph"]
r.graph_ = None
r._resolved_packages = []
for d_ in d["resolved_packages"]:
variant_handle = d_
if load_ver < (4, 0):
# -- SINCE SERIALIZE VERSION 4.0
from rez.utils.backcompat import convert_old_variant_handle
variant_handle = convert_old_variant_handle(variant_handle)
variant = get_variant(variant_handle)
variant.set_context(r)
r._resolved_packages.append(variant)
# -- SINCE SERIALIZE VERSION 1
r.requested_timestamp = d.get("requested_timestamp", 0)
# -- SINCE SERIALIZE VERSION 2
r.parent_suite_path = d.get("parent_suite_path")
r.suite_context_name = d.get("suite_context_name")
# -- SINCE SERIALIZE VERSION 3
r.default_patch_lock = PatchLock[d.get("default_patch_lock", "no_lock")]
patch_locks = d.get("patch_locks", {})
r.patch_locks = dict((k, PatchLock[v]) for k, v in patch_locks.iteritems())
# -- SINCE SERIALIZE VERSION 4.0
r.from_cache = d.get("from_cache", False)
# -- SINCE SERIALIZE VERSION 4.1
data = d.get("package_filter", [])
r.package_filter = PackageFilterList.from_pod(data)
# -- SINCE SERIALIZE VERSION 4.2
data = d.get("package_orderers")
if data:
r.package_orderers = package_order.OrdererDict(data)
else:
r.package_orderers = None
# -- SINCE SERIALIZE VERSION 4.3
r.num_loaded_packages = d.get("num_loaded_packages", -1)
return r
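# Round-trip sketch (the file path is a placeholder):
#
#   context.save("/tmp/example.rxt")
#   context2 = ResolvedContext.load("/tmp/example.rxt")
#   assert context2 == context    # equality compares request and resolve only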
@classmethod
def _read_from_buffer(cls, buf, identifier_str=None):
content = buf.read()
if content.startswith('{'): # assume json content
doc = simplejson.loads(content)
else:
doc = yaml.load(content)
context = cls.from_dict(doc, identifier_str)
return context
@classmethod
def _load_error(cls, e, path=None):
exc_name = e.__class__.__name__
msg = "Failed to load context"
if path:
msg += " from %s" % path
raise ResolvedContextError("%s: %s: %s" % (msg, exc_name, str(e)))
def _set_parent_suite(self, suite_path, context_name):
self.parent_suite_path = suite_path
self.suite_context_name = context_name
def _create_executor(self, interpreter, parent_environ):
parent_vars = True if config.all_parent_variables \
else config.parent_variables
return RexExecutor(interpreter=interpreter,
parent_environ=parent_environ,
parent_variables=parent_vars)
def _get_pre_resolve_bindings(self):
if self.pre_resolve_bindings is None:
self.pre_resolve_bindings = {
"system": system,
"building": self.building,
"request": RequirementsBinding(self._package_requests),
"implicits": RequirementsBinding(self.implicit_packages)
}
return self.pre_resolve_bindings
@pool_memcached_connections
def _execute(self, executor):
br = '#' * 80
br_minor = '-' * 80
def _heading(txt):
executor.comment("")
executor.comment("")
executor.comment(br)
executor.comment(txt)
executor.comment(br)
def _minor_heading(txt):
executor.comment("")
executor.comment(txt)
executor.comment(br_minor)
# bind various info to the execution context
resolved_pkgs = self.resolved_packages or []
request_str = ' '.join(str(x) for x in self._package_requests)
implicit_str = ' '.join(str(x) for x in self.implicit_packages)
resolve_str = ' '.join(x.qualified_package_name for x in resolved_pkgs)
package_paths_str = os.pathsep.join(self.package_paths)
_heading("system setup")
executor.setenv("REZ_USED", self.rez_path)
executor.setenv("REZ_USED_VERSION", self.rez_version)
executor.setenv("REZ_USED_TIMESTAMP", str(self.timestamp))
executor.setenv("REZ_USED_REQUESTED_TIMESTAMP",
str(self.requested_timestamp or 0))
executor.setenv("REZ_USED_REQUEST", request_str)
executor.setenv("REZ_USED_IMPLICIT_PACKAGES", implicit_str)
executor.setenv("REZ_USED_RESOLVE", resolve_str)
executor.setenv("REZ_USED_PACKAGES_PATH", package_paths_str)
if self.building:
executor.setenv("REZ_BUILD_ENV", "1")
# rez-1 environment variables, set in backwards compatibility mode
if config.rez_1_environment_variables and \
not config.disable_rez_1_compatibility:
request_str_ = " ".join([request_str, implicit_str]).strip()
executor.setenv("REZ_VERSION", self.rez_version)
executor.setenv("REZ_PATH", self.rez_path)
executor.setenv("REZ_REQUEST", request_str_)
executor.setenv("REZ_RESOLVE", resolve_str)
executor.setenv("REZ_RAW_REQUEST", request_str_)
executor.setenv("REZ_RESOLVE_MODE", "latest")
# binds objects such as 'request', which are accessible before a resolve
bindings = self._get_pre_resolve_bindings()
for k, v in bindings.iteritems():
executor.bind(k, v)
executor.bind('resolve', VariantsBinding(resolved_pkgs))
#
# -- apply each resolved package to the execution context
#
_heading("package variables")
error_class = SourceCodeError if config.catch_rex_errors else None
# set basic package variables and create per-package bindings
bindings = {}
for pkg in resolved_pkgs:
_minor_heading("variables for package %s" % pkg.qualified_name)
prefix = "REZ_" + pkg.name.upper().replace('.', '_')
executor.setenv(prefix + "_VERSION", str(pkg.version))
major_version = str(pkg.version[0] if len(pkg.version) >= 1 else '')
minor_version = str(pkg.version[1] if len(pkg.version) >= 2 else '')
patch_version = str(pkg.version[2] if len(pkg.version) >= 3 else '')
executor.setenv(prefix + "_MAJOR_VERSION", major_version)
executor.setenv(prefix + "_MINOR_VERSION", minor_version)
executor.setenv(prefix + "_PATCH_VERSION", patch_version)
executor.setenv(prefix + "_BASE", pkg.base)
executor.setenv(prefix + "_ROOT", pkg.root)
bindings[pkg.name] = dict(version=VersionBinding(pkg.version),
variant=VariantBinding(pkg))
# commands
for attr in ("pre_commands", "commands", "post_commands"):
found = False
for pkg in resolved_pkgs:
commands = getattr(pkg, attr)
if commands is None:
continue
if not found:
found = True
_heading(attr)
_minor_heading("%s from package %s" % (attr, pkg.qualified_name))
bindings_ = bindings[pkg.name]
executor.bind('this', bindings_["variant"])
executor.bind("version", bindings_["version"])
executor.bind('root', pkg.root)
executor.bind('base', pkg.base)
exc = None
trace = None
commands.set_package(pkg)
try:
executor.execute_code(commands, isolate=True)
except error_class as e:
exc = e
if exc:
header = "Error in %s in package %r:\n" % (attr, pkg.uri)
if self.verbosity >= 2:
msg = header + str(exc)
else:
msg = header + exc.short_msg
raise PackageCommandError(msg)
_heading("post system setup")
# append suite paths based on suite visibility setting
self._append_suite_paths(executor)
# append system paths
executor.append_system_paths()
# add rez path so that rez commandline tools are still available within
# the resolved environment
mode = RezToolsVisibility[config.rez_tools_visibility]
if mode == RezToolsVisibility.append:
executor.append_rez_path()
elif mode == RezToolsVisibility.prepend:
executor.prepend_rez_path()
def _append_suite_paths(self, executor):
from rez.suite import Suite
mode = SuiteVisibility[config.suite_visibility]
if mode == SuiteVisibility.never:
return
visible_suite_paths = Suite.visible_suite_paths()
if not visible_suite_paths:
return
suite_paths = []
if mode == SuiteVisibility.always:
suite_paths = visible_suite_paths
elif self.parent_suite_path:
if mode == SuiteVisibility.parent:
suite_paths = [self.parent_suite_path]
elif mode == SuiteVisibility.parent_priority:
pop_parent = None
try:
parent_index = visible_suite_paths.index(self.parent_suite_path)
pop_parent = visible_suite_paths.pop(parent_index)
except ValueError:
pass
suite_paths.insert(0, (pop_parent or self.parent_suite_path))
for path in suite_paths:
tools_path = os.path.join(path, "bin")
executor.env.PATH.append(tools_path)
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| lgpl-3.0 | -8,348,680,213,081,125,000 | 37.619193 | 89 | 0.572767 | false |
matthewjwoodruff/language-of-choice | article/nodsl.py | 1 | 3949 | """
nodsl.py
Attempt to express the language of choice without
overloading operators.
I understand why the article did it, and I'm impressed
that it works so well, but it's hard to follow what's
happening. Too much magic for me.
I think partly it's because the article conflates variables
and choices. A variable's role is to participate in
choices, either as an index or as a branch, but not to
*be* a choice.
"""
class Node(object):
pass
class ConstantNode(Node):
def __init__(self, name, value):
self.rank = float("Inf")
self.value = value
self.name = name
def __repr__(self): return self.name
def evaluate(self, _): return self.value
class Variable(object):
"""
A variable is not a node in the BDD!
"""
def __init__(self, name, rank):
self.name = name
self.rank = rank
def __repr__(self): return self.name
def evaluate(self, env): return env[self.rank]
class ChoiceNode(Node):
def __init__(self, index, if0, if1):
"""
index is a constant, variable, or another choice node
"""
self.index = index
self.if0 = if0
self.if1 = if1
self.rank = self.index.rank
def __repr__(self):
return "{}({},{})".format(repr(self.index), repr(self.if0), repr(self.if1))
def evaluate(self, env):
fork = self.index.evaluate(env)
if fork == 0:
return self.if0.evaluate(env)
elif fork == 1:
return self.if1.evaluate(env)
raise Exception()
def subst(index, rank, value):
if index == constants[0]:
return index
if index == constants[1]:
return constants[1]
if rank < index.rank: return index
try:
if0 = index.if0
if1 = index.if1
except AttributeError:
if0 = constants[0]
if1 = constants[1]
if rank == index.rank:
if value == 0:
return if0
if value == 1:
return if1
raise Exception()
_if0 = subst(if0, rank, value)
_if1 = subst(if1, rank, value)
if _if0 is _if1: return _if0
return choice(index, _if0, _if1)
# one global dictionary for choices
# Keys: (index, if0, if1)
choices = dict()
def choice(index, if0, if1):
global choices
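    # Hash-cons choice nodes: structurally identical (index, if0, if1)
    # triples share a single ChoiceNode instance via this cache.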
try:
return choices[(index, if0, if1)]
except KeyError: pass
choices[(index, if0, if1)] = ChoiceNode(index, if0, if1)
print("choice {} {} {}".format(index, if0, if1))
if index == constants[0]:
return if0
if index == constants[1]:
return if1
if if0 == constants[0] and if1 == constants[1]:
return choices[(index, if0, if1)]
top = index.rank
_index = index
if if0.rank < top:
top = if0.rank
_index = if0
if if1.rank < top:
top = if1.rank
_index = if1
top = min(index.rank, if0.rank, if1.rank)
_if0 = choice(
subst(index, top, 0), subst(if0, top, 0), subst(if1, top, 0))
_if1 = choice(
subst(index, top, 1), subst(if0, top, 1), subst(if1, top, 1))
new_node = choice(_index, _if0, _if1)
return new_node
# one global dictionary for constants
# Key: constant value
constants = dict()
def constant(name, value):
global constants
try: return constants[value]
except KeyError:
constants[value] = ConstantNode(name, value)
return constants[value]
variables = dict()
def variable(name, rank):
global variables
try:
variable = variables[rank]
except KeyError:
return Variable(name, rank)
if variable.name != name:
raise Exception()
return variable
const0 = constant("0", 0)
const1 = constant("1", 1)
a = variable('a', 0)
b = variable('b', 1)
c = variable('c', 2)
p = variable('p', 3)
q = variable('q', 4)
left = choice(p, a, choice(q, b, c))
right = choice(q, choice(p, a, b), choice(p, a, c))
print("left {}".format(repr(left)))
print("right {}".format(repr(right)))
| gpl-3.0 | 1,861,754,821,942,455,800 | 25.863946 | 83 | 0.593062 | false |
ASMlover/study | compiler/eLisp2/eLisp/number.py | 1 | 2219 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import types
from interface import Eval
class Number(Eval):
def __init__(self, value):
self.data = value
def __repr__(self):
return repr(self.data)
def eval(self, env, args=None):
return self
def __eq__(self, rhs):
if isinstance(rhs, Number):
return (self.data == rhs.data)
else:
return False
class Integral(Number):
REGEX = re.compile(r'^[+-]?\d+$')
def __init__(self, value):
super(Integral, self).__init__(value)
class LongInt(Number):
REGEX = re.compile(r'^[+-]?\d+[lL]$')
def __init__(self, value):
super(LongInt, self).__init__(value)
class Float(Number):
REGEX = re.compile(r'^[+-]?(\d+\.\d*$|\d*\.\d+$)')
def __init__(self, value):
super(Float, self).__init__(value)
| bsd-2-clause | 8,802,085,610,972,698,000 | 32.119403 | 70 | 0.684993 | false |
lizardsystem/lizard-kpi | setup.py | 1 | 1150 | from setuptools import setup
version = '0.5.dev0'
long_description = '\n\n'.join([
open('README.rst').read(),
open('TODO.rst').read(),
open('CREDITS.rst').read(),
open('CHANGES.rst').read(),
])
install_requires = [
'Django',
'django-extensions',
'django-nose',
'lizard-ui',
'pkginfo',
    ]
tests_require = [
]
setup(name='lizard-kpi',
version=version,
description="Key performance indicators ('fuel gauges') for lizard",
long_description=long_description,
# Get strings from http://www.python.org/pypi?%3Aaction=list_classifiers
classifiers=['Programming Language :: Python',
'Framework :: Django',
],
keywords=[],
author='Reinout van Rees',
author_email='[email protected]',
url='',
license='GPL',
packages=['lizard_kpi'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
extras_require = {'test': tests_require},
entry_points={
'console_scripts': [
]},
)
| gpl-3.0 | -1,418,867,999,999,005,700 | 24.555556 | 78 | 0.58 | false |
tonygalmiche/is_coheliance | __openerp__.py | 1 | 3089 | # -*- coding: utf-8 -*-
{
"name" : "InfoSaône - Module Odoo pour Coheliance",
"version" : "0.1",
"author" : "InfoSaône / Tony Galmiche",
"category" : "InfoSaône",
'description': """
InfoSaône - Module Odoo pour Coheliance
===================================================
InfoSaône - Module Odoo pour Coheliance
""",
'maintainer': 'InfoSaône',
'website': 'http://www.infosaone.com',
"depends" : [
"base",
"mail",
"calendar", # Agenda
"crm", # CRM
"account",
"account_voucher", # eFacturation & Règlements
"account_accountant", # Comptabilité et finance
"sale", # Gestion des ventes
"purchase", # Gestion des achats
"sale_order_dates", # Ajout de champs dates dans les commandes clients (date demandée)
"project", # Gestin de projets
"hr", # Répertoire des employés
"hr_timesheet_sheet", # Feuilles de temps
"report",
], # Liste des dépendances (autres modules nececessaire au fonctionnement de celui-ci)
# -> Il peut être interessant de créer un module dont la seule fonction est d'installer une liste d'autres modules
# Remarque : La desinstallation du module n'entrainera pas la desinstallation de ses dépendances (ex : mail)
"init_xml" : [], # Liste des fichiers XML à installer uniquement lors de l'installation du module
"demo_xml" : [], # Liste des fichiers XML à installer pour charger les données de démonstration
"data" : [
"assets.xml", # Permet d'ajouter des css et des js
"product_view.xml",
"res_partner_view.xml",
"sale_view.xml",
"account_invoice_view.xml",
"account_bank_statement_view.xml",
"is_coheliance_view.xml",
"is_coheliance_sequence.xml",
"is_suivi_tresorerie_view.xml",
"is_export_compta.xml",
"is_coheliance_report.xml",
"is_prospective_view.xml",
"is_compte_resultat_view.xml",
"is_bilan_pedagogique_view.xml",
"is_suivi_banque_view.xml",
"is_suivi_caisse_view.xml",
"views/layouts.xml",
"views/layouts-convention.xml",
"views/report_affaire.xml",
"views/report_convention.xml",
"views/report_convention_st.xml",
"views/report_contrat_formation.xml",
"views/report_invoice.xml",
"views/report_frais.xml",
"views/report_fiche_frais.xml",
"views/report_compte_resultat.xml",
"report/is_suivi_facture.xml",
"report/is_suivi_refacturation_associe.xml",
"report/is_suivi_intervention.xml",
"report/is_account_invoice_line.xml",
"menu.xml",
"security/ir.model.access.csv",
    ], # List of XML files installed when the module is updated (or when it is installed)
    "installable": True, # If False, the module is visible but cannot be installed
    "active": False, # If True, the module is installed automatically as soon as the OpenERP database is created
"application": True,
}
| agpl-3.0 | 7,043,962,657,802,477,000 | 39.302632 | 129 | 0.616716 | false |
NCI-GDC/gdcdatamodel | migrations/update_legacy_states.py | 1 | 6311 | #!/usr/bin/env python
"""gdcdatamodel.migrations.update_legacy_states
----------------------------------
File nodes from legacy projects were given a `state` that represents
what is now `file_state`. This script transforms the old `state` into
`file_state` and set's the `state` according the the following table:
| from file.state | to file.state | to file.file_state |
|-----------------+---------------+--------------------|
| None | submitted | None |
| error | validated | error |
| invalid | validated | error |
| live | submitted | submitted |
| submitted | submitted | registered |
| uploaded | submitted | uploaded |
| validated | submitted | validated |
This script runs in parallel -> it has to use separate sessions -> it
has a session per Node subclass which is automatically committed.
See also https://jira.opensciencedatacloud.org/browse/DAT-276.
Usage:
```python
update_legacy_states(dict(
    host='localhost',
    user='test',
    database='automated_test',
    password='test'))
```
"""
import logging
from sqlalchemy import not_, or_, and_
from psqlgraph import Node, PsqlGraphDriver
from gdcdatamodel import models as md
from multiprocessing import Process, cpu_count, Queue
from collections import namedtuple
CLS_WITH_PROJECT_ID = {
cls for cls in Node.get_subclasses()
if 'project_id' in cls.__pg_properties__
}
CLS_WITH_STATE = {
cls for cls in Node.get_subclasses()
if 'state' in cls.__pg_properties__
}
CLS_TO_UPDATE = CLS_WITH_PROJECT_ID & CLS_WITH_STATE
# Determines state and file_state based on existing state
STATE_MAP = {
None: {
'state': 'submitted',
'file_state': None
},
'error': {
'state': 'validated',
'file_state': 'error'
},
'invalid': {
'state': 'validated',
'file_state': 'error'
},
'live': {
'state': 'submitted',
'file_state': 'submitted'
},
'submitted': {
'state': 'submitted',
'file_state': 'registered'
},
'uploaded': {
'state': 'submitted',
'file_state': 'uploaded'
},
'validated': {
'state': 'submitted',
'file_state': 'validated'
},
}
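# Illustrative effect of STATE_MAP: a legacy file node stored with
# state='live' and no file_state ends up with state='submitted' and
# file_state='submitted', while the original values are preserved in system
# annotations as legacy_state / legacy_file_state (see update_cls below).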
logger = logging.getLogger("state_updater")
logging.basicConfig(level=logging.INFO)
def legacy_filter(query, legacy_projects):
"""filter query to those whose project_id is None or points to TARGET
or TCGA
"""
legacy_filters = [
query.entity().project_id.astext ==
project.programs[0].name + '-' + project.code
for project in legacy_projects
]
return query.filter(or_(
null_prop(query.entity(), 'project_id'),
*legacy_filters
))
def null_prop(cls, key):
"""Provide expression to filter on a null or nonexistent value"""
return or_(
cls._props.contains({key: None}),
not_(cls._props.has_key(key)),
)
def print_cls_query_summary(graph):
"""Print breakdown of class counts to stdout"""
cls_queries = {
cls.get_label(): cls_query(graph, cls)
for cls in CLS_WITH_PROJECT_ID & CLS_WITH_STATE
}
print(
"%s: %d" % ("legacy_stateless_nodes".ljust(40),
sum([query.count() for query in cls_queries.itervalues()]))
)
for label, query in cls_queries.items():
count = query.count()
if count:
print("%35s : %d" % (label, count))
def cls_query(graph, cls):
"""Returns query for legacy nodes with state in {null, 'live'}"""
legacy_projects = graph.nodes(md.Project).props(state='legacy').all()
options = [
# state
null_prop(cls, 'state'),
cls.state.astext.in_(STATE_MAP),
]
if 'file_state' in cls.__pg_properties__:
options += [null_prop(cls, 'file_state')]
return (legacy_filter(graph.nodes(cls), legacy_projects)
.filter(or_(*options)))
def update_cls(graph, cls):
"""Updates as described in update_target_states for a single class"""
with graph.session_scope() as session:
query = cls_query(graph, cls)
count = query.count()
if count == 0:
return
logger.info('Loading %d %s nodes', count, cls.label)
nodes = query.all()
logger.info('Loaded %d %s nodes', len(nodes), cls.label)
for node in nodes:
state = node._props.get('state', None)
file_state = node._props.get('file_state', None)
if state in STATE_MAP:
node.state = STATE_MAP[state]['state']
set_file_state = (
'file_state' in node.__pg_properties__
and file_state is None
and state in STATE_MAP
)
if set_file_state:
node.file_state = STATE_MAP[state]['file_state']
node.sysan['legacy_state'] = state
node.sysan['legacy_file_state'] = file_state
logger.info('Committing %s nodes', cls.label)
graph.current_session().commit()
logger.info('Done with %s nodes', cls.label)
def update_classes(graph_kwargs, input_q):
"""Creates a db driver and pulls classes from the queue to update"""
graph = PsqlGraphDriver(**graph_kwargs)
while True:
cls = input_q.get()
if cls is None: # none means no more work
return
update_cls(graph, cls)
def update_legacy_states(graph_kwargs):
"""Updates state, file_state on legacy nodes
- node.state in {None, 'live'}
- node.project_id in {None, <Legacy project_id list>}
there is no project_id, or project_id points to a legacy project
"""
graph = PsqlGraphDriver(**graph_kwargs)
with graph.session_scope():
print_cls_query_summary(graph)
input_q = Queue()
pool = [
Process(target=update_classes, args=(graph_kwargs, input_q))
for _ in range(cpu_count())
]
for cls in CLS_TO_UPDATE:
input_q.put(cls)
for process in pool:
input_q.put(None) # put a no more work signal for each process
for process in pool:
process.start()
for process in pool:
process.join()
| apache-2.0 | 7,030,365,817,418,758,000 | 25.078512 | 79 | 0.575978 | false |
fahadsultan/CausalRelations | FeaturesExtractor.py | 1 | 14860 |
from bs4 import BeautifulSoup
import os
import pandas as pd
import sys
import traceback
from sklearn.feature_extraction.text import CountVectorizer
class FeaturesExtractor:
def __init__(self):
        self.FEATURE_NAMES = [
            'e1_token_id', 'e1_number', 'e1_sentence', 'e1_token', 'e1_aspect',
            'e1_class', 'e1_event_id', 'e1_modality', 'e1_polarity', 'e1_pos',
            'e1_tense',
            'e2_token_id', 'e2_number', 'e2_sentence', 'e2_token', 'e2_aspect',
            'e2_class', 'e2_event_id', 'e2_modality', 'e2_polarity', 'e2_pos',
            'e2_tense',
            'dep_path', 'same_pos_tag', 'sentence_distance', 'event_distance',
            'same_polarity', 'same_aspect', 'same_tense', 'same_class',
            'csignals_in_bw', 'csignal_position', 'tlink_exists',
            'e1_is_sent_root', 'e2_is_sent_root', 'causal_relation_exists']
COLUMN_NAMES = ['filename', 'sentence', 'relation', 'governor',
'governor_idx', 'dependent', 'dependent_idx']
self.data = []
self.deps = pd.read_csv('data/text/_out_dependencies.csv',
names=COLUMN_NAMES, sep='\t')
def recursive_search(self, df, path, to_find_token,
to_find_index, to_find_sentence, governor_token,
governor_index, governor_sentence):
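        # Depth-first walk over the dependency graph rooted at the governor
        # token; `path` accumulates the space-separated dependency relations
        # until the target (to_find_*) token is reached.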
dependencies = df[(self.deps['governor'] == governor_token) &
(self.deps['governor_idx'] == int(governor_index)) &
(self.deps['sentence'] == int(governor_sentence))]
for i in range(len(dependencies)):
dependency = dependencies.iloc[i]
            #Weird idiosyncrasy I came across where the governor and the dependent
#were the same token
if ((dependency['governor'] == dependency['dependent']) and
(dependency['dependent_idx'] == dependency['governor_idx'])):
continue
#check break condition
if (dependency['dependent'] == to_find_token and
dependency['dependent_idx'] == to_find_index and
dependency['sentence'] == to_find_sentence):
path = path+' '+dependency['relation']
break
else:
path_to_pass = path+' '+dependency['relation']
path_returned = self.recursive_search(
df, path_to_pass, to_find_token,
to_find_index, to_find_sentence, dependency['dependent'],
dependency['dependent_idx'], dependency['sentence'])
if path_returned != path_to_pass:
path = path_returned
break
return path
def get_dependency_path(self, filename, e1_token, e1_token_id,
e1_sentence, e2_token,
e2_token_id, e2_sentence):
#Since intersentential paths are allowed, the next sentence is
#also included
df = self.deps[(self.deps['filename'] == filename) &
((self.deps['sentence'] == int(e1_sentence)) |
(self.deps['sentence'] == int(e1_sentence)+1))]
path = self.recursive_search(df, '', e2_token, e2_token_id,
e2_sentence, e1_token, e1_token_id,
e1_sentence)
        if path != '':
return path
else:
#Try finding path from e2 to e1
return self.recursive_search(df, '', e1_token,
e1_token_id, int(e1_sentence),
e2_token, e2_token_id,
int(e2_sentence))
def parseFile(self, filename):
f = open(filename)
soup = BeautifulSoup(f.read())
events = soup.findAll('event')
tokens = soup.findAll('token')
for i in range(0,len(events)-1):
event = events[i]
for j in range(i+1, len(events)):
next_event = events[j]
event_token_id = event.find('token_anchor').attrs['id']
next_event_token_id = next_event.find('token_anchor').attrs['id']
event_token_tag = soup.find(lambda tag: (tag.name) == 'token' and
(tag.attrs['id']) == (event_token_id))
next_event_token_tag = soup.find(lambda tag: (tag.name) == 'token' and
(tag.attrs['id']) == (next_event_token_id))
event_sentence = event_token_tag['sentence']
next_event_sentence = next_event_token_tag['sentence']
if (int(next_event_sentence) - int(event_sentence)) > 1:
break # For now, intersentential event pairs can only be one sentence apart
else:
e1_number = event_token_tag.attrs['number']
e1_sentence = event_sentence
e1_token = event_token_tag.text
e1_aspect = event.attrs['aspect']
e1_certainty = event.attrs['certainty']
e1_class = event.attrs['class']
e1_comment = event.attrs['comment']
e1_factuality = event.attrs['factuality']
e1_event_id = event.attrs['id']
e1_modality = event.attrs['modality']
e1_polarity = event.attrs['polarity']
e1_pos = event.attrs['pos']
e1_tense = event.attrs['tense']
e2_number = next_event_token_tag.attrs['number']
                    e2_sentence = next_event_sentence
e2_token = next_event_token_tag.text
e2_aspect = next_event.attrs['aspect']
e2_certainty = next_event.attrs['certainty']
e2_class = next_event.attrs['class']
e2_comment = next_event.attrs['comment']
e2_factuality = next_event.attrs['factuality']
e2_event_id = next_event.attrs['id']
e2_modality = next_event.attrs['modality']
e2_polarity = next_event.attrs['polarity']
e2_pos = next_event.attrs['pos']
e2_tense = next_event.attrs['tense']
causal_relation_exists = len(soup.findAll(lambda tag:
tag.name == 'source' and
tag.findParent().name == 'clink' and
tag.findNextSibling().name == 'target' and
((tag.attrs['id'] == e1_event_id and
tag.findNextSibling().attrs['id'] == e2_event_id)
or
(tag.attrs['id'] == e2_event_id and
tag.findNextSibling().attrs['id'] == e1_event_id)) )) > 0
e1_token_id_offset = soup.find(
lambda tag: tag.name == 'token' and
tag.attrs['sentence'] == e1_sentence).attrs['id']
if e1_sentence == e2_sentence:
e2_token_id_offset = e1_token_id_offset
else:
e2_token_id_offset = soup.find(
lambda tag: tag.name == 'token' and
tag.attrs['sentence'] == e2_sentence).attrs['id']
e1_token_id = int(event_token_tag.attrs['id']) - int(e1_token_id_offset) + 1
e2_token_id = int(next_event_token_tag.attrs['id']) - int(e2_token_id_offset) + 1
e1_event_id = int(e1_event_id)
e2_event_id = int(e2_event_id)
same_pos_tag = e1_pos == e2_pos
sentence_distance = int(e2_sentence) - int(e1_sentence)
event_distance = e2_event_id - e1_event_id + 1
same_polarity = e1_polarity == e2_polarity
same_aspect = e1_aspect == e2_aspect
same_tense = e1_tense == e2_tense
same_class = e1_class == e2_class
'''
TODO: The conditions between e1_event_id and e2_event_id maybe don't
make sense because e1_event_id would always be greater than e2_event_id.
Reverse causal relations are identified only if e2 is specifed as
source in clink and e1 as target
'''
csignals_in_bw = soup.findAll(lambda tag: tag.name == 'c-signal' and
(( (e1_event_id < e2_event_id) and
(int(tag.attrs['id']) > e1_event_id) and
(int(tag.attrs['id']) < e2_event_id)) or
(e1_event_id > e2_event_id and
int(tag.attrs['id']) > e2_event_id and
int(tag.attrs['id']) < e1_event_id)))
csignal_position = csignal = ''
if len(csignals_in_bw) == 0:
csignal_tag = event.findPreviousSibling(lambda tag: tag.name == 'c-signal')
if csignal_tag is not None:
csignal_token_id = csignal_tag.find('token_anchor').attrs['id']
csignal_token_tag = soup.find(lambda x:
x.name == 'token' and x.attrs['id'] == csignal_token_id)
if csignal_token_tag.attrs['sentence'] == e1_sentence:
csignal = soup.find(lambda x:
x.name == 'token' and x.attrs['id'] == csignal_token_id).text
csignal_position = 'before'
else:
csignal_token_id = csignals_in_bw[-1].find('token_anchor').attrs['id']
csignal = soup.find(lambda x: x.name == 'token' and x.attrs['id'] == csignal_token_id).text
csignal_position = 'between'
tlink_exists = len(soup.findAll(lambda tag:
tag.name == 'tlink'
and (
((tag.find('source').attrs['id'] == str(e1_event_id)) and
(tag.find('target').attrs['id'] == str(e2_event_id)))
or
((tag.find('source').attrs['id'] == str(e2_event_id)) and
(tag.find('target').attrs['id'] == str(e1_event_id))) )
)) > 0
filename = filename.split('.xml')[0]
filename = filename.split('/')
filename = filename[len(filename) - 1]
dep_path = self.get_dependency_path(
filename, e1_token, e1_token_id, e1_sentence,
e2_token, e2_token_id, e2_sentence)
e1_is_sent_root = len(self.deps[
(self.deps['governor'] == 'ROOT') &
(self.deps['dependent'] == e1_token) &
(self.deps['dependent_idx'] == int(e1_token_id)) &
(self.deps['sentence'] == int(e1_sentence))] ) > 0
e2_is_sent_root = len(self.deps[
(self.deps['governor'] == 'ROOT') &
(self.deps['dependent'] == e2_token) &
(self.deps['dependent_idx'] == int(e2_token_id)) &
(self.deps['sentence'] == int(e2_sentence))] ) > 0
row = [
e1_token_id,
e1_number,
e1_sentence,
e1_token,
e1_aspect,
e1_class,
e1_event_id,
e1_modality,
e1_polarity,
e1_pos,
e1_tense,
e2_token_id,
e2_number,
e2_sentence,
e2_token,
e2_aspect,
e2_class,
e2_event_id,
e2_modality,
e2_polarity,
e2_pos,
e2_tense,
dep_path,
same_pos_tag,
sentence_distance,
event_distance,
same_polarity,
same_aspect,
same_tense,
same_class,
csignal,
csignal_position,
tlink_exists,
e1_is_sent_root,
e2_is_sent_root,
causal_relation_exists ]
self.data.append(row)
f.close()
def extract_features(self):
for folder, subs, files in os.walk('data/xml'):
for filename in files:
try:
if ('.xml' in filename) and (filename[0] != '.'):
print 'Parsing File: '+filename
self.parseFile(os.path.join(folder, filename))
except Exception as e:
traceback.print_exc()
continue
self.data = pd.DataFrame(self.data)
self.data.columns = self.FEATURE_NAMES
    def save_to_csv(self, filename):
self.data.to_csv(filename)
if __name__ == "__main__":
extractor = FeaturesExtractor()
extractor.extract_features()
extractor.save_to_csv('features.csv')
| apache-2.0 | 3,989,359,469,246,249,500 | 43.624625 | 538 | 0.421467 | false |
hotsyk/uapython2 | event/migrations/0001_initial.py | 1 | 2895 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('description', models.TextField()),
('map_link', models.URLField(max_length=250)),
('photos', models.ImageField(upload_to=b'')),
],
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('code', models.CharField(max_length=10)),
('description', models.TextField()),
('currency', models.CharField(max_length=3)),
('map_link', models.URLField(max_length=250)),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('coordinator', models.CharField(max_length=30)),
('city', models.ForeignKey(blank=True, to='event.City', null=True)),
],
),
migrations.CreateModel(
name='EventType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Venue',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=30)),
('geo_coordinates', models.CharField(max_length=20)),
('map_link', models.URLField(max_length=250)),
('photos', models.ImageField(upload_to=b'')),
('city', models.ForeignKey(to='event.City')),
],
),
migrations.AddField(
model_name='event',
name='event_type',
field=models.ForeignKey(to='event.EventType'),
),
migrations.AddField(
model_name='event',
name='venue',
field=models.ForeignKey(to='event.Venue'),
),
migrations.AddField(
model_name='city',
name='country',
field=models.ForeignKey(to='event.Country'),
),
]
| bsd-3-clause | 5,386,760,394,031,964,000 | 37.092105 | 114 | 0.516408 | false |
dutradda/myreco | myreco/engine_strategies/filters/factory.py | 1 | 4207 | # MIT License
# Copyright (c) 2016 Diogo Dutra <[email protected]>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from myreco.engine_strategies.filters.filters import (ArrayFilterBy,
ArrayFilterOf,
BooleanFilterBy,
IndexFilterByPropertyOf,
IndexFilterOf,
ObjectFilterBy,
ObjectFilterOf,
SimpleFilterBy,
SimpleFilterOf)
class FiltersFactory(object):
_filters_types_map = {
'property_value': {
'name': 'By Property Value',
'types': {
'integer': SimpleFilterBy,
'string': SimpleFilterBy,
'object': ObjectFilterBy,
'array': ArrayFilterBy,
'boolean': BooleanFilterBy
}
},
'item_property_value': {
'name': 'By Item Property Value',
'types': {
'integer': SimpleFilterOf,
'string': SimpleFilterOf,
'object': ObjectFilterOf,
'array': ArrayFilterOf,
'boolean': BooleanFilterBy
}
},
'property_value_index': {
'name': 'By Property Value Index',
'types': {
'integer': IndexFilterOf,
'string': IndexFilterOf,
'object': IndexFilterOf,
'array': IndexFilterOf,
'boolean': IndexFilterOf
}
},
'item_property_value_index': {
'name': 'By Item Property Value Index',
'types': {
'integer': IndexFilterByPropertyOf,
'string': IndexFilterByPropertyOf,
'object': IndexFilterByPropertyOf,
'array': IndexFilterByPropertyOf,
'boolean': IndexFilterByPropertyOf
}
}
}
@classmethod
def get_filter_types(cls):
return [{'name': filter_type['name'], 'id': filter_type_id}
for filter_type_id, filter_type in sorted(cls._filters_types_map.items())]
@classmethod
def get_filter_type(cls, filter_type_id):
filter_type = cls._filters_types_map.get(filter_type_id)
return {
'name': filter_type['name'],
'id': filter_type_id
} if filter_type else None
@classmethod
def make(cls, items_model, slot_filter, schema, skip_values=None):
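        # Look up the concrete filter class by (type_id, JSON value type) in
        # the class map above; unsupported combinations fall through and
        # return None.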
value_type = schema['type']
filter_name = slot_filter['property_name']
type_id = slot_filter['type_id']
is_inclusive = slot_filter['is_inclusive']
id_names = schema.get('id_names')
filter_class = cls._filters_types_map.get(type_id, {'types': {}})['types'].get(value_type)
if filter_class:
return filter_class(items_model, filter_name, is_inclusive, id_names, skip_values)
| mit | -5,812,263,325,621,395,000 | 40.245098 | 98 | 0.550986 | false |
hyperhq/nova-hyper | novahyper/virt/hyper/hostinfo.py | 1 | 2080 | # Copyright (c) 2013 dotCloud, Inc.
# Copyright (c) 2015 HyperHQ Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
CONF = cfg.CONF
def statvfs():
hyper_path = CONF.hyper.root_directory
if not os.path.exists(hyper_path):
hyper_path = '/'
return os.statvfs(hyper_path)
def get_disk_usage():
st = statvfs()
return {
'total': st.f_blocks * st.f_frsize,
'available': st.f_bavail * st.f_frsize,
'used': (st.f_blocks - st.f_bfree) * st.f_frsize
}
def get_total_vcpus():
total_vcpus = 0
with open('/proc/cpuinfo') as f:
for ln in f.readlines():
if ln.startswith('processor'):
total_vcpus += 1
return total_vcpus
def get_vcpus_used(containers):
total_vcpus_used = 0
for container in containers:
if isinstance(container, dict):
total_vcpus_used += container.get('Config', {}).get(
'CpuShares', 0)
return total_vcpus_used
def get_memory_usage():
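    # Parse /proc/meminfo: "available" memory is approximated as
    # MemFree + Buffers + Cached, and the kB figures are converted to bytes.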
with open('/proc/meminfo') as f:
m = f.read().split()
idx1 = m.index('MemTotal:')
idx2 = m.index('MemFree:')
idx3 = m.index('Buffers:')
idx4 = m.index('Cached:')
total = int(m[idx1 + 1])
avail = int(m[idx2 + 1]) + int(m[idx3 + 1]) + int(m[idx4 + 1])
return {
'total': total * 1024,
'used': (total - avail) * 1024
}
def get_mounts():
with open('/proc/mounts') as f:
return f.readlines()
| apache-2.0 | 7,434,647,035,159,597,000 | 24.679012 | 78 | 0.603365 | false |
sony/nnabla | python/benchmark/function/test_logical.py | 1 | 2622 | # Copyright 2017,2018,2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import nnabla.initializer as I
import nnabla.functions as F
from function_benchmark import FunctionBenchmark, Inspec
def inspecs_params():
inspecs = []
u = I.UniformInitializer((0, 2))
inspecs.append([Inspec((64, 32, 224, 224), u)])
return inspecs
@pytest.mark.parametrize('inspecs', inspecs_params())
@pytest.mark.parametrize('op',
['logical_and_scalar', 'logical_or_scalar', 'logical_xor_scalar',
'greater_scalar', 'greater_equal_scalar',
'less_scalar', 'less_equal_scalar',
'equal_scalar', 'not_equal_scalar'])
def test_scalar_logical(inspecs, op, nnabla_opts):
func = getattr(F, op)
fb = FunctionBenchmark(
func, inspecs, [1], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
@pytest.mark.parametrize('inspecs', inspecs_params())
def test_logical_not(inspecs, nnabla_opts):
func = F.logical_not
fb = FunctionBenchmark(
func, inspecs, [], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
def pairwise_inspecs_params():
inspecs = []
u = I.UniformInitializer((0, 2))
inspecs.append([Inspec((64, 32, 224, 224), u),
Inspec((64, 32, 224, 224), u)])
return inspecs
@pytest.mark.parametrize('inspecs', pairwise_inspecs_params())
@pytest.mark.parametrize('op',
['logical_and', 'logical_or', 'logical_xor',
'greater', 'greater_equal',
'less', 'less_equal',
'equal', 'not_equal'])
def test_pairwise_logical(inspecs, op, nnabla_opts):
func = getattr(F, op)
fb = FunctionBenchmark(
func, inspecs, [], {},
nnabla_opts.ext, nnabla_opts.ext_kwargs)
fb.benchmark()
fb.write(writer=nnabla_opts.function_benchmark_writer)
| apache-2.0 | 6,322,361,230,130,387,000 | 33.5 | 90 | 0.635774 | false |
JamesClough/dagology | dagology/generators/random_dag.py | 1 | 1482 | """
Random DAG model, as in Karrer & Newman, 2009, Phys Rev E
"""
# Copyright (C) 2016 by
# James Clough <[email protected]>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["James Clough ([email protected])"])
import networkx as nx
import numpy as np
from random import randrange
import dagology as dag
__all__ = ['random_dag']
def random_dag(degree_sequence):
""" Create a random DAG from a given degree sequence
Parameters
----------
degree_sequence - list of pairs of in, out degrees
all edges go from earlier to later in this list
Returns
-------
NetworkX DiGraph
"""
G = nx.DiGraph()
G.add_nodes_from(range(len(degree_sequence)))
remaining_stubs = [] # list of forward pointing stubs
for node, degrees in enumerate(degree_sequence):
indegree, outdegree = degrees
allowed_stubs = remaining_stubs[:]
for x in range(indegree):
if len(allowed_stubs) == 0:
                raise nx.NetworkXError('Not a valid degree sequence')
older_node = allowed_stubs.pop(randrange(len(allowed_stubs)))
remaining_stubs.remove(older_node)
# be careful about multiedges
allowed_stubs = [x for x in allowed_stubs if x != older_node]
G.add_edge(older_node, node)
for x in range(outdegree):
remaining_stubs.append(node)
return G
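# Illustrative usage (assumed example, not part of the original module):
# >>> G = random_dag([(0, 1), (1, 1), (1, 1), (1, 0)])
# >>> G.number_of_edges()
# 3
# Every edge points from an earlier to a later position in the degree sequence.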
| mit | -711,647,949,574,725,400 | 27.5 | 73 | 0.609312 | false |
SonicFrog/jdrpoly | main/migrations/0004_comitymember_mainpagesection_news.py | 1 | 2798 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-02-24 11:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0003_contest'),
]
operations = [
migrations.CreateModel(
name='ComityMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=100, verbose_name='Pr\xe9nom')),
('last_name', models.CharField(max_length=100, verbose_name='Nom')),
('post', models.CharField(max_length=100, verbose_name='Poste')),
('description', models.TextField(verbose_name='Description du poste')),
('email', models.EmailField(max_length=254, verbose_name='Addresse de contact')),
],
options={
'ordering': ('pk',),
'verbose_name': 'Membre du comit\xe9',
'verbose_name_plural': 'Membres du comit\xe9',
},
),
migrations.CreateModel(
name='MainPageSection',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Titre')),
('content', models.TextField(verbose_name='Contenu')),
('order', models.IntegerField(verbose_name='Position')),
],
options={
'ordering': ('order', '-pk'),
'verbose_name': "Section page d'acceuil",
'verbose_name_plural': "Sections page d'accueil",
},
),
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default=b'Nouvelle news', max_length=200, verbose_name='Titre')),
('content', models.TextField(max_length=10000, verbose_name='Contenu')),
('date', models.DateField(default=django.utils.timezone.now, verbose_name='Date')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Auteur')),
],
options={
'ordering': ('date',),
'verbose_name': 'News',
'verbose_name_plural': 'News',
},
),
]
| gpl-2.0 | 1,634,196,145,750,455,800 | 42.71875 | 143 | 0.556469 | false |
MaxTyutyunnikov/lino | obsolete/src/lino/adamo/datatypes.py | 1 | 11685 | ## Copyright 2003-2007 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
yet another attempt to create a universal set of datatypes...
"""
import datetime
from time import mktime, ctime
import types
from lino.tools.months import Month
from lino.misc.descr import Describable
from lino.misc.etc import ispure, iif
#from lino.adamo.exceptions import RefuseValue
from lino.adamo.exceptions import DataVeto
ERR_FORMAT_NONE = "caller must handle None values"
ERR_PARSE_EMPTY = "caller must handle empty strings"
#def itself(x): return x
class Type(Describable):
"base class for containers of data-type specific meta information"
defaultValue=None
parser=lambda x: x # itself
formatter=str
allowedClasses=None # None or list of allowed classes for value
# sizes are given in "characters" or "lines"
minHeight = 1
maxHeight = 1
def __call__(self,*args,**kw):
return self.child(*args,**kw)
#return apply(self.__class__,[],kw)
def __repr__(self):
return "%s (%s)" % (self.__class__.__name__,
repr(self.__dict__))
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return self.formatter(v)
#return repr(v)
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return self.parser(s)
def validate(self,value):
if self.allowedClasses is None: return
if value.__class__ in self.allowedClasses: return
raise DataVeto("%r is not a valid %s" % (value,self))
## def getPreferredWidth(self):
## #note: for StringType, self.width is an instance variable, for
## #other classes it is a class variable.
## return self.width
## def getMinSize(self):
## return (self.minWidth
class WidthType(Type):
defaultWidth=50
minWidth=15
maxWidth=50
def __init__(self,parent=None,
width=None,minWidth=None,maxWidth=None,
**kw):
Type.__init__(self,parent,**kw)
if width is not None:
minWidth = maxWidth = width
if maxWidth is not None:
self.maxWidth = maxWidth
elif parent is not None:
if self.maxWidth != parent.maxWidth:
self.maxWidth = parent.maxWidth
if minWidth is not None:
self.minWidth = minWidth
elif parent is not None:
if self.minWidth != parent.minWidth:
self.minWidth = parent.minWidth
## def parse(self,s):
## assert len(s), ERR_PARSE_EMPTY
## return int(s)
class IntType(WidthType):
defaultValue=0
defaultWidth=5
minWidth=3
maxWidth=7
parser=int
allowedClasses=(types.IntType,)
## def parse(self,s):
## assert len(s), ERR_PARSE_EMPTY
## return int(s)
## def validate(self,value):
## if value.__class__ is types.IntType:
## return
## raise DataVeto("not an integer")
class BoolType(IntType):
defaultValue=False
parser=bool
formatter=lambda s,x: iif(x,'X','-')
allowedClasses=(types.BooleanType,)
## def validate(self,value):
## #print __name__,value
## Type.validate(self,value)
class AutoIncType(IntType):
pass
#class AreaType(IntType):
# pass
class LongType(IntType):
parser=long
allowedClasses=(types.LongType,)
class AsciiType(WidthType):
defaultValue=""
defaultWidth=20
minWidth=1
maxWidth=50
allowedClasses=(types.StringType,)
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return str(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return v
def validate(self,value):
Type.validate(self,value)
if len(value) == 0:
raise DataVeto("Cannot store empty string.")
if value.endswith(' '):
raise DataVeto("%r ends with a space" % value)
class StringType(AsciiType):
defaultValue=""
defaultWidth=50
minWidth=15
maxWidth=50
allowedClasses=(types.StringType,types.UnicodeType)
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return s
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
#return v
return unicode(v)
#return v.encode("cp1252",'replace')
def validate(self,value):
AsciiType.validate(self,value)
if not ispure(value):
raise DataVeto("%r is not pure" % value)
class PasswordType(StringType):
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return '*' * len(v)
class MemoType(StringType):
def __init__(self,parent=None,
height=None, minHeight=None,maxHeight=None,
**kw):
StringType.__init__(self,parent,**kw)
if height is not None:
minHeight = maxHeight = height
if minHeight is None:
if parent is None:
minHeight=4
else:
minHeight=parent.minHeight
if maxHeight is None:
if parent is None:
maxHeight=10
else:
maxHeight=parent.maxHeight
self.minHeight = minHeight
self.maxHeight = maxHeight
class TimeStampType(Type):
maxWidth = 10
minWidth = 10
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
l=s.split()
if len(l) == 2:
d=DATE.parse(l[0])
t=TIME.parse(l[1])
dt=datetime.datetime.combine(d,t)
ts_tuple=dt.timetuple()
return mktime(ts_tuple)
raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return ctime(v)
def validate(self,value):
if value.__class__ in (types.FloatType, types.IntType):
return
raise DataVeto("not a date")
## if not isinstance(value,types.FloatType):
## #raise repr(value)+" is not a date"
## raise DataVeto("not a date")
class DateType(Type):
maxWidth = 10
minWidth = 10
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
s = s.replace(".","-")
l = s.split("-")
if len(l) == 3:
l = map(int,l)
return datetime.date(*l)
elif len(l) == 1:
assert len(s) == 8, repr(s)
y = int(s[0:4])
m = int(s[4:6])
d = int(s[6:8])
return datetime.date(y,m,d)
else:
raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
#return repr(v) # "[-]yyyymmdd"
return v.isoformat()
def validate(self,value):
if not isinstance(value,datetime.date):
#raise repr(value)+" is not a date"
raise DataVeto("not a date")
class MonthType(Type):
maxWidth = 7
minWidth = 7
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
return Month.parse(s)
## s = s.replace(".","-")
## s = s.replace("/","-")
## l = s.split("-")
## if len(l) == 2:
## l = map(int,l)
## return Month(*l)
## elif len(l) == 1:
## assert len(s) == 6, repr(s)
## y = int(s[0:4])
## m = int(s[4:6])
## return Month(y,m)
## else:
## raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
        return str(v)
def validate(self,value):
if not isinstance(value,datetime.date):
#raise repr(value)+" is not a date"
raise DataVeto("not a date")
class TimeType(Type):
maxWidth = 8
minWidth = 8
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
l = s.split(":")
if len(l) > 4:
raise ValueError, repr(s)
        if len(l) == 1:
return stot(s)
l = map(int,l)
return datetime.time(*l)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
return str(v)[:self.maxWidth]
def validate(self,value):
if not isinstance(value,datetime.time):
#raise repr(value)+" is not a time"
raise DataVeto("not a time")
class DurationType(Type):
minWidth = 8
maxWidth = 8
fmt = "hh.mm.ss" # currently only possible fmt
def parse(self,s):
assert len(s), ERR_PARSE_EMPTY
l = s.split(".")
if len(l) == 3:
hours = int(l[0])
minutes = int(l[1])
seconds = int(l[2])
return datetime.timedelta(0,seconds,0,0,minutes,hours)
elif len(l) == 2:
minutes = int(l[0])
seconds = int(l[1])
return datetime.timedelta(0,seconds,0,0,minutes)
else:
raise ValueError, repr(s)
def format(self,v):
assert v is not None, ERR_FORMAT_NONE
h = v.seconds / 3600
m = (v.seconds - h * 3600) / 60
s = v.seconds - h * 3600 - m*60
return "%02d.%02d.%02d" % (h,m,s)
def validate(self,value):
if not isinstance(value,datetime.timedelta):
#raise DataVeto(repr(value)+" is not a timedelta")
raise DataVeto("not a timedelta")
class UrlType(StringType):
pass
class ImageType(StringType):
pass
class LogoType(StringType):
pass
class EmailType(StringType):
pass
class AmountType(IntType):
pass
class PriceType(IntType):
pass
ASTRING = AsciiType()
STRING = StringType()
PASSWORD = PasswordType()
MEMO = MemoType()
DATE = DateType()
MONTH = MonthType()
TIME = TimeType() # StringType(width=8)
TIMESTAMP = TimeStampType()
DURATION = DurationType()
INT = IntType()
LONG = LongType()
BOOL = BoolType()
AMOUNT = AmountType()
PRICE = PriceType()
ROWID = AutoIncType()
URL = UrlType(width=200)
EMAIL = EmailType(width=60)
#AREA = AreaType()
IMAGE = ImageType()
LOGO = LogoType()
LANG=STRING(2)
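# Illustrative usage of the type singletons above (assumed examples):
#   DATE.parse("2005-12-31")    -> datetime.date(2005, 12, 31)
#   BOOL.format(True)           -> 'X'
#   DURATION.parse("01.30.00")  -> datetime.timedelta(hours=1, minutes=30)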
def itot(i):
return stot(str(i))
def stot(s):
if len(s) == 4:
return datetime.time(int(s[0:2]),int(s[2:]))
elif len(s) == 3:
return datetime.time(int(s[0:1]),int(s[1:]))
elif len(s) <= 2:
        return datetime.time(int(s))
else:
raise ValueError, repr(s)
def itod(i):
return DATE.parse(str(i))
## s=str(i)
## assert len(s) == 8, repr(i)
## y = int(s[0:4])
## m = int(s[4:6])
## d = int(s[6:8])
## return datetime.date(y,m,d)
def stod(s):
return DATE.parse(s)
def itom(i):
return MONTH.parse(str(i))
def stom(s):
return MONTH.parse(s)
__all__ = filter(lambda x: x[0] != "_", dir())
| gpl-3.0 | -7,101,941,256,306,729,000 | 24.681319 | 73 | 0.55798 | false |
nju-websoft/JAPE | code/attr2vec_func.py | 1 | 2786 | import math
import collections
import random
import numpy as np
import tensorflow as tf
import itertools
import time
def sum_rows(x):
"""Returns a vector summing up each row of the matrix x."""
cols = tf.shape(x)[1]
ones_shape = tf.stack([cols, 1])
ones = tf.ones(ones_shape, x.dtype)
return tf.reshape(tf.matmul(x, ones), [-1])
def compute_sampled_logits(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1):
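    # Draw `num_sampled` negative classes with a log-uniform candidate
    # sampler and return the logits for the true labels and for the sampled
    # negatives, mirroring TensorFlow's sampled-softmax/NCE helpers.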
if not isinstance(weights, list):
weights = [weights]
if labels.dtype != tf.int64:
labels = tf.cast(labels, tf.int64)
labels_flat = tf.reshape(labels, [-1])
sampled_ids, true_expected_count, sampled_expected_count = tf.nn.log_uniform_candidate_sampler(
true_classes=labels,
num_true=num_true,
num_sampled=num_sampled,
unique=True,
range_max=num_classes)
true_w = tf.nn.embedding_lookup(weights, labels_flat)
true_b = tf.nn.embedding_lookup(biases, labels_flat)
sampled_w = tf.nn.embedding_lookup(weights, sampled_ids)
sampled_b = tf.nn.embedding_lookup(biases, sampled_ids)
dim = tf.shape(true_w)[1:2]
new_true_w_shape = tf.concat([[-1, num_true], dim], 0)
row_wise_dots = tf.multiply(tf.expand_dims(inputs, 1), tf.reshape(true_w, new_true_w_shape))
dots_as_matrix = tf.reshape(row_wise_dots, tf.concat([[-1], dim], 0))
true_logits = tf.reshape(sum_rows(dots_as_matrix), [-1, num_true])
true_b = tf.reshape(true_b, [-1, num_true])
true_logits += true_b
sampled_b_vec = tf.reshape(sampled_b, [num_sampled])
sampled_logits = tf.matmul(inputs, sampled_w, transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(weights,
biases,
labels,
inputs,
num_sampled,
num_classes,
num_true=1,
v=None):
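    # Sigmoid cross-entropy NCE loss over true and sampled logits; `v`
    # optionally weights each positive example (defaults to all-ones).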
batch_size = int(labels.get_shape()[0])
if v is None:
v = tf.ones([batch_size, 1])
true_logits, sampled_logits = compute_sampled_logits(
weights=weights,
biases=biases,
labels=labels,
inputs=inputs,
num_sampled=num_sampled,
num_classes=num_classes,
num_true=num_true)
true_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(true_logits), logits=true_logits)
sampled_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(sampled_logits), logits=sampled_logits)
true_loss = tf.multiply(true_loss, v)
return tf.div(tf.reduce_sum(true_loss) + tf.reduce_sum(sampled_loss), tf.constant(batch_size, dtype=tf.float32))
| mit | 4,553,009,698,414,285,000 | 34.717949 | 119 | 0.604451 | false |
sebastien-forestier/explaupoppydiva | explaupoppydiva/environment/slider_env.py | 1 | 5012 | import matplotlib.pyplot as plt
from numpy import array, dot
from numpy.linalg import norm
from explauto.environment.environment import Environment
from explauto.utils import bounds_min_max
class SliderEnvironment(Environment):
''' Add a slider to an environment '''
def __init__(self, env_cls, env_config, m_mins, m_maxs, s_mins, s_maxs, slider, combined_s):
self.env = env_cls(**env_config)
self.n_params_env = len(self.env.conf.m_dims)
self.slider = slider
self.combined_s = combined_s
environment_seq = dict(m_mins = m_mins,
m_maxs = m_maxs,
s_mins = s_mins,
s_maxs = s_maxs)
#print environment_seq
Environment.__init__(self, **environment_seq)
def rest_position(self):
return self.env.rest_position
def rest_params(self):
return self.env.rest_params
def compute_motor_command(self, m_ag):
return self.env.compute_motor_command(m_ag)
def compute_slider(self, s_env):
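        # Project the environment's sensory point onto the slider segment:
        # `pos` is the normalized coordinate along the segment (0 at m_mins,
        # 1 at m_maxs) and `dist` is the distance to that projection; outside
        # the allowed range or width the slider reports its default value.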
slider_coord = array(self.slider['m_maxs']) - array(self.slider['m_mins'])
pos = dot(array(s_env) - self.slider['m_mins'], slider_coord) / dot(slider_coord, slider_coord)
dist = norm((self.slider['m_mins'] + pos*slider_coord) - array(s_env))
#print "dist", dist
if pos > self.slider['s_max'] or pos < self.slider['s_min'] or dist > self.slider['width']:
pos = self.slider['default_s']
return pos
def trajectory(self, m):
return m
def compute_sensori_effect(self, m):
s_env = self.env.update(m)
#print "slider s_env", self.env, s_env
# print "s_env", s_env
# print "s_slider", self.compute_slider(s_env)
# print "s_comb", self.combined_s(s_env, self.compute_slider(s_env))
s = self.combined_s(list(s_env), self.compute_slider(s_env))
return bounds_min_max(s, self.conf.s_mins, self.conf.s_maxs)
def plot(self, ax, m):
if hasattr(self.env, 'plot_arm'):
self.env.plot_arm(ax, m)
else:
self.env.plot(ax, m)
ax.plot([self.slider['m_mins'][0], self.slider['m_maxs'][0]], [self.slider['m_mins'][1], self.slider['m_maxs'][1]], 'b')
slider_coord = array(self.slider['m_maxs']) - array(self.slider['m_mins'])
ort_vect = array([slider_coord[1], - slider_coord[0]]) / dot(slider_coord, slider_coord)
ort_vect = self.slider['width'] * ort_vect / norm(ort_vect)
#print 'ort_vect', ort_vect
ax.plot([self.slider['m_mins'][0] + ort_vect[0],
self.slider['m_maxs'][0] + ort_vect[0]],
[self.slider['m_mins'][1] + ort_vect[1],
self.slider['m_maxs'][1] + ort_vect[1]], 'b')
ax.plot([self.slider['m_mins'][0] - ort_vect[0],
self.slider['m_maxs'][0] - ort_vect[0]],
[self.slider['m_mins'][1] - ort_vect[1],
self.slider['m_maxs'][1] - ort_vect[1]], 'b')
ax.plot([self.slider['m_mins'][0] + ort_vect[0],
self.slider['m_mins'][0] - ort_vect[0]],
[self.slider['m_mins'][1] + ort_vect[1],
self.slider['m_mins'][1] - ort_vect[1]], 'b')
ax.plot([self.slider['m_maxs'][0] + ort_vect[0],
self.slider['m_maxs'][0] - ort_vect[0]],
[self.slider['m_maxs'][1] + ort_vect[1],
self.slider['m_maxs'][1] - ort_vect[1]], 'b')
pos = self.compute_slider(self.env.update(m))
pt = self.slider['m_mins'] + pos*slider_coord
#print [pt[0]], [pt[1]]
ax.scatter([pt[0]], [pt[1]])
if __name__ == '__main__':
from explauto.environment.simple_arm import SimpleArmEnvironment
armConfig = dict(
m_mins = [-3, -3, -3],
m_maxs = [3, 3, 3],
s_mins = [-1,-1],
s_maxs = [1, 1],
length_ratio = 2,
noise = 0
)
sConfig = dict(
env_cls = SimpleArmEnvironment,
env_config = armConfig,
m_mins = armConfig['m_mins'],
m_maxs = armConfig['m_maxs'],
slider = dict(
m_mins = [0., 1],
m_maxs = [1., 0],
s_min = 0,
s_max = 1,
default_s = -1,
width = 0.1
),
combined_s = lambda s, sl: s + [sl]
)
sliderEnv = SliderEnvironment(**sConfig)
m = [0.1,1.4,1]
#m = [0,0,1]
s = sliderEnv.compute_sensori_effect(m)
print "m=", m, "s=", s
sliderEnv.plot(plt.subplot(), m)
plt.show() | gpl-3.0 | -2,884,166,587,582,036,500 | 37.267176 | 128 | 0.48344 | false |
andrew-lundgren/gwpy | gwpy/plotter/frequencyseries.py | 1 | 11434 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""This module defines plotting classes for the data series defined in
`~gwpy.frequencyseries`
"""
import warnings
import numpy
from matplotlib.projections import register_projection
from matplotlib import colors
from . import tex
from .core import Plot
from .axes import Axes
from .decorators import auto_refresh
from ..frequencyseries import (FrequencySeries, SpectralVariance)
__author__ = "Duncan Macleod <[email protected]>"
class FrequencySeriesAxes(Axes):
"""Custom `Axes` for a :class:`~gwpy.plotter.FrequencySeriesPlot`.
"""
name = 'frequencyseries'
# -------------------------------------------
# GWpy class plotting methods
@auto_refresh
def plot(self, *args, **kwargs):
"""Plot data onto these Axes.
Parameters
----------
args
a single :class:`~gwpy.frequencyseries.FrequencySeries`
(or sub-class) or standard (x, y) data arrays
kwargs
keyword arguments applicable to :meth:`~matplotib.axes.Axes.plot`
Returns
-------
Line2D
the :class:`~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
if len(args) == 1 and isinstance(args[0], FrequencySeries):
return self.plot_frequencyseries(*args, **kwargs)
elif len(args) == 1 and isinstance(args[0], SpectralVariance):
return self.plot_variance(*args, **kwargs)
else:
return super(FrequencySeriesAxes, self).plot(*args, **kwargs)
@auto_refresh
def plot_frequencyseries(self, spectrum, **kwargs):
"""Plot a :class:`~gwpy.frequencyseries.FrequencySeries` onto these axes
Parameters
----------
spectrum : :class:`~gwpy.frequencyseries.FrequencySeries`
data to plot
**kwargs
any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`
Returns
-------
Line2D
the :class:`~matplotlib.lines.Line2D` for this line layer
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
if tex.USE_TEX:
kwargs.setdefault('label', tex.label_to_latex(spectrum.name))
else:
kwargs.setdefault('label', spectrum.name)
if not kwargs.get('label', True):
kwargs.pop('label')
line = self.plot(spectrum.frequencies.value, spectrum.value, **kwargs)
if len(self.lines) == 1:
try:
self.set_xlim(*spectrum.xspan)
except ValueError:
pass
if not self.get_xlabel():
if tex.USE_TEX:
ustr = tex.unit_to_latex(spectrum.xunit)
else:
ustr = spectrum.xunit.to_string()
if ustr:
self.set_xlabel('Frequency [%s]' % ustr)
if not self.get_ylabel():
if tex.USE_TEX:
ustr = tex.unit_to_latex(spectrum.unit)
else:
ustr = spectrum.unit.to_string()
if ustr:
self.set_ylabel('[%s]' % ustr)
return line
@auto_refresh
def plot_spectrum(self, *args, **kwargs):
warnings.warn("{0}.plot_spectrum was renamed "
"{0}.plot_frequencyseries, "
"and will be removed in an upcoming release".format(
type(self).__name__))
return self.plot_frequencyseries(*args, **kwargs)
@auto_refresh
def plot_frequencyseries_mmm(self, mean_, min_=None, max_=None, alpha=0.1,
**kwargs):
"""Plot a `FrequencySeries` onto these axes, with (min, max) shaded
regions
The `mean_` `FrequencySeries` is plotted normally, while the `min_`
        and `max_` spectra are plotted lightly below and above,
with a fill between them and the mean_.
Parameters
----------
        mean_ : :class:`~gwpy.frequencyseries.FrequencySeries`
            data to plot normally
        min_ : :class:`~gwpy.frequencyseries.FrequencySeries`
            first data set to shade to mean_
        max_ : :class:`~gwpy.frequencyseries.FrequencySeries`
second data set to shade to mean_
alpha : `float`, optional
weight of filled region, ``0.0`` for transparent through ``1.0``
opaque
**kwargs
any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`
Returns
-------
artists : `tuple`
            a 5-tuple containing (`Line2D` for mean_, `Line2D` for min_,
`PolyCollection` for min_ shading, `Line2D` for max_, and
`PolyCollection` for max_ shading)
See Also
--------
:meth:`matplotlib.axes.Axes.plot`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
# plot mean
line1 = self.plot_frequencyseries(mean_, **kwargs)[0]
# plot min and max
kwargs.pop('label', None)
color = kwargs.pop('color', line1.get_color())
linewidth = kwargs.pop('linewidth', line1.get_linewidth()) / 10
if min_ is not None:
a = self.plot(min_.frequencies.value, min_.value, color=color,
linewidth=linewidth, **kwargs)
if alpha:
b = self.fill_between(min_.frequencies.value, mean_.value,
min_.value, alpha=alpha, color=color,
rasterized=kwargs.get('rasterized'))
else:
b = None
else:
a = b = None
if max_ is not None:
c = self.plot(max_.frequencies.value, max_.value, color=color,
linewidth=linewidth, **kwargs)
if alpha:
d = self.fill_between(max_.frequencies.value, mean_.value,
max_.value, alpha=alpha, color=color,
rasterized=kwargs.get('rasterized'))
else:
d = None
else:
c = d = None
return line1, a, b, c, d
@auto_refresh
def plot_spectrum_mmm(self, *args, **kwargs):
warnings.warn("{0}.plot_spectrum_mmm was renamed "
"{0}.plot_frequencyseries_mmm, "
"and will be removed in an upcoming release".format(
type(self).__name__))
return self.plot_frequencyseries_mmm(*args, **kwargs)
@auto_refresh
def plot_variance(self, specvar, norm='log', **kwargs):
"""Plot a :class:`~gwpy.frequencyseries.SpectralVariance` onto
these axes
Parameters
----------
        specvar : :class:`~gwpy.frequencyseries.SpectralVariance`
data to plot
**kwargs
            any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.pcolormesh`
Returns
-------
        QuadMesh
            the :class:`~matplotlib.collections.QuadMesh` for this layer
See Also
--------
:meth:`matplotlib.axes.Axes.pcolormesh`
            for a full description of acceptable ``*args`` and ``**kwargs``
"""
if norm == 'log':
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
norm = colors.LogNorm(vmin=vmin, vmax=vmax)
kwargs['norm'] = norm
x = numpy.concatenate((specvar.frequencies.value,
[specvar.x0.value +
specvar.dx.value * specvar.shape[0]]))
y = specvar.bins.value
X, Y = numpy.meshgrid(x, y, copy=False, sparse=True)
mesh = self.pcolormesh(X, Y, specvar.value.T, **kwargs)
if len(self.collections) == 1:
self.set_yscale('log', nonposy='mask')
self.set_xlim(x[0], x[-1])
self.set_ylim(y[0], y[-1])
# fill in zeros
if isinstance(mesh.norm, colors.LogNorm):
cmap = mesh.get_cmap()
try:
# only listed colormaps have cmap.colors
cmap.set_bad(cmap.colors[0])
except AttributeError:
pass
return mesh
register_projection(FrequencySeriesAxes)
class FrequencySeriesPlot(Plot):
"""`Figure` for displaying a `~gwpy.frequencyseries.FrequencySeries`
"""
_DefaultAxesClass = FrequencySeriesAxes
def __init__(self, *series, **kwargs):
kwargs.setdefault('projection', self._DefaultAxesClass.name)
# extract custom keyword arguments
sep = kwargs.pop('sep', False)
xscale = kwargs.pop(
'xscale', kwargs.pop('logx', True) and 'log' or 'linear')
yscale = kwargs.pop(
'yscale', kwargs.pop('logy', True) and 'log' or 'linear')
sharex = kwargs.pop('sharex', False)
sharey = kwargs.pop('sharey', False)
# separate custom keyword arguments
axargs, plotargs = self._parse_kwargs(kwargs)
# initialise figure
super(FrequencySeriesPlot, self).__init__(**kwargs)
# plot data
x0 = []
axesdata = self._get_axes_data(series, sep=sep)
for data in axesdata:
ax = self._add_new_axes(**axargs)
for fs in data:
ax.plot(fs, **plotargs)
x0.append(min([fs.df.value for fs in data]))
if 'sharex' not in axargs and sharex is True:
axargs['sharex'] = ax
if 'sharey' not in axargs and sharey is True:
axargs['sharey'] = ax
if sharex:
x0 = [min(x0)]*len(x0)
axargs.pop('sharex', None)
axargs.pop('sharey', None)
axargs.pop('projection', None)
for i, ax in enumerate(self.axes):
# format axes
for key, val in axargs.iteritems():
getattr(ax, 'set_%s' % key)(val)
# fix log frequency scale with f0 = DC
if xscale in ['log']:
xlim = list(ax.get_xlim())
if not xlim[0]:
xlim[0] = x0[i]
ax.set_xlim(*xlim)
# set axis scales
ax.set_xscale(xscale)
ax.set_yscale(yscale)
# set grid
if xscale == 'log':
ax.grid(True, axis='x', which='both')
if yscale == 'log':
ax.grid(True, axis='y', which='both')
| gpl-3.0 | -6,821,686,522,418,402,000 | 35.069401 | 80 | 0.546091 | false |
isudox/leetcode-solution | python-algorithm/leetcode/flip_binary_tree_to_match_preorder_traversal.py | 1 | 2107 | # -*- coding: utf-8 -*-
"""971. Flip Binary Tree To Match Preorder Traversal
https://leetcode.com/problems/flip-binary-tree-to-match-preorder-traversal/
Given a binary tree with N nodes, each node has a different value from
{1, ..., N}.
A node in this binary tree can be flipped by swapping the left child and the
right child of that node.
Consider the sequence of N values reported by a preorder traversal starting from
the root. Call such a sequence of N values the voyage of the tree.
(Recall that a preorder traversal of a node means we report the current
node's value, then preorder-traverse the left child, then preorder-traverse the
right child.)
Our goal is to flip the least number of nodes in the tree so that the voyage of
the tree matches the voyage we are given.
If we can do so, then return a list of the values of all nodes flipped. You may
return the answer in any order.
If we cannot do so, then return the list [-1].
Example 1:
Input: root = [1,2], voyage = [2,1]
Output: [-1]
Example 2:
Input: root = [1,2,3], voyage = [1,3,2]
Output: [1]
Example 3:
Input: root = [1,2,3], voyage = [1,2,3]
Output: []
Note:
1 <= N <= 100
"""
from common.tree_node import TreeNode
class Solution:
def flip_match_voyage(self, root, voyage):
"""
:type root: TreeNode
:type voyage: List[int]
:rtype: List[int]
"""
stack, ans = [root], []
i, size = 0, len(voyage)
while len(stack) and i < size:
temp = stack.pop()
if temp.val != voyage[i]:
break
i += 1
if i < size:
if temp.left and temp.left.val != voyage[i]:
ans.append(temp.val)
stack.append(temp.left)
if temp.right:
stack.append(temp.right)
else:
if temp.right:
stack.append(temp.right)
if temp.left:
stack.append(temp.left)
if i != size:
return [-1]
return ans
| mit | -1,886,637,052,181,223,000 | 28.676056 | 80 | 0.58187 | false |
cguZZman/onedrive-kodi-addon | service.py | 1 | 1499 | #-------------------------------------------------------------------------------
# Copyright (C) 2017 Carlos Guzman (cguZZman) [email protected]
#
# This file is part of OneDrive for Kodi
#
# OneDrive for Kodi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cloud Drive Common Module for Kodi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
from clouddrive.common.service.download import DownloadService
from clouddrive.common.service.source import SourceService
from clouddrive.common.service.utils import ServiceUtil
from resources.lib.provider.onedrive import OneDrive
from clouddrive.common.service.export import ExportService
from clouddrive.common.service.player import PlayerService
if __name__ == '__main__':
ServiceUtil.run([DownloadService(OneDrive), SourceService(OneDrive),
ExportService(OneDrive), PlayerService(OneDrive)]) | gpl-2.0 | -7,071,460,802,058,548,000 | 48.033333 | 87 | 0.677785 | false |
SciGaP/DEPRECATED-Cipres-Airavata-POC | saminda/cipres-airavata/cipres-portal/cipres-rest-service/cipresrest/testscripts/test1.py | 2 | 2706 | #!/usr/bin/env python
"""
-u
username
-p
password
-a
appid
-t
toolspec directory
-s
rest service url
-n
number of times to submit toolspec
-w
wait for submissions to finish
"""
import sys
import os
import re
import string
import subprocess
import tempfile
import getopt
import time
import requests
import xml.etree.ElementTree as ET
import pyjavaproperties as Props
import testjob
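# Example invocation (hypothetical values shown only for illustration; use your
# own CIPRES credentials, application id, toolspec directory and service URL):
#   ./test1.py -u alice -p secret -a 42 -t ./toolspec \
#       -s https://cipres.example.org/cipresrest/v1 -n 3 -w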
def main(argv=None):
if argv is None:
argv = sys.argv
user = password = appid = testspec = service = jobcount = None
waitForCompletion = False
options, remainder = getopt.getopt(argv[1:], "u:p:a:t:s:n:wh")
for opt, arg in options:
if opt in ("-u"):
user = arg
elif opt in ("-p"):
password = arg
if opt in ("-t"):
testspec = arg
elif opt in ("-s"):
service = arg
elif opt in ("-n"):
jobcount = int(arg)
elif opt in ("-w"):
waitForCompletion = True
elif opt in ("-a"):
appid = int(arg)
elif opt in ("-h"):
print __doc__
return 0
if not (user and password and testspec and service and jobcount and appid):
print __doc__
return -1
jobs = []
for i in range(jobcount):
tj = testjob.TestJob(
baseurl=service,
user=user,
credentials = (user, password),
appid=appid,
testdir = testspec
)
tj.doPost()
jobs.append(tj)
print "Submitted %s" % (tj.jobhandle)
sys.stdout.flush()
if waitForCompletion:
while True:
unfinishedJobs = 0
# jobs[:] makes a copy of the list in jobs. You need to work on a copy if
# you want to remove items from a list you're iterating over.
for job in jobs[:]:
if job.jobIsTerminal:
print "%s finished. %s." % (job.jobhandle, ('OK', 'FAILED')[job.isFailed])
sys.stdout.flush()
jobs.remove(job)
continue
else:
job.updateJobStatus()
if job.jobIsTerminal:
print "%s finished. %s." % (job.jobhandle, ('OK', 'FAILED')[job.isFailed])
sys.stdout.flush()
jobs.remove(job)
continue
unfinishedJobs += 1
if not unfinishedJobs:
break;
time.sleep(30)
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -8,775,285,948,206,499,000 | 24.055556 | 98 | 0.4915 | false |
Sirs0ri/PersonalAssistant | samantha/plugins/test2_plugin.py | 1 | 1402 | """A plugin to test loading devices. It doesn't do anything."""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
import logging
from threading import Timer
# related third party imports
# application specific imports
from samantha.core import subscribe_to
from samantha.plugins.plugin import Device
__version__ = "1.4.7"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
PLUGIN = Device("Test", False, LOGGER, __file__)
@subscribe_to("system.onstart")
def start_func(key, data):
"""Test the 'onstart' event."""
LOGGER.debug("I'm now doing something productive!")
return "I'm now doing something productive!"
@subscribe_to("test")
def test(key, data):
"""Test the 'test' event."""
def function():
"""Print "Heyho!" and a bunch of ~ around."""
print("~"*30)
print("Heyho! My command was {}.".format(key))
print(data)
print("~"*30)
thread = Timer(interval=7.0, function=function)
thread.start()
return "Processed the command {}.".format(key)
@subscribe_to("system.onexit")
def stop_func(key, data):
"""Test the 'onexit' event."""
LOGGER.debug("I'm not doing anything productive anymore.")
return "I'm not doing anything productive anymore."
| mit | 4,971,076,125,456,897,000 | 24.962963 | 79 | 0.582026 | false |
cloudmesh/vagrant | setup.py | 1 | 3460 | #!/usr/bin/env python
# ----------------------------------------------------------------------- #
# Copyright 2008-2010, Gregor von Laszewski #
# Copyright 2010-2013, Indiana University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
# ------------------------------------------------------------------------#
from __future__ import print_function
import setuptools
from setuptools import setup, find_packages
import os
import sys
from cloudmesh_vagrant import __version__
import platform
import re
import io
if sys.version_info < (2, 7, 10):
print(70 * "#")
    print("WARNING: upgrade to python 2.7.10 or above. "
          "Your version {} is not supported.".format(sys.version_info))
print(70 * "#")
command = None
this_platform = platform.system().lower()
if this_platform in ['darwin']:
command = "easy_install readline"
elif this_platform in ['windows']:
command = "pip install pyreadline"
if command is not None:
print("Install readline")
os.system(command)
requirements = [
'cloudmesh_client'
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
home = os.path.expanduser("~")
setup(
version=__version__,
name="cloudmesh_vagrant",
description="cloudmesh_vagrant - A real simple interface to virtualbox via vagrant",
long_description=read('README.rst'),
license="Apache License, Version 2.0",
author="Gregor von Laszewski",
author_email="[email protected]",
url="https://github.com/cloudmesh/cloudmesh_vagrant",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Scientific/Engineering",
"Topic :: System :: Clustering",
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries :: Python Modules",
"Environment :: Console"
],
keywords="cloud cmd commandshell plugins cloudmesh vagrant virtualbox",
packages=find_packages(),
install_requires=requirements,
include_package_data=True,
entry_points={
'console_scripts': [
'cm-vbox = cloudmesh_vagrant.cm_vbox:main',
'cm-authors = cloudmesh_client.common.GitInfo:print_authors',
],
},
)
| apache-2.0 | 4,580,480,080,358,300,700 | 36.608696 | 88 | 0.573988 | false |
Alignak-monitoring/alignak-checks-example | version.py | 1 | 1285 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2017:
# Frederic Mohier, [email protected]
#
"""
Alignak - Checks pack for EXAMPLE
"""
# Package name
__pkg_name__ = u"alignak_checks_EXAMPLE"
# Checks types for PyPI keywords
# Used for:
# - PyPI keywords
# - directory where to store files in the Alignak configuration (eg. arbiter/packs/checks_type)
__checks_type__ = u"EXAMPLE"
# Application manifest
__version__ = u"0.0.1"
__author__ = u"Your name"
__author_email__ = u"Your email address"
__copyright__ = u"(c) 2015-2017 - %s" % __author__
__license__ = u"GNU Affero General Public License, version 3"
__git_url__ = u"https://github.com/alignak-monitoring-contrib/alignak-checks-EXAMPLE"
__doc_url__ = u"http://alignak-doc.readthedocs.io/en/latest"
__description__ = u"Alignak checks pack for EXAMPLE"
__classifiers__ = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
'Natural Language :: English',
'Programming Language :: Python',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration'
]
| agpl-3.0 | 1,924,157,300,005,393,700 | 31.125 | 95 | 0.670817 | false |
splotz90/urh | src/urh/models/ProtocolLabelListModel.py | 1 | 4134 | from PyQt5.QtCore import QAbstractListModel, pyqtSignal, Qt, QModelIndex, QMimeData
from PyQt5.QtGui import QFont
from urh import constants
from urh.signalprocessing.FieldType import FieldType
from urh.signalprocessing.MessageType import MessageType
from urh.signalprocessing.ProtocoLabel import ProtocolLabel
from urh.signalprocessing.ProtocolAnalyzer import ProtocolAnalyzer
class ProtocolLabelListModel(QAbstractListModel):
protolabel_visibility_changed = pyqtSignal(ProtocolLabel)
protocol_label_name_edited = pyqtSignal()
label_removed = pyqtSignal(ProtocolLabel)
def __init__(self, proto_analyzer: ProtocolAnalyzer, controller, parent=None):
super().__init__(parent)
self.proto_analyzer = proto_analyzer
self.message_type = controller.active_message_type # type: MessageType
self.controller = controller # type: CompareFrameController
def rowCount(self, QModelIndex_parent=None, *args, **kwargs):
return len(self.message_type)
def update(self):
self.message_type = self.controller.active_message_type # type: MessageType
self.beginResetModel()
self.endResetModel()
def data(self, index, role=Qt.DisplayRole):
row = index.row()
if row >= len(self.message_type):
return
label = self.message_type[row]
if role == Qt.DisplayRole:
return label.name
elif role == Qt.CheckStateRole:
return label.show
elif role == Qt.BackgroundColorRole:
return constants.LABEL_COLORS[label.color_index]
elif role == Qt.FontRole:
font = QFont()
font.setItalic(label.field_type is None)
return font
def setData(self, index: QModelIndex, value, role=Qt.DisplayRole):
if role == Qt.CheckStateRole:
proto_label = self.message_type[index.row()]
proto_label.show = value
self.protolabel_visibility_changed.emit(proto_label)
elif role == Qt.EditRole:
proto_label = self.message_type[index.row()]
proto_label.name = value
self.message_type.change_field_type_of_label(proto_label,
self.controller.field_types_by_caption.get(value, None))
self.protocol_label_name_edited.emit()
return True
def showAll(self):
hidden_labels = [label for label in self.proto_analyzer.protocol_labels if not label.show]
for label in hidden_labels:
label.show = Qt.Checked
self.protolabel_visibility_changed.emit(label)
def hideAll(self):
visible_labels = [label for label in self.proto_analyzer.protocol_labels if label.show]
for label in visible_labels:
label.show = Qt.Unchecked
self.protolabel_visibility_changed.emit(label)
def get_label_at(self, row):
return self.message_type[row]
def delete_label_at(self, label_id: int):
try:
lbl = self.message_type[label_id]
self.message_type.remove(lbl)
self.label_removed.emit(lbl)
except IndexError:
pass
def delete_labels_at(self, start: int, end: int):
for row in range(end, start - 1, -1):
self.delete_label_at(row)
def add_labels_to_message_type(self, start: int, end: int, message_type_id: int):
for lbl in self.message_type[start:end + 1]:
self.controller.proto_analyzer.message_types[message_type_id].add_label(lbl)
self.controller.updateUI(resize_table=False)
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable | \
Qt.ItemIsEditable | Qt.ItemIsDragEnabled
def supportedDragActions(self):
return Qt.MoveAction | Qt.CopyAction
def mimeTypes(self):
return ['text/plain']
def mimeData(self, indexes):
data = "PLabels:"
data += "/".join([str(index.row()) for index in indexes])
mime_data = QMimeData()
mime_data.setText(data)
return mime_data
| gpl-3.0 | -1,866,661,335,700,646,400 | 36.243243 | 113 | 0.643445 | false |
eciis/web | backend/test/model_test/invite_institution_test.py | 1 | 5437 | # -*- coding: utf-8 -*-
from ..test_base import TestBase
from models import Invite
from models import Institution
from models import InviteInstitution
from models import User
from custom_exceptions import FieldException
from mock import patch
class InviteInstitutionTest(TestBase):
"""Test invite model."""
@classmethod
def setUp(cls):
"""Provide the base for the tests."""
cls.test = cls.testbed.Testbed()
cls.test.activate()
cls.policy = cls.datastore.PseudoRandomHRConsistencyPolicy(
probability=1)
cls.test.init_datastore_v3_stub(consistency_policy=cls.policy)
cls.test.init_memcache_stub()
initModels(cls)
def test_check_is_invite_institution_valid(self):
"""Test check_is_invite_institution_valid method."""
with self.assertRaises(FieldException):
data = {"suggestion_institution_name": None}
InviteInstitution.check_is_invite_institution_valid(data)
def test_create(self):
"""Test create method."""
created_invite = InviteInstitution.create(self.data)
stub_institution_key = created_invite.stub_institution_key
expected_invite = InviteInstitution()
expected_invite.admin_key = self.admin.key
expected_invite.is_request = False
expected_invite.institution_key = self.institution.key
expected_invite.sender_key = self.admin.key
expected_invite.sender_name = self.admin.name
expected_invite.invitee = self.user.email[0]
expected_invite.suggestion_institution_name = "new Institution"
expected_invite.stub_institution_key = stub_institution_key
self.assertEquals(
created_invite,
expected_invite,
"The created invite should be equal to the expected one"
)
def test_make(self):
"""Test make method."""
invite_institution = InviteInstitution.create(self.data)
invite_institution.put()
stub_institution = invite_institution.stub_institution_key.get()
maked_invite = invite_institution.make()
expected_maked_invite = {
'admin_name': self.admin.name,
'sender_name': self.invite.sender_name,
'key': invite_institution.key.urlsafe(),
'status': self.invite.status,
'institution_admin': self.institution.make(["name"]),
'institution': self.institution.make(InviteInstitution.INST_PROPS_TO_MAKE),
'invitee': self.user.email[0],
'suggestion_institution_name': 'new Institution',
'stub_institution': stub_institution.make([
'name', 'key', 'state'
]),
'type_of_invite': 'INSTITUTION'
}
self.assertEquals(
maked_invite,
expected_maked_invite,
"The maked invite should be equal to the expected one"
)
@patch('models.invite_institution.NotificationsQueueManager.create_notification_task')
def test_create_accept_notification(self, mock_method):
"""Test create a regular accept response notification."""
invite = InviteInstitution.create(self.data)
invite.put()
self.user.current_institution = self.institution.key
self.user.put()
id = invite.create_accept_response_notification(
'ACCEPT_INVITE_INSTITUTION', self.institution.key, invite.admin_key.urlsafe(), self.user)
mock_method.assert_called()
self.assertTrue(id != None)
@patch('models.invite_institution.NotificationsQueueManager.create_notification_task')
def test_create_system_notification(self, mock_method):
"""Test create a system notification."""
invite = InviteInstitution.create(self.data)
invite.put()
self.user.current_institution = self.institution.key
self.user.put()
id = invite.create_accept_response_notification(
'ADD_ADM_PERMISSIONS', self.institution.key, self.user.key.urlsafe())
mock_method.assert_called()
self.assertTrue(id != None)
def initModels(cls):
"""Initialize the models."""
# admin
cls.admin = User()
cls.admin.name = "admin"
cls.admin.email = ["admin@email"]
cls.admin.put()
# user
cls.user = User()
cls.user.name = "user"
cls.user.email = ["user@email"]
cls.user.put()
# New institution
cls.institution = Institution()
cls.institution.name = "institution"
cls.institution.admin = cls.admin.key
cls.institution.members = [cls.admin.key]
cls.institution.followers = [cls.admin.key]
cls.institution.put()
# update admin
cls.admin.institutions_admin = [cls.institution.key]
cls.admin.put()
# New invite
cls.invite = Invite()
cls.invite.invitee = cls.user.email[0]
cls.invite.admin_key = cls.admin.key
cls.invite.sender_key = cls.admin.key
cls.invite.sender_name = cls.admin.name
cls.invite.status = "sent"
cls.invite.institution_key = cls.institution.key
cls.invite.put()
cls.data = {
"admin_key": cls.admin.key.urlsafe(),
"is_request": False,
"institution_key": cls.institution.key.urlsafe(),
"sender_key": cls.admin.key.urlsafe(),
"sender_name": cls.admin.name,
"invitee": cls.user.email[0],
"suggestion_institution_name": "new Institution"
}
| gpl-3.0 | 3,515,197,406,887,376,000 | 34.305195 | 101 | 0.640611 | false |
karinemiras/evoman_framework | evoman/player.py | 1 | 12662 | ################################
# EvoMan FrameWork - V1.0 2016 #
# Author: Karine Miras #
# [email protected] #
################################
import sys
import numpy
import struct
import binascii
import Base
from Base.SpriteConstants import *
from Base.SpriteDefinition import *
from sensors import *
# player projectile
class Bullet_p(pygame.sprite.Sprite):
image = pygame.image.load('evoman/images/bullet_r.png')
def __init__(self, location, direction, n_twist, *groups):
super(Bullet_p, self).__init__(*groups)
self.rect = pygame.rect.Rect(location, self.image.get_size())
self.direction = direction
self.n_twist = n_twist
# fits image according to the side the player is turned to
if self.direction == 1:
self.image = pygame.image.load('evoman/images/bullet_r.png')
else:
self.image = pygame.image.load('evoman/images/bullet_l.png')
def update(self, dt, game):
        # removes bullet objects when they pass beyond the screen limits
if self.rect.right<1 or self.rect.left>736 or self.rect.top <1 or self.rect.bottom>512 :
self.kill()
game.player.twists[self.n_twist] = None
return
        self.rect.x += self.direction * 600 * dt # moving on the X axis (left or right). It adds 600*dt forward at each game loop iteration, where dt controls the frame limit.
# checks collision of player's bullet with the enemy
if self.rect.colliderect(game.enemy.rect):
            # if the enemy is not immune
if game.enemy.imune == 0:
                # enemy loses life points, according to the difficulty level of the game (the more difficult, the less it loses)
game.enemy.life = max(0, game.enemy.life-(20/game.level))
if game.enemyn == 4:
                    # makes the enemy immune to the player's shooting.
game.enemy.imune = 1
            # removes the bullet from the screen after the collision.
self.kill()
game.player.twists[self.n_twist] = None
game.enemy.hurt = 5
# player sprite
class Player(pygame.sprite.Sprite):
def __init__(self, location, enemyn, level, *groups):
super(Player, self).__init__(*groups)
self.spriteDefinition = SpriteDefinition('evoman/images/EvoManSprites.png', 0, 0, 43, 59)
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
self.rect = pygame.rect.Rect(location, self.image.get_size())
self.resting = 0
self.dy = 0
self.direction = 1
self.alternate = 1
self.gun_cooldown = 0
self.max_life = 100
self.life = self.max_life
self.atacked = 0
self.hurt = 0
self.shooting = 0
self.inwater = 0
self.twists = []
self.vx = 0
self.vy = 0
self.hy = 0
self.sensors = None
def update(self, dt, game):
        # if the enemies are not attacking with the freezing attack (which prevents the player from making any movements or attacking) and the 'start game' marker is 1.
if game.freeze_p == 0 and game.start == 1:
            # checks the water environment flag to regulate movement speed
if self.inwater == 1:
self.vx = 0.5
self.vy = 0.5
self.hy = -2000
else:
self.vx = 1
self.vy = 1
self.hy = -900
# defines game mode for player action
if game.playermode == 'human': # player controlled by keyboard/joystick
# if joystick is connected, initializes it.
if game.joy > 0:
joystick = pygame.joystick.Joystick(0)
joystick.init()
# tests if the button/key was pressed or released.
# if the player is jumping, the release stops the jump before its maximum high is achieved
press = 0
release = 0
for event in game.event:
if event.type == pygame.JOYBUTTONDOWN or event.type == pygame.KEYDOWN:
press = 1
else:
press = 0
if event.type == pygame.JOYBUTTONUP or event.type == pygame.KEYUP:
release = 1
else:
release = 0
# gets pressed key value
key = pygame.key.get_pressed()
# gets joystick value for axis x (left/right)
left = 0
if game.joy > 0:
if round(joystick.get_axis(0)) == -1:
left = 1
if key[pygame.K_LEFT]:
left = 1
right = 0
if game.joy > 0:
if round(joystick.get_axis(0)) == 1:
right = 1
if key[pygame.K_RIGHT]:
right = 1
# gets joystick/key value for jumping
jump = 0
if game.joy > 0:
if int(joystick.get_button(2)) == 1 and press == 1:
jump = 1
if key[pygame.K_SPACE] and press == 1:
jump = 1
# gets joystick/key value for shooting
shoot = 0
if game.joy > 0:
if int(joystick.get_button(3)) == 1 and press == 1:
shoot = 1
if key[pygame.K_LSHIFT] and press == 1:
shoot = 1
elif game.playermode == 'ai': # player controlled by AI algorithm
# calls the controller providing game sensors
actions = game.player_controller.control(self.sensors.get(game), game.pcont)
if len(actions) < 5:
game.print_logs("ERROR: Player controller must return 5 decision variables.")
sys.exit(0)
left = actions[0]
right = actions[1]
jump = actions[2]
shoot = actions[3]
release = actions[4]
            # if the button is released before the jump reaches its maximum height, then the player stops going up.
if release == 1 and self.resting == 0:
self.dy = 0
# copies last position state of the player
last = self.rect.copy()
# movements on the axis x (left)
if left:
self.rect.x -= 200 * dt * self.vx
self.direction = -1
# animation, running images alternation
if self.alternate == 1:
self.updateSprite(SpriteConstants.START_RUNNING, SpriteConstants.LEFT)
if self.alternate == 4 or self.alternate == 10:
self.updateSprite(SpriteConstants.RUNNING_STEP1, SpriteConstants.LEFT)
if self.alternate == 7:
self.updateSprite(SpriteConstants.RUNNING_STEP2, SpriteConstants.LEFT)
self.alternate += 1
if self.alternate > 12:
self.alternate = 1
# movements on the axis x (right)
elif right:
self.rect.x += 200 * dt * self.vx
self.direction = 1
# animation, running player images alternation
if self.alternate == 1:
self.updateSprite(SpriteConstants.START_RUNNING, SpriteConstants.RIGHT)
if self.alternate == 4 or self.alternate == 10:
self.updateSprite(SpriteConstants.RUNNING_STEP1, SpriteConstants.RIGHT)
if self.alternate == 7:
self.updateSprite(SpriteConstants.RUNNING_STEP2, SpriteConstants.RIGHT)
self.alternate += 1
if self.alternate > 12:
self.alternate = 1
else:
# animation, standing up images
if self.direction == -1:
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.STANDING, SpriteConstants.RIGHT)
# if player is touching the floor, he is allowed to jump
if self.resting == 1 and jump == 1:
self.dy = self.hy
# gravity
self.dy = min(400, self.dy + 100)
self.rect.y += self.dy * dt * self.vy
# changes the image when player jumps
if self.resting == 0 :
if self.direction == -1:
self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.JUMPING, SpriteConstants.RIGHT)
new = self.rect # copies new (after movement) position state of the player
            # controls screen walls and platform limits against the player
self.resting = 0
for cell in game.tilemap.layers['triggers'].collide(new, 'blockers'):
blockers = cell['blockers']
if 'l' in blockers and last.right <= cell.left and new.right > cell.left and last.bottom>cell.top:
new.right = cell.left
if 'r' in blockers and last.left >= cell.right and new.left < cell.right and last.bottom>cell.top:
new.left = cell.right
if 't' in blockers and last.bottom <= cell.top and new.bottom > cell.top:
self.resting = 1 # player touches the floor
new.bottom = cell.top
self.dy = 0
if 'b' in blockers and last.top >= cell.bottom and new.top < cell.bottom:
new.top = cell.bottom
# shoots, limiting time between bullets.
if shoot == 1 and not self.gun_cooldown:
self.shooting = 5
                self.atacked = 1 # marks that the player has attacked the enemy
# creates bullets objects according to the direction.
if self.direction > 0:
self.twists.append(Bullet_p(self.rect.midright, 1, len(self.twists), game.sprite_p))
else:
self.twists.append(Bullet_p(self.rect.midleft, -1, len(self.twists), game.sprite_p))
                self.gun_cooldown = 0.4 # starts the cooldown before the next bullet is allowed
# sound effects
if game.sound == "on" and game.playermode == "human":
sound = pygame.mixer.Sound('evoman/sounds/scifi003.wav')
c = pygame.mixer.Channel(2)
c.set_volume(1)
c.play(sound)
else:
self.atacked = 0
            # decreases the cooldown timer that limits bullet firing
self.gun_cooldown = max(0, self.gun_cooldown - dt)
# hurt player animation
if self.hurt > 0:
if self.direction == -1:
self.updateSprite(SpriteConstants.HURTING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.HURTING, SpriteConstants.RIGHT)
self.hurt -= 1
self.hurt = max(0,self.hurt)
self.shooting -= 1
self.shooting = max(0,self.shooting)
# shooting animation
if self.shooting > 0:
if self.resting == 0:
if self.direction == -1:
self.updateSprite(SpriteConstants.SHOOTING_JUMPING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.SHOOTING_JUMPING, SpriteConstants.RIGHT)
else:
if self.direction == -1:
self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.LEFT)
else:
self.updateSprite(SpriteConstants.SHOOTING, SpriteConstants.RIGHT)
            # kills the player in case he touches killer objects, like spikes.
for cell in game.tilemap.layers['triggers'].collide(self.rect, 'killers'):
game.player.life = 0
# focuses screen center on player
game.tilemap.set_focus(new.x, new.y)
else:
game.tilemap.set_focus(self.rect.x, self.rect.y)
def updateSprite(self, state, direction):
self.image = self.spriteDefinition.getImage(state, direction)
| cc0-1.0 | 2,649,441,619,843,990,500 | 35.074074 | 192 | 0.525746 | false |
VarunRaval48/SignCode | java.sign/TestScripts/Valid/python/iterateChangeParameterPython.py | 1 | 3261 |
# ****************************************************************************
# Copyright (c) 2015 UT-Battelle, LLC.
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Initial API and implementation and/or initial documentation - Kasper
# Gammeltoft, Jay Jay Billings
#
# This is an example script designed to show how to use ease with ICE. It
# creates several new Reflectivity Models and changes the thickness parameter
# to show the effect that creates.
# ****************************************************************************
# Load the Platform module for accessing OSGi services
loadModule('/System/Platform')
# Get the core service from ICE for creating and accessing objects.
coreService = getService(org.eclipse.ice.core.iCore.ICore);
# Set a initial value for the thickness of the nickel layer. This will be doubled
# for each iteration to show how this parameter effects the model
nickelThickness = 250;
for i in xrange(1, 5):
# Create the reflectivity model to be used and get its reference. The create item
# method will return a string representing the number of that item, so use int() to
# convert it to an integer.
reflectModel = coreService.getItem(int(coreService.createItem("Reflectivity Model")))
# Get the nickel layer from the model. It should be in the list, which is component 2,
# and it is the third layer in that list (which is item 2 as the list is zero based).
listComp = reflectModel.getComponent(2);
nickel = listComp.get(2);
nickel.setProperty("Thickness (A)", nickelThickness);
nickelThickness += 250;
# Finally process the model to get the results.
coreService.processItem(reflectModel.getId(), "Calculate Reflectivity", 1);
"""*****BEGIN SIGNSTURE********
MCwCFH7564DnEUccn+cSKT0mG4W+Ew/uAhQfwUFkl2q3L6dcuDQK62ZSE/ujQw==
MIIDNzCCAvWgAwIBAgIELNrJgDALBgcqhkjOOAQDBQAwbTEL
MAkGA1UEBhMCSU4xEDAOBgNVBAgTB0d1amFyYXQxEjAQBgNV
BAcTCUFobWVkYWJhZDEQMA4GA1UEChMHVW5rbm93bjEQMA4G
A1UECxMHVW5rbm93bjEUMBIGA1UEAxMLVmFydW4gUmF2YWww
HhcNMTYwMzA1MTE1NjUwWhcNMTYwNjAzMTE1NjUwWjBtMQsw
CQYDVQQGEwJJTjEQMA4GA1UECBMHR3VqYXJhdDESMBAGA1UE
BxMJQWhtZWRhYmFkMRAwDgYDVQQKEwdVbmtub3duMRAwDgYD
VQQLEwdVbmtub3duMRQwEgYDVQQDEwtWYXJ1biBSYXZhbDCC
AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu
7OTn9hG3UjzvRADDHj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2
y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gEexAiwk+7qdf+t8Yb
+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii
Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfh
oIXWmz3ey7yrXDa4V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrU
WU/mcQcQgYC0SRZxI+hMKBYTt88JMozIpuE8FnqLVHyNKOCj
rh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl
nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDqLrJZitkj0fqO
RQ/kdKtwHK4Fq6kXfGedp5umydmCVqrIkuCKuw6X2P5gX4Vv
0kqTEG2iWL7Hv3iUCmtaCeKYLSlIyaloJMYPwgcKxWYYMtXn
njfoOAxHywwXxPAygkR/r9TH1VrUSKjvuGvOxdjSNnezjsVL
VEyIXiO76ZfawKMhMB8wHQYDVR0OBBYEFJw/5/p+5vXMZPXx
ZLBh9YLK/zr4MAsGByqGSM44BAMFAAMvADAsAhRA44+6n9Ya
UTnckDGsbZIv450sVAIUA1otxObPsQaTs1EcOEEqODrNHCY=
********END SIGNSTURE*****""" | gpl-3.0 | 5,838,214,338,211,463,000 | 38.780488 | 90 | 0.767863 | false |
jenshnielsen/hemelb | Tools/setuptool/HemeLbSetupTool/View/VectorCtrl.py | 1 | 2871 | #
# Copyright (C) University College London, 2007-2012, all rights reserved.
#
# This file is part of HemeLB and is provided to you under the terms of
# the GNU LGPL. Please see LICENSE in the top level directory for full
# details.
#
import wx
# from wx.lib.masked import NumCtrl, EVT_NUM
from HemeLbSetupTool.Bindings.Translators import FloatTranslator, NoneToValueTranslator
from HemeLbSetupTool.Bindings.WxMappers import WxWidgetMapper, Mapper
from HemeLbSetupTool.View.Layout import H
def ForwardGet(func):
    def Get(self):
return tuple(getattr(getattr(self, coord), func.func_name)() for coord in ('x', 'y', 'z'))
Get.func_name = func.func_name
return Get
def ForwardSet(func):
def Set(self, val):
for coord in ('x', 'y', 'z'):
setter = getattr(getattr(self, coord), func.func_name)
setter(val)
continue
return
Set.func_name = func.func_name
return Set
class VectorCtrl(wx.Panel):
"""Simple container of three TextCtrl's for a vector quantity.
"""
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# self.x = NumCtrl(parent, style=wx.TE_PROCESS_ENTER, integerWidth=3, fractionWidth=3)
# self.y = NumCtrl(parent, style=wx.TE_PROCESS_ENTER, integerWidth=3, fractionWidth=3)
# self.z = NumCtrl(parent, style=wx.TE_PROCESS_ENTER, integerWidth=3, fractionWidth=3)
self.x = wx.TextCtrl(self, size=(50,22))
self.y = wx.TextCtrl(self, size=(50,22))
self.z = wx.TextCtrl(self, size=(50,22))
sizer = H((self.x, 1, wx.EXPAND),
(self.y, 1, wx.EXPAND),
(self.z, 1, wx.EXPAND)).create()
self.SetSizer(sizer)
return
@ForwardSet
def SetBackgroundColour(): return
@ForwardGet
def GetBackgroundColour(): return
@ForwardSet
def SetEditable(): return
pass
class VectorCtrlMapper(WxWidgetMapper):
"""Widget mapper for VectorCtrls.
"""
def __init__(self, widget, key, event,
translator=NoneToValueTranslator(float('nan'),
inner=FloatTranslator())
):
# We want to skip the WxWidgetMapper's init for now as the
# VectorCtrl typically won't have the required getters and
# setters. On binding, this one mapper is turned into three
# standard mappers anyway.
Mapper.__init__(self, translator=translator)
self.widget = widget
self.key = key
self.event = event
return
def CreateSubMapper(self, component):
return WxWidgetMapper(getattr(self.widget, component),
self.key, self.event,
translator=self.translator)
pass
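# Illustrative sketch (not part of the original module): embeds a VectorCtrl
# in a bare wx.Frame and fills in some coordinate values. The frame title and
# the literal coordinate strings below are assumptions for demonstration only.
def example_vector_ctrl():
    app = wx.App(False)
    frame = wx.Frame(None, title='VectorCtrl demo')
    vector = VectorCtrl(frame)
    # each component is a plain wx.TextCtrl, so SetValue fills it directly
    for ctrl, value in zip((vector.x, vector.y, vector.z), ('1.0', '2.0', '3.0')):
        ctrl.SetValue(value)
    frame.Show()
    return app, frame, vector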
| lgpl-3.0 | -1,803,520,995,720,174,000 | 32 | 98 | 0.604667 | false |
achanda/refstack | refstack/tools/tempest_subunit_test_result.py | 1 | 7924 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subunit
import testtools
import unittest
class TempestSubunitTestResultBase(testtools.TestResult):
"""Class to process subunit stream.
This class is derived from testtools.TestResult.
This class overrides all the inherited addXXX methods
to call the new _process_result() method to process the data.
This class is designed to be a base class.
    The _process_result() method should be overridden by the
derived class to customize the processing.
"""
result_type = ["SUCCESS", "FAILURE", "ERROR", "SKIP"]
def __init__(self, stream, descriptions, verbosity):
"""Initialize with super class signature."""
super(TempestSubunitTestResultBase, self).__init__()
def _process_result(self, result_type, testcase, *arg):
"""Process the data.
The value of parameter "result_type" can be SUCCESS, FAILURE,
ERROR, or SKIP.
It can be used to determine from which add method this is called.
"""
pass
def addSuccess(self, testcase):
"""Overwrite super class method for additional data processing."""
super(TempestSubunitTestResultBase, self).addSuccess(testcase)
self._process_result(self.result_type[0], testcase)
def addFailure(self, testcase, err):
"""Overwrite super class method for additional data processing."""
if testcase.id() == 'process-returncode':
return
super(TempestSubunitTestResultBase, self).addFailure(testcase, err)
self._process_result(self.result_type[1], testcase, err)
def addError(self, testcase, err):
"""Overwrite super class method for additional data processing."""
super(TempestSubunitTestResultBase, self).addFailure(testcase, err)
self._process_result(self.result_type[2], testcase, err)
def addSkip(self, testcase, reason=None, details=None):
"""Overwrite super class method for additional data processing."""
super(TempestSubunitTestResultBase,
self).addSkip(testcase, reason, details)
self._process_result(self.result_type[3], testcase, reason, details)
def startTest(self, testcase):
"""Overwrite super class method for additional data processing."""
self.start_time = self._now()
super(TempestSubunitTestResultBase, self).startTest(testcase)
class TempestSubunitTestResult(TempestSubunitTestResultBase):
"""Process subunit stream and save data into two dictionary objects.
1) The result dictionary object:
results={testcase_id: [status, elapsed],
testcase_id: [status, elapsed],
...}
testcase_id: the id fetched from subunit data.
For Tempest test: testcase_id = test_class_name + test_name
status: status of the testcase (PASS, FAIL, FAIL_SETUP, ERROR, SKIP)
elapsed: testcase elapsed time
2) The summary dictionary object:
summary={"PASS": count, "FAIL": count, "FAIL_SETUP: count",
"ERROR": count, "SKIP": count, "Total": count}
count: the number of occurrence
"""
    def __init__(self, stream, descriptions, verbosity):
        """Initialize with super class signature."""
super(TempestSubunitTestResult, self).__init__(stream, descriptions,
verbosity)
self.start_time = None
self.status = ["PASS", "FAIL", "FAIL_SETUP", "ERROR", "SKIP"]
self.results = {}
self.summary = {self.status[0]: 0, self.status[1]: 0,
self.status[2]: 0, self.status[3]: 0,
self.status[4]: 0, "Total": 0}
def _process_result(self, result_type, testcase, *arg):
"""Process and append data to dictionary objects."""
testcase_id = testcase.id()
elapsed = (self._now() - self.start_time).total_seconds()
status = result_type
# Convert "SUCCESS" to "PASS"
# Separate "FAILURE" into "FAIL" and "FAIL_SETUP"
if status == self.result_type[0]:
status = self.status[0]
elif status == self.result_type[1]:
if "setUpClass" in testcase_id:
status = self.status[2]
testcase_id = '%s.setUpClass' % \
(re.search('\((.*)\)', testcase_id).group(1))
else:
status = self.status[1]
self.results.setdefault(testcase_id, [])
self.results[testcase_id] = [status, elapsed]
self.summary[status] += 1
self.summary["Total"] += 1
class TempestSubunitTestResultTuples(TempestSubunitTestResult):
"""Process subunit stream and save data into two dictionary objects.
1) The result dictionary object:
results={test_classname: [(test_name, status, elapsed),
(test_name, status, elapsed),...],
test_classname: [(test_name, status, elapsed),
(test_name, status, elapsed),...],
...}
status: status of the testcase (PASS, FAIL, FAIL_SETUP, ERROR, SKIP)
elapsed: testcase elapsed time
2) The summary dictionary object:
summary={"PASS": count, "FAIL": count, "FAIL_SETUP: count",
"ERROR": count, "SKIP": count, "Total": count}
count: the number of occurrence
"""
def _process_result(self, result_type, testcase, *arg):
"""Process and append data to dictionary objects."""
testcase_id = testcase.id()
elapsed = round((self._now() - self.start_time).total_seconds(), 2)
status = result_type
# Convert "SUCCESS" to "PASS"
# Separate "FAILURE" into "FAIL" and "FAIL_SETUP"
if status == self.result_type[0]:
status = self.status[0]
elif status == self.result_type[1]:
if "setUpClass" in testcase_id:
status = self.status[2]
testcase_id = '%s.setUpClass' % \
(re.search('\((.*)\)', testcase_id).group(1))
else:
status = self.status[1]
classname, testname = testcase_id.rsplit('.', 1)
self.results.setdefault(classname, [])
self.results[classname].append((testname, status, elapsed))
self.summary[status] += 1
self.summary["Total"] += 1
class ProcessSubunitData():
"""A class to replay subunit data from a stream."""
result = None
def __init__(self, in_stream, test_result_class_name=
TempestSubunitTestResult):
"""Read and process subunit data from a stream.
Save processed data into a class named TempestSubunitTestResult
which is a class derived from unittest.TestResults.
"""
test = subunit.ProtocolTestCase(in_stream, passthrough=None)
runner = unittest.TextTestRunner(verbosity=2, resultclass=
test_result_class_name)
#Run (replay) the test from subunit stream.
        #runner.run will return an object of type "test_result_class_name"
self.result = runner.run(test)
def get_result(self):
"""Return an object of type test_result_class_name."""
return self.result
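# Illustrative helper (not part of the original module): replays a subunit
# stream stored on disk and returns the summary dictionary. The default file
# name is an assumption used only for demonstration.
def example_replay_subunit(file_name='tempest_results.subunit'):
    with open(file_name, 'rb') as in_stream:
        processor = ProcessSubunitData(in_stream, TempestSubunitTestResult)
        result = processor.get_result()
    return result.summary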
| apache-2.0 | -8,004,023,840,272,725,000 | 38.422886 | 78 | 0.614967 | false |
davidfokkema/artist | demo/demo_histogram_fit.py | 1 | 1160 | import numpy as np
import scipy.optimize
import scipy.stats
from artist import Plot
def main():
# Draw random numbers from the normal distribution
np.random.seed(1)
N = np.random.normal(size=2000)
# define bin edges
edge = 5
bin_width = .1
bins = np.arange(-edge, edge + .5 * bin_width, bin_width)
# build histogram and x, y values at the center of the bins
n, bins = np.histogram(N, bins=bins)
x = (bins[:-1] + bins[1:]) / 2
y = n
# fit normal distribution pdf to data
f = lambda x, N, mu, sigma: N * scipy.stats.norm.pdf(x, mu, sigma)
popt, pcov = scipy.optimize.curve_fit(f, x, y)
print("Parameters from fit (N, mu, sigma):", popt)
# make graph
graph = Plot()
# graph histogram
graph.histogram(n, bins)
# graph model with fit parameters
x = np.linspace(-edge, edge, 100)
graph.plot(x, f(x, *popt), mark=None)
# set labels and limits
graph.set_xlabel("value")
graph.set_ylabel("count")
graph.set_label("Fit to data")
graph.set_xlimits(-6, 6)
# save graph to file
graph.save('histogram-fit')
if __name__ == '__main__':
main()
| gpl-3.0 | 6,107,514,388,074,478,000 | 22.673469 | 70 | 0.612931 | false |
orbingol/NURBS-Python | geomdl/exchange.py | 1 | 33428 | """
.. module:: exchange
:platform: Unix, Windows
:synopsis: Provides CAD exchange and interoperability functions
.. moduleauthor:: Onur Rauf Bingol <[email protected]>
"""
import os
import struct
import json
from io import StringIO
from . import compatibility, operations, elements, linalg
from . import _exchange as exch
from .exceptions import GeomdlException
from ._utilities import export
@export
def import_txt(file_name, two_dimensional=False, **kwargs):
""" Reads control points from a text file and generates a 1-dimensional list of control points.
The following code examples illustrate importing different types of text files for curves and surfaces:
.. code-block:: python
:linenos:
# Import curve control points from a text file
curve_ctrlpts = exchange.import_txt(file_name="control_points.txt")
# Import surface control points from a text file (1-dimensional file)
surf_ctrlpts = exchange.import_txt(file_name="control_points.txt")
# Import surface control points from a text file (2-dimensional file)
surf_ctrlpts, size_u, size_v = exchange.import_txt(file_name="control_points.txt", two_dimensional=True)
If argument ``jinja2=True`` is set, then the input file is processed as a `Jinja2 <http://jinja.pocoo.org/>`_
template. You can also use the following convenience template functions which correspond to the given mathematical
equations:
* ``sqrt(x)``: :math:`\\sqrt{x}`
* ``cubert(x)``: :math:`\\sqrt[3]{x}`
* ``pow(x, y)``: :math:`x^{y}`
You may set the file delimiters using the keyword arguments ``separator`` and ``col_separator``, respectively.
``separator`` is the delimiter between the coordinates of the control points. It could be comma
``1, 2, 3`` or space ``1 2 3`` or something else. ``col_separator`` is the delimiter between the control
points and is only valid when ``two_dimensional`` is ``True``. Assuming that ``separator`` is set to space, then
    ``col_separator`` could be semi-colon ``1 2 3; 4 5 6`` or pipe ``1 2 3| 4 5 6`` or comma ``1 2 3, 4 5 6`` or
something else.
The defaults for ``separator`` and ``col_separator`` are *comma (,)* and *semi-colon (;)*, respectively.
The following code examples illustrate the usage of the keyword arguments discussed above.
.. code-block:: python
:linenos:
# Import curve control points from a text file delimited with space
curve_ctrlpts = exchange.import_txt(file_name="control_points.txt", separator=" ")
# Import surface control points from a text file (2-dimensional file) w/ space and comma delimiters
surf_ctrlpts, size_u, size_v = exchange.import_txt(file_name="control_points.txt", two_dimensional=True,
separator=" ", col_separator=",")
Please note that this function does not check whether the user set delimiters to the same value or not.
:param file_name: file name of the text file
:type file_name: str
:param two_dimensional: type of the text file
:type two_dimensional: bool
:return: list of control points, if two_dimensional, then also returns size in u- and v-directions
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
# Read file
content = exch.read_file(file_name)
# Are we using a Jinja2 template?
j2tmpl = kwargs.get('jinja2', False)
if j2tmpl:
content = exch.process_template(content)
# File delimiters
col_sep = kwargs.get('col_separator', ";")
sep = kwargs.get('separator', ",")
return exch.import_text_data(content, sep, col_sep, two_dimensional)
@export
def export_txt(obj, file_name, two_dimensional=False, **kwargs):
""" Exports control points as a text file.
For curves the output is always a list of control points. For surfaces, it is possible to generate a 2-dimensional
control point output file using ``two_dimensional``.
Please see :py:func:`.exchange.import_txt()` for detailed description of the keyword arguments.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: file name of the text file to be saved
:type file_name: str
:param two_dimensional: type of the text file (only works for Surface objects)
:type two_dimensional: bool
:raises GeomdlException: an error occurred writing the file
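    A minimal usage sketch is given below; ``surf`` stands for any previously constructed spline geometry and the
    file name is a placeholder.
    .. code-block:: python
        :linenos:
        # Export control points of "surf" as a 2-dimensional text file
        exchange.export_txt(surf, file_name="control_points.txt", two_dimensional=True)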
"""
# Check if the user has set any control points
if obj.ctrlpts is None or len(obj.ctrlpts) == 0:
raise exch.GeomdlException("There are no control points to save!")
# Check the usage of two_dimensional flag
if obj.pdimension == 1 and two_dimensional:
# Silently ignore two_dimensional flag
two_dimensional = False
# File delimiters
col_sep = kwargs.get('col_separator', ";")
sep = kwargs.get('separator', ",")
content = exch.export_text_data(obj, sep, col_sep, two_dimensional)
return exch.write_file(file_name, content)
@export
def import_csv(file_name, **kwargs):
""" Reads control points from a CSV file and generates a 1-dimensional list of control points.
It is possible to use a different value separator via ``separator`` keyword argument. The following code segment
illustrates the usage of ``separator`` keyword argument.
.. code-block:: python
:linenos:
# By default, import_csv uses 'comma' as the value separator
ctrlpts = exchange.import_csv("control_points.csv")
# Alternatively, it is possible to import a file containing tab-separated values
ctrlpts = exchange.import_csv("control_points.csv", separator="\\t")
    The only difference between this function and :py:func:`.exchange.import_txt()` is that it skips the first line of the input
file which generally contains the column headings.
:param file_name: file name of the text file
:type file_name: str
:return: list of control points
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
# File delimiters
sep = kwargs.get('separator', ",")
content = exch.read_file(file_name, skip_lines=1)
return exch.import_text_data(content, sep)
@export
def export_csv(obj, file_name, point_type='evalpts', **kwargs):
""" Exports control points or evaluated points as a CSV file.
:param obj: a spline geometry object
:type obj: abstract.SplineGeometry
:param file_name: output file name
:type file_name: str
:param point_type: ``ctrlpts`` for control points or ``evalpts`` for evaluated points
:type point_type: str
:raises GeomdlException: an error occurred writing the file
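    The following sketch illustrates a possible call; ``crv`` is assumed to be an already evaluated curve and the
    file name is a placeholder.
    .. code-block:: python
        :linenos:
        # Export the evaluated points of "crv" to a CSV file
        exchange.export_csv(crv, file_name="evaluated_points.csv", point_type="evalpts")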
"""
if not 0 < obj.pdimension < 3:
raise exch.GeomdlException("Input object should be a curve or a surface")
# Pick correct points from the object
if point_type == 'ctrlpts':
points = obj.ctrlptsw if obj.rational else obj.ctrlpts
elif point_type == 'evalpts':
points = obj.evalpts
else:
raise exch.GeomdlException("Please choose a valid point type option. Possible types: ctrlpts, evalpts")
# Prepare CSV header
dim = len(points[0])
line = "dim "
for i in range(dim-1):
line += str(i + 1) + ", dim "
line += str(dim) + "\n"
# Prepare values
for pt in points:
line += ",".join([str(p) for p in pt]) + "\n"
# Write to file
return exch.write_file(file_name, line)
@export
def import_cfg(file_name, **kwargs):
""" Imports curves and surfaces from files in libconfig format.
.. note::
Requires `libconf <https://pypi.org/project/libconf/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
return libconf.loads(data)
# Check if it is possible to import 'libconf'
try:
import libconf
except ImportError:
raise exch.GeomdlException("Please install 'libconf' package to use libconfig format: pip install libconf")
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
@export
def export_cfg(obj, file_name):
""" Exports curves and surfaces in libconfig format.
.. note::
Requires `libconf <https://pypi.org/project/libconf/>`_ package.
Libconfig format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
return libconf.dumps(data)
# Check if it is possible to import 'libconf'
try:
import libconf
except ImportError:
raise exch.GeomdlException("Please install 'libconf' package to use libconfig format: pip install libconf")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data)
@export
def import_yaml(file_name, **kwargs):
""" Imports curves and surfaces from files in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
def callback(data):
yaml = YAML()
return yaml.load(data)
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
@export
def export_yaml(obj, file_name):
""" Exports curves and surfaces in YAML format.
.. note::
Requires `ruamel.yaml <https://pypi.org/project/ruamel.yaml/>`_ package.
YAML format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
def callback(data):
# Ref: https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
stream = StringIO()
yaml = YAML()
yaml.dump(data, stream)
return stream.getvalue()
# Check if it is possible to import 'ruamel.yaml'
try:
from ruamel.yaml import YAML
except ImportError:
raise exch.GeomdlException("Please install 'ruamel.yaml' package to use YAML format: pip install ruamel.yaml")
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data)
@export
def import_json(file_name, **kwargs):
""" Imports curves and surfaces from files in JSON format.
Use ``jinja2=True`` to activate Jinja2 template processing. Please refer to the documentation for details.
:param file_name: name of the input file
:type file_name: str
:return: a list of rational spline geometries
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
def callback(data):
return json.loads(data)
# Get keyword arguments
delta = kwargs.get('delta', -1.0)
use_template = kwargs.get('jinja2', False)
# Read file
file_src = exch.read_file(file_name)
# Import data
return exch.import_dict_str(file_src=file_src, delta=delta, callback=callback, tmpl=use_template)
@export
def export_json(obj, file_name):
""" Exports curves and surfaces in JSON format.
JSON format is also used by the `geomdl command-line application <https://github.com/orbingol/geomdl-cli>`_
as a way to input shape data from the command line.
:param obj: input geometry
:type obj: abstract.SplineGeometry, multi.AbstractContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
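    A minimal usage sketch; ``surf`` and the output file name are placeholders.
    .. code-block:: python
        :linenos:
        # Export a spline geometry (or a container) in JSON format
        exchange.export_json(surf, file_name="surface.json")
        # The file can later be re-imported with exchange.import_json("surface.json")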
"""
def callback(data):
return json.dumps(data, indent=4)
# Export data
exported_data = exch.export_dict_str(obj=obj, callback=callback)
# Write to file
return exch.write_file(file_name, exported_data)
@export
def import_obj(file_name, **kwargs):
""" Reads .obj files and generates faces.
Keyword Arguments:
* ``callback``: reference to the function that processes the faces for customized output
The structure of the callback function is shown below:
.. code-block:: python
def my_callback_function(face_list):
# "face_list" will be a list of elements.Face class instances
# The function should return a list
return list()
:param file_name: file name
:type file_name: str
:return: output of the callback function (default is a list of faces)
:rtype: list
"""
def default_callback(face_list):
return face_list
# Keyword arguments
callback_func = kwargs.get('callback', default_callback)
# Read and process the input file
content = exch.read_file(file_name)
content_arr = content.split("\n")
# Initialize variables
on_face = False
vertices = []
triangles = []
faces = []
# Index values
vert_idx = 1
tri_idx = 1
face_idx = 1
# Loop through the data
for carr in content_arr:
carr = carr.strip()
data = carr.split(" ")
data = [d.strip() for d in data]
if data[0] == "v":
if on_face:
on_face = not on_face
face = elements.Face(*triangles, id=face_idx)
faces.append(face)
face_idx += 1
vertices[:] = []
triangles[:] = []
vert_idx = 1
tri_idx = 1
vertex = elements.Vertex(*data[1:], id=vert_idx)
vertices.append(vertex)
vert_idx += 1
if data[0] == "f":
on_face = True
triangle = elements.Triangle(*[vertices[int(fidx) - 1] for fidx in data[1:]], id=tri_idx)
triangles.append(triangle)
tri_idx += 1
    # Process the final face
if triangles:
face = elements.Face(*triangles, id=face_idx)
faces.append(face)
# Return the output of the callback function
return callback_func(faces)
@export
def export_obj(surface, file_name, **kwargs):
""" Exports surface(s) as a .obj file.
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 2*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
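    A possible call is sketched below; ``surf`` and the output file name are placeholders.
    .. code-block:: python
        :linenos:
        # Export the tessellated surface as a Wavefront .obj file with vertex normals
        exchange.export_obj(surf, "surface.obj", vertex_normals=True)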
"""
content = export_obj_str(surface, **kwargs)
return exch.write_file(file_name, content)
def export_obj_str(surface, **kwargs):
""" Exports surface(s) as a .obj file (string).
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 2*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .obj file generated
:rtype: str
"""
# Get keyword arguments
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
include_vertex_normal = kwargs.get('vertex_normals', False)
include_param_vertex = kwargs.get('parametric_vertices', False)
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
# Create the string and start adding triangulated surface points
line = "# Generated by geomdl\n"
vertex_offset = 0 # count the vertices to update the face numbers correctly
# Initialize lists for geometry data
str_v = [] # vertices
str_vn = [] # vertex normals
str_vp = [] # parameter space vertices
str_f = [] # faces
# Loop through SurfaceContainer object
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
vertices = srf.tessellator.vertices
triangles = srf.tessellator.faces
# Collect vertices
for vert in vertices:
temp = "v " + str(vert.x) + " " + str(vert.y) + " " + str(vert.z) + "\n"
str_v.append(temp)
# Collect parameter space vertices
if include_param_vertex:
for vert in vertices:
temp = "vp " + str(vert.uv[0]) + " " + str(vert.uv[1]) + "\n"
str_vp.append(temp)
# Compute vertex normals
if include_vertex_normal:
for vert in vertices:
sn = operations.normal(srf, vert.uv)
temp = "vn " + str(sn[1][0]) + " " + str(sn[1][1]) + " " + str(sn[1][2]) + "\n"
str_vn.append(temp)
# Collect faces (1-indexed)
for t in triangles:
vl = t.data
temp = "f " + \
str(vl[0] + 1 + vertex_offset) + " " + \
str(vl[1] + 1 + vertex_offset) + " " + \
str(vl[2] + 1 + vertex_offset) + "\n"
str_f.append(temp)
# Update vertex offset
vertex_offset = len(str_v)
# Write all collected data to the return string
for lv in str_v:
line += lv
for lvn in str_vn:
line += lvn
for lvp in str_vp:
line += lvp
for lf in str_f:
line += lf
return line
@export
def export_stl(surface, file_name, **kwargs):
""" Exports surface(s) as a .stl file in plain text or binary format.
Keyword Arguments:
* ``binary``: flag to generate a binary STL file. *Default: True*
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
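    For illustration only (``surf`` and the output file name are placeholders):
    .. code-block:: python
        :linenos:
        # Export the tessellated surface as a plain text (non-binary) STL file
        exchange.export_stl(surf, "surface.stl", binary=False)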
"""
binary = kwargs.get('binary', True)
if 'binary' in kwargs:
kwargs.pop('binary')
content = export_stl_str(surface, binary=binary, **kwargs)
return exch.write_file(file_name, content, binary=binary)
def export_stl_str(surface, **kwargs):
""" Exports surface(s) as a .stl file in plain text or binary format (string).
Keyword Arguments:
* ``binary``: flag to generate a binary STL file. *Default: False*
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: False*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .stl file generated
:rtype: str
"""
binary = kwargs.get('binary', False)
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
triangles_list = []
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
triangles = srf.tessellator.faces
triangles_list += triangles
# Write triangle list to ASCII or binary STL file
if binary:
line = b'\0' * 80 # header
line += struct.pack('<i', len(triangles_list)) # number of triangles
for t in triangles_list:
line += struct.pack('<3f', *linalg.triangle_normal(t)) # normal
for v in t.vertices:
line += struct.pack('<3f', *v.data) # vertices
line += b'\0\0' # attribute byte count
else:
line = "solid Surface\n"
for t in triangles_list:
nvec = linalg.triangle_normal(t)
line += "\tfacet normal " + str(nvec[0]) + " " + str(nvec[1]) + " " + str(nvec[2]) + "\n"
line += "\t\touter loop\n"
for v in t.vertices:
line += "\t\t\tvertex " + str(v.x) + " " + str(v.y) + " " + str(v.z) + "\n"
line += "\t\tendloop\n"
line += "\tendfacet\n"
line += "endsolid Surface\n"
return line
@export
def export_off(surface, file_name, **kwargs):
""" Exports surface(s) as a .off file.
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
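    An example call might look as follows; ``surf`` and the output file name are placeholders.
    .. code-block:: python
        :linenos:
        # Export the tessellated surface in Object File Format (OFF)
        exchange.export_off(surf, "surface.off")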
"""
content = export_off_str(surface, **kwargs)
return exch.write_file(file_name, content)
def export_off_str(surface, **kwargs):
""" Exports surface(s) as a .off file (string).
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of points sampled on the surface. *Default: 1*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .off file generated
:rtype: str
"""
# Get keyword arguments
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
# Count the vertices to update the face numbers correctly
vertex_offset = 0
# Initialize lists for vertices, vertex normals and faces
str_v = []
str_f = []
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
vertices = srf.tessellator.vertices
triangles = srf.tessellator.faces
# Collect vertices
for vert in vertices:
line = str(vert.x) + " " + str(vert.y) + " " + str(vert.z) + "\n"
str_v.append(line)
# Collect faces (zero-indexed)
for t in triangles:
vl = t.data
line = "3 " + \
str(vl[0] + vertex_offset) + " " + \
str(vl[1] + vertex_offset) + " " + \
str(vl[2] + vertex_offset) + "\n"
str_f.append(line)
# Update vertex offset
vertex_offset = len(str_v)
# Write file header
line = "OFF\n"
line += str(len(str_v)) + " " + str(len(str_f)) + " 0\n"
# Write all collected data to the file
for lv in str_v:
line += lv
for lf in str_f:
line += lf
return line
@export
def import_smesh(file):
""" Generates NURBS surface(s) from surface mesh (smesh) file(s).
    *smesh* files are text files which contain a set of NURBS surfaces. Each file in the set corresponds to one
    NURBS surface. Most of the time, you receive multiple *smesh* files corresponding to a complete object composed of
several NURBS surfaces. The files have the extensions of ``txt`` or ``dat`` and they are named as
* ``smesh.X.Y.txt``
* ``smesh.X.dat``
where *X* and *Y* correspond to some integer value which defines the set the surface belongs to and part number of
the surface inside the complete object.
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS surfaces
:rtype: list
:raises GeomdlException: an error occurred reading the file
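    A minimal usage sketch (the directory name is a placeholder):
    .. code-block:: python
        :linenos:
        # Import all smesh files found in the given directory as NURBS surfaces
        surf_list = exchange.import_smesh("smesh_files/")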
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_surf_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_surf_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements
@export
def export_smesh(surface, file_name, **kwargs):
""" Exports surface(s) as surface mesh (smesh) files.
Please see :py:func:`.import_smesh()` for details on the file format.
:param surface: surface(s) to be exported
:type surface: abstract.Surface or multi.SurfaceContainer
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
# Get keyword arguments
decimals = kwargs.get('decimals', 18)
# Split file name and extension
fname, fext = os.path.splitext(file_name)
# Enumerate file name only if we are working with multiple surfaces
numerate_file = True if len(surface) > 1 else False
for idx, s in enumerate(surface):
if s.rational:
pts = s.ctrlptsw
else:
pts = compatibility.combine_ctrlpts_weights(s.ctrlpts)
line = str(s.dimension) + "\n"
line += str(s.degree_u) + " " + str(s.degree_v) + "\n"
line += str(s.ctrlpts_size_u) + " " + str(s.ctrlpts_size_v) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in s.knotvector_u]) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in s.knotvector_v]) + "\n"
# Flip control points
ctrlptsw = compatibility.flip_ctrlpts(pts, s.ctrlpts_size_u, s.ctrlpts_size_v)
# Convert control points into (x, y, z, w) format
ctrlptsw = compatibility.generate_ctrlpts_weights(ctrlptsw)
for ptw in ctrlptsw:
line += " ".join([("{:." + str(decimals) + "f}").format(p) for p in ptw]) + "\n"
# Open or closed?
line += "1\n"
# Write to file
fname_curr = fname + "." + str(idx + 1) if numerate_file else fname
exch.write_file(fname_curr + fext, line)
@export
def import_vmesh(file):
""" Imports NURBS volume(s) from volume mesh (vmesh) file(s).
:param file: path to a directory containing mesh files or a single mesh file
:type file: str
:return: list of NURBS volumes
:rtype: list
:raises GeomdlException: an error occurred reading the file
"""
imported_elements = []
if os.path.isfile(file):
imported_elements.append(exch.import_vol_mesh(file))
elif os.path.isdir(file):
files = sorted([os.path.join(file, f) for f in os.listdir(file)])
for f in files:
imported_elements.append(exch.import_vol_mesh(f))
else:
raise exch.GeomdlException("Input is not a file or a directory")
return imported_elements
@export
def export_vmesh(volume, file_name, **kwargs):
""" Exports volume(s) as volume mesh (vmesh) files.
:param volume: volume(s) to be exported
:type volume: abstract.Volume
:param file_name: name of the output file
:type file_name: str
:raises GeomdlException: an error occurred writing the file
"""
if volume.pdimension != 3:
raise exch.GeomdlException("Can only export volumes")
# Get keyword arguments
decimals = kwargs.get('decimals', 18)
# Split file name and extension
fname, fext = os.path.splitext(file_name)
# Enumerate file name only if we are working with multiple volumes
numerate_file = True if len(volume) > 1 else False
for idx, v in enumerate(volume):
if v.rational:
pts = v.ctrlptsw
else:
pts = compatibility.combine_ctrlpts_weights(v.ctrlpts)
line = str(v.dimension) + "\n"
line += str(v.degree_u) + " " + str(v.degree_v) + " " + str(v.degree_w) + "\n"
line += str(v.ctrlpts_size_u) + " " + str(v.ctrlpts_size_v) + " " + str(v.ctrlpts_size_w) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in v.knotvector_u]) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in v.knotvector_v]) + "\n"
line += " ".join([("{:." + str(decimals) + "f}").format(k) for k in v.knotvector_w]) + "\n"
# Convert control points into (x, y, z, w)
ctrlptsw = []
for w in range(v.ctrlpts_size_w):
srfpts = pts[(w * v.ctrlpts_size_u * v.ctrlpts_size_v):((w + 1) * v.ctrlpts_size_u * v.ctrlpts_size_v)]
# Flip control points
ctrlptsw += compatibility.flip_ctrlpts(srfpts, v.ctrlpts_size_u, v.ctrlpts_size_v)
# Convert control points into (x, y, z, w) format
ctrlptsw = compatibility.generate_ctrlpts_weights(ctrlptsw)
for ptw in ctrlptsw:
line += " ".join([("{:." + str(decimals) + "f}").format(p) for p in ptw]) + "\n"
# Open or closed?
line += "1\n"
# Write to file
fname_curr = fname + "." + str(idx + 1) if numerate_file else fname
exch.write_file(fname_curr + fext, line)
@export
def import_3dm(file_name, **kwargs):
""" Imports curves and surfaces from Rhinoceros/OpenNURBS .3dm files.
.. deprecated:: 5.2.2
``rw3dm`` Python module is replaced by ``on2json``. It can be used to convert .3dm files to geomdl JSON format.
Please refer to https://github.com/orbingol/rw3dm for more details.
:param file_name: input file name
:type file_name: str
"""
raise GeomdlException("This API call has been deprecated. Please refer to https://github.com/orbingol/rw3dm")
@export
def export_3dm(obj, file_name, **kwargs):
""" Exports NURBS curves and surfaces to Rhinoceros/OpenNURBS .3dm files.
.. deprecated:: 5.2.2
``rw3dm`` Python module is replaced by ``json2on``. It can be used to convert geomdl JSON format to .3dm files.
Please refer to https://github.com/orbingol/rw3dm for more details.
:param obj: curves/surfaces to be exported
:type obj: abstract.Curve, abstract.Surface, multi.CurveContainer, multi.SurfaceContainer
:param file_name: file name
:type file_name: str
"""
raise GeomdlException("This API call has been deprecated. Please refer to https://github.com/orbingol/rw3dm")
| mit | 1,979,683,606,880,411,600 | 35.06041 | 119 | 0.638447 | false |
Villoid/PynamoDB | pynamodb/tests/test_attributes.py | 1 | 13953 | """
pynamodb attributes tests
"""
import six
import json
from base64 import b64encode
from datetime import datetime
from delorean import Delorean
from mock import patch
from pynamodb.compat import CompatTestCase as TestCase
from pynamodb.constants import UTC, DATETIME_FORMAT
from pynamodb.models import Model
from pynamodb.attributes import (
BinarySetAttribute, BinaryAttribute, NumberSetAttribute, NumberAttribute,
UnicodeAttribute, UnicodeSetAttribute, UTCDateTimeAttribute, BooleanAttribute,
JSONAttribute, DEFAULT_ENCODING, NUMBER, STRING, STRING_SET, NUMBER_SET, BINARY_SET,
BINARY)
class AttributeTestModel(Model):
class Meta:
host = 'http://localhost:8000'
table_name = 'test'
binary_attr = BinaryAttribute()
binary_set_attr = BinarySetAttribute()
number_attr = NumberAttribute()
number_set_attr = NumberSetAttribute()
unicode_attr = UnicodeAttribute()
unicode_set_attr = UnicodeSetAttribute()
datetime_attr = UTCDateTimeAttribute()
bool_attr = BooleanAttribute()
json_attr = JSONAttribute()
class AttributeDescriptorTestCase(TestCase):
"""
Test Attribute Descriptors
"""
def setUp(self):
self.instance = AttributeTestModel()
def test_binary_attr(self):
"""
Binary attribute descriptor
"""
self.instance.binary_attr = b'test'
self.assertEqual(self.instance.binary_attr, b'test')
def test_binary_set_attr(self):
"""
Binary set attribute descriptor
"""
self.instance.binary_set_attr = set([b'test', b'test2'])
self.assertEqual(self.instance.binary_set_attr, set([b'test', b'test2']))
def test_number_attr(self):
"""
Number attribute descriptor
"""
self.instance.number_attr = 42
self.assertEqual(self.instance.number_attr, 42)
def test_number_set_attr(self):
"""
Number set attribute descriptor
"""
self.instance.number_set_attr = set([1, 2])
self.assertEqual(self.instance.number_set_attr, set([1, 2]))
def test_unicode_attr(self):
"""
Unicode attribute descriptor
"""
self.instance.unicode_attr = u"test"
self.assertEqual(self.instance.unicode_attr, u"test")
def test_unicode_set_attr(self):
"""
Unicode set attribute descriptor
"""
self.instance.unicode_set_attr = set([u"test", u"test2"])
self.assertEqual(self.instance.unicode_set_attr, set([u"test", u"test2"]))
def test_datetime_attr(self):
"""
Datetime attribute descriptor
"""
now = datetime.now()
self.instance.datetime_attr = now
self.assertEqual(self.instance.datetime_attr, now)
def test_bool_attr(self):
"""
Boolean attribute descriptor
"""
self.instance.bool_attr = True
self.assertEqual(self.instance.bool_attr, True)
def test_json_attr(self):
"""
JSON attribute descriptor
"""
self.instance.json_attr = {'foo': 'bar', 'bar': 42}
self.assertEqual(self.instance.json_attr, {'foo': 'bar', 'bar': 42})
class UTCDateTimeAttributeTestCase(TestCase):
"""
Tests UTCDateTime attributes
"""
def test_utc_datetime_attribute(self):
"""
UTCDateTimeAttribute.default
"""
attr = UTCDateTimeAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, STRING)
tstamp = datetime.now()
attr = UTCDateTimeAttribute(default=tstamp)
self.assertEqual(attr.default, tstamp)
def test_utc_date_time_deserialize(self):
"""
UTCDateTimeAttribute.deserialize
"""
tstamp = Delorean(timezone=UTC).datetime
attr = UTCDateTimeAttribute()
self.assertEqual(
tstamp,
attr.deserialize(Delorean(tstamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT)),
)
def test_utc_date_time_deserialize_parse_args(self):
"""
UTCDateTimeAttribute.deserialize
"""
tstamp = Delorean(timezone=UTC).datetime
attr = UTCDateTimeAttribute()
with patch('pynamodb.attributes.parse') as parse:
attr.deserialize(Delorean(tstamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT))
parse.assert_called_with(tstamp.strftime(DATETIME_FORMAT), dayfirst=False)
def test_utc_date_time_serialize(self):
"""
UTCDateTimeAttribute.serialize
"""
tstamp = datetime.now()
attr = UTCDateTimeAttribute()
self.assertEqual(attr.serialize(tstamp), Delorean(tstamp, timezone=UTC).datetime.strftime(DATETIME_FORMAT))
class BinaryAttributeTestCase(TestCase):
"""
Tests binary attributes
"""
def test_binary_attribute(self):
"""
BinaryAttribute.default
"""
attr = BinaryAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, BINARY)
attr = BinaryAttribute(default=b'foo')
self.assertEqual(attr.default, b'foo')
def test_binary_round_trip(self):
"""
BinaryAttribute round trip
"""
attr = BinaryAttribute()
value = b'foo'
serial = attr.serialize(value)
self.assertEqual(attr.deserialize(serial), value)
def test_binary_serialize(self):
"""
BinaryAttribute.serialize
"""
attr = BinaryAttribute()
serial = b64encode(b'foo').decode(DEFAULT_ENCODING)
self.assertEqual(attr.serialize(b'foo'), serial)
def test_binary_deserialize(self):
"""
BinaryAttribute.deserialize
"""
attr = BinaryAttribute()
serial = b64encode(b'foo').decode(DEFAULT_ENCODING)
self.assertEqual(attr.deserialize(serial), b'foo')
def test_binary_set_serialize(self):
"""
BinarySetAttribute.serialize
"""
attr = BinarySetAttribute()
self.assertEqual(attr.attr_type, BINARY_SET)
self.assertEqual(
attr.serialize(set([b'foo', b'bar'])),
[b64encode(val).decode(DEFAULT_ENCODING) for val in sorted(set([b'foo', b'bar']))])
self.assertEqual(attr.serialize(None), None)
def test_binary_set_round_trip(self):
"""
BinarySetAttribute round trip
"""
attr = BinarySetAttribute()
value = set([b'foo', b'bar'])
serial = attr.serialize(value)
self.assertEqual(attr.deserialize(serial), value)
def test_binary_set_deserialize(self):
"""
BinarySetAttribute.deserialize
"""
attr = BinarySetAttribute()
value = set([b'foo', b'bar'])
self.assertEqual(
attr.deserialize([b64encode(val).decode(DEFAULT_ENCODING) for val in sorted(value)]),
value
)
def test_binary_set_attribute(self):
"""
BinarySetAttribute.serialize
"""
attr = BinarySetAttribute()
self.assertIsNotNone(attr)
attr = BinarySetAttribute(default=set([b'foo', b'bar']))
self.assertEqual(attr.default, set([b'foo', b'bar']))
class NumberAttributeTestCase(TestCase):
"""
Tests number attributes
"""
def test_number_attribute(self):
"""
NumberAttribute.default
"""
attr = NumberAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, NUMBER)
attr = NumberAttribute(default=1)
self.assertEqual(attr.default, 1)
def test_number_serialize(self):
"""
NumberAttribute.serialize
"""
attr = NumberAttribute()
self.assertEqual(attr.serialize(3.141), '3.141')
self.assertEqual(attr.serialize(1), '1')
def test_number_deserialize(self):
"""
NumberAttribute.deserialize
"""
attr = NumberAttribute()
self.assertEqual(attr.deserialize('1'), 1)
self.assertEqual(attr.deserialize('3.141'), 3.141)
def test_number_set_deserialize(self):
"""
NumberSetAttribute.deserialize
"""
attr = NumberSetAttribute()
self.assertEqual(attr.attr_type, NUMBER_SET)
self.assertEqual(attr.deserialize([json.dumps(val) for val in sorted(set([1, 2]))]), set([1, 2]))
def test_number_set_serialize(self):
"""
NumberSetAttribute.serialize
"""
attr = NumberSetAttribute()
self.assertEqual(attr.serialize(set([1, 2])), [json.dumps(val) for val in sorted(set([1, 2]))])
self.assertEqual(attr.serialize(None), None)
def test_number_set_attribute(self):
"""
NumberSetAttribute.default
"""
attr = NumberSetAttribute()
self.assertIsNotNone(attr)
attr = NumberSetAttribute(default=set([1, 2]))
self.assertEqual(attr.default, set([1, 2]))
class UnicodeAttributeTestCase(TestCase):
"""
Tests unicode attributes
"""
def test_unicode_attribute(self):
"""
UnicodeAttribute.default
"""
attr = UnicodeAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, STRING)
attr = UnicodeAttribute(default=six.u('foo'))
self.assertEqual(attr.default, six.u('foo'))
def test_unicode_serialize(self):
"""
UnicodeAttribute.serialize
"""
attr = UnicodeAttribute()
self.assertEqual(attr.serialize('foo'), six.u('foo'))
self.assertEqual(attr.serialize(u'foo'), six.u('foo'))
self.assertEqual(attr.serialize(u''), None)
self.assertEqual(attr.serialize(None), None)
def test_unicode_deserialize(self):
"""
UnicodeAttribute.deserialize
"""
attr = UnicodeAttribute()
self.assertEqual(attr.deserialize('foo'), six.u('foo'))
self.assertEqual(attr.deserialize(u'foo'), six.u('foo'))
def test_unicode_set_serialize(self):
"""
UnicodeSetAttribute.serialize
"""
attr = UnicodeSetAttribute()
self.assertEqual(attr.attr_type, STRING_SET)
self.assertEqual(attr.deserialize(None), None)
self.assertEqual(
attr.serialize(set([six.u('foo'), six.u('bar')])),
[json.dumps(val) for val in sorted(set([six.u('foo'), six.u('bar')]))])
def test_round_trip_unicode_set(self):
"""
Round trip a unicode set
"""
attr = UnicodeSetAttribute()
orig = set([six.u('foo'), six.u('bar')])
self.assertEqual(
orig,
attr.deserialize(attr.serialize(orig))
)
def test_unicode_set_deserialize(self):
"""
UnicodeSetAttribute.deserialize
"""
attr = UnicodeSetAttribute()
self.assertEqual(
attr.deserialize([json.dumps(val) for val in sorted(set([six.u('foo'), six.u('bar')]))]),
set([six.u('foo'), six.u('bar')])
)
def test_unicode_set_attribute(self):
"""
UnicodeSetAttribute.default
"""
attr = UnicodeSetAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, STRING_SET)
attr = UnicodeSetAttribute(default=set([six.u('foo'), six.u('bar')]))
self.assertEqual(attr.default, set([six.u('foo'), six.u('bar')]))
class BooleanAttributeTestCase(TestCase):
"""
Tests boolean attributes
"""
def test_boolean_attribute(self):
"""
BooleanAttribute.default
"""
attr = BooleanAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, NUMBER)
attr = BooleanAttribute(default=True)
self.assertEqual(attr.default, True)
def test_boolean_serialize(self):
"""
BooleanAttribute.serialize
"""
attr = BooleanAttribute()
self.assertEqual(attr.serialize(True), json.dumps(1))
self.assertEqual(attr.serialize(False), json.dumps(0))
self.assertEqual(attr.serialize(None), None)
def test_boolean_deserialize(self):
"""
BooleanAttribute.deserialize
"""
attr = BooleanAttribute()
self.assertEqual(attr.deserialize('1'), True)
self.assertEqual(attr.deserialize('0'), False)
class JSONAttributeTestCase(TestCase):
"""
Tests json attributes
"""
def test_quoted_json(self):
attr = JSONAttribute()
serialized = attr.serialize('\\t')
self.assertEqual(attr.deserialize(serialized), '\\t')
serialized = attr.serialize('"')
self.assertEqual(attr.deserialize(serialized), '"')
def test_json_attribute(self):
"""
JSONAttribute.default
"""
attr = JSONAttribute()
self.assertIsNotNone(attr)
self.assertEqual(attr.attr_type, STRING)
attr = JSONAttribute(default={})
self.assertEqual(attr.default, {})
def test_json_serialize(self):
"""
JSONAttribute.serialize
"""
attr = JSONAttribute()
item = {'foo': 'bar', 'bool': True, 'number': 3.141}
self.assertEqual(attr.serialize(item), six.u(json.dumps(item)))
self.assertEqual(attr.serialize({}), six.u('{}'))
self.assertEqual(attr.serialize(None), None)
def test_json_deserialize(self):
"""
JSONAttribute.deserialize
"""
attr = JSONAttribute()
item = {'foo': 'bar', 'bool': True, 'number': 3.141}
encoded = six.u(json.dumps(item))
self.assertEqual(attr.deserialize(encoded), item)
def test_control_chars(self):
"""
JSONAttribute with control chars
"""
attr = JSONAttribute()
item = {'foo\t': 'bar\n', 'bool': True, 'number': 3.141}
encoded = six.u(json.dumps(item))
self.assertEqual(attr.deserialize(encoded), item)
| mit | -4,106,644,730,118,012,000 | 29.73348 | 115 | 0.60582 | false |
brokendata/bigmler | bigmler/train_reader.py | 1 | 11880 | # -*- coding: utf-8 -*-
#
# Copyright 2013-2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""TrainReader class
Manages the training input data, its headers and labels if the objective
field is a multi-label field
"""
from __future__ import absolute_import
import csv
import sys
from bigml.util import get_csv_delimiter
from bigml.io import UnicodeReader
from bigmler.checkpoint import file_number_of_lines
from bigmler.labels import get_label_field
from bigmler.utils import PYTHON3, SYSTEM_ENCODING, FILE_ENCODING
from bigmler.utils import encode2, decode2
# UTF8Recoder is assumed to live in bigmler.utf8recoder; it is needed for the StringIO branch below
from bigmler.utf8recoder import UTF8Recoder
AGGREGATES = {
'count': len,
'last': lambda x: x[-1],
'first': lambda x: x[0]
}
class TrainReader(object):
"""Retrieves csv info and manages objective fields and multi-labels
"""
def __init__(self, training_set, training_set_header, objective_field,
multi_label=False, labels=None, label_separator=None,
training_separator=None, multi_label_fields=None,
label_aggregates=None, objective=True):
"""Builds a generator from a csv file
`training_set`: path to the training data file
`training_set_header`: boolean, True means that headers are first
row in the file
`objective_field`: objective field column or field name
`labels`: Fields object with the expected fields structure.
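        A hypothetical instantiation (file name, objective field and separator are placeholders):
            reader = TrainReader("training_data.csv", True, "class",
                                 multi_label=True, label_separator=":")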
"""
self.training_set = training_set
if training_set.__class__.__name__ == "StringIO":
self.encode = None
            self.training_set = UTF8Recoder(training_set, SYSTEM_ENCODING)
else:
self.encode = None if PYTHON3 else FILE_ENCODING
self.training_set_header = training_set_header
self.training_reader = None
self.multi_label = multi_label
self.objective = objective
if label_aggregates is None:
label_aggregates = []
self.label_aggregates = label_aggregates
self.training_separator = (decode2(training_separator,
encoding="string_escape")
if training_separator is not None
else get_csv_delimiter())
if len(self.training_separator) > 1:
sys.exit("Only one character can be used as test data separator.")
# opening csv reader
self.reset()
self.label_separator = (decode2(label_separator,
encoding="string_escape")
if label_separator is not None
else get_csv_delimiter())
first_row = self.get_next(reset=not training_set_header)
self.row_length = len(first_row)
if training_set_header:
self.headers = first_row
else:
self.headers = [("field_%s" % index) for index in
range(0, self.row_length)]
self.multi_label_fields = sorted(self._get_columns(multi_label_fields))
if objective:
self.objective_column = self._get_columns([objective_field])[0]
if not self.objective_column in self.multi_label_fields:
self.multi_label_fields.append(self.objective_column)
self.labels = labels
self.fields_labels = self._get_labels()
if objective:
if labels is None:
self.labels = self.fields_labels[self.objective_column]
self.objective_name = self.headers[self.objective_column]
def __iter__(self):
"""Iterator method
"""
return self
def get_label_headers(self):
"""Returns a list of headers with the new extended field names for
each objective label
"""
new_headers = self.get_headers()
for field_column in self.multi_label_fields:
labels = self.fields_labels[field_column]
new_field_names = [get_label_field(self.headers[field_column],
label)
for label in labels]
new_headers.extend(new_field_names)
for aggregate in self.label_aggregates:
new_headers.append(get_label_field(
self.headers[field_column], aggregate))
if not PYTHON3:
new_headers = [encode2(header) for header in new_headers]
return new_headers
def _get_columns(self, fields_list):
"""Receives a comma-separated list of fields given by name or
column number and returns column number list
"""
column_list = []
if fields_list is None:
return column_list
if not isinstance(fields_list, list):
fields_list = [fields_list]
for field in fields_list:
column = None
if isinstance(field, int):
column = field
elif field is None:
column = self.row_length - 1
else:
try:
column = self.headers.index(field)
except ValueError:
if self.objective:
sys.exit("The %s has been set as multi-label field but"
" it cannot be found in the headers row: \n"
" %s" %
(field,
", ".join([encode2(header)
for header in self.headers])))
else:
column = None
if column is not None:
column_list.append(column)
return column_list
def reset(self):
"""Starts a new csv reader object
"""
try:
self.training_set.close()
except (IOError, AttributeError):
pass
try:
self.training_reader = UnicodeReader(
self.training_set, delimiter=self.training_separator,
lineterminator="\n").open_reader()
except IOError:
sys.exit("Error: cannot read training %s" % self.training_set)
def next(self):
"""Iterator method for next item
"""
return self.get_next()
def get_next(self, extended=False, reset=False):
"""Returns the next row. If extended is True, the row is extended with
a list of booleans depending on whether the label is in the
objective field value or not. If reset is True, the file is
reopened and pointer starts at the beginning of the file.
"""
row = self.training_reader.next()
row = [value.strip() for value in row]
if extended:
if self.multi_label and self.fields_labels is None:
self.fields_labels = self._get_labels()
for field_column in self.multi_label_fields:
aggregated_field_value = row[field_column]
field_values = aggregated_field_value.split(
self.label_separator)
field_values = [value.strip() for
value in field_values]
labels_row = [int(label in field_values) for label in
self.fields_labels[field_column]]
row.extend(labels_row)
for aggregate in self.label_aggregates:
row.append(AGGREGATES[aggregate](field_values))
if reset:
self.reset()
if not PYTHON3:
row = [encode2(item) for item in row]
return row
def number_of_rows(self):
"""Returns the number of rows in the test file
"""
rows = file_number_of_lines(self.training_set)
if self.training_set_header:
rows -= 1
return rows
def has_headers(self):
"""Returns whether the training set file has a headers row
"""
return self.training_set_header
def _get_labels(self):
"""Returns the list of labels in the multi-label fields
"""
labels = {}
for field_column in self.multi_label_fields:
labels[field_column] = []
for row in self:
for field_column in self.multi_label_fields:
labels = self._get_field_labels(row, labels,
field_column,
self.label_separator)
return labels
def _get_field_labels(self, row, labels, field_column, separator):
"""Returns the list of labels in a multi-label field
"""
field_value = row[field_column]
if self.multi_label:
new_labels = field_value.split(separator)
new_labels = [decode2(label).strip()
for label in new_labels]
# TODO: clean user given missing tokens
for label_index in range(0, len(new_labels)):
if new_labels[label_index] == '':
del new_labels[label_index]
if new_labels != []:
if (self.objective and field_column == self.objective_column
and self.labels is not None):
# If user gave the subset of labels, use only those
new_labels = [label for label in self.labels if
label in new_labels]
labels[field_column].extend(new_labels)
else:
labels[field_column].append(field_value)
labels[field_column] = sorted(list(set(labels[field_column])))
return labels
def get_headers(self, objective_field=True):
"""Returns headers. If objective_field is False, the objective field
header is removed.
"""
if objective_field:
return self.headers[:]
new_headers = self.headers[:]
if self.objective:
del new_headers[self.objective_column]
return new_headers
def new_fields_info(self):
"""Dict of 2-item lists 'field_column': [label, label_column]
describing the per label extension
"""
info = {}
column = len(self.headers)
for field_column in self.multi_label_fields:
alpha_field_column = str(field_column)
info[alpha_field_column] = []
labels = self.fields_labels[field_column]
for label in labels:
info[alpha_field_column].append([label, column])
column += 1
# skip the aggregate values columns
column += len(self.label_aggregates)
return info
def get_multi_label_data(self):
"""Returns a dict to store the multi-label info that defines this
source
"""
if self.objective:
return {
"multi_label_fields": [[column, self.headers[column]]
for column in self.multi_label_fields],
"generated_fields": self.new_fields_info(),
"objective_name": self.objective_name,
"objective_column": self.objective_column}
def close(self):
"""Closing file handler
"""
self.training_reader.close_reader()
| apache-2.0 | 4,185,642,118,313,162,000 | 36.358491 | 79 | 0.555724 | false |
ameihm0912/MozDef | bot/modules/zilla.py | 1 | 3113 | #!/usr/bin/env python
# Inspired by https://github.com/ayust/kitnirc/blob/master/kitnirc/contrib/healthcheck.py
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2014 Mozilla Corporation
#
# Contributors:
# [email protected]
#
import logging
from kitnirc.client import Channel
from kitnirc.modular import Module
from kitnirc.user import User
import threading
import time
import json
import bugzilla
# get a logger for the module
# via the Python logging library.
_log = logging.getLogger(__name__)
# KitnIRC modules subclass kitnirc.modular.Module
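# A hypothetical configuration section consumed by this module; the key names follow
# the config.get() calls in __init__ below, the values are only placeholders:
#
#   [zilla]
#   url = https://bugzilla.example.com/
#   api_key = 0123456789abcdef
#   interval = 600
#   channel = #mozdef
#   search_terms = {"keywords": "sec-high", "resolution": "---"}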
class Zilla(Module):
def __init__(self, *args, **kwargs):
super(Zilla, self).__init__(*args, **kwargs)
self._stop = False
self.thread = threading.Thread(target=self.loop, name='zilla')
self.thread.daemon = True
config = self.controller.config
try:
self.url = config.get("zilla", "url")
self.api_key = config.get("zilla", "api_key")
self.interval = config.getint("zilla", "interval")
self.channel = config.get('zilla', 'channel')
except AttributeError:
_log.warning("Couldn't load the Zilla module, check your configuration.")
self.url = "https://example.com"
self.api_key = "DEADBEEF"
self.interval = 9999999
self.channel = '#test'
self._bugzilla = bugzilla.Bugzilla(url=self.url+'rest/', api_key=self.api_key)
_log.info("zilla module initialized for {}, pooling every {} seconds.".format(self.url, self.interval))
def loop(self):
last = 0
while not self._stop:
now = time.time()
if ((now-last) > self.interval):
#Add all the actions you want to do with bugzilla here ;)
self.bugzilla_search()
last = now
time.sleep(1)
def bugzilla_search(self):
config = self.controller.config
try:
terms = json.loads(config.get('zilla', 'search_terms'))
except AttributeError:
_log.warning("zilla could not load search terms")
return
try:
res = self._bugzilla.search_bugs(terms)
except:
return
for bug in res['bugs']:
bugsummary = bug['summary'].encode('utf-8', 'replace')
self.controller.client.msg(self.channel, "\x037\x02WARNING\x03\x02 \x032\x02NEW\x03\x02 bug: {url}{bugid} {summary}".format(summary=bugsummary,
url=self.url, bugid=bug['id']))
def start(self, *args, **kwargs):
super(Zilla, self).start(*args, **kwargs)
self._stop = False
self.thread.start()
def stop(self, *args, **kwargs):
super(Zilla, self).stop(*args, **kwargs)
self._stop = True
self.thread.join()
# Let KitnIRC know what module class it should be loading.
module = Zilla
| mpl-2.0 | -57,613,220,247,150,950 | 33.588889 | 159 | 0.600064 | false |
aaronlbrink/cs108FinalProject | src/command_test.py | 1 | 2541 | import unittest
import command
import re
from datetime import timedelta
class CommandTest(unittest.TestCase):
def test_command(self):
''' Test the base class throws errors correctly
This test case was written by binki to show me how to write test cases
'''
failed_to_throw = False
try:
command.Command(re.compile('')).run('spoken text')
failed_to_throw = True
except Exception as e:
# The following assert was written by someone else.
assert 'Run must be implemented in subclass' in e.args[0], 'Wrong exception thrown: %s' % e
        assert not failed_to_throw, 'Should have thrown exception when trying to run unimplemented run()'
class TimerTest(unittest.TestCase):
def test_timer_matches(self):
''' '''
things_that_should_match = [
'timer set 2',
'timer set 35 minutes',
'timer set 0',
            # shouldn't matter what the user says between 'timer' and the numerical value
'timer eat 2',
]
for spoken_text in things_that_should_match:
            assert command.TimerCommand().matches(spoken_text), 'A phrase was not matched that should have been matched: ' + str(spoken_text)
def test_timer_matches_unmatched(self):
things_that_should_not_match = [
'timer thirty',
'timer hello',
'nottime',
'time this',
]
for spoken_text in things_that_should_not_match:
assert not command.TimerCommand().matches(spoken_text), 'A phrase was matched that shouldn\'t\'ve: ' + str(spoken_text)
def test_timer_run_parses(self):
''' This test was created by binki '''
class TestableTimerCommand(command.TimerCommand):
def __init__(self):
command.TimerCommand.__init__(self)
self.set_timer_called_with = None
def set_timer(self, time_delta):
self.set_timer_called_with = time_delta
timer_command = TestableTimerCommand()
timer_command.run('timer 4')
assert isinstance(timer_command.set_timer_called_with, timedelta), 'called with something other than timedelta: {}'.format(type(timer_command.set_timer_called_with).__name__)
assert timer_command.set_timer_called_with == timedelta(minutes=4), 'not called with proper timedelta. Instead called with {}'.format(timer_command.set_timer_called_with)
| gpl-3.0 | 7,133,469,518,591,081,000 | 42.810345 | 182 | 0.609209 | false |
edgarcosta/endomorphisms | endomorphisms/Representations.py | 1 | 1456 | """
* Representation functions
*
* Copyright (C) 2016-2017
* Edgar Costa ([email protected])
* Davide Lombardo ([email protected])
* Jeroen Sijsling ([email protected])
*
* See LICENSE.txt for license details.
"""
from sage.all import magma
def repr_curve(X):
curve_type = magma.CurveType(X)
if str(curve_type) == "hyperelliptic":
f, h = magma.HyperellipticPolynomials(X, nvals = 2)
if magma.IsZero(h):
return " the hyperelliptic curve y^2 = {}".format(str(f))
else:
return " the hyperelliptic curve y^2 + ({})*y = {}".format(str(h), str(f))
elif str(curve_type) == "plane":
F = magma.DefiningPolynomial(X)
return " the plane curve {} = 0".format(str(F))
def repr_endomorphism_data(End):
return "The endomorphism data of" + repr_curve(End.X)
def repr_lattice(Lat):
return "The endomorphism lattice of" + repr_curve(Lat.X)
def repr_over_field(over_field):
pre = "The endomorphism structure of" + repr_curve(over_field.X)
if over_field.field == "geometric":
post = " over the algebraic closure of its base field"
elif over_field.field == "base":
post = " over its base field"
else:
post = " over " + str(over_field.field)
return pre + post
def repr_decomposition(decomp):
return "The decomposition structure of" + repr_curve(decomp.X)
| gpl-2.0 | 1,946,836,060,677,192,400 | 32.860465 | 86 | 0.62294 | false |
kgullikson88/LasCampanas-MIKE | ConvertToExtensions.py | 1 | 2043 | import FittingUtilities
from astropy.io import fits as pyfits
import sys
import os
import numpy
import matplotlib.pyplot as plt
import HelperFunctions
left_trim = 8
right_trim = 0
bad_regions = {}
if __name__ == "__main__":
fileList = []
for arg in sys.argv[1:]:
fileList.append(arg)
for fname in fileList:
outfilename = "%s-0.fits" %(fname.split(".fits")[0])
header = pyfits.getheader(fname)
try:
orders = HelperFunctions.ReadFits(fname)
except ValueError:
orders = HelperFunctions.ReadFits(fname, errors=2)
orders = orders[::-1] #Reverse order so the bluest order is first
column_list = []
for i, order in enumerate(orders):
left, right = left_trim, order.size()-right_trim
if i in bad_regions.keys():
region = bad_regions[i]
left = numpy.searchsorted(order.x, region[0])
right = numpy.searchsorted(order.x, region[1])
if left == 0 or right == order.size():
order.x = numpy.delete(order.x, numpy.arange(left, right))
order.y = numpy.delete(order.y, numpy.arange(left, right))
order.cont = numpy.delete(order.cont, numpy.arange(left, right))
order.err = numpy.delete(order.err, numpy.arange(left, right))
else:
print "Warning! Bad region covers the middle of order %i" %i
print "Interpolating rather than removing"
order.y[left:right] = order.cont[left:right]
order.err[left:right] = 9e9
else:
order = order[left:right]
if order.size() < 10:
continue
order.cont = FittingUtilities.Continuum(order.x, order.y, fitorder=3, lowreject=1.5, highreject=10)
            columns = {"wavelength": order.x,
"flux": order.y,
"continuum": order.cont,
"error": order.err}
column_list.append(columns)
HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode="new")
| gpl-3.0 | 8,868,885,856,817,803,000 | 31.428571 | 105 | 0.604014 | false |
Arvedui/i3pystatus | i3pystatus/mail/maildir.py | 1 | 1360 | import os
from i3pystatus.mail import Backend
class MaildirMail(Backend):
"""
Checks for local mail in Maildir
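    A hypothetical registration sketch for an i3pystatus configuration (the path is a placeholder):
    .. code-block:: python
        status.register("mail",
                        backends=[maildir.MaildirMail(directory="~/Maildir")])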
"""
settings = (
"directory",
)
required = ("directory",)
directory = ""
def init(self):
self.directory = os.path.expanduser(self.directory)
@property
def unread(self):
def check_seen_flag(msgname):
"""
            Return False if the (S)een flag is set
            The code of this function was partially extracted from
            Python's Maildir and MaildirMessage classes, which are not used
because they cannot read the message flags without reading the entire message.
"""
maildir_info = msgname.split(':')[-1]
            # This is a logical implication: if maildir_info starts with '2,',
            # it must not contain 'S'; if it does not start with '2,', the rest of
            # its content does not matter because no flags are set
return not maildir_info.startswith('2,') or 'S' not in maildir_info[2:]
path_new = os.path.join(self.directory, "new")
new_messages = len(os.listdir(path_new))
path_cur = os.path.join(self.directory, "cur")
unread_messages = len(list(filter(check_seen_flag, os.listdir(path_cur))))
return new_messages + unread_messages
Backend = MaildirMail
| mit | -4,569,347,419,518,561,000 | 29.222222 | 90 | 0.611029 | false |
xray7224/PyPump | tests/collection_test.py | 1 | 4323 | # -*- coding: utf-8 -*-
import six
from pypump.models.collection import Collection
from pypump.models.feed import Feed
from tests import PyPumpTest
class CollectionTest(PyPumpTest):
def setUp(self):
super(CollectionTest, self).setUp()
self.response.data = {
"content": "",
"objectTypes": [
"person"
],
"displayName": "test list for testuser",
"objectType": "collection",
"author": {
"objectType": "person",
"id": "acct:[email protected]"
},
"published": "2014-08-31T20:54:25Z",
"updated": "2014-08-31T20:54:25Z",
"links": {
"self": {
"href": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA"
}
},
"likes": {
"url": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA/likes"
},
"replies": {
"url": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA/replies",
"items": []
},
"shares": {
"url": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA/shares",
"items": []
},
"url": "https://example.com/testuser/list/CZGYt-ljQ2WcmqfU1n5znA",
"id": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA",
"members": {
"totalItems": 0,
"url": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA/members"
},
"liked": False,
"pump_io": {
"shared": False
}
}
self.collection = Collection(pypump=self.pump).unserialize(self.response.data)
def test_collection(self):
# is a Collection object
self.assertTrue(isinstance(self.collection, Collection))
# object to string
self.assertEqual(self.collection.__str__(), self.collection.display_name)
self.collection.display_name = u'test list for Testanvändare'
if six.PY3:
self.assertEqual(self.collection.__str__(), self.collection.display_name)
else:
self.assertEqual(self.collection.__str__(), self.collection.display_name.encode('utf-8'))
def test_collection_attr_display_name(self):
self.assertTrue(hasattr(self.collection, 'display_name'))
self.assertEqual(self.collection.display_name, self.response["displayName"])
def test_collection_attr_url(self):
self.assertTrue(hasattr(self.collection, 'url'))
self.assertEqual(self.collection.url, self.response["url"])
def test_collection_attr_members(self):
self.assertTrue(hasattr(self.collection, 'members'))
self.assertTrue(isinstance(self.collection.members, Feed))
def test_collection_attr_author(self):
self.assertTrue(hasattr(self.collection, 'author'))
self.assertTrue(isinstance(self.collection.author, type(self.pump.Person())))
def test_collection_attr_links(self):
self.assertTrue(hasattr(self.collection, 'links'))
# we should expand this test when we have settled on way to show links
self.assertTrue(self.collection.links is not None)
def test_collection_add(self):
self.response.data = {
"verb": "add",
"object": {
"objectType": "person",
"id": "[email protected]",
},
"target": {
"objectType": "collection",
"id": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA",
}
}
person = self.pump.Person('[email protected]')
self.collection.add(person)
def test_collection_remove(self):
self.response.data = {
"verb": "remove",
"object": {
"objectType": "person",
"id": "[email protected]"
},
"target": {
"objectType": "collection",
"id": "https://example.com/api/collection/CZGYt-ljQ2WcmqfU1n5znA",
}
}
person = self.pump.Person('[email protected]')
self.collection.remove(person)
| gpl-3.0 | -6,928,660,012,419,072,000 | 35.319328 | 101 | 0.554836 | false |
janvanrijn/openml-pimp | tests/test_sklearn/verify_successive_halving_results.py | 1 | 6592 |
import unittest
import arff
import math
import numpy as np
import openmlpimp
import os
class VerifySuccessiveHalvingRunTest(unittest.TestCase):
@staticmethod
def obtain_config(data_point, param_indices):
# data_points = list<mixed>
# param_indices = dict<int, str>
config = []
for key in sorted(param_indices):
config.append(data_point[key])
return tuple(config)
@staticmethod
def check_sh_iteration(data_points, param_indices, eval_idx, file=None):
# data_points = list<list<mixed>>
# param_indices = dict<int, str>
num_steps = int(math.log(len(data_points), 2))
# enumerates over trace backwards, checking whether
# - configs in a step also appeared in the previous step
# - these configs were indeed amongst the best half
# - the last one was selected as the best
next_step_configs = {VerifySuccessiveHalvingRunTest.obtain_config(data_points[-1], param_indices)}
for step in range(num_steps):
current_configs = []
current_scores = []
for arms in range(2**(step+1), 2**(step+2)):
if data_points[-arms][4] != 'false':
raise ValueError('Wrong incumbent')
current_configs.append(VerifySuccessiveHalvingRunTest.obtain_config(data_points[-arms], param_indices))
current_scores.append(float(data_points[-arms][eval_idx]))
possible_continue_arms = set()
            current_scores = np.array(current_scores, dtype=float)
            ranking = np.argsort(current_scores * -1)
            num_continue_arms = int(len(current_configs) / 2)
            for i in range(len(current_configs)):
                if i < num_continue_arms or current_scores[ranking[i]] == current_scores[ranking[num_continue_arms-1]]:
                    possible_continue_arms.add(current_configs[ranking[i]])
for config in next_step_configs:
if config not in current_configs:
raise ValueError('Could not find config %s for file %s' %(str(config), file))
if len(next_step_configs - possible_continue_arms) > 0:
                raise ValueError('Incorrect arms were continued to the next step in file %s' % file)
next_step_configs = set(current_configs)
@staticmethod
def check_hyperband_iteration(current_points, param_indices, eval_index, num_brackets, file):
if num_brackets is None:
VerifySuccessiveHalvingRunTest.check_sh_iteration(current_points, param_indices, eval_index, file)
else:
# this only handles 'vanilla' hyperband
for i in range(num_brackets):
num_data_points = 2 ** (num_brackets - i) - 1
current_bracket_points = current_points[:num_data_points]
current_points = current_points[num_data_points:]
VerifySuccessiveHalvingRunTest.check_sh_iteration(current_bracket_points, param_indices, eval_index, file)
@staticmethod
def process_arff_file(file, num_brackets=None):
arff_data = arff.load(open(file, 'r'))
param_indices = dict()
eval_index = None
for idx, attribute in enumerate(arff_data['attributes']):
if attribute[0].startswith('parameter_'):
param_indices[idx] = attribute[0]
elif attribute[0] == 'evaluation':
eval_index = idx
if len(param_indices) < 5:
            raise ValueError('Expected at least 5 parameter columns in %s' % file)
        # assumes records are ordered by repeat/fold in the trace file
current_repeat = 0
current_fold = 0
current_points = []
for datapoint in arff_data['data']:
repeat = int(datapoint[0])
fold = int(datapoint[1])
if repeat != current_repeat or fold != current_fold:
print('Checking %d %d with %d curves' % (repeat, fold, len(current_points)))
VerifySuccessiveHalvingRunTest.check_hyperband_iteration(current_points, param_indices, eval_index, num_brackets, file)
current_repeat = repeat
current_fold = fold
current_points = []
current_points.append(datapoint)
# verify the last batch
VerifySuccessiveHalvingRunTest.check_hyperband_iteration(current_points, param_indices, eval_index, num_brackets, file)
@staticmethod
def traverse_experiment_directory(result_directory, num_brackets):
for classifier in os.listdir(result_directory):
if os.path.isfile(os.path.join(result_directory, classifier)):
continue
for fixed_parameters in os.listdir(os.path.join(result_directory, classifier)):
print(openmlpimp.utils.get_time(), 'classifier:', classifier, fixed_parameters)
directory = os.path.join(result_directory, classifier, fixed_parameters)
for strategy in os.listdir(directory):
for task_directory in os.listdir(os.path.join(directory, strategy)):
file = os.path.join(directory, strategy, task_directory, 'trace.arff')
if os.path.isfile(file):
VerifySuccessiveHalvingRunTest.process_arff_file(file, num_brackets=num_brackets)
def test_correct_successive_halving(self):
directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/hyperband/')
VerifySuccessiveHalvingRunTest.process_arff_file(os.path.join(directory, 'successive_halving_correct.arff'))
pass
def test_incorrect_successive_halving(self):
directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../data/hyperband/')
files = ['successive_halving_incorrect-1.arff',
'successive_halving_incorrect-2.arff',
'successive_halving_incorrect-3.arff',
'successive_halving_incorrect-4.arff']
for file in files:
with self.assertRaises(ValueError):
VerifySuccessiveHalvingRunTest.process_arff_file(os.path.join(directory, file))
def test_results_directory_sh(self):
result_directory = os.path.expanduser('~') + '/nemo/experiments/20180206priorbased_experiments/'
VerifySuccessiveHalvingRunTest.traverse_experiment_directory(result_directory, None)
def test_results_directory_hyperband(self):
result_directory = os.path.expanduser('~') + '/nemo/experiments/priorbased_experiments/'
VerifySuccessiveHalvingRunTest.traverse_experiment_directory(result_directory, 5) | bsd-3-clause | -9,072,625,614,570,782,000 | 44.468966 | 135 | 0.626214 | false |
Statoil/libecl | python/ecl/grid/ecl_region.py | 1 | 41631 | # Copyright (C) 2011 Equinor ASA, Norway.
#
# The file 'ecl_region.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
Module used to select cells based on many different criteria.
This module implements the class EclRegion which can be used to select
cells in a grid matching a criterion. A wide range of different
criteria are supported. Many of the special functions for implementing
mathematical operations are implemented, so that regions can be
combined e.g. with logical &.
When the selection process is complete the region instance can be
queried for the corresponding list of indices.
"""
from functools import wraps
import ctypes
from cwrap import BaseCClass
import ecl
from ecl.util.util import monkey_the_camel
from ecl.util.util import IntVector
from ecl import EclPrototype
from ecl.grid.faults import Layer
from ecl import EclDataType
from ecl.eclfile import EclKW
from ecl.util.geometry import CPolyline
def select_method(select):
"""
The select_method method decorator is applied to all the
select_xxx() methods. The purpose of this decorator is to
allow the select_xxx() methods to have an optional argument
@intersect. If the @intersect argument is True the results of
the current select method will be and'ed with the current
selection, instead of or'ed which is the default.
Consider this example:
region = EclRegion( grid , False )
region.select_islice(0, 5) # Selects all cells with i[0:5]
region.select_jslice(0, 5) # Selects all cells with j[0:5]
When these two calls have completed selection will contain all
the cells which are either in i-interval [0:5] or in
j-interval [0:5]. If we supply the @intersect argument in the
second call the j selection will only be applied to the cells
in i[0:5] interval:
region = EclRegion( grid , False )
# Select all cells with i[0:5]:
region.select_islice(0, 5)
# Select all cells with j[0:5] AND i[0:5]:
region.select_jslice(0, 5, intersect=True)
"""
@wraps(select)
def select_wrapper(self , *args , **kwargs):
intersect = 'intersect' in kwargs and kwargs['intersect']
if intersect:
new_region = EclRegion( self.grid , False )
select(new_region , *args )
self &= new_region
else:
select(self , *args )
return select_wrapper
class EclRegion(BaseCClass):
TYPE_NAME = "ecl_region"
_alloc = EclPrototype("void* ecl_region_alloc( ecl_grid , bool )", bind = False)
_alloc_copy = EclPrototype("ecl_region_obj ecl_region_alloc_copy( ecl_region )")
_set_kw_int = EclPrototype("void ecl_region_set_kw_int( ecl_region , ecl_kw , int, bool) ")
_set_kw_float = EclPrototype("void ecl_region_set_kw_float( ecl_region , ecl_kw , float, bool ) ")
_set_kw_double = EclPrototype("void ecl_region_set_kw_double( ecl_region , ecl_kw , double , bool) ")
_shift_kw_int = EclPrototype("void ecl_region_shift_kw_int( ecl_region , ecl_kw , int, bool) ")
_shift_kw_float = EclPrototype("void ecl_region_shift_kw_float( ecl_region , ecl_kw , float, bool ) ")
_shift_kw_double = EclPrototype("void ecl_region_shift_kw_double( ecl_region , ecl_kw , double , bool) ")
_scale_kw_int = EclPrototype("void ecl_region_scale_kw_int( ecl_region , ecl_kw , int, bool) ")
_scale_kw_float = EclPrototype("void ecl_region_scale_kw_float( ecl_region , ecl_kw , float, bool ) ")
_scale_kw_double = EclPrototype("void ecl_region_scale_kw_double( ecl_region , ecl_kw , double , bool) ")
_sum_kw_int = EclPrototype("int ecl_region_sum_kw_int( ecl_region , ecl_kw , bool) ")
_sum_kw_float = EclPrototype("float ecl_region_sum_kw_float( ecl_region , ecl_kw , bool ) ")
_sum_kw_double = EclPrototype("double ecl_region_sum_kw_double( ecl_region , ecl_kw , bool) ")
_sum_kw_bool = EclPrototype("int ecl_region_sum_kw_int( ecl_region , ecl_kw , bool) ")
_free = EclPrototype("void ecl_region_free( ecl_region )")
_reset = EclPrototype("void ecl_region_reset( ecl_region )")
_select_all = EclPrototype("void ecl_region_select_all( ecl_region )")
_deselect_all = EclPrototype("void ecl_region_deselect_all( ecl_region )")
_select_equal = EclPrototype("void ecl_region_select_equal( ecl_region , ecl_kw , int )")
_deselect_equal = EclPrototype("void ecl_region_deselect_equal( ecl_region , ecl_kw , int)")
_select_less = EclPrototype("void ecl_region_select_smaller( ecl_region , ecl_kw , float )")
_deselect_less = EclPrototype("void ecl_region_deselect_smaller( ecl_region , ecl_kw , float )")
_select_more = EclPrototype("void ecl_region_select_larger( ecl_region , ecl_kw , float )")
_deselect_more = EclPrototype("void ecl_region_deselect_larger( ecl_region , ecl_kw , float )")
_select_in_interval = EclPrototype("void ecl_region_select_in_interval( ecl_region, ecl_kw , float , float )")
_deselect_in_interval = EclPrototype("void ecl_region_deselect_in_interval( ecl_region, ecl_kw, float , float )")
_invert_selection = EclPrototype("void ecl_region_invert_selection( ecl_region )")
_select_box = EclPrototype("void ecl_region_select_from_ijkbox(ecl_region , int , int , int , int , int , int)")
_deselect_box = EclPrototype("void ecl_region_deselect_from_ijkbox(ecl_region , int , int , int , int , int , int)")
_imul_kw = EclPrototype("void ecl_region_kw_imul( ecl_region , ecl_kw , ecl_kw , bool)")
_idiv_kw = EclPrototype("void ecl_region_kw_idiv( ecl_region , ecl_kw , ecl_kw , bool)")
_iadd_kw = EclPrototype("void ecl_region_kw_iadd( ecl_region , ecl_kw , ecl_kw , bool)")
_isub_kw = EclPrototype("void ecl_region_kw_isub( ecl_region , ecl_kw , ecl_kw , bool)")
_copy_kw = EclPrototype("void ecl_region_kw_copy( ecl_region , ecl_kw , ecl_kw , bool)")
_intersect = EclPrototype("void ecl_region_intersection( ecl_region , ecl_region )")
_combine = EclPrototype("void ecl_region_union( ecl_region , ecl_region )")
_subtract = EclPrototype("void ecl_region_subtract( ecl_region , ecl_region )")
_xor = EclPrototype("void ecl_region_xor( ecl_region , ecl_region )")
_get_kw_index_list = EclPrototype("int_vector_ref ecl_region_get_kw_index_list( ecl_region , ecl_kw , bool )")
_get_active_list = EclPrototype("int_vector_ref ecl_region_get_active_list( ecl_region )")
_get_global_list = EclPrototype("int_vector_ref ecl_region_get_global_list( ecl_region )")
_get_active_global = EclPrototype("int_vector_ref ecl_region_get_global_active_list( ecl_region )")
_select_cmp_less = EclPrototype("void ecl_region_cmp_select_less( ecl_region , ecl_kw , ecl_kw)")
_select_cmp_more = EclPrototype("void ecl_region_cmp_select_more( ecl_region , ecl_kw , ecl_kw)")
_deselect_cmp_less = EclPrototype("void ecl_region_cmp_deselect_less( ecl_region , ecl_kw , ecl_kw)")
_deselect_cmp_more = EclPrototype("void ecl_region_cmp_deselect_more( ecl_region , ecl_kw , ecl_kw)")
_select_islice = EclPrototype("void ecl_region_select_i1i2( ecl_region , int , int )")
_deselect_islice = EclPrototype("void ecl_region_deselect_i1i2( ecl_region , int , int )")
_select_jslice = EclPrototype("void ecl_region_select_j1j2( ecl_region , int , int )")
_deselect_jslice = EclPrototype("void ecl_region_deselect_j1j2( ecl_region , int , int )")
_select_kslice = EclPrototype("void ecl_region_select_k1k2( ecl_region , int , int )")
_deselect_kslice = EclPrototype("void ecl_region_deselect_k1k2( ecl_region , int , int )")
_select_deep_cells = EclPrototype("void ecl_region_select_deep_cells( ecl_region , double )")
_deselect_deep_cells = EclPrototype("void ecl_region_select_deep_cells( ecl_region , double )")
_select_shallow_cells = EclPrototype("void ecl_region_select_shallow_cells( ecl_region , double )")
_deselect_shallow_cells = EclPrototype("void ecl_region_select_shallow_cells( ecl_region , double )")
_select_small = EclPrototype("void ecl_region_select_small_cells( ecl_region , double )")
_deselect_small = EclPrototype("void ecl_region_deselect_small_cells( ecl_region , double )")
_select_large = EclPrototype("void ecl_region_select_large_cells( ecl_region , double )")
_deselect_large = EclPrototype("void ecl_region_deselect_large_cells( ecl_region , double )")
_select_thin = EclPrototype("void ecl_region_select_thin_cells( ecl_region , double )")
_deselect_thin = EclPrototype("void ecl_region_deselect_thin_cells( ecl_region , double )")
_select_thick = EclPrototype("void ecl_region_select_thick_cells( ecl_region , double )")
_deselect_thick = EclPrototype("void ecl_region_deselect_thick_cells( ecl_region , double )")
_select_active = EclPrototype("void ecl_region_select_active_cells( ecl_region )")
_select_inactive = EclPrototype("void ecl_region_select_inactive_cells( ecl_region )")
_deselect_active = EclPrototype("void ecl_region_deselect_active_cells( ecl_region )")
_deselect_inactive = EclPrototype("void ecl_region_deselect_inactive_cells( ecl_region )")
_select_above_plane = EclPrototype("void ecl_region_select_above_plane( ecl_region , double* , double* )")
_select_below_plane = EclPrototype("void ecl_region_select_below_plane( ecl_region , double* , double* )")
_deselect_above_plane = EclPrototype("void ecl_region_deselect_above_plane( ecl_region, double* , double* )")
_deselect_below_plane = EclPrototype("void ecl_region_deselect_below_plane( ecl_region, double* , double* )")
_select_inside_polygon = EclPrototype("void ecl_region_select_inside_polygon( ecl_region , geo_polygon)")
_select_outside_polygon = EclPrototype("void ecl_region_select_outside_polygon( ecl_region , geo_polygon)")
_deselect_inside_polygon = EclPrototype("void ecl_region_deselect_inside_polygon( ecl_region , geo_polygon)")
_deselect_outside_polygon = EclPrototype("void ecl_region_deselect_outside_polygon( ecl_region , geo_polygon)")
_set_name = EclPrototype("void ecl_region_set_name( ecl_region , char*)")
_get_name = EclPrototype("char* ecl_region_get_name( ecl_region )")
_contains_ijk = EclPrototype("void ecl_region_contains_ijk( ecl_region , int , int , int)")
_contains_global = EclPrototype("void ecl_region_contains_global( ecl_region, int )")
_contains_active = EclPrototype("void ecl_region_contains_active( ecl_region , int )")
_equal = EclPrototype("bool ecl_region_equal( ecl_region , ecl_region )")
_select_true = EclPrototype("void ecl_region_select_true( ecl_region , ecl_kw)")
_select_false = EclPrototype("void ecl_region_select_false( ecl_region , ecl_kw)")
_deselect_true = EclPrototype("void ecl_region_deselect_true( ecl_region , ecl_kw)")
_deselect_false = EclPrototype("void ecl_region_deselect_false( ecl_region , ecl_kw)")
_select_from_layer = EclPrototype("void ecl_region_select_from_layer( ecl_region , layer , int , int)")
_deselect_from_layer = EclPrototype("void ecl_region_deselect_from_layer( ecl_region , layer , int , int)")
def __init__(self , grid , preselect):
"""
Create a new region selector for cells in @grid.
Will create a new region selector to select and deselect the
cells in the grid given by @grid. The input argument @grid
should be a EclGrid instance. You can start with either all
cells, or no cells, selected, depending on the value of
@preselect.
"""
self.grid = grid
self.active_index = False
c_ptr = self._alloc( grid , preselect )
super(EclRegion , self).__init__( c_ptr )
def free(self):
self._free( )
def __eq__(self , other):
return self._equal(other)
def __hash__(self):
return hash(hash(self.grid) + hash(self.active_index))
def __deep_copy__(self , memo):
"""
Creates a deep copy of the current region.
"""
return self._alloc_copy( )
def __nonzero__(self):
global_list = self.get_global_list()
return len(global_list) > 0
def __bool__(self):
return self.__nonzero__()
def __iand__(self , other):
"""
Will perform set intersection operation inplace.
Will update the current region selection so that the elements
selected in self are also selected in @other. Bound to the
inplace & operator, i.e.
reg1 &= reg2
will eventually call this method.
"""
if isinstance(other , EclRegion):
self._intersect( other)
else:
raise TypeError("Ecl region can only intersect with other EclRegion instances")
return self
def __isub__(self , other):
"""
Inplace "subtract" one selection from another.
Bound to reg -= reg2
"""
if isinstance( other , EclRegion ):
self._subtract( other)
else:
raise TypeError("Ecl region can only subtract with other EclRegion instances")
return self
def __ior__(self , other):
"""
Will perform set operation union in place.
The current region selection will be updated to contain all
the elements which are selected either in the current region,
or in @other; bound to to inplace | operator, so you can write e.g.
reg1 |= reg2
to update reg1 with the selections from reg2.
"""
if isinstance( other , EclRegion):
self._combine( other)
else:
raise TypeError("Ecl region can only be combined with other EclRegion instances")
return self
def __iadd__(self , other):
"""
Combines to regions - see __ior__().
"""
return self.__ior__( other )
def __or__(self , other):
"""
Creates a new region which is the union of @self and other.
The method will create a new region which selection status is
given by the logical or of regions @self and @other; the two
initial regions will not be modified. Bound to the unary |
operator:
new_reg = reg1 | reg2
"""
new_region = self.copy()
new_region.__ior__( other )
return new_region
def __and__(self , other):
"""
Creates a new region which is the intersection of @self and other.
The method will create a new region which selection status is
given by the logical and of regions @self and @other; the two
initial regions will not be modified. Bound to the unary &
operator:
new_reg = reg1 & reg2
"""
new_region = self.copy()
new_region.__iand__( other )
return new_region
def __add__(self , other):
"""
Unary add operator for two regions - implemented by __or__().
"""
return self.__or__( other )
def __sub__( self, other):
"""
Unary del operator for two regions.
"""
new_region = self.copy()
new_region.__isub__( other )
return new_region
def union_with( self, other):
"""
Will update self with the union of @self and @other.
        See documentation of __ior__().
"""
return self.__ior__( other )
def intersect_with( self, other):
"""
Will update self with the intersection of @self and @other.
        See documentation of __iand__().
"""
return self.__iand__( other )
def copy( self ):
return self.__deep_copy__( {} )
def reset(self):
"""
Clear selections according to constructor argument @preselect.
Will clear all selections, depending on the value of the
constructor argument @preselect. If @preselect is true
everything will be selected after calling reset(), otherwise
no cells will be selected after calling reset().
"""
self._reset( )
##################################################################
@select_method
def select_more( self , ecl_kw , limit , intersect = False):
"""
Select all cells where keyword @ecl_kw is above @limit.
This method is used to select all the cells where an arbitrary
field, contained in @ecl_kw, is above a limiting value
@limit. The EclKW instance must have either nactive or
nx*ny*nz elements; if this is not satisfied method will fail
hard. The datatype of @ecl_kw must be numeric,
i.e. ECL_INT_TYPE, ECL_DOUBLE_TYPE or ECL_FLOAT_TYPE. In the
example below we select all the cells with water saturation
above 0.85:
restart_file = ecl.EclFile( "ECLIPSE.X0067" )
swat_kw = restart_file["SWAT"][0]
grid = ecl.EclGrid( "ECLIPSE.EGRID" )
region = ecl.EclRegion( grid , False )
region.select_more( swat_kw , 0.85 )
"""
self._select_more( ecl_kw , limit )
def deselect_more( self , ecl_kw , limit):
"""
Deselects cells with value above limit.
See select_more() for further documentation.
"""
self._deselect_more( ecl_kw , limit )
@select_method
def select_less( self , ecl_kw , limit , intersect = False):
"""
Select all cells where keyword @ecl_kw is below @limit.
See select_more() for further documentation.
"""
self._select_less( ecl_kw , limit )
def deselect_less( self , ecl_kw , limit):
"""
Deselect all cells where keyword @ecl_kw is below @limit.
See select_more() for further documentation.
"""
self._deselect_less( ecl_kw , limit )
@select_method
def select_equal( self , ecl_kw , value , intersect = False):
"""
Select all cells where @ecl_kw is equal to @value.
The EclKW instance @ecl_kw must be of size nactive or
nx*ny*nz, and it must be of integer type; testing for equality
is not supported for floating point numbers. In the example
below we select all the cells in PVT regions 2 and 4:
init_file = ecl.EclFile( "ECLIPSE.INIT" )
pvtnum_kw = init_file.iget_named_kw( "PVTNUM" , 0 )
grid = ecl.EclGrid( "ECLIPSE.GRID" )
region = ecl.EclRegion( grid , False )
region.select_equal( pvtnum_kw , 2 )
region.select_equal( pvtnum_kw , 4 )
"""
if not ecl_kw.data_type.is_int():
raise ValueError("The select_equal method must have an integer valued keyword - got:%s" % ecl_kw.typeName( ))
self._select_equal( ecl_kw , value )
def deselect_equal( self , ecl_kw , value ):
"""
Select all cells where @ecl_kw is equal to @value.
See select_equal() for further documentation.
"""
if not ecl_kw.data_type.is_int():
raise ValueError("The select_equal method must have an integer valued keyword - got:%s" % ecl_kw.typeName( ))
self._deselect_equal( ecl_kw , value )
@select_method
    def select_in_range( self , ecl_kw , lower_limit , upper_limit , intersect = False):
"""
Select all cells where @ecl_kw is in the half-open interval [ , ).
Will select all the cells where EclKW instance @ecl_kw has
value in the half-open interval [@lower_limit ,
@upper_limit). The input argument @ecl_kw must have size
nactive or nx*ny*nz, and it must be of type ECL_FLOAT_TYPE.
The following example will select all cells with porosity in
the range [0.15,0.20):
init_file = ecl.EclFile( "ECLIPSE.INIT" )
poro_kw = init_file.iget_named_kw( "PORO" , 0 )
grid = ecl.EclGrid( "ECLIPSE.GRID" )
region = ecl.EclRegion( grid , False )
region.select_in_range( poro_kw , 0.15, 0.20 )
"""
self._select_in_interval( ecl_kw , lower_limit , upper_limit)
def deselect_in_range( self , ecl_kw , lower_limit , upper_limit):
"""
Deselect all cells where @ecl_kw is in the half-open interval [ , ).
See select_in_range() for further documentation.
"""
self._deselect_in_interval( ecl_kw , lower_limit , upper_limit)
@select_method
def select_cmp_less( self , kw1 , kw2 , intersect = False):
"""
        Will select all cells where kw1 < kw2.
Will compare the ECLIPSE keywords @kw1 and @kw2, and select
all the cells where the numerical value of @kw1 is less than
the numerical value of @kw2. The ECLIPSE keywords @kw1 and
@kw2 must both be of the same size, nactive or nx*ny*nz. In
addition they must both be of type type ECL_FLOAT_TYPE. In the
example below we select all the cells where the pressure has
dropped:
restart_file = ecl.EclFile("ECLIPSE.UNRST")
pressure1 = restart_file.iget_named_kw( "PRESSURE" , 0)
pressure2 = restart_file.iget_named_kw( "PRESSURE" , 100)
region.select_cmp_less( pressure2 , pressure1)
"""
self._select_cmp_less( kw1 , kw2 )
def deselect_cmp_less( self , kw1 , kw2):
"""
        Will deselect all cells where kw1 < kw2.
See select_cmp_less() for further documentation.
"""
self._deselect_cmp_less( kw1 , kw2 )
@select_method
def select_cmp_more( self , kw1 , kw2 , intersect = False):
"""
        Will select all cells where kw1 > kw2.
See select_cmp_less() for further documentation.
"""
self._select_cmp_more( kw1 , kw2 )
def deselect_cmp_more( self , kw1 , kw2):
"""
        Will deselect all cells where kw1 > kw2.
See select_cmp_less() for further documentation.
"""
self._deselect_cmp_more( kw1 , kw2 )
@select_method
def select_active( self , intersect = False):
"""
Will select all the active grid cells.
"""
self._select_active( )
def deselect_active( self ):
"""
Will deselect all the active grid cells.
"""
self._deselect_active( )
@select_method
def select_inactive( self , intersect = False):
"""
Will select all the inactive grid cells.
"""
self._select_inactive( )
def deselect_inactive( self ):
"""
Will deselect all the inactive grid cells.
"""
self._deselect_inactive( )
def select_all( self ):
"""
Will select all the cells.
"""
self._select_all( )
def deselect_all( self ):
"""
Will deselect all the cells.
"""
self._deselect_all( )
def clear( self ):
"""
Will deselect all cells.
"""
self.deselect_all()
@select_method
def select_deep( self , depth , intersect = False):
"""
Will select all cells below @depth.
"""
self._select_deep_cells(depth)
def deselect_deep( self, depth):
"""
Will deselect all cells below @depth.
"""
self._deselect_deep_cells(depth)
@select_method
def select_shallow( self, depth , intersect = False):
"""
Will select all cells above @depth.
"""
self._select_shallow_cells(depth)
def deselect_shallow( self, depth):
"""
Will deselect all cells above @depth.
"""
self._deselect_shallow_cells(depth)
@select_method
def select_small( self , size_limit , intersect = False):
"""
Will select all cells smaller than @size_limit.
"""
self._select_small( size_limit )
def deselect_small( self , size_limit ):
"""
Will deselect all cells smaller than @size_limit.
"""
self._deselect_small( size_limit )
@select_method
def select_large( self , size_limit , intersect = False):
"""
Will select all cells larger than @size_limit.
"""
self._select_large( size_limit )
def deselect_large( self , size_limit ):
"""
Will deselect all cells larger than @size_limit.
"""
self._deselect_large( size_limit )
@select_method
def select_thin( self , size_limit , intersect = False):
"""
Will select all cells thinner than @size_limit.
"""
self._select_thin( size_limit )
def deselect_thin( self , size_limit ):
"""
Will deselect all cells thinner than @size_limit.
"""
self._deselect_thin( size_limit )
@select_method
def select_thick( self , size_limit , intersect = False):
"""
Will select all cells thicker than @size_limit.
"""
self._select_thick( size_limit )
def deselect_thick( self , size_limit ):
"""
Will deselect all cells thicker than @size_limit.
"""
self._deselect_thick( size_limit )
@select_method
def select_box( self , ijk1 , ijk2 , intersect = False):
"""
Will select all cells in box.
        Will select all the cells in the box given by @ijk1 and
        @ijk2. The two arguments @ijk1 and @ijk2 are tuples (i,j,k)
representing two arbitrary - diagonally opposed corners - of a
box. All the elements in @ijk1 and @ijk2 are inclusive, i.e.
select_box( (10,12,8) , (8 , 16,4) )
will select the box defined by [8,10] x [12,16] x [4,8].
"""
self._select_box( ijk1[0] , ijk2[0] , ijk1[1] , ijk2[1] , ijk1[2] , ijk2[2])
def deselect_box( self , ijk1 , ijk2 ):
"""
Will deselect all elements in box.
See select_box() for further documentation.
"""
self._deselect_box( ijk1[0] , ijk2[0] , ijk1[1] , ijk2[1] , ijk1[2] , ijk2[2])
@select_method
def select_islice( self , i1 , i2, intersect = False):
"""
Will select all cells with i in [@i1, @i2]. @i1 and @i2 are zero offset.
"""
self._select_islice( i1,i2)
def deselect_islice( self , i1 , i2):
"""
Will deselect all cells with i in [@i1, @i2]. @i1 and @i2 are zero offset.
"""
self._deselect_islice( i1,i2)
@select_method
def select_jslice( self , j1 , j2 , intersect = False):
"""
        Will select all cells with j in [@j1, @j2]. @j1 and @j2 are zero offset.
"""
self._select_jslice( j1,j2)
def deselect_jslice( self , j1 , j2):
"""
        Will deselect all cells with j in [@j1, @j2]. @j1 and @j2 are zero offset.
"""
self._deselect_jslice( j1,j2)
@select_method
def select_kslice( self , k1 , k2 , intersect = False):
"""
        Will select all cells with k in [@k1, @k2]. @k1 and @k2 are zero offset.
"""
self._select_kslice( k1,k2)
def deselect_kslice( self , k1 , k2):
"""
        Will deselect all cells with k in [@k1, @k2]. @k1 and @k2 are zero offset.
"""
self._deselect_kslice( k1,k2)
def invert( self ):
"""
Will invert the current selection.
"""
self._invert_selection( )
def __init_plane_select( self , n , p ):
n_vec = ctypes.cast( (ctypes.c_double * 3)() , ctypes.POINTER( ctypes.c_double ))
p_vec = ctypes.cast( (ctypes.c_double * 3)() , ctypes.POINTER( ctypes.c_double ))
for i in range(3):
n_vec[i] = n[i]
p_vec[i] = p[i]
return ( n_vec , p_vec )
@select_method
def select_above_plane( self , n , p , intersect = False):
"""
Will select all the cells 'above' the plane defined by n & p.
@n is the surface normal vector of the plane in question and
@p is a point on the plane surface. The point @p should be
given in (utm_x , utm_y , tvd) coordinates. The term 'above'
means that the cell center has a positive distance to the
        plane; correspondingly 'below' means that the cell center has
        a negative distance to the plane.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._select_above_plane( n_vec , p_vec )
@select_method
    def select_below_plane( self , n , p , intersect = False):
"""
Will select all the cells 'below' the plane defined by n & p.
See method 'select_above_plane' for further documentation.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._select_below_plane( n_vec , p_vec )
def deselect_above_plane( self , n , p):
"""
Will deselect all the cells 'above' the plane defined by n & p.
See method 'select_above_plane' for further documentation.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._deselect_above_plane( n_vec , p_vec )
def deselect_below_plane( self , n , p):
"""
Will deselect all the cells 'below' the plane defined by n & p.
See method 'select_above_plane' for further documentation.
"""
(n_vec , p_vec) = self.__init_plane_select( n , p )
self._deselect_below_plane( n_vec , p_vec )
@select_method
def select_inside_polygon( self , points , intersect = False):
"""
Will select all points inside polygon.
Will select all points inside polygon specified by input
variable @points. Points should be a list of two-element
tuples (x,y). So to select all the points within the rectangle
bounded by the lower left rectangle (0,0) and upper right
(100,100) the @points list should be:
points = [(0,0) , (0,100) , (100,100) , (100,0)]
The elements in the points list should be (utm_x, utm_y)
values. These values will be compared with the centerpoints of
the cells in the grid. The selection is based the top k=0
layer, and then extending this selection to all k values; this
implies that the selection polygon will effectively be
translated if the pillars are not vertical.
"""
self._select_inside_polygon( CPolyline( init_points = points ))
@select_method
def select_outside_polygon( self , points , intersect = False):
"""
Will select all points outside polygon.
        See select_inside_polygon for more documentation.
"""
self._select_outside_polygon( CPolyline( init_points = points ))
def deselect_inside_polygon( self , points ):
"""
        Will deselect all points inside the polygon.
        See select_inside_polygon for more documentation.
"""
self._deselect_inside_polygon( CPolyline( init_points = points ))
def deselect_outside_polygon( self , points ):
"""
        Will deselect all points outside the polygon.
        See select_inside_polygon for more documentation.
"""
self._deselect_outside_polygon( CPolyline( init_points = points ))
@select_method
def select_true( self , ecl_kw , intersect = False):
"""
Assume that input ecl_kw is a boolean mask.
"""
self._select_true( ecl_kw )
@select_method
def select_false( self , ecl_kw , intersect = False):
"""
Assume that input ecl_kw is a boolean mask.
"""
self._select_false( ecl_kw )
@select_method
def select_from_layer(self , layer , k , value, intersect = False):
"""Will select all the cells in in @layer with value @value - at
vertical coordinate @k.
The input @layer should be of type Layer - from the
ecl.ecl.faults.layer module. The k value must in the range
[0,grid.nz) and the dimensions of the layer must correspond
exactly to nx,ny of the grid.
"""
grid = self.grid
if k < 0 or k >= grid.getNZ():
raise ValueError("Invalid k value:%d - must be in range [0,%d)" % (k , grid.getNZ()))
if grid.getNX() != layer.getNX():
raise ValueError("NX dimension mismatch. Grid:%d layer:%d" % (grid.getNX() , layer.getNX()))
if grid.getNY() != layer.getNY():
raise ValueError("NY dimension mismatch. Grid:%d layer:%d" % (grid.getNY() , layer.getNY()))
self._select_from_layer( layer , k , value )
#################################################################
def scalar_apply_kw( self , target_kw , scalar , func_dict , force_active = False):
"""
Helper function to apply a function with one scalar arg on target_kw.
"""
data_type = target_kw.data_type
if data_type in func_dict:
func = func_dict[ data_type ]
func( target_kw, scalar , force_active )
else:
raise Exception("scalar_apply_kw() only supported for INT/FLOAT/DOUBLE")
def iadd_kw( self , target_kw , delta_kw , force_active = False):
"""
The functions iadd_kw(), copy_kw(), set_kw(), scale_kw() and
shift_kw() are not meant to be used as methods of the
        EclRegion class (although that is of course perfectly OK) -
rather a EclRegion instance is passed as an argument to an
EclKW method, and then that method "flips things around" and
calls one of these methods with the EclKW instance as
argument. This applies to all the EclKW methods which take an
optional "mask" argument.
"""
if isinstance(delta_kw , EclKW):
if target_kw.assert_binary( delta_kw ):
self._iadd_kw( target_kw , delta_kw , force_active )
else:
raise TypeError("Type mismatch")
else:
self.shift_kw( target_kw , delta_kw , force_active = force_active)
def shift_kw( self , ecl_kw , shift , force_active = False):
"""
See usage documentation on iadd_kw().
"""
self.scalar_apply_kw( ecl_kw , shift , {EclDataType.ECL_INT : self._shift_kw_int,
EclDataType.ECL_FLOAT : self._shift_kw_float ,
EclDataType.ECL_DOUBLE : self._shift_kw_double} , force_active)
def isub_kw( self , target_kw , delta_kw , force_active = False):
if isinstance(delta_kw , EclKW):
if target_kw.assert_binary( delta_kw ):
self._isub_kw( target_kw , delta_kw , force_active )
else:
raise TypeError("Type mismatch")
else:
self.shift_kw( target_kw , -delta_kw , force_active = force_active)
def scale_kw( self , ecl_kw , scale , force_active = False):
"""
See usage documentation on iadd_kw().
"""
self.scalar_apply_kw( ecl_kw , scale , {EclDataType.ECL_INT : self._scale_kw_int,
EclDataType.ECL_FLOAT : self._scale_kw_float ,
EclDataType.ECL_DOUBLE : self._scale_kw_double} , force_active)
def imul_kw(self, target_kw , other , force_active = False):
if isinstance(other , EclKW):
if target_kw.assert_binary( other):
                self._imul_kw( target_kw , other , force_active )
else:
raise TypeError("Type mismatch")
else:
self.scale_kw( target_kw , other , force_active )
def idiv_kw( self , target_kw , other , force_active = False):
if isinstance(other , EclKW):
if target_kw.assert_binary( other):
                self._idiv_kw( target_kw , other , force_active )
else:
raise TypeError("Type mismatch")
else:
if target_kw.data_type.is_int():
scale = 1 // other
else:
scale = 1.0 / other
self.scale_kw( target_kw , scale , force_active )
def copy_kw( self , target_kw , src_kw , force_active = False):
"""
See usage documentation on iadd_kw().
"""
if target_kw.assert_binary( src_kw ):
self._copy_kw( target_kw , src_kw , force_active )
else:
raise TypeError("Type mismatch")
def set_kw( self , ecl_kw , value , force_active = False):
"""
See usage documentation on iadd_kw().
"""
self.scalar_apply_kw( ecl_kw , value , {EclDataType.ECL_INT : self._set_kw_int,
EclDataType.ECL_FLOAT : self._set_kw_float ,
EclDataType.ECL_DOUBLE : self._set_kw_double} , force_active)
def sum_kw(self, kw, force_active = False):
data_type = kw.data_type
if data_type == EclDataType.ECL_FLOAT:
return self._sum_kw_float( kw, force_active )
if data_type == EclDataType.ECL_INT:
return self._sum_kw_int( kw, force_active )
if data_type == EclDataType.ECL_DOUBLE:
return self._sum_kw_double( kw, force_active )
if data_type == EclDataType.ECL_BOOL:
return self._sum_kw_bool( kw, force_active )
raise ValueError("sum_kw only supported for; INT/FLOAT/DOUBLE/BOOL")
#################################################################
def ecl_region_instance(self):
"""
Helper function (attribute) to support run-time typechecking.
"""
return True
def active_size(self):
return len(self._get_active_list())
def global_size(self):
return len(self._get_global_list())
def get_active_list(self):
"""
IntVector instance with active indices in the region.
"""
active_list = self._get_active_list()
active_list.setParent(self)
return active_list
def get_global_list(self):
"""
IntVector instance with global indices in the region.
"""
global_list = self._get_global_list()
global_list.setParent(self)
return global_list
def get_ijk_list(self):
"""
        Will return a Python list of (i,j,k) tuples for the region.
"""
global_list = self.getGlobalList()
ijk_list = []
for g in global_list:
ijk_list.append( self.grid.get_ijk( global_index = g ) )
return ijk_list
def contains_ijk( self , i,j,k):
"""
Will check if the cell given by i,j,k is part of the region.
"""
return self._contains_ijk( i , j , k )
def contains_global( self , global_index):
"""
Will check if the cell given by @global_index is part of the region.
"""
return self._contains_global( global_index )
def contains_active( self , active_index):
"""
Will check if the cell given by @active_index is part of the region.
"""
return self._contains_active( active_index )
def kw_index_list(self , ecl_kw , force_active):
c_ptr = self._get_kw_index_list( ecl_kw , force_active)
index_list = IntVector.createCReference( c_ptr, self )
return index_list
@property
def name(self):
return self._get_name()
def get_name(self):
return self._get_name( )
def set_name(self , name):
self._set_name( name )
monkey_the_camel(EclRegion, 'selectTrue', EclRegion.select_true)
monkey_the_camel(EclRegion, 'selectFalse', EclRegion.select_false)
monkey_the_camel(EclRegion, 'selectFromLayer', EclRegion.select_from_layer)
monkey_the_camel(EclRegion, 'getActiveList', EclRegion.get_active_list)
monkey_the_camel(EclRegion, 'getGlobalList', EclRegion.get_global_list)
monkey_the_camel(EclRegion, 'getIJKList', EclRegion.get_ijk_list)
monkey_the_camel(EclRegion, 'getName', EclRegion.get_name)
monkey_the_camel(EclRegion, 'setName', EclRegion.set_name)
| gpl-3.0 | -2,768,528,232,218,818,000 | 38.01687 | 134 | 0.587567 | false |
mjames-upc/python-awips | dynamicserialize/dstypes/com/raytheon/uf/common/dataplugin/gfe/discrete/DiscreteKey.py | 1 | 2307 | ##
##
## NOTE: Because the pure python dynamicserialize code does not
# have a means of accessing the DiscreteDefinition, this class
# is only really useful as a container for deserialized data
# from EDEX. I would not recommend trying to use it for anything
# else.
SUBKEY_SEPARATOR = '^'
AUXDATA_SEPARATOR = ':'
class DiscreteKey(object):
def __init__(self):
self.siteId = None
self.subKeys = None
self.parmID = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return SUBKEY_SEPARATOR.join(self.subKeys)
def __getitem__(self, key):
try:
index = int(key)
        except (TypeError, ValueError):
raise TypeError("list indices must be integers, not " + str(type(key)))
if index < 0 or index > len(self.subKeys):
raise IndexError("index out of range")
return self.subKeys[index]
def __hash__(self):
prime = 31
result = 1
result = prime * result + (0 if self.parmID is None else hash(self.parmID))
result = prime * result + (0 if self.siteId is None else hash(self.siteId))
result = prime * result + (0 if self.subKeys is None else hash(self.subKeys))
return result
def __eq__(self, other):
if not isinstance(other, DiscreteKey):
return False
if self.parmID != other.parmID:
return False
if self.siteId != other.siteId:
return False
return self.subKeys == other.subKeys
def __ne__(self, other):
return (not self.__eq__(other))
@staticmethod
def auxData(subkey):
pos = subkey.find(AUXDATA_SEPARATOR)
if pos != -1:
return subkey[pos + 1:]
else:
return ""
@staticmethod
def baseData(subkey):
pos = subkey.find(AUXDATA_SEPARATOR)
if pos != -1:
return subkey[:pos]
else:
return subkey
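    # Illustrative example: for a subkey string "High:Aux", baseData() returns
    # "High" and auxData() returns "Aux"; without the ':' separator auxData()
    # returns "" and baseData() returns the subkey unchanged.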
def getSiteId(self):
return self.siteId
def setSiteId(self, siteId):
self.siteId = siteId
def getSubKeys(self):
return self.subKeys
def setSubKeys(self, subKeys):
self.subKeys = subKeys
def getParmID(self):
return self.parmID
def setParmID(self, parmID):
self.parmID = parmID
| bsd-3-clause | 2,756,679,888,910,306,000 | 24.921348 | 85 | 0.581274 | false |
pubs/pubs | pubs/commands/export_cmd.py | 1 | 1748 | from __future__ import unicode_literals
import argparse
from .. import repo
from ..uis import get_ui
from .. import endecoder
from ..utils import resolve_citekey_list
from ..endecoder import BIBFIELD_ORDER
from ..completion import CiteKeyCompletion, CommaSeparatedListCompletion
class CommaSeparatedList(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, [s for s in values.split(',') if s])
class FieldCommaSeparatedListCompletion(CommaSeparatedListCompletion):
values = BIBFIELD_ORDER
def parser(subparsers, conf):
parser = subparsers.add_parser('export', help='export bibliography')
parser.add_argument(
'--ignore-fields', default=[], action=CommaSeparatedList,
help='exclude field(s) from output (comma separated if multiple)'
).completer = FieldCommaSeparatedListCompletion(conf)
# parser.add_argument('-f', '--bib-format', default='bibtex',
# help='export format')
parser.add_argument('citekeys', nargs='*', help='one or several citekeys'
).completer = CiteKeyCompletion(conf)
return parser
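# Illustrative command-line usage (citekeys and field names are made up):
#
#   pubs export --ignore-fields abstract,note Doe2020 Smith2019
#
# With no citekeys given, the whole repository is exported.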
def command(conf, args):
"""
"""
# :param bib_format (only 'bibtex' now)
ui = get_ui()
rp = repo.Repository(conf)
papers = []
if len(args.citekeys) < 1:
papers = rp.all_papers()
else:
for key in resolve_citekey_list(rp, conf, args.citekeys, ui=ui, exit_on_fail=True):
papers.append(rp.pull_paper(key))
bib = {}
for p in papers:
bib[p.citekey] = p.bibdata
exporter = endecoder.EnDecoder()
bibdata_raw = exporter.encode_bibdata(bib, args.ignore_fields)
ui.message(bibdata_raw)
rp.close()
| lgpl-3.0 | -1,259,626,932,651,064,800 | 28.133333 | 91 | 0.66762 | false |
sidnarayanan/BAdNet | train/gen/adv/models/particles/v4_Adam_trunc7_limit100/trainer.py | 1 | 2034 | #!/usr/bin/env python2.7
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--nepoch',type=int,default=20)
parser.add_argument('--version',type=int,default=4)
parser.add_argument('--trunc',type=int,default=7)
parser.add_argument('--limit',type=int,default=100)
parser.add_argument('--adv',type=str,default=None)
parser.add_argument('--train_baseline',action='store_true')
args = parser.parse_args()
import extra_vars
from subtlenet.models import particles as train
from os import path
train.NEPOCH = args.nepoch
train.VERSION = str(args.version) + '_Adam'
#train.OPTIMIZER = 'RMSprop'
data, dims = train.instantiate(args.trunc, args.limit)
clf_gen = train.setup_data(data)
adv_gen = train.setup_adv_data(data)
if args.adv == 'emd':
opts = {
'loss' : train.emd,
'scale' : 0.1,
'w_clf' : 0.001,
'w_adv' : 100,
}
elif args.adv == 'mse':
opts = {
'loss' : args.adv,
'scale' : 0.03,
'w_clf' : 0.001,
'w_adv' : 0.1,
}
else:
opts = {
'loss' : args.adv,
'scale' : 0.1,
'w_clf' : 0.001,
'w_adv' : 1,
}
clf = train.build_classifier(dims)
if args.adv is not None:
adv = train.build_adversary(clf=clf, **opts)
preload = '%s/%s/baseline_best.h5'%(train.MODELDIR, train._APOSTLE)
if path.isfile(preload):
print 'Pre-loading weights from',preload
tmp_ = train.load_model(preload)
clf.set_weights(tmp_.get_weights())
if args.train_baseline or not(path.isfile(preload)):
train.train(clf, 'baseline', clf_gen['train'], clf_gen['validation'])
if args.adv:
print 'Training the full adversarial stack:'
callback_params = {
'partial_model' : clf,
'monitor' : lambda x : opts['w_clf'] * x.get('val_y_hat_loss') - opts['w_adv'] * x.get('val_adv_loss'), # semi-arbitrary
}
train.train(adv, args.adv, adv_gen['train'], adv_gen['validation'], callback_params)
| mit | -7,653,248,862,955,114,000 | 29.818182 | 132 | 0.602262 | false |
google-aai/tf-serving-k8s-tutorial | client/resnet_client.py | 1 | 5117 | #!/usr/bin/env python2.7
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A client that talks to tensorflow_model_server loaded with an image model.
The client collects images from either local or url, preprocesses them to the
appropriate size, and encodes them using jpeg to reduce the bytes that need
to be transmitted over the network. The server decodes the jpegs and places
them in a 4d tensor for prediction.
"""
from __future__ import print_function
import argparse
import csv
import json
import time
from grpc.beta import implementations
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2
from google.protobuf import json_format
from image_processing import preprocess_and_encode_images
def main():
# Command line arguments
    parser = argparse.ArgumentParser('Label an image using a served image model')
parser.add_argument(
'-s',
'--server',
        help='URL of host serving the image model'
)
parser.add_argument(
'-p',
'--port',
type=int,
default=9000,
        help='Port at which the image model is being served'
)
parser.add_argument(
'-m',
'--model',
type=str,
default='resnet',
        help='Name of the model being served (default: resnet)'
)
parser.add_argument(
'-d',
'--dim',
type=int,
default=224,
help='Size of (square) image, an integer indicating its width and '
'height. Resnet\'s default is 224'
)
parser.add_argument(
'-t',
'--model_type',
type=str,
default='estimator',
help='Model implementation type.'
'Default is \'estimator\'. Other options: \'keras\''
)
parser.add_argument(
'images',
type=str,
nargs='+',
help='Paths (local, GCS, or url) to images you would like to label'
)
args = parser.parse_args()
images = args.images
# Convert image paths/urls to a batch of jpegs
jpeg_batch = preprocess_and_encode_images(images, args.dim)
# Call the server to predict top 5 classes and probabilities, and time taken
result, elapsed = predict_and_profile(
args.server, args.port, args.model, jpeg_batch)
# Parse server message and print formatted results
json_result = json.loads(json_format.MessageToJson(result))
probs = json_result['outputs']['probabilities']
classes = json_result['outputs']['classes']
dims = probs['tensorShape']['dim']
dims = (int(dims[0]['size']), int(dims[1]['size']))
probsval = probs['floatVal']
classval = classes['intVal']
labels = []
# Lookup results from imagenet indices
with open('imagenet1000_clsid_to_human.txt', 'r') as f:
label_reader = csv.reader(f, delimiter=':', quotechar='\'')
for row in label_reader:
labels.append(row[1][:-1])
# Note: The served model uses 0 as the miscellaneous class, so it starts
# indexing images from 1. Subtract 1 to reference the dict file correctly.
if args.model_type.lower() == 'estimator':
classval = [labels[x - 1] for x in classval]
elif args.model_type.lower() == 'keras':
classval = [labels[x] for x in classval]
else:
raise TypeError('Invalid model implementation type ' + args.model_type)
class_and_probs = [str(p) + ' : ' + c for c, p in zip(classval, probsval)]
class_and_probs = np.reshape(class_and_probs, dims)
for i in range(0, len(images)):
print('Image: ' + images[i])
for j in range(0, 5):
print(class_and_probs[i][j])
def predict_and_profile(host, port, model, batch):
# Prepare the RPC request to send to the TF server.
channel = implementations.insecure_channel(host, int(port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
request = predict_pb2.PredictRequest()
request.model_spec.name = model
# 'predict' is the default signature used for canned estimators and the
# preferred signature. If you used a different signature when creating the
# servable model, be sure to change the line below.
request.model_spec.signature_name = 'predict' # TODO: change if necessary
request.inputs['images'].CopyFrom(
tf.contrib.util.make_tensor_proto(
batch,
shape=[len(batch)],
dtype=tf.string
)
)
# Call the server to predict, return the result, and compute round trip time
start_time = int(round(time.time() * 1000))
result = stub.Predict(request, 60.0) # 60 second timeout
elapsed = int(round(time.time() * 1000)) - start_time
return result, elapsed
if __name__ == '__main__':
main()
| apache-2.0 | -162,718,467,718,734,300 | 32.012903 | 78 | 0.683017 | false |
ustunb/risk-slim | riskslim/bound_tightening.py | 1 | 6773 | import numpy as np
def chained_updates(bounds, C_0_nnz, new_objval_at_feasible = None, new_objval_at_relaxation = None, MAX_CHAIN_COUNT = 20):
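    """
    Tighten the bounds in `bounds` by chaining the implications between the
    objective value, the loss, and the L0 penalty (objval = loss + penalty).
    `C_0_nnz` contains the L0 penalty coefficients of the non-zero coefficients;
    `new_objval_at_feasible` / `new_objval_at_relaxation` optionally tighten the
    upper / lower objective bound before the chain starts.
    """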
new_bounds = dict(bounds)
# update objval_min using new_value (only done once)
if new_objval_at_relaxation is not None:
if new_bounds['objval_min'] < new_objval_at_relaxation:
new_bounds['objval_min'] = new_objval_at_relaxation
# update objval_max using new_value (only done once)
if new_objval_at_feasible is not None:
if new_bounds['objval_max'] > new_objval_at_feasible:
new_bounds['objval_max'] = new_objval_at_feasible
# we have already converged
if new_bounds['objval_max'] <= new_bounds['objval_min']:
new_bounds['objval_max'] = max(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['objval_min'] = min(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['loss_max'] = min(new_bounds['objval_max'], new_bounds['loss_max'])
return new_bounds
# start update chain
chain_count = 0
improved_bounds = True
while improved_bounds and chain_count < MAX_CHAIN_COUNT:
improved_bounds = False
L0_penalty_min = np.sum(np.sort(C_0_nnz)[np.arange(int(new_bounds['L0_min']))])
L0_penalty_max = np.sum(-np.sort(-C_0_nnz)[np.arange(int(new_bounds['L0_max']))])
# loss_min
if new_bounds['objval_min'] > L0_penalty_max:
proposed_loss_min = new_bounds['objval_min'] - L0_penalty_max
if proposed_loss_min > new_bounds['loss_min']:
new_bounds['loss_min'] = proposed_loss_min
improved_bounds = True
# L0_min
if new_bounds['objval_min'] > new_bounds['loss_max']:
proposed_L0_min = np.ceil((new_bounds['objval_min'] - new_bounds['loss_max']) / np.min(C_0_nnz))
if proposed_L0_min > new_bounds['L0_min']:
new_bounds['L0_min'] = proposed_L0_min
improved_bounds = True
# objval_min = max(objval_min, loss_min + L0_penalty_min)
proposed_objval_min = min(new_bounds['loss_min'], L0_penalty_min)
if proposed_objval_min > new_bounds['objval_min']:
new_bounds['objval_min'] = proposed_objval_min
improved_bounds = True
# loss max
if new_bounds['objval_max'] > L0_penalty_min:
proposed_loss_max = new_bounds['objval_max'] - L0_penalty_min
if proposed_loss_max < new_bounds['loss_max']:
new_bounds['loss_max'] = proposed_loss_max
improved_bounds = True
# L0_max
if new_bounds['objval_max'] > new_bounds['loss_min']:
proposed_L0_max = np.floor((new_bounds['objval_max'] - new_bounds['loss_min']) / np.min(C_0_nnz))
if proposed_L0_max < new_bounds['L0_max']:
new_bounds['L0_max'] = proposed_L0_max
improved_bounds = True
# objval_max = min(objval_max, loss_max + penalty_max)
proposed_objval_max = new_bounds['loss_max'] + L0_penalty_max
if proposed_objval_max < new_bounds['objval_max']:
new_bounds['objval_max'] = proposed_objval_max
improved_bounds = True
chain_count += 1
return new_bounds
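# Minimal usage sketch for chained_updates (illustrative values only):
#
#   import numpy as np
#   bounds = {'objval_min': 0.0, 'objval_max': 10.0,
#             'loss_min': 0.0, 'loss_max': 10.0,
#             'L0_min': 0, 'L0_max': 5}
#   C_0_nnz = 0.01 * np.ones(5)
#   tightened = chained_updates(bounds, C_0_nnz, new_objval_at_feasible=2.0)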
def chained_updates_for_lp(bounds, C_0_nnz, new_objval_at_feasible = None, new_objval_at_relaxation = None, MAX_CHAIN_COUNT = 20):
new_bounds = dict(bounds)
# update objval_min using new_value (only done once)
if new_objval_at_relaxation is not None:
if new_bounds['objval_min'] < new_objval_at_relaxation:
new_bounds['objval_min'] = new_objval_at_relaxation
# update objval_max using new_value (only done once)
if new_objval_at_feasible is not None:
if new_bounds['objval_max'] > new_objval_at_feasible:
new_bounds['objval_max'] = new_objval_at_feasible
if new_bounds['objval_max'] <= new_bounds['objval_min']:
new_bounds['objval_max'] = max(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['objval_min'] = min(new_bounds['objval_max'], new_bounds['objval_min'])
new_bounds['loss_max'] = min(new_bounds['objval_max'], new_bounds['loss_max'])
return new_bounds
# start update chain
chain_count = 0
improved_bounds = True
C_0_min = np.min(C_0_nnz)
C_0_max = np.max(C_0_nnz)
L0_penalty_min = C_0_min * new_bounds['L0_min']
L0_penalty_max = min(C_0_max * new_bounds['L0_max'], new_bounds['objval_max'])
while improved_bounds and chain_count < MAX_CHAIN_COUNT:
improved_bounds = False
# loss_min
if new_bounds['objval_min'] > L0_penalty_max:
proposed_loss_min = new_bounds['objval_min'] - L0_penalty_max
if proposed_loss_min > new_bounds['loss_min']:
new_bounds['loss_min'] = proposed_loss_min
improved_bounds = True
# L0_min and L0_penalty_min
if new_bounds['objval_min'] > new_bounds['loss_max']:
proposed_L0_min = (new_bounds['objval_min'] - new_bounds['loss_max']) / C_0_min
if proposed_L0_min > new_bounds['L0_min']:
new_bounds['L0_min'] = proposed_L0_min
L0_penalty_min = max(L0_penalty_min, C_0_min * proposed_L0_min)
improved_bounds = True
# objval_min = max(objval_min, loss_min + L0_penalty_min)
        proposed_objval_min = new_bounds['loss_min'] + L0_penalty_min
if proposed_objval_min > new_bounds['objval_min']:
new_bounds['objval_min'] = proposed_objval_min
improved_bounds = True
# loss max
if new_bounds['objval_max'] > L0_penalty_min:
proposed_loss_max = new_bounds['objval_max'] - L0_penalty_min
if proposed_loss_max < new_bounds['loss_max']:
new_bounds['loss_max'] = proposed_loss_max
improved_bounds = True
# L0_max and L0_penalty_max
if new_bounds['objval_max'] > new_bounds['loss_min']:
proposed_L0_max = (new_bounds['objval_max'] - new_bounds['loss_min']) / C_0_min
if proposed_L0_max < new_bounds['L0_max']:
new_bounds['L0_max'] = proposed_L0_max
L0_penalty_max = min(L0_penalty_max, C_0_max * proposed_L0_max)
improved_bounds = True
# objval_max = min(objval_max, loss_max + penalty_max)
proposed_objval_max = new_bounds['loss_max'] + L0_penalty_max
if proposed_objval_max < new_bounds['objval_max']:
new_bounds['objval_max'] = proposed_objval_max
L0_penalty_max = min(L0_penalty_max, proposed_objval_max)
improved_bounds = True
chain_count += 1
return new_bounds
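if __name__ == '__main__':
    # Illustrative usage sketch only (not part of the original module): shows how the
    # LP bound-chaining helper above could be invoked. The bound values and the
    # per-coefficient L0 costs below are made-up assumptions for demonstration.
    import numpy as np
    example_bounds = {'objval_min': 0.0, 'objval_max': 10.0,
                      'loss_min': 0.0, 'loss_max': 10.0,
                      'L0_min': 0, 'L0_max': 5}
    example_C_0_nnz = np.array([0.5, 0.5, 0.5, 0.5, 0.5])
    tightened = chained_updates_for_lp(example_bounds, example_C_0_nnz,
                                       new_objval_at_feasible=4.0)
    print(tightened)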
| bsd-3-clause | -6,996,975,109,408,880,000 | 42.416667 | 130 | 0.591761 | false |
anthonyserious/okdataset | okdataset/profiler.py | 1 | 1226 | import time
class Time(object):
    """Pair of wall-clock and CPU time values; defaults to the current time."""
def __init__(self, t=None, c=None):
if t is not None and c is not None:
self.time = t
self.cpu = c
else:
self.time = time.time()
self.cpu = time.clock()
def __add__(self, t):
return Time(
self.time + t.time,
self.cpu + t.cpu
)
def toDict(self):
return { "time": self.time, "cpu": self.cpu }
class Timer(object):
    """Measures wall-clock and CPU time elapsed since creation (or the last reset)."""
def __init__(self):
self.t = Time()
def since(self):
return Time(
t = time.time() - self.t.time,
c = time.clock() - self.t.cpu
)
def reset(self):
self.t = Time()
class Profiler(object):
    """Accumulates named Time measurements and can merge in another profiler."""
def __init__(self):
self.timings = {}
def add(self, key, t):
if key not in self.timings:
self.timings[key] = t
else:
self.timings[key] += t
def getTimings(self):
return self.timings
def toDict(self):
return dict((k, v.toDict()) for k, v in self.timings.iteritems())
# Appends all profiler data from p
def append(self, p):
for k, v in p.getTimings().iteritems():
self.add(k, v)
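if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): records how long a short
    # sleep takes under an assumed key name "example".
    profiler = Profiler()
    timer = Timer()
    time.sleep(0.01)
    profiler.add("example", timer.since())
    print(profiler.toDict())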
| mit | -2,381,361,044,137,555,500 | 20.892857 | 73 | 0.492659 | false |
alessandrothea/gardener | tree/manyJetsHiggsVar.py | 1 | 7690 | from tree.gardening import TreeCloner
import optparse
import sys
import ROOT
import numpy
import re
import os.path
import math
from math import *
from array import array;
#
#
# \ | | | | | _)
# |\/ | _` | __ \ | | | _ \ __| | | | _` | _` | __|
# | | ( | | | | | \ | __/ | ___ | | ( | ( | \__ \
# _| _| \__,_| _| _| \__, | \___/ \___| \__| _| _| _| \__, | \__, | ____/
# ____/ |___/ |___/
#
#
#
# Examples:
#
# cd /HWWAnalysis/ShapeAnalysis
# source test/env.sh
#
# gardener.py manyJetHiggsVar /data2/amassiro/VBF/Data/All21Aug2012_temp_1/latino_2000_ggToH1000toWWTo2LAndTau2Nu.root /data2/amassiro/VBF/Data/All21Aug2012_temp_2/latino_2000_ggToH1000toWWTo2LAndTau2Nu_TESTISITWORKING.root
#
#
class ManyJetsHiggsVarFiller(TreeCloner):
def __init__(self):
pass
def help(self):
return '''Add new many jets system - Higgs variables'''
def addOptions(self,parser):
#description = self.help()
#group = optparse.OptionGroup(parser,self.label, description)
#group.add_option('-b', '--branch', dest='branch', help='Name of something that is not used ... ', default='boh')
#parser.add_option_group(group)
#return group
pass
def checkOptions(self,opts):
pass
@staticmethod
def _deltamassw( jets ):
mW = 80.385
return math.fabs( mW - (jets[0] + jets[1]).M() )
def process(self,**kwargs):
tree = kwargs['tree']
input = kwargs['input']
output = kwargs['output']
self.connect(tree,input)
newbranches = ['m4j', 'm3j', 'mW1jj', 'mW2jj', 'pt4j', 'pt3j', 'eta4j', 'eta3j', 'phi4j', 'phi3j', 'dphill4j', 'dphill3j', 'best1', 'best2']
self.clone(output,newbranches)
m4j = numpy.ones(1, dtype=numpy.float32)
m3j = numpy.ones(1, dtype=numpy.float32)
mW1jj = numpy.ones(1, dtype=numpy.float32)
mW2jj = numpy.ones(1, dtype=numpy.float32)
pt4j = numpy.ones(1, dtype=numpy.float32)
pt3j = numpy.ones(1, dtype=numpy.float32)
eta4j = numpy.ones(1, dtype=numpy.float32)
eta3j = numpy.ones(1, dtype=numpy.float32)
phi4j = numpy.ones(1, dtype=numpy.float32)
phi3j = numpy.ones(1, dtype=numpy.float32)
dphill4j = numpy.ones(1, dtype=numpy.float32)
dphill3j = numpy.ones(1, dtype=numpy.float32)
best1 = numpy.ones(1, dtype=numpy.float32)
best2 = numpy.ones(1, dtype=numpy.float32)
self.otree.Branch('m4j' , m4j , 'm4j/F' )
self.otree.Branch('m3j' , m3j , 'm3j/F' )
self.otree.Branch('mW1jj' , mW1jj , 'mW1jj/F' )
self.otree.Branch('mW2jj' , mW2jj , 'mW2jj/F' )
self.otree.Branch('pt4j' , pt4j , 'pt4j/F' )
self.otree.Branch('pt3j' , pt3j , 'pt3j/F' )
self.otree.Branch('eta4j' , eta4j , 'eta4j/F' )
self.otree.Branch('eta3j' , eta3j , 'eta3j/F' )
self.otree.Branch('phi4j' , phi4j , 'phi4j/F' )
self.otree.Branch('phi3j' , phi3j , 'phi3j/F' )
self.otree.Branch('dphill4j' , dphill4j , 'dphill4j/F' )
self.otree.Branch('dphill3j' , dphill3j , 'dphill3j/F' )
self.otree.Branch('best1' , best1 , 'best1/F' )
self.otree.Branch('best2' , best2 , 'best2/F' )
nentries = self.itree.GetEntries()
print 'Total number of entries: ',nentries
        # cache attribute lookups (avoid repeated dots) to speed up the event loop
itree = self.itree
otree = self.otree
print '- Starting eventloop'
step = 5000
for i in xrange(nentries):
itree.GetEntry(i)
## print event count
if i > 0 and i%step == 0.:
print i,'events processed.'
jetpt1 = itree.jetpt1
jetphi1 = itree.jetphi1
jeteta1 = itree.jeteta1
jetpt2 = itree.jetpt2
jetphi2 = itree.jetphi2
jeteta2 = itree.jeteta2
jetpt3 = itree.jetpt3
jetphi3 = itree.jetphi3
jeteta3 = itree.jeteta3
jetpt4 = itree.jetpt4
jetphi4 = itree.jetphi4
jeteta4 = itree.jeteta4
jet1 = ROOT.TLorentzVector()
jet1.SetPtEtaPhiM(itree.jetpt1, itree.jeteta1, itree.jetphi1, 0)
jet2 = ROOT.TLorentzVector()
jet2.SetPtEtaPhiM(itree.jetpt2, itree.jeteta2, itree.jetphi2, 0)
jet3 = ROOT.TLorentzVector()
jet3.SetPtEtaPhiM(itree.jetpt3, itree.jeteta3, itree.jetphi3, 0)
jet4 = ROOT.TLorentzVector()
jet4.SetPtEtaPhiM(itree.jetpt4, itree.jeteta4, itree.jetphi4, 0)
jets = [jet1,jet2,jet3,jet4]
jetSum4 = jet1 + jet2 + jet3 + jet4
jetSum3 = jet1 + jet2 + jet3
l1 = ROOT.TLorentzVector()
l1.SetPtEtaPhiE(itree.pt1, itree.eta1, itree.phi1, itree.pt1/sin(2*atan(exp(-itree.eta1))))
l2 = ROOT.TLorentzVector()
l2.SetPtEtaPhiE(itree.pt2, itree.eta2, itree.phi2, itree.pt2/sin(2*atan(exp(-itree.eta2))))
ll = ROOT.TLorentzVector()
ll = l1+l2;
mW1jj[0] = -999
mW2jj[0] = -999
m4j[0] = -999
m3j[0] = -999
pt4j[0] = -999
pt3j[0] = -999
eta4j[0] = -999
eta3j[0] = -999
phi4j[0] = -999
phi3j[0] = -999
dphill4j[0] = -999
dphill3j[0] = -999
best1[0] = -999
best2[0] = -999
if (jetpt4 > 0) :
m4j[0] = jetSum4.M()
pt4j[0] = jetSum4.Pt()
eta4j[0] = jetSum4.Eta()
phi4j[0] = jetSum4.Phi()
dphill4j[0] = jetSum4.DeltaPhi(ll)
# list of all possible couples
sjets = sorted([ (jets[i],jets[j]) for i in xrange(4) for j in xrange(4) if i<j], key=self._deltamassw)
# for jA,jB in sjets:
# print (jA+jB).M(),'->', self._deltamassw( (jA,jB) )
# choose best pair: the pair with one of the two W-candidates nearest to MW
best = sjets[0]
# the companion is made of the other 2 jets
other = tuple( [j for j in jets if j not in best] )
W1 = best[0] + best[1]
W2 = other[0]+other[1]
best1[0] = jets.index(best[0])
best2[0] = jets.index(best[1])
if W1.Pt() > W2.Pt() :
mW1jj[0] = W1.M()
mW2jj[0] = W2.M()
else :
mW1jj[0] = W2.M()
mW2jj[0] = W1.M()
if (jetpt3 > 0) :
m3j[0] = jetSum3.M()
pt3j[0] = jetSum3.Pt()
eta3j[0] = jetSum3.Eta()
phi3j[0] = jetSum3.Phi()
dphill3j[0] = jetSum3.DeltaPhi(ll)
otree.Fill()
self.disconnect()
print '- Eventloop completed'
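# Illustrative sketch (not part of the original module): the W-candidate pairing used
# in process() above can be exercised on its own with four ROOT.TLorentzVector
# objects j1..j4, e.g.
#
#   jets = [j1, j2, j3, j4]
#   pairs = sorted([(jets[i], jets[j]) for i in xrange(4) for j in xrange(4) if i < j],
#                  key=ManyJetsHiggsVarFiller._deltamassw)
#   best = pairs[0]   # pair whose dijet mass is closest to mW = 80.385 GeV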
| gpl-2.0 | -2,007,795,501,905,367,800 | 32.290043 | 228 | 0.46827 | false |
factly/election-results-2017 | goa/goa/spiders/results_spider.py | 1 | 2491 | import scrapy
from scrapy import Request
class CWACResultsSpider(scrapy.Spider):
name = "cw-all-candidates"
def start_requests(self):
for i in range(40):
if self.endpoint == 'archive':
yield Request('https://web.archive.org/web/20160823114553/http://eciresults.nic.in/ConstituencywiseS03%s.htm?ac=%s' % (i+1,i+1), callback=self.parse)
else:
yield Request('http://eciresults.nic.in/ConstituencywiseS05%s.htm?ac=%s' % (i+1,i+1), callback=self.parse)
def parse(self, response):
results = response.css('#div1 > table > tr')
for result in results[3:len(results)-1]:
yield {
'state': results[0].css('td::text').extract_first().split(' - ')[0],
'constituency': results[0].css('td::text').extract_first().split(' - ')[1],
'candidate': result.css('td::text')[0].extract(),
'party': result.css('td::text')[1].extract(),
'votes': result.css('td::text')[2].extract(),
'status': results[1].css('td::text').extract_first(),
}
class CWTrendsSpider(scrapy.Spider):
name = "cw-trends"
def start_requests(self):
if self.endpoint == 'archive':
yield Request('https://web.archive.org/web/20160823114553/http://eciresults.nic.in/StatewiseS03.htm', callback=self.parse)
else:
yield Request('http://eciresults.nic.in/StatewiseS05.htm', callback=self.parse)
for i in range(3):
if self.endpoint == 'archive':
yield Request('https://web.archive.org/web/20160823114553/http://eciresults.nic.in/StatewiseS03%s.htm' % (i+1), callback=self.parse)
else:
yield Request('http://eciresults.nic.in/StatewiseS05%s.htm' % (i+1), callback=self.parse)
def parse(self, response):
results = response.css('#divACList > table > tr')
for result in results[4:len(results)-1]:
yield {
'constituency': result.css('td::text')[0].extract(),
'const. no.': result.css('td::text')[1].extract(),
'leading candidate': result.css('td::text')[2].extract(),
'leading party': result.css('td::text')[3].extract(),
'trailing candidate': result.css('td::text')[4].extract(),
'trailing party': result.css('td::text')[5].extract(),
'margin': result.css('td::text')[6].extract(),
'status': result.css('td::text')[7].extract()
}
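# Usage sketch (assumed invocation, not part of the original project): both spiders
# read an "endpoint" spider argument to choose between the live ECI site and the
# web.archive.org snapshot, e.g.
#
#   scrapy crawl cw-all-candidates -a endpoint=live -o candidates.json
#   scrapy crawl cw-trends -a endpoint=archive -o trends.json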
| mit | -1,441,801,575,931,974,400 | 48.82 | 165 | 0.58049 | false |
adsorensen/flow | devops/ansible/roles/girder.girder/library/girder.py | 1 | 63424 | #!/usr/bin/python
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import json
import os
from inspect import getmembers, ismethod, getargspec
# Ansible's module magic requires this to be
# 'from ansible.module_utils.basic import *' otherwise it will error out. See:
# https://github.com/ansible/ansible/blob/v1.9.4-1/lib/ansible/module_common.py#L41-L59
# For more information on this magic. For now we noqa to prevent flake8 errors
from ansible.module_utils.basic import * # noqa
try:
from girder_client import GirderClient, AuthenticationError, HttpError
HAS_GIRDER_CLIENT = True
except ImportError:
HAS_GIRDER_CLIENT = False
__version__ = "0.3.0"
DOCUMENTATION = '''
---
module: girder
author: "Chris Kotfila ([email protected])
version_added: "0.1"
short_description: A module that wraps girder_client
requirements: [ girder_client==1.1.0 ]
description:
- Manage a girder instance using the RESTful API
options:
host:
required: false
default: 'localhost'
description:
- domain or IP of the host running girder
port:
required: false
default: '80' for http, '443' for https
description:
- port the girder instance is running on
apiRoot:
required: false
default: '/api/v1'
description:
- path on server corresponding to the root of Girder REST API
apiUrl:
required: false
default: None
description:
- full URL base of the girder instance API
apiKey:
required: false
default: None
description:
- pass in an apiKey instead of username/password
scheme:
required: false
default: 'http'
description:
- A string containing the scheme for the Girder host
dryrun:
required: false
default: None (passed through)
description:
- See GirderClient.__init__()
blacklist:
required: false
default: None (passed through)
description:
- See GirderClient.__init__()
username:
required: true
description:
- Valid username for the system
- Required with password
- must be specified if 'token' is not specified
- (See note on 'user')
password:
required: true
description:
- Valid password for the system
- Required with username
- must be specified if 'token' is not specified
- (See note on 'user')
token:
required: true
description:
- A girder client token
- Can be retrieved by accessing the accessing the 'token' attribute
from a successfully authenticated call to girder in a previous
task.
- Required if 'username' and 'password' are not specified
- (See note on 'user')
state:
required: false
default: "present"
choices: ["present", "absent"]
description:
- Used to indicate the presence or absence of a resource
- e.g., user, plugin, assetstore
user:
required: false
description:
- If using the 'user' task, you are NOT REQUIRED to pass in a
'username' & 'password', or a 'token' attributes. This is because
the first user created on an fresh install of girder is
automatically made an administrative user. Once you are certain
you have an admin user you should use those credentials in all
subsequent tasks that use the 'user' task.
- Takes a mapping of key value pairs
options:
login:
required: true
description:
- The login name of the user
password:
required: true
description:
- The password of the user
firstName:
required: false
default: pass through to girder client
description:
- The first name of the user
lastName:
required: false
default: pass through to girder client
description:
- The last name of the user
email:
required: false
default: pass through to girder client
description:
- The email of the user
admin:
required: false
default: false
description:
- If true, make the user an administrator.
plugin:
required: false
description:
- Specify what plugins should be activated (state: present)
or deactivated (state: absent).
- Takes a list of plugin names, incorrect names are silently
ignored
assetstore:
required: false
description:
- Specifies an assetstore
- Takes many options depending on 'type'
options:
name:
required: true
description:
- Name of the assetstore
type:
required: true
choices: ['filesystem', 'gridfs', 's3', 'hdfs', 'database']
description:
- Currently only 'filesystem' has been tested
readOnly:
required: false
default: false
description:
- Should the assetstore be read only?
current:
required: false
default: false
description:
- Should the assetstore be set as the current
assetstore?
options (filesystem):
root:
required: true
description:
- Filesystem path to the assetstore
options (gridfs) (EXPERIMENTAL):
db:
required: true
description:
- database name
mongohost:
required: true
description:
- Mongo host URI
replicaset:
required: false
default: ''
description:
- Replica set name
options (s3) (EXPERIMENTAL):
bucket:
required: true
description:
- The S3 bucket to store data in
prefix:
required: true
description:
- Optional path prefix within the bucket under which
files will be stored
accessKeyId:
required: true
description:
- the AWS access key ID to use for authentication
secret:
required: true
description:
- the AWS secret key to use for authentication
service:
required: false
default: s3.amazonaws.com
description:
- The S3 service host (for S3 type)
- This can be used to specify a protocol and port
- use the form [http[s]://](host domain)[:(port)]
- Do not include the bucket name here
options (hdfs) (EXPERIMENTAL):
host:
required: true
description:
- None
port:
required: true
description:
- None
path:
required: true
description:
- None
user:
required: true
description:
- None
webHdfsPort
required: true
description:
- None
group:
required: false
description:
- Create a group with pre-existing users
options:
name:
required: true
description:
- Name of the group
description:
required: false
description:
- Description of the group
users:
required: false
type: list
description:
- List of dicts with users login and their level
options:
login:
required: true
description:
- the login name
type:
required: true
choices: ["member", "moderator", "admin"]
description:
- Access level for that user in the group
collection:
required: false
description:
- Create a collection
options:
name:
required: true
description:
- Name of the collection
description:
required: false
description:
- Description of the collection
folders:
required: false
description:
- A list of folder options
- Specified by the 'folder' option to the girder module
- (see 'folder:')
access:
required: false
description:
- Set the access for the collection/folder
options:
users:
required: false
description:
- list of login/type arguments
- login is a user login
- type is one of 'admin', 'moderator', 'member'
groups:
required: false
description:
- list of name/type arguments
- name is a group name
- type is one of 'admin', 'moderator', 'member'
folder:
required: false
description:
- Create a folder
options:
name:
required: true
description:
- Name of the folder
description:
required: false
description:
- Description of the folder
parentType:
required: true
choices: ["user", "folder", "collection"]
description:
- The type of the parent
parentId:
required: true
description:
                    - The ID of the parent object (user, folder or collection)
folders:
required: false
description:
- A list of folder options
- Specified by the 'folder' option to the girder module
- (see 'folder:')
access:
required: false
description:
- Set the access for the collection/folder
options:
users:
required: false
description:
- list of login/type arguments
- login is a user login
- type is one of 'admin', 'moderator', 'member'
groups:
required: false
description:
- list of name/type arguments
- name is a group name
- type is one of 'admin', 'moderator', 'member'
item:
required: false
description:
- Create a item
options:
name:
required: true
description:
- Name of the item
description:
required: false
description:
- Description of the item
folderId:
required: true
description:
                    - The ID of the parent folder
files:
required: false
description:
- Uploads a list of files to an item
options:
itemId:
required: true
description:
- the parent item for the file
sources:
required: true
description:
- list of local file paths
- files will be uploaded to the item
setting:
required: false
description:
- Get/set the values of system settings
options:
key:
required: true
description:
- The key identifying this setting
value:
required: true if state = present, else false
description:
- The value to set
'''
EXAMPLES = '''
#############
# Example using 'user'
###
# Ensure "admin" user exists
- name: Create 'admin' User
girder:
user:
firstName: "Chris"
lastName: "Kotfila"
login: "admin"
password: "letmein"
email: "[email protected]"
admin: yes
state: present
# Ensure a 'foobar' user exists
- name: Create 'foobar' User
girder:
username: "admin"
password: "letmein"
user:
firstName: "Foo"
lastName: "Bar"
login: "foobar"
password: "foobarbaz"
email: "[email protected]"
admin: yes
state: present
# Remove the 'foobar' user
- name: Remove 'foobar' User
username: "admin"
password: "letmein"
girder:
user:
login: "foobar"
password: "foobarbaz"
state: absent
############
# Examples using Group
#
# Create an 'alice' user
- name: Create 'alice' User
girder:
port: 8080
username: "admin"
password: "letmein"
user:
firstName: "Alice"
lastName: "Test"
login: "alice"
password: "letmein"
email: "[email protected]"
state: present
# Create a 'bill' user
- name: Create 'bill' User
girder:
port: 8080
username: "admin"
password: "letmein"
user:
firstName: "Bill"
lastName: "Test"
login: "bill"
password: "letmein"
email: "[email protected]"
state: present
# Create a 'chris' user
- name: Create 'chris' User
girder:
port: 8080
username: "admin"
password: "letmein"
user:
firstName: "Chris"
lastName: "Test"
login: "chris"
password: "letmein"
email: "[email protected]"
state: present
- name: Create a test group with users
girder:
port: 8080
username: "admin"
password: "letmein"
group:
name: "Test Group"
description: "Basic test group"
users:
- login: alice
type: member
- login: bill
type: moderator
- login: chris
type: admin
state: present
# Remove Bill from the group,
# Note that 'group' list is idempotent - it describes the desired state
- name: Remove bill from group
girder:
port: 8080
username: "admin"
password: "letmein"
group:
name: "Test Group"
description: "Basic test group"
users:
- login: alice
type: member
- login: chris
type: admin
state: present
#############
# Example using 'plugins'
###
# To enable or disable all plugins you may pass the "*"
# argument. This does not (yet) support arbitrary regexes
- name: Disable all plugins
girder:
username: "admin"
password: "letmein"
plugins: "*"
state: absent
- name: Enable thumbnails plugin
girder:
username: "admin"
password: "letmein"
port: 8080
plugins:
- thumbnails
state: present
# Note that 'thumbnails' is still enabled from the previous task,
# the 'plugins' task ensures that plugins are enabled or disabled,
# it does NOT define the complete list of enabled or disabled plugins.
- name: Ensure jobs and gravatar plugins are enabled
girder:
username: "admin"
password: "letmein"
plugins:
- jobs
- gravatar
state: present
############
# Filesystem Assetstore Tests
#
- name: Create filesystem assetstore
girder:
username: "admin"
password: "letmein"
assetstore:
name: "Temp Filesystem Assetstore"
type: "filesystem"
root: "/data/"
current: true
state: present
- name: Delete filesystem assetstore
girder:
username: "admin"
password: "letmein"
assetstore:
name: "Temp Filesystem Assetstore"
type: "filesystem"
root: "/tmp/"
state: absent
############
# Examples using collections, folders, items and files
#
# Creates a test collection called "Test Collection"
- name: Create collection
girder:
port: 8080
username: "admin"
password: "letmein"
collection:
name: "Test Collection"
description: "A test collection"
register: test_collection
# Creates a folder called "test folder" under "Test Collection"
- name: Create folder
girder:
port: 8080
username: "admin"
password: "letmein"
folder:
parentType: "collection"
parentId: "{{test_collection['gc_return']['_id'] }}"
name: "test folder"
description: "A test folder"
register: test_folder
# Creates an item called "test item" under "test folder"
- name: Create an item
girder:
port: 8080
username: "admin"
password: "letmein"
item:
folderId: "{{test_folder['gc_return']['_id'] }}"
name: "test item"
description: "A test item"
register: test_item
# Upload files on the localhost at /tmp/data/test1.txt and
# /tmp/data/test2.txt to the girder instance under the item
# "test item"
# Note: the list is idempotent and will remove files that are
# not listed under the item. Files are checked for both name
# and size to determine if they should be updated.
- name: Upload files
girder:
port: 8080
username: "admin"
password: "letmein"
files:
itemId: "{{ test_item['gc_return']['_id'] }}"
sources:
- /tmp/data/test1.txt
- /tmp/data/test2.txt
register: retval
############
# Examples Using collection/folder hierarchy
#
- name: Create collection with a folder and a subfolder
girder:
port: 8080
username: "admin"
password: "letmein"
collection:
name: "Test Collection"
description: "A test collection"
folders:
- name: "test folder"
description: "A test folder"
folders:
- name: "test subfolder"
- name: "test subfolder 2"
register: test_collection
############
# Examples Setting access to files/folders
#
- name: Create collection with access
girder:
port: 8080
username: "admin"
password: "letmein"
collection:
name: "Test Collection"
description: "A test collection"
public: no
access:
users:
- login: alice
type: admin
- login: chris
type: member
register: test_collection
- name: Add group to Test Collection
girder:
port: 8080
username: "admin"
password: "letmein"
collection:
name: "Test Collection"
description: "A test collection"
public: no
access:
users:
- login: alice
type: admin
- login: bill
type: moderator
- login: chris
type: member
groups:
- name: Test Group
type: member
register: test_collection
- name: Add Test Folder with access
girder:
port: 8080
username: "admin"
password: "letmein"
folder:
parentType: "collection"
parentId: "{{test_collection['gc_return']['_id'] }}"
name: "test folder"
description: "A test folder"
access:
users:
- login: bill
type: admin
groups:
- name: Test Group
type: member
register: test_folder
############
# Examples using get
#
# Get my info
- name: Get users from http://localhost:80/api/v1/users
girder:
username: 'admin'
password: 'letmein'
get:
path: "users"
register: ret_val
# Prints debugging messages with the emails of the users
# From the last task by accessing 'gc_return' of the registered
# variable 'ret_val'
- name: print emails of users
debug: msg="{{ item['email'] }}"
with_items: "{{ ret_val['gc_return'] }}"
#############
# Advanced usage
#
# Supports get, post, put, delete methods, but does
# not guarantee idempotence on these methods!
- name: Restart the server
girder:
username: "admin"
password: "letmein"
put:
path: "system/restart"
# An example of posting an item to Girder
# Note that this is NOT idempotent. Running
# multiple times will create "An Item", "An Item (1)",
# "An Item (2)", etc..
- name: Get Me
girder:
username: "admin"
password: "letmein"
get:
path: "user/me"
register: ret
# Show use of 'token' for subsequent authentication
- name: Get my public folder
girder:
token: "{{ ret['token'] }}"
get:
path: "folder"
parameters:
parentType: "user"
parentId: "{{ ret['gc_return']['_id'] }}"
text: "Public"
register: ret
- name: Post an item to my public folder
girder:
host: "data.kitware.com"
scheme: 'https'
token: "{{ ret['token'] }}"
post:
path: "item"
parameters:
folderId: "{{ ret['gc_return'][0]['_id'] }}"
name: "An Item"
'''
def class_spec(cls, include=None):
include = include if include is not None else []
for fn, method in getmembers(cls, predicate=ismethod):
if fn in include:
spec = getargspec(method)
# Note: must specify the kind of data we accept
            # In almost all cases this will be a dict
# where variable names become keys used in yaml
# but if we have a vararg then we need to set
# this to a list.
kind = 'dict' if spec.varargs is None else 'list'
# spec.args[1:] so we don't include 'self'
params = spec.args[1:]
d = len(spec.defaults) if spec.defaults is not None else 0
r = len(params) - d
yield (fn, {"required": params[:r],
"optional": params[r:],
"type": kind})
class Resource(object):
known_resources = ['collection', 'folder', 'item', 'group']
def __init__(self, client, resource_type):
self._resources = None
self._resources_by_name = None
self.client = client
if resource_type in self.known_resources:
self.resource_type = resource_type
else:
raise Exception("{} is an unknown resource!".format(resource_type))
@property
def resources(self):
if self._resources is None:
self._resources = {r['_id']: r for r
in self.client.get(self.resource_type)}
return self._resources
@property
def resources_by_name(self):
if self._resources_by_name is None:
self._resources_by_name = {r['name']: r
for r in self.resources.values()}
return self._resources_by_name
def __apply(self, _id, func, *args, **kwargs):
if _id in self.resources.keys():
ret = func("{}/{}".format(self.resource_type, _id),
*args, **kwargs)
self.client.changed = True
return ret
def id_exists(self, _id):
return _id in self.resources.keys()
def name_exists(self, _name):
return _name in self.resources_by_name.keys()
def create(self, body, **kwargs):
try:
ret = self.client.post(self.resource_type, body, **kwargs)
self.client.changed = True
except HttpError as htErr:
try:
                # If we can't create the resource, try to return
                # the existing resource with the same name
                ret = self.resources_by_name[body['name']]
except KeyError:
raise htErr
return ret
def read(self, _id):
return self.resources[_id]
def read_by_name(self, name):
return self.resources_by_name[name]['_id']
def update(self, _id, body, **kwargs):
if _id in self.resources:
current = self.resources[_id]
# if body is a subset of current we don't actually need to update
if set(body.items()) <= set(current.items()):
return current
else:
return self.__apply(_id, self.client.put, body, **kwargs)
else:
raise Exception("{} does not exist!".format(_id))
def update_by_name(self, name, body, **kwargs):
return self.update(self.resources_by_name[name]['_id'],
body, **kwargs)
def delete(self, _id):
return self.__apply(_id, self.client.delete)
def delete_by_name(self, name):
try:
return self.delete(self.resources_by_name[name]['_id'])
except KeyError:
return {}
class AccessMixin(object):
def get_access(self, _id):
return self.client.get("{}/{}/access"
.format(self.resource_type, _id))
def put_access(self, _id, access, public=True):
current_access = self.get_access(_id)
if set([tuple(u.values()) for u in access['users']]) ^ \
set([(u["id"], u['level']) for u in current_access['users']]):
self.client.changed = True
if set([tuple(g.values()) for g in access['groups']]) ^ \
set([(u["id"], u['level']) for u in current_access['groups']]):
self.client.changed = True
return self.client.put("{}/{}/access"
.format(self.resource_type, _id),
dict(access=json.dumps(access),
public="true" if public else "false"))
class CollectionResource(AccessMixin, Resource):
def __init__(self, client):
super(CollectionResource, self).__init__(client, "collection")
class GroupResource(Resource):
def __init__(self, client):
super(GroupResource, self).__init__(client, "group")
class FolderResource(AccessMixin, Resource):
def __init__(self, client, parentType, parentId):
super(FolderResource, self).__init__(client, "folder")
self.parentType = parentType
self.parentId = parentId
@property
def resources(self):
if self._resources is None:
self._resources = {r['_id']: r for r
in self.client.get(self.resource_type, {
"parentType": self.parentType,
"parentId": self.parentId
})}
            # parentType is stored as parentCollection in the database
# We need parentType to be available so we can do set
# comparison to check if we are updating parentType (e.g.
# Moving a subfolder from a folder to a collection)
for _id in self._resources.keys():
self._resources[_id]['parentType'] = \
self._resources[_id]['parentCollection']
return self._resources
class ItemResource(Resource):
def __init__(self, client, folderId):
super(ItemResource, self).__init__(client, "item")
self.folderId = folderId
@property
def resources(self):
if self._resources is None:
self._resources = {r['_id']: r for r
in self.client.get(self.resource_type, {
"folderId": self.folderId
})}
return self._resources
class GirderClientModule(GirderClient):
# Exclude these methods from both 'raw' mode
_include_methods = ['get', 'put', 'post', 'delete', 'patch',
'plugins', 'user', 'assetstore',
'collection', 'folder', 'item', 'files',
'group', 'setting']
_debug = True
def exit(self):
if not self._debug:
del self.message['debug']
self.module.exit_json(changed=self.changed, **self.message)
def fail(self, msg):
self.module.fail_json(msg=msg)
def __init__(self):
self.changed = False
self.message = {"msg": "Success!", "debug": {}}
self.spec = dict(class_spec(self.__class__,
GirderClientModule._include_methods))
self.required_one_of = self.spec.keys()
        # Note: if additional types are added to girder this will
# have to be updated!
self.access_types = {"member": 0, "moderator": 1, "admin": 2}
def __call__(self, module):
self.module = module
super(GirderClientModule, self).__init__(
**{p: self.module.params[p] for p in
['host', 'port', 'apiRoot', 'apiUrl',
'scheme', 'dryrun', 'blacklist']
if module.params[p] is not None})
# If a username and password are set
if self.module.params['username'] is not None:
try:
self.authenticate(
username=self.module.params['username'],
password=self.module.params['password'])
except AuthenticationError:
self.fail("Could not Authenticate!")
elif self.module.params['apiKey'] is not None:
try:
self.authenticate(
apiKey=self.module.params['apiKey'])
except AuthenticationError:
self.fail("Could not Authenticate!")
# If a token is set
elif self.module.params['token'] is not None:
self.token = self.module.params['token']
# Else error if we're not trying to create a user
elif self.module.params['user'] is None:
self.fail("Must pass in either username & password, "
"or a valid girder_client token")
self.message['token'] = self.token
for method in self.required_one_of:
if self.module.params[method] is not None:
self.__process(method)
self.exit()
self.fail("Could not find executable method!")
def __process(self, method):
# Parameters from the YAML file
params = self.module.params[method]
# Final list of arguments to the function
args = []
# Final list of keyword arguments to the function
kwargs = {}
if isinstance(params, dict):
for arg_name in self.spec[method]['required']:
if arg_name not in params.keys():
self.fail("%s is required for %s" % (arg_name, method))
args.append(params[arg_name])
for kwarg_name in self.spec[method]['optional']:
if kwarg_name in params.keys():
kwargs[kwarg_name] = params[kwarg_name]
elif isinstance(params, list):
args = params
else:
args = [params]
ret = getattr(self, method)(*args, **kwargs)
self.message['debug']['method'] = method
self.message['debug']['args'] = args
self.message['debug']['kwargs'] = kwargs
self.message['debug']['params'] = params
self.message['gc_return'] = ret
def files(self, itemId, sources=None):
ret = {"added": [],
"removed": []}
files = self.get("item/{}/files".format(itemId))
if self.module.params['state'] == 'present':
file_dict = {f['name']: f for f in files}
source_dict = {os.path.basename(s): {
"path": s,
"name": os.path.basename(s),
"size": os.path.getsize(s)} for s in sources}
source_names = set([(s['name'], s['size'])
for s in source_dict.values()])
file_names = set([(f['name'], f['size'])
for f in file_dict.values()])
for n, _ in (file_names - source_names):
self.delete("file/{}".format(file_dict[n]['_id']))
ret['removed'].append(file_dict[n])
for n, _ in (source_names - file_names):
self.uploadFileToItem(itemId, source_dict[n]['path'])
ret['added'].append(source_dict[n])
elif self.module.params['state'] == 'absent':
for f in files:
self.delete("file/{}".format(f['_id']))
ret['removed'].append(f)
if len(ret['added']) != 0 or len(ret['removed']) != 0:
self.changed = True
return ret
def _get_user_by_login(self, login):
try:
user = self.get("/resource/lookup",
{"path": "/user/{}".format(login)})
except HttpError:
user = None
return user
def _get_group_by_name(self, name):
try:
            # Could potentially fail if we have more than 50 groups
            group = {g['name']: g for g in self.get("group")}[name]
except (KeyError, HttpError):
group = None
return group
def group(self, name, description, users=None, debug=False):
r = GroupResource(self)
valid_fields = [("name", name),
("description", description)]
if self.module.params['state'] == 'present':
if r.name_exists(name):
ret = r.update_by_name(name, {k: v for k, v in valid_fields
if v is not None})
else:
ret = r.create({k: v for k, v in valid_fields
if v is not None})
if users is not None:
ret["added"] = []
ret["removed"] = []
ret["updated"] = []
group_id = ret['_id']
# Validate and normalize the user list
for user in users:
assert "login" in user.keys(), \
"User list must have a login attribute"
user['type'] = self.access_types.get(
user.get('type', 'member'), "member")
# dict of passed in login -> type
user_levels = {u['login']: u['type'] for u in users}
# dict of current login -> user information for this group
members = {m['login']: m for m in
self.get('group/{}/member'.format(group_id))}
# Add these users
for login in (set(user_levels.keys()) - set(members.keys())):
user = self._get_user_by_login(login)
if user is not None:
# add user at level
self.post("group/{}/invitation".format(group_id),
{"userId": user["_id"],
"level": user_levels[login],
"quiet": True,
"force": True})
ret['added'].append(user)
else:
raise Exception('{} is not a valid login!'
.format(login))
# Remove these users
for login in (set(members.keys()) - set(user_levels.keys())):
self.delete("/group/{}/member".format(group_id),
{"userId": members[login]['_id']})
ret['removed'].append(members[login])
# Set of users that potentially need to be updated
if len(set(members.keys()) & set(user_levels.keys())):
group_access = self.get('group/{}/access'.format(group_id))
# dict of current login -> access information for this group
user_access = {m['login']: m
for m in group_access['access']['users']}
# dict of login -> level for the current group
# Note:
# Here we join members with user_access - if the member
# is not in user_access then the member has a level of 0 by
# default. This gives us a complete list of every login,
# and its access level, including those that are IN the
# group, but have no permissions ON the group.
member_levels = {m['login']:
user_access.get(m['login'],
{"level": 0})['level']
for m in members.values()}
ret = self._promote_or_demote_in_group(ret,
member_levels,
user_levels,
group_id)
# Make sure 'changed' is handled correctly if we've
# manipulated the group's users in any way
if (len(ret['added']) != 0 or len(ret['removed']) != 0 or
len(ret['updated']) != 0):
self.changed = True
elif self.module.params['state'] == 'absent':
ret = r.delete_by_name(name)
return ret
def _promote_or_demote_in_group(self, ret, member_levels, user_levels,
group_id):
"""Promote or demote a set of users.
:param ret: the current dict of return values
        :param member_levels: the current access levels of each member
        :param user_levels: the desired levels of each member
        :param group_id: the id of the group whose membership is being updated
:returns: info about what has (or has not) been updated
:rtype: dict
"""
reverse_type = {v: k for k, v in self.access_types.items()}
for login in (set(member_levels.keys()) &
set(user_levels.keys())):
user = self._get_user_by_login(login)
_id = user["_id"]
# We're promoting
if member_levels[login] < user_levels[login]:
resource = reverse_type[user_levels[login]]
self.post("group/{}/{}"
.format(group_id, resource),
{"userId": _id})
user['from_level'] = member_levels[login]
user['to_level'] = user_levels[login]
ret['updated'].append(user)
# We're demoting
elif member_levels[login] > user_levels[login]:
resource = reverse_type[member_levels[login]]
self.delete("group/{}/{}"
.format(group_id, resource),
{"userId": _id})
# In case we're not demoting to member make sure
# to update to promote to whatever level we ARE
# demoting too now that our user is a only a member
if user_levels[login] != 0:
resource = reverse_type[user_levels[login]]
self.post("group/{}/{}"
.format(group_id, resource),
{"userId": _id})
user['from_level'] = member_levels[login]
user['to_level'] = user_levels[login]
ret['updated'].append(user)
return ret
def item(self, name, folderId, description=None, files=None,
access=None, debug=False):
ret = {}
r = ItemResource(self, folderId)
valid_fields = [("name", name),
("description", description),
('folderId', folderId)]
if self.module.params['state'] == 'present':
if r.name_exists(name):
ret = r.update_by_name(name, {k: v for k, v in valid_fields
if v is not None})
else:
ret = r.create({k: v for k, v in valid_fields
if v is not None})
# handle files here
elif self.module.params['state'] == 'absent':
ret = r.delete_by_name(name)
return ret
def folder(self, name, parentId, parentType, description=None,
public=True, folders=None, access=None, debug=False):
ret = {}
assert parentType in ['collection', 'folder', 'user'], \
"parentType must be collection or folder"
r = FolderResource(self, parentType, parentId)
valid_fields = [("name", name),
("description", description),
("parentType", parentType),
("parentId", parentId)]
if self.module.params['state'] == 'present':
if r.name_exists(name):
ret = r.update_by_name(name, {k: v for k, v in valid_fields
if v is not None})
else:
valid_fields = valid_fields + [("public", public)]
ret = r.create({k: v for k, v in valid_fields
if v is not None})
if folders is not None:
self._process_folders(folders, ret["_id"], "folder")
# handle access here
if access is not None:
_id = ret['_id']
ret['access'] = self._access(r, access, _id, public=public)
elif self.module.params['state'] == 'absent':
ret = r.delete_by_name(name)
return ret
def _access(self, r, access, _id, public=True):
access_list = {"users": [], "groups": []}
users = access.get("users", None)
groups = access.get("groups", None)
if groups is not None:
assert set(g['type'] for g in groups if 'type' in g) <= \
set(self.access_types.keys()), "Invalid access type!"
# Hash of name -> group information
# used to get user id's for access control lists
all_groups = {g['name']: g for g in self.get("group")}
access_list['groups'] = [{'id': all_groups[g['name']]["_id"],
'level': self.access_types[g['type']]
if 'type' in g else g['level']}
for g in groups]
if users is not None:
assert set(u['type'] for u in users if 'type' in u) <= \
set(self.access_types.keys()), "Invalid access type!"
# Hash of login -> user information
# used to get user id's for access control lists
current_users = {u['login']: self._get_user_by_login(u['login'])
for u in users}
access_list['users'] = [{'id': current_users[u['login']]["_id"],
"level": self.access_types[u['type']]
if 'type' in u else u['level']}
for u in users]
return r.put_access(_id, access_list, public=public)
def _process_folders(self, folders, parentId, parentType):
"""Process a list of folders from a user or collection.
:param folders: List of folders passed as attribute
to user or collection
:param parentId: ID of the user or the collection
        :param parentType: one of 'user', 'folder' or 'collection'
:returns: Nothing
:rtype: None
"""
current_folders = {f['name']: f for f in
self.get("folder", {"parentType": parentType,
"parentId": parentId})}
# Add, update or noop listed folders
for folder in folders:
# some validation of folder here would be a good idea
kwargs = folder.copy()
del kwargs['name']
self.folder(folder['name'],
parentId=parentId,
parentType=parentType,
**kwargs)
# Make sure we remove folders not listed
for name in (set(current_folders.keys()) -
set([f['name'] for f in folders])):
original_state = self.module.params['state']
self.module.params['state'] = "absent"
self.folder(name,
parentId=parentId,
parentType=parentType)
self.module.params['state'] = original_state
def collection(self, name, description=None,
public=True, access=None, folders=None, debug=False):
ret = {}
r = CollectionResource(self)
valid_fields = [("name", name),
("description", description)]
if self.module.params['state'] == 'present':
if r.name_exists(name):
# While we can set public when we create the collection, we
# cannot update the public/private status of a collection
# via the PUT /collection/%s endpoint. Currently this is
# possible through the API by hitting the
# PUT /collection/%s/access endpoint with public=true and
# the access dict equal to {}
if r.resources_by_name[name]['public'] != public:
_id = r.resources_by_name[name]['_id']
self.changed = True
self._access(r, r.get_access(_id), _id, public=public)
# invalidate the resource cache - this forces us to pick up
# the change in 'public' attribute despite it not being
# an attribute we can modify
r._resources = None
ret = r.update_by_name(name, {k: v for k, v in valid_fields
if v is not None})
else:
valid_fields.append(("public", public))
ret = r.create({k: v for k, v in valid_fields
if v is not None})
if folders is not None:
self._process_folders(folders, ret["_id"], "collection")
if access is not None:
_id = ret['_id']
ret['access'] = self._access(r, access, _id, public=public)
elif self.module.params['state'] == 'absent':
ret = r.delete_by_name(name)
return ret
def plugins(self, *plugins):
import json
ret = []
available_plugins = self.get("system/plugins")
self.message['debug']['available_plugins'] = available_plugins
plugins = set(plugins)
enabled_plugins = set(available_plugins['enabled'])
# Could maybe be expanded to handle all regular expressions?
if "*" in plugins:
plugins = set(available_plugins['all'].keys())
# Fail if plugins are passed in that are not available
if not plugins <= set(available_plugins["all"].keys()):
self.fail("%s, not available!" %
",".join(list(plugins -
set(available_plugins["all"].keys()))))
# If we're trying to ensure plugins are present
if self.module.params['state'] == 'present':
# If plugins is not a subset of enabled plugins:
if not plugins <= enabled_plugins:
                # Put the union of enabled_plugins and plugins
ret = self.put("system/plugins",
{"plugins":
json.dumps(list(plugins | enabled_plugins))})
self.changed = True
# If we're trying to ensure plugins are absent
elif self.module.params['state'] == 'absent':
# If there are plugins in the list that are enabled
if len(enabled_plugins & plugins):
self.changed = True
# Put the difference of enabled_plugins and plugins
ret = self.put("system/plugins",
{"plugins":
json.dumps(list(enabled_plugins - plugins))})
return ret
def user(self, login, password, firstName=None,
lastName=None, email=None, admin=False, folders=None):
if self.module.params['state'] == 'present':
# Fail if we don't have firstName, lastName and email
for var_name, var in [('firstName', firstName),
('lastName', lastName), ('email', email)]:
if var is None:
self.fail("%s must be set if state "
"is 'present'" % var_name)
try:
ret = self.authenticate(username=login,
password=password)
me = self.get("user/me")
# List of fields that can actually be updated
updateable = ['firstName', 'lastName', 'email', 'admin']
passed_in = [firstName, lastName, email, admin]
# If there is actually an update to be made
if set([(k, v) for k, v in me.items() if k in updateable]) ^ \
set(zip(updateable, passed_in)):
self.put("user/%s" % me['_id'],
parameters={
"login": login,
"firstName": firstName,
"lastName": lastName,
"password": password,
"email": email,
"admin": "true" if admin else "false"})
self.changed = True
ret = me
# User does not exist (with this login info)
except AuthenticationError:
ret = self.post("user", parameters={
"login": login,
"firstName": firstName,
"lastName": lastName,
"password": password,
"email": email,
"admin": "true" if admin else "false"
})
self.changed = True
if folders is not None:
_id = self.get("resource/lookup",
{"path": "/user/{}".format(login)})["_id"]
self._process_folders(folders, _id, "user")
elif self.module.params['state'] == 'absent':
ret = []
try:
ret = self.authenticate(username=login,
password=password)
me = self.get("user/me")
self.delete('user/%s' % me['_id'])
self.changed = True
# User does not exist (with this login info)
except AuthenticationError:
ret = []
return ret
# Handles patch correctly by dumping the data as a string before passing
# it on to requests See:
# http://docs.python-requests.org/en/master/user/quickstart/#more-complicated-post-requests
def patch(self, path, parameters=None, data=None):
super(GirderClientModule, self).patch(path, parameters=parameters,
data=json.dumps(data))
assetstore_types = {
"filesystem": 0,
"girdfs": 1,
"s3": 2,
"hdfs": "hdfs",
"database": "database"
}
def __validate_hdfs_assetstore(self, *args, **kwargs):
# Check if hdfs plugin is available, enable it if it isn't
pass
def __validate_database_assetstore(self, *args, **kwargs):
pass
def assetstore(self, name, type, root=None, db=None, mongohost=None,
replicaset='', bucket=None, prefix='', accessKeyId=None,
secret=None, service='s3.amazonaws.com', host=None,
port=None, path=None, user=None, webHdfsPort=None,
dbtype=None, dburi=None,
readOnly=False, current=False):
# Fail if somehow we have an asset type not in assetstore_types
if type not in self.assetstore_types.keys():
self.fail("assetstore type %s is not implemented!" % type)
argument_hash = {
"filesystem": {'name': name,
'type': self.assetstore_types[type],
'root': root},
"gridfs": {'name': name,
'type': self.assetstore_types[type],
'db': db,
'mongohost': mongohost,
'replicaset': replicaset},
"s3": {'name': name,
'type': self.assetstore_types[type],
'bucket': bucket,
'prefix': prefix,
'accessKeyId': accessKeyId,
'secret': secret,
'service': service},
'hdfs': {'name': name,
'type': self.assetstore_types[type],
'host': host,
'port': port,
'path': path,
'user': user,
'webHdfsPort': webHdfsPort},
'database': {'name': name,
'type': self.assetstore_types[type],
'dbtype': dbtype,
'dburi': dburi}
}
# Fail if we don't have all the required attributes
# for this asset type
for k, v in argument_hash[type].items():
if v is None:
self.fail("assetstores of type "
"%s require attribute %s" % (type, k))
# Set optional arguments in the hash
argument_hash[type]['readOnly'] = readOnly
argument_hash[type]['current'] = current
ret = []
# Get the current assetstores
assetstores = {a['name']: a for a in self.get("assetstore")}
self.message['debug']['assetstores'] = assetstores
# If we want the assetstore to be present
if self.module.params['state'] == 'present':
# And the asset store exists
if name in assetstores.keys():
id = assetstores[name]['_id']
####
# Fields that could potentially be updated
#
# This is necessary because there are fields in the assetstores
# that do not hash (e.g., capacity) and fields in the
# argument_hash that are not returned by 'GET' assetstore (e.g.
# readOnly). We could be more precise about this
# (e.g., by only checking items that are relevant to this type)
# but readability suffers.
updateable = ["root", "mongohost", "replicaset", "bucket",
"prefix", "db", "accessKeyId", "secret",
"service", "host", "port", "path", "user",
"webHdfsPort", "current", "dbtype", "dburi"]
# tuples of (key, value) for fields that can be updated
# in the assetstore
assetstore_items = set((k, assetstores[name][k])
for k in updateable
if k in assetstores[name].keys())
# tuples of (key, value) for fields that can be updated
# in the argument_hash for this assetstore type
arg_hash_items = set((k, argument_hash[type][k])
for k in updateable
if k in argument_hash[type].keys())
# if arg_hash_items not a subset of assetstore_items
if not arg_hash_items <= assetstore_items:
# Update
ret = self.put("assetstore/%s" % id,
parameters=argument_hash[type])
self.changed = True
# And the asset store does not exist
else:
try:
# If __validate_[type]_assetstore exists then call the
# function with argument_hash. E.g., to check if the
# HDFS plugin is enabled
getattr(self, "__validate_%s_assetstore" % type
)(**argument_hash)
except AttributeError:
pass
ret = self.post("assetstore",
parameters=argument_hash[type])
self.changed = True
# If we want the assetstore to be gone
elif self.module.params['state'] == 'absent':
# And the assetstore exists
if name in assetstores.keys():
id = assetstores[name]['_id']
ret = self.delete("assetstore/%s" % id,
parameters=argument_hash[type])
self.changed = True
return ret
def setting(self, key, value=None):
ret = {}
json_value = isinstance(value, (list, dict))
if self.module.params['state'] == 'present':
# Get existing setting value to determine self.changed
existing_value = self.get('system/setting', parameters={'key': key})
params = {
'key': key,
'value': json.dumps(value) if json_value else value
}
try:
response = self.put('system/setting', parameters=params)
except HttpError as e:
self.fail(json.loads(e.responseText)['message'])
if response and isinstance(value, list):
self.changed = set(existing_value) != set(value)
elif response and isinstance(value, dict):
self.changed = set(existing_value.items()) != set(value.items())
elif response:
self.changed = existing_value != value
if self.changed:
ret['previous_value'] = existing_value
ret['current_value'] = value
else:
ret['previous_value'] = ret['current_value'] = existing_value
elif self.module.params['state'] == 'absent':
# Removing a setting is a way of explicitly forcing it to be the default
existing_value = self.get('system/setting', parameters={'key': key})
default = self.get('system/setting', parameters={'key': key, 'default': 'default'})
if existing_value != default:
try:
self.delete('system/setting', parameters={'key': key})
self.changed = True
ret['previous_value'] = existing_value
ret['current_value'] = default
except HttpError as e:
self.fail(json.loads(e.responseText)['message'])
return ret
def main():
"""Entry point for ansible girder client module
:returns: Nothing
:rtype: NoneType
"""
    # Default spec for initializing and authenticating
argument_spec = {
# __init__
'host': dict(),
'port': dict(),
'apiRoot': dict(),
'apiUrl': dict(),
'scheme': dict(),
'dryrun': dict(),
'blacklist': dict(),
# authenticate
'username': dict(),
'password': dict(),
'token': dict(),
'apiKey': dict(),
# General
'state': dict(default="present", choices=['present', 'absent'])
}
gcm = GirderClientModule()
for method in gcm.required_one_of:
argument_spec[method] = dict(type=gcm.spec[method]['type'])
module = AnsibleModule( # noqa
argument_spec=argument_spec,
required_one_of=[gcm.required_one_of,
["token", "username", "user", "apiKey"]],
required_together=[["username", "password"]],
mutually_exclusive=gcm.required_one_of,
supports_check_mode=False)
if not HAS_GIRDER_CLIENT:
module.fail_json(msg="Could not import GirderClient!")
try:
gcm(module)
except HttpError as e:
import traceback
module.fail_json(msg="%s:%s\n%s\n%s" % (e.__class__, str(e),
e.responseText,
traceback.format_exc()))
except Exception as e:
import traceback
# exc_type, exc_obj, exec_tb = sys.exc_info()
module.fail_json(msg="%s: %s\n\n%s" % (e.__class__, str(e),
traceback.format_exc()))
if __name__ == '__main__':
main()
| apache-2.0 | 5,519,976,455,228,743,000 | 32.628844 | 95 | 0.494529 | false |
r-o-b-b-i-e/pootle | setup.py | 1 | 10498 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import os
import re
import sys
from distutils import log
from distutils.command.build import build as DistutilsBuild
from distutils.core import Command
from distutils.errors import DistutilsOptionError
from pkg_resources import parse_version, require
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
from pootle import __version__
def check_pep440_versions():
if require('setuptools')[0].parsed_version < parse_version('8.0'):
exit("Incompatible version of 'setuptools'. Please run\n"
"'pip install --upgrade setuptools'")
if require('pip')[0].parsed_version < parse_version('6.0'):
exit("Incompatible version of 'pip'. Please run\n"
"'pip install --upgrade pip'")
def parse_requirements(file_name, recurse=False):
"""Parses a pip requirements file and returns a list of packages.
Use the result of this function in the ``install_requires`` field.
Copied from cburgmer/pdfserver.
"""
requirements = []
for line in open(file_name, 'r').read().split('\n'):
# Ignore comments, blank lines and included requirements files
if re.match(r'(\s*#)|(\s*$)|'
'((--allow-external|--allow-unverified) .*$)', line):
continue
if re.match(r'-r .*$', line):
if recurse:
requirements.extend(parse_requirements(
'requirements/' +
re.sub(r'-r\s*(.*[.]txt)$', r'\1', line), recurse))
continue
if re.match(r'\s*-e\s+', line):
requirements.append(re.sub(r'\s*-e\s+.*#egg=(.*)$', r'\1', line))
elif re.match(r'\s*-f\s+', line):
pass
else:
requirements.append(line)
return requirements
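# Example (illustrative, assumes a requirements/base.txt file exists):
#
#   install_requires = parse_requirements('requirements/base.txt', recurse=True)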
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--tb=short', 'tests/']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
class PootleBuildMo(DistutilsBuild):
description = "compile Gettext PO files into MO"
user_options = [
('all', None,
"compile all language (don't use LINGUAS file)"),
('lang=', 'l',
"specify a language to compile"),
('check', None,
"check for errors"),
]
boolean_options = ['all']
po_path_base = os.path.join('pootle', 'locale')
_langs = []
def initialize_options(self):
self.all = False
self.lang = None
self.check = False
def finalize_options(self):
if self.all and self.lang is not None:
raise DistutilsOptionError(
"Can't use --all and --lang together"
)
if self.lang is not None:
self._langs = [self.lang]
elif self.all:
for lang in os.listdir(self.po_path_base):
if (os.path.isdir(os.path.join(self.po_path_base, lang)) and
lang != "templates"):
self._langs.append(lang)
else:
for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):
self._langs.append(lang.rstrip())
def build_mo(self):
"""Compile .mo files from available .po files"""
import subprocess
import gettext
from translate.storage import factory
error_occured = False
for lang in self._langs:
lang = lang.rstrip()
po_path = os.path.join('pootle', 'locale', lang)
mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')
if not os.path.exists(mo_path):
os.makedirs(mo_path)
for po, mo in (('pootle.po', 'django.mo'),
('pootle_js.po', 'djangojs.mo')):
po_filename = os.path.join(po_path, po)
mo_filename = os.path.join(mo_path, mo)
if not os.path.exists(po_filename):
log.warn("%s: missing file %s", lang, po_filename)
continue
if not os.path.exists(mo_path):
os.makedirs(mo_path)
log.info("compiling %s", lang)
if self.check:
command = ['msgfmt', '-c', '--strict',
'-o', mo_filename, po_filename]
else:
command = ['msgfmt', '--strict',
'-o', mo_filename, po_filename]
try:
subprocess.check_call(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
error_occured = True
except Exception as e:
log.warn("%s: skipping, running msgfmt failed: %s",
lang, e)
try:
store = factory.getobject(po_filename)
gettext.c2py(store.getheaderplural()[1])
except Exception:
log.warn("%s: invalid plural header in %s",
lang, po_filename)
if error_occured:
sys.exit(1)
def run(self):
self.build_mo()
class BuildChecksTemplatesCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import django
import codecs
from pootle.apps.pootle_misc.checks import (check_names,
excluded_filters)
from translate.filters.checks import (TeeChecker, StandardChecker,
StandardUnitChecker)
try:
from docutils.core import publish_parts
except ImportError:
from distutils.errors import DistutilsModuleError
raise DistutilsModuleError("Please install the docutils library.")
from pootle import syspath_override # noqa
django.setup()
def get_check_description(name, filterfunc):
"""Get a HTML snippet for a specific quality check description.
The quality check description is extracted from the check function
docstring (which uses reStructuredText) and rendered using docutils
to get the HTML snippet.
"""
# Provide a header with an anchor to refer to.
description = ('\n<h3 id="%s">%s</h3>\n\n' %
(name, unicode(check_names[name])))
# Clean the leading whitespace on each docstring line so it gets
# properly rendered.
docstring = "\n".join(line.strip()
for line in filterfunc.__doc__.split("\n"))
# Render the reStructuredText in the docstring into HTML.
description += publish_parts(docstring, writer_name="html")["body"]
return description
print("Regenerating Translate Toolkit quality checks descriptions")
# Get a checker with the Translate Toolkit checks. Note that filters
# that are not used in Pootle are excluded.
fd = TeeChecker(
checkerclasses=[StandardChecker, StandardUnitChecker]
).getfilters(excludefilters=excluded_filters)
docs = sorted(
get_check_description(name, f) for name, f in fd.items()
)
# Output the quality checks descriptions to the HTML file.
templates_dir = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "pootle", "templates"
)
filename = os.path.join(templates_dir, "help/_ttk_quality_checks.html")
with codecs.open(filename, "w", "utf-8") as f:
f.write(u"\n".join(docs))
print("Checks templates written to %r" % (filename))
check_pep440_versions()
setup(
name="Pootle",
version=__version__,
description="An online collaborative localization tool.",
long_description=open(
os.path.join(os.path.dirname(__file__), 'README.rst')
).read(),
author="Translate",
author_email="[email protected]",
license="GNU General Public License 3 or later (GPLv3+)",
url="http://pootle.translatehouse.org",
download_url="https://github.com/translate/pootle/releases/tag/" +
__version__,
install_requires=parse_requirements('requirements/base.txt'),
tests_require=parse_requirements('requirements/tests.txt'),
extras_require={
'dev': parse_requirements('requirements/dev.txt', recurse=True),
# Database dependencies
'mysql': parse_requirements('requirements/_db_mysql.txt'),
'postgresql': parse_requirements('requirements/_db_postgresql.txt'),
# Pootle FS plugins
'git': parse_requirements('requirements/_pootle_fs_git.txt'),
# Markdown
'markdown': parse_requirements('requirements/_markup_markdown.txt'),
},
platforms=["any"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: "
"GNU General Public License v3 or later (GPLv3+)",
"Operating System :: OS Independent",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Programming Language :: JavaScript",
"Programming Language :: Python",
"Topic :: Software Development :: Localization",
"Topic :: Text Processing :: Linguistic"
],
zip_safe=False,
packages=find_packages(),
include_package_data=True,
entry_points={
'console_scripts': [
'pootle = pootle.runner:main',
],
},
cmdclass={
'build_checks_templates': BuildChecksTemplatesCommand,
'build_mo': PootleBuildMo,
'test': PyTest,
},
)
| gpl-3.0 | 8,064,249,622,696,830,000 | 33.646865 | 79 | 0.572395 | false |
CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleSystemAccounting.py | 1 | 10501 | #!/usr/bin/env python3
###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
"""
This is a Unit Test for Rule SystemAccounting
@author: Breen Malmberg
@change: 2015/09/25 eball Updated to enable CI so that rule runs during test
@change: 2015/09/25 eball Added Debian/Ubuntu setup
@change: 2015/10/09 eball Updated Deb setup to improve automated testing compat
@change: 2015/10/26 eball Comment fix, added informative text for test failure
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
"""
import unittest
import re
import os
import sys
import shutil
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.stonix_resources.pkghelper import Pkghelper
from src.stonix_resources.localize import PROXY
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.SystemAccounting import SystemAccounting
class zzzTestRuleSystemAccounting(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = SystemAccounting(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
self.ph = Pkghelper(self.logdispatch, self.environ)
self.rule.ci.updatecurrvalue(True)
def tearDown(self):
pass
def runTest(self):
result = self.simpleRuleTest()
self.assertTrue(result, "SystemAccounting(9): rule.iscompliant() is " +
"'False' after rule.fix() and rule.report() have " +
"run. This may be due to a proxy error; if the " +
"proper proxy is not set in localize.py, set it and " +
"run this test again.")
def test_default_sysstat_empty(self):
"""
test correction of /etc/default/sysstat if it has no entry in it
:return:
"""
file = "/etc/default/sysstat"
backup = "/etc/default/sysstat.stonix_test_bak"
if os.path.isfile(file):
self._backup_file(file)
f = open(file, "w")
f.write("")
f.close()
self.rule._set_paths()
self.assertFalse(self.rule._report_configuration())
self.rule._fix_configuration()
self.assertTrue(self.rule._report_configuration())
self._restore_file(backup)
else:
return True
def test_default_sysstat_comment(self):
"""
test correction of /etc/default/sysstat if it has the entry commented out
:return:
"""
file = "/etc/default/sysstat"
backup = "/etc/default/sysstat.stonix_test_bak"
if os.path.isfile(file):
self._backup_file(file)
f = open(file, "w")
f.write('# ENABLED="true"')
f.close()
self.rule._set_paths()
self.assertFalse(self.rule._report_configuration())
self.rule._fix_configuration()
self.assertTrue(self.rule._report_configuration())
self._restore_file(backup)
else:
return True
def test_default_sysstat_wrongvalue(self):
"""
test correction of /etc/default/sysstat if it has the entry set to the wrong value
:return:
"""
file = "/etc/default/sysstat"
backup = "/etc/default/sysstat.stonix_test_bak"
if os.path.isfile(file):
self._backup_file(file)
f = open(file, "w")
f.write('ENABLED="false"')
f.close()
self.rule._set_paths()
self.assertFalse(self.rule._report_configuration())
self.rule._fix_configuration()
self.assertTrue(self.rule._report_configuration())
self._restore_file(backup)
else:
return True
def test_default_sysstat_rightvalue(self):
"""
test correction of /etc/default/sysstat if it has the entry set to the right value
:return:
"""
file = "/etc/default/sysstat"
backup = "/etc/default/sysstat.stonix_test_bak"
if os.path.isfile(file):
self._backup_file(file)
f = open(file, "w")
f.write('ENABLED="true"')
f.close()
self.rule._set_paths()
self.assertTrue(self.rule._report_configuration())
self.rule._fix_configuration()
self.assertTrue(self.rule._report_configuration())
self._restore_file(backup)
else:
return True
def test_installation_installed(self):
"""
test installation report/fix if package already installed
applies to Linux only
:return:
"""
if self.rule.ostype == "Mac OS X":
return True
package = "sysstat"
if self.ph.check(package):
self.rule._set_paths()
self.assertTrue(self.rule._report_installation())
self.rule._fix_installation()
self.assertTrue(self.rule._report_installation())
else:
return True
def test_installation_missing(self):
"""
test installation report/fix if package not installed
applies to Linux only
:return:
"""
if self.rule.ostype == "Mac OS X":
return True
package = "sysstat"
if not self.ph.check(package):
self.rule._set_paths()
self.assertFalse(self.rule._report_installation())
self.rule._fix_installation()
self.assertTrue(self.rule._report_installation())
else:
return True
def test_set_paths(self):
"""
test that all paths and necessary variables for the class are able to be properly
determined and set once package is installed
:return:
"""
package = "sysstat"
self.ph.install(package)
self.rule._set_paths()
self.assertTrue(self.rule.sysstat_package)
self.assertTrue(self.rule.sysstat_service_file)
self.assertTrue(self.rule.sa1)
self.assertTrue(self.rule.sa2)
self.assertTrue(self.rule.sysstat_service_contents)
self.assertTrue(self.rule.sysstat_cron_contents)
self.assertTrue(self.rule.ostype)
self.ph.remove(package)
def _restore_file(self, backup):
"""
:param backup:
:return:
"""
if os.path.isfile(backup):
if re.search("\.stonix_test_bak", backup):
shutil.copy2(backup, backup.replace(".stonix_test_bak", ""))
def _backup_file(self, original):
"""
:param original:
:return:
"""
if os.path.isfile(original):
shutil.copy2(original, original + ".stonix_test_bak")
def setConditionsForRule(self):
"""Configure system for the unit test
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: Breen Malmberg
"""
success = True
self.rule.ci.updatecurrvalue(True)
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
"""check on whether report was correct
:param self: essential if you override this definition
:param pCompliance: the self.iscompliant value of rule
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: Breen Malmberg
"""
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " +
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
def checkFixForRule(self, pRuleSuccess):
"""check on whether fix was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: Breen Malmberg
"""
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
def checkUndoForRule(self, pRuleSuccess):
"""check on whether undo was correct
:param self: essential if you override this definition
:param pRuleSuccess: did report run successfully
:returns: boolean - If successful True; If failure False
@author: Breen Malmberg
"""
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " +
str(pRuleSuccess) + ".")
success = True
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 | -2,501,568,841,889,519,000 | 31.918495 | 90 | 0.570898 | false |
scibi/django-sqltables-old | sqltables/__init__.py | 1 | 1379 | # -*- coding: utf-8 -*-
__version__ = '0.1.0'
from sqltables.manager import manager
#
# Code heavily inspred by django.contrib.admin
#
def autodiscover():
"""
Auto-discover INSTALLED_APPS tables.py modules and fail silently when
not present. This forces an import on them to register any tables bits they
may want.
"""
import copy
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's tables module.
try:
before_import_registry = copy.copy(manager._registry)
import_module('%s.tables' % app)
except:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
manager._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have an tables module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'tables'):
raise
| bsd-3-clause | 8,323,986,165,523,579,000 | 33.475 | 79 | 0.641769 | false |
chickenzord/dotenvy | src/dotenvy/parser.py | 1 | 2349 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import bytes
from future import standard_library
standard_library.install_aliases()
from sys import version_info
from string import Template
from .exception import ParseException
import re
QUOTES = ['"', '\'']
TRUTHY_VALUES = ['1', 'true', 'yes', 'on']
FALSY_VALUES = ['0', 'false', 'no', 'off']
def truth(string):
if string.lower() in TRUTHY_VALUES:
return True
elif string.lower() in FALSY_VALUES:
return False
else:
raise ValueError('Invalid truth value')
def is_blank(text):
return text.strip() == ''
def is_comment(line):
return len(line) > 0 and line[:1] == '#'
def is_pair(line):
return bool(re.match(r'^[A-Za-z0-9_]+=(\S|$)', line))
def unescape(text):
if version_info.major <= 2:
return text.decode('string_escape')
else:
return bytes(text, 'utf-8').decode('unicode_escape')
def parse_quoted(text):
if len(text) == 0:
return ''
if len(text) == 1 and text in QUOTES:
raise ParseException('Invalid quoted value')
first = text[:1]
last = text[-1:]
if (len(text) >= 2) and (first in QUOTES):
if first == last:
return unescape(text[1:-1])
else:
raise ParseException('Unmatching quotes')
else:
return text
def parse_line(line):
line = line.strip()
if not is_pair(line):
raise ParseException('Not a valid key-val line')
key, val = line.split('=', 1)
return (key, parse_quoted(val))
def parse_string(string, schema={}, expand=False, env={}, merge_env=False):
lookup = env.copy()
result = env.copy() if merge_env else {}
for line in [l for l in string.splitlines() if is_pair(l.strip())]:
key, val = parse_line(line)
if expand:
result[key] = Template(val.replace('\\$', '$$')).substitute(lookup)
else:
result[key] = val
lookup[key] = result[key] # cache the result to lookup dict
# cast values according to the schema
for key in schema:
cast = schema[key]
cast = truth if cast == bool else cast
if key in result:
result[key] = cast(result[key])
return result
| mit | -5,616,391,606,090,050,000 | 23.726316 | 79 | 0.604938 | false |
ministryofjustice/cla_backend | cla_backend/apps/knowledgebase/management/commands/builddata.py | 1 | 1649 | """"
usage-
./manage.py builddata load_knowledgebase_csv ~/Documents/Scratch/knowledgebase.csv
Creates derived dataset of constants used by JS frontend. Data is sourced from cla_common.
you can then load the fixture with-
./manage.py loaddata cla_backend/apps/knowledgebase/fixtures/kb_from_spreadsheet.json
"""
from django.core.management.base import BaseCommand
import os
import sys
from ._csv_2_fixture import KnowledgebaseCsvParse
class Command(BaseCommand):
args = "load_knowledgebase_csv CSV_FILE.csv"
help = (
"Create a derived dataset. At present, just load_knowledgebase_csv "
"is implemented. It loads a CSV spreadsheet into a fixture ready "
"to be loaddata'ed into DB"
)
KNOWLEDGEBASE_FIXTURE = "cla_backend/apps/knowledgebase/fixtures/kb_from_spreadsheet.json"
def handle(self, *args, **options):
if args[0] == "load_knowledgebase_csv":
if len(args) != 2:
self.stdout.write("Last argument needs to be path to CSV file")
sys.exit(-1)
if not os.access(args[1], os.R_OK):
self.stdout.write("File '%s' couldn't be read" % args[1])
sys.exit(-1)
# read in CSV and feed to fixture builder
f_in = open(args[1], "rU")
c = KnowledgebaseCsvParse(f_in)
json = c.fixture_as_json()
f_in.close()
# write json doc to fixture file
f_out = open(self.KNOWLEDGEBASE_FIXTURE, "w")
f_out.write(json)
f_out.close()
self.stdout.write("Fixture written to %s" % self.KNOWLEDGEBASE_FIXTURE)
| mit | 2,654,605,644,263,438,300 | 32.653061 | 94 | 0.627653 | false |
bfirsh/pspec | pspec/groups/base.py | 1 | 1889 |
class BaseGroup(object):
"""
A collection of tests and other groups.
Groups are stored as a tree. They know of their children and parent.
"""
def __init__(self, subject=None, children=None, parent=None):
# An identifier for this group
self.subject = subject
# This group's subgroups
self.children = children or []
# The group this group inherits from
self.parent = parent
self.tests = []
self.is_collecting = False
def __repr__(self):
return u'<%s: %s>' % (self.__class__.__name__, self.subject)
def add_child(self, group):
"""
Adds group as a child of this group and sets its parent.
"""
group.parent = self
self.children.append(group)
def get_collecting_group(self):
"""
Returns the right-most group that is currently collecting from this group
downwards.
"""
for group in reversed(self.children):
result = group.get_collecting_group()
if result:
return result
if self.is_collecting:
return self
def get_descendant_tests(self):
"""
Returns a flat list of tests from this group's descendants, excluding this
group's tests.
"""
tests = []
for child in self.children:
tests.extend(child.tests)
tests.extend(child.get_descendant_tests())
return tests
def __iter__(self):
"""
Returns a flat list of all tests from this group and its descendants.
"""
return iter(self.tests + self.get_descendant_tests())
def add_test(self, test):
"""
Adds a test to this group.
"""
assert not hasattr(test, '_pspec_group')
test._pspec_group = self
self.tests.append(test)
| bsd-3-clause | -153,653,773,385,091,260 | 25.985714 | 83 | 0.561143 | false |
CompPhysics/ThesisProjects | doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/CCD_matrix_implementation_class_mkII.py | 1 | 40959 | from numpy import *
from time import *
from matplotlib.pyplot import *
from scipy.sparse import csr_matrix, coo_matrix
class electronbasis():
def __init__(self, N, rs, Nparticles):
self.rs = rs
self.states = []
self.nstates = 0
self.nparticles = Nparticles
Nm = int(sqrt(N) + 1)
self.Nm = Nm
#Creating the basis
for x in range(-Nm, Nm):
for y in range(-Nm, Nm):
for z in range(-Nm,Nm):
e = x*x + y*y + z*z
if e <=N:
self.states.append([e, x,y,z, 1])
self.states.append([e, x,y,z,-1])
self.nstates += 2
self.states.sort() #Sorting the basis in increasing energy
self.L3 = (4*pi*self.nparticles*self.rs**3)/3.0
self.L2 = self.L3**(2/3.0)
self.L = pow(self.L3, 1/3.0)
for i in range(self.nstates):
self.states[i][0] *= 2*(pi**2)/self.L**2 #Multiplying in the missing factors in the single particle energy
self.states = array(self.states) #converting to array to utilize vectorized calculations
def hfenergy(self, nParticles):
#Calculating the HF-energy (reference energy)
e0 = 0.0
if nParticles<=self.nstates:
for i in range(nParticles):
e0 += self.h(i,i)
for j in range(nParticles):
if j != i:
e0 += .5*self.v(i,j,i,j)
else:
#Safety for cases where nParticles exceeds size of basis
print "Not enough basis states."
return e0
def h(self, p,q):
#Return single particle energy
return self.states[p,0]*(p==q)
def veval(self, p,q,r,s):
#A test for evaluating the two-body interaction
val = ""
if self.kdplus(p,q,r,s):
val+= "kdplus "
if self.kdspin(p,r):
val += "Direct[kdspin_pr "
if self.kdspin(q,s):
val += "kdspin_qs "
if self.kdwave(p,r) != 0:
val += "kdwave!=0 "
val += str(self.absdiff2(r,p))
val += "] "
if self.kdspin(p,s):
val += "Exchange[kdspin_pr "
if self.kdspin(q,r):
val += "kdspin_qs "
if self.kdwave(p,s) != 0:
val += "kdwave!=0 "
val += str(self.absdiff2(s,p))
val += "] "
return val
def vevalHF(self, N):
#Evaluation of all expressions of two-body contributions to the HF-energy
for i in range(N):
for j in range(N):
if i!= j:
print "<",i,j,"|",i,j,"> =",self.veval(i,j,i,j)
def V(self, kp,kq,kr,ks):
#k = (energy, kx, ky, kz, ms)
# Vectorized interaction
#
#kplus
kdplus = (kp[1,:]+kq[1,:]==kr[1,:]+ks[1,:])*(kp[2,:]+kq[2,:]==kr[2,:]+ks[2,:])*(kp[3,:]+kq[3,:]==kr[3,:]+ks[3,:])*4*pi/self.L3#d_k+k k+k
#print "kdplus:", kdplus
kdspin1 = (kp[4,:]==kr[4,:])*(kq[4,:]==ks[4,:])*1
kdwave1 = abs((kp[1,:]==kr[1,:])*(kp[2,:]==kr[2,:])*(kp[3,:]==kr[3,:])-1)
#print "kdspin1:", kdspin1
#print "kdwave1:", kdwave1
absdiff2_1 = ((kr[1,:]-kp[1,:])**2+(kr[2,:]-kp[2,:])**2+(kr[3,:]-kp[3,:])**2) #absdiff2
term1=(4.0*absdiff2_1*pi**2)/self.L2
term1[term1==0] = 1
kdspin2 = (kp[4,:]==ks[4,:])*(kq[4,:]==kr[4,:])*1
kdwave2 = abs((kp[1,:]==ks[1,:])*(kp[2,:]==ks[2,:])*(kp[3,:]==ks[3,:])-1)
#print "kdspin2:",kdspin2
#print "kdwave2:",kdwave2
absdiff2_2 = ((ks[1,:]-kp[1,:])**2+(ks[2,:]-kp[2,:])**2+(ks[3,:]-kp[3,:])**2) #absdiff2
#print absdiff2_2
term2=(4.0*absdiff2_2*pi**2)/self.L2
term2[term2==0] = 1
return kdplus*(kdspin1*kdwave1/term1 - kdspin2*kdwave2/term2)
def v(self,p,q,r,s):
#Two body interaction
#To optimize bottleneck: vectorize this function ! (remove if-tests)
val = 0
terms = 0.0
kdpl = self.kdplus(p,q,r,s)
if kdpl != 0:
val = 4*pi/self.L3
term1 = 0.0
term2 = 0.0
if self.kdspin(p,r)*self.kdspin(q,s)==1:
if self.kdwave(p,r) != 1.0:
term1=(4*self.absdiff2(r,p)*pi**2)/self.L2
terms += 1.0/term1
if self.kdspin(p,s)*self.kdspin(q,r)==1:
if self.kdwave(p,s) != 1.0:
term2=(4*self.absdiff2(s,p)*pi**2)/self.L2
terms -= 1.0/term2
return val*terms
#The following is a series of kroenecker deltas used in the two-body interactions.
#Run kd_integrity() to ensure that they work as intended.
def kdi(self,a,b):
#Kroenecker delta integer
return 1.0*(a==b)
def kda(self,a,b):
#Kroenecker delta array
d = 1.0
#print a,b,
for i in range(len(a)):
d*=(a[i]==b[i])
return d
def kdfullplus(self,p,q,r,s):
#Kroenecker delta wavenumber p+q,r+s
return self.kda(self.states[p][1:5]+self.states[q][1:5],self.states[r][1:5]+self.states[s][1:5])
def kdplus(self,p,q,r,s):
#Kroenecker delta wavenumber p+q,r+s
return self.kda(self.states[p][1:4]+self.states[q][1:4],self.states[r][1:4]+self.states[s][1:4])
def kdspin(self,p,q):
#Kroenecker delta spin
return self.kdi(self.states[p][4], self.states[q][4])
def kdwave(self,p,q):
#Kroenecker delta wavenumber
return self.kda(self.states[p][1:4],self.states[q][1:4])
def absdiff2(self,p,q):
val = 0.0
for i in range(1,4):
val += (self.states[p][i]-self.states[q][i])*(self.states[p][i]-self.states[q][i])
#if val == 0:
# print "div0"
return val
def kd_integrity(self):
#test integrity of kroenecker deltas
print "Array KD :", self.kda([0,1,2], [0,1,2]) == True
print "Integer KD :", self.kdi(1,1) == True
print "Opposite spin :", self.kdspin(0,1) == False
print "Equal spin :", self.kdspin(1,1) == True
print "Wavenumber equal :", self.kdwave(1,0) == True
print "Wavenumber not equal:", self.kdwave(1,2) == False
def liststates(self):
for i in range(self.nstates):
print self.states[i]
class tempbase():
def __init__(self, Np, Nh):
self.nstates = Np+Nh
self.nparticles = Np
self.nholes = Nh
class CCD():
def __init__(self, bs):
self.bs = bs
self.nstates = bs.nstates #total number of states
self.Nh = bs.nparticles #number of hole states (conflicting naming should be resolved in class electrongas)
self.Np = self.nstates-bs.nparticles #number of particle states
self.Vhhhh = csr_matrix((self.Nh**2, self.Nh**2))
self.Vhhpp = csr_matrix((self.Nh**2, self.Np**2))
self.Vphhp = csr_matrix((self.Nh*self.Np, self.Nh*self.Np))
self.Vhpph = csr_matrix((self.Nh*self.Np, self.Nh*self.Np))
self.Vpppp = csr_matrix((self.Np**2, self.Np**2))
self.Vpphh = csr_matrix((self.Np**2, self.Nh**2))
self.Tpphh = csr_matrix((self.Np**2, self.Nh**2))
self.Epphh = zeros((self.Np**2, self.Nh**2))
self.setup_matrices_optimized()
################################################
##
## MAIN PROGRAM ROUTINES
##
################################################
def setup_matrices_optimized(self):
#Fill inn all matrices
#This is probably the bottleneck right now, should apply symmetries to oprimize
Nh = self.Nh
Np = self.Np
#alternate setup for Epphh
E = self.bs.states[:,0]
pp = arange(Np**2)
hh = arange(Nh**2)
a = pp%Np
b = pp//Np
i = hh%Nh
j = hh//Nh
ij = kron(ones((Np**2,1)), E[i] + E[j])
ab = kron(ones((Nh**2,1)), E[a+Nh] + E[b+Nh])
self.Epphh = ij - ab.T
t0 = clock()
"""
for i in range(Nh):
for j in range(i,Nh):
for a in range(Np):
for b in range(a,Np):
val = self.bs.v(a+Nh,i,j,b+Nh)
if val != 0:
self.Vphhp[a + i*Np, j + b*Nh] = val
self.Vphhp[b + j*Np, i + a*Nh] = val
val = self.bs.v(j,a+Nh,b+Nh,i)
if val != 0:
self.Vphhp[a + j*Np, i + b*Nh] = val
self.Vhpph[j + a*Nh, b + i*Np] = val
self.Vphhp[b + i*Np, j + a*Nh] = val
self.Vhpph[i + b*Nh, a + j*Np] = val
val = self.bs.v(a+Nh,b+Nh,i,j)
#eps = self.bs.h(i,i) + self.bs.h(j,j) -self.bs.h(a+Nh,a+Nh) - self.bs.h(b+Nh,b+Nh)
eps = self.Epphh[a + b*Np, i + j*Nh]
#if self.Epphh[a + b*Np, i +j*Nh] != val:
# #print val, self.Epphh[a + b*Np, i +j*Np]
# self.Epphh[a + b*Np, i + j*Nh] = eps
# self.Epphh[a + b*Np, j + i*Nh] = eps
# self.Epphh[b + a*Np, i + j*Nh] = eps
# self.Epphh[b + a*Np, j + i*Nh] = eps
if val != 0:
self.Vpphh[a + b*Np, i + j*Nh] = val
self.Vpphh[a + b*Np, j + i*Nh] = -val
self.Vpphh[b + a*Np, i + j*Nh] = -val
self.Vpphh[b + a*Np, j + i*Nh] = val
self.Vhhpp[i + j*Nh, a + b*Np] = val
self.Vhhpp[j + i*Nh, b + a*Np] = val
self.Vhhpp[j + i*Nh, a + b*Np] = -val
self.Vhhpp[i + j*Nh, b + a*Np] = -val
self.Tpphh[a + b*Np, i + j*Nh] = val/eps
self.Tpphh[a + b*Np, j + i*Nh] = -val/eps
self.Tpphh[b + a*Np, i + j*Nh] = -val/eps
self.Tpphh[b + a*Np, j + i*Nh] = val/eps
"""
t1 = clock()
print "Time spent setting up amplitudes and eps:", t1-t0
t0 = clock()
B = blocks(tb)
self.Vhhhh = B.Vhhhh
self.Vpppp = B.Vpppp
self.Vhhpp = B.Vhhpp
self.Vpphh = B.Vpphh
self.Vhpph = B.Vhpph
self.Vphhp = B.Vphhp
t1 = clock()
self.Tpphh = csr_matrix(self.Vpphh/self.Epphh)
print "Time spent setting up interactions:", t1-t0
"""
optiv = optimV(self.bs)
self.Vhhhh = csr_matrix(optiv.Vhhhh)
self.Vpppp = csr_matrix(optiv.Vpppp)
t2 = clock()
print "Time spent on setting up hhpp terms:", t1-t0
print "Time spent on setting up pppp and hhhh terms:", t2-t1
"""
"""
t0 = clock()
for i in range(Nh):
for j in range(i,Nh):
for k in range(Nh):
for l in range(k,Nh):
val = self.bs.v(i,j,k,l)
if val!=0:
self.Vhhhh[i + j*Nh, k+ l*Nh] = val
self.Vhhhh[j + i*Nh, l+ k*Nh] = val
self.Vhhhh[j + i*Nh, k+ l*Nh] = -val
self.Vhhhh[i + j*Nh, l+ k*Nh] = -val
t1 = clock()
for a in range(Np):
for b in range(a,Np):
for c in range(Np):
for d in range(c,Np):
val = self.bs.v(a+Nh,b+Nh,c+Nh,d+Nh)
if val!= 0:
self.Vpppp[a + b*Np, c+ d*Np] = val
self.Vpppp[b + a*Np, d+ c*Np] = val
self.Vpppp[b + a*Np, c+ d*Np] = -val
self.Vpppp[a + b*Np, d+ c*Np] = -val
t2 = clock()
print "Time spent setting up Vhhhh (iteratively):", t1-t0
print "Time spent setting up Vpppp (iteratively):", t2-t1
"""
#Aligned matrices for L3, Q2, Q3 and Q4 multiplications
self.VL3 = self.perm_ind_ib_aj2ai_bj(self.Vhpph)
self.VQ2 = self.perm_ind_ij_ab2ai_bj(self.Vhhpp)
self.VQ3 = self.perm_ind_ij_ba2iab_j(self.Vhhpp)
self.VQ4 = self.perm_ind_ij_ba2bji_a(self.Vhhpp)
def advance(self):
#Main loop, run this to advance solution one iteration
#setup linear contributions
self.sL1()
self.sL2()
self.sL3()
#setup quadratic contributions
self.sQ1()
self.sQ2()
self.sQ3()
self.sQ4()
#permute contributions
self.PL3 = self.L3 - self.perm_ind_ba_ij(self.L3) - self.perm_ind_ab_ji(self.L3) + self.perm_ind_ba_ji(self.L3)
self.PQ2 = self.Q2 - self.perm_ind_ab_ji(self.Q2)
self.PQ3 = self.Q3 - self.perm_ind_ab_ji(self.Q3)
self.PQ4 = self.Q4 - self.perm_ind_ba_ij(self.Q4)
#Sum all contributions
self.Tpphh = (self.Vpphh + .5*(self.L1 + self.L2) + self.PL3 + .25*self.Q1 + self.PQ2 - .5*(self.PQ3 + self.PQ4))/self.Epphh
#self.sp_epsdiv(self.Tpphh)
#calculate energy
self.energy()
#Update UI
print " Correlation energy:", self.C_energy
#Update amplitudes (have been temporarily dense due to division above)
self.Tpphh = csr_matrix(self.Tpphh)
def e0_(self):
Np = self.Np
Nh = self.Nh
e0 = 0.0
for i in range(Nh):
for j in range(Nh):
for a in range(Np):
for b in range(Np):
e0 += self.Vhhpp[i+j*Nh, a+b*Np]*self.Tpphh[a + b*Np, i+j*Nh]
return e0
def energy(self):
Np = self.Np
Nh = self.Nh
C = self.Vhhpp.dot(self.Tpphh)
N = len(C)
#self.C_energy = .25*sum(C.diagonal())
self.C_energy = .25*sum(C[range(0,N), range(0,N)])
def sp_epsdiv(self, M):
#sparse matrix energy division
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
print self.bs.states[:,0][i] + self.bs.states[:,0][j] - self.bs.states[:,0][a] - self.bs.states[:,0][b]
M.data/=(self.bs.states[:,0][i] + self.bs.states[:,0][j] - self.bs.states[:,0][a] - self.bs.states[:,0][b])
#######################################
##
## SPARSE PERMUTATION ROUTINES
## A set of functions that efficiently permutes and reshapes sparse matrix representations of rank 4 tensors
##
#######################################
def unpack_indptr(self,indptr):
#Unpack row-compressed indices
I =zeros(indptr[-1], dtype = int)
for i in range(len(indptr)-1):
I[indptr[i]:indptr[i+1]] = i
return I
def perm_ind_ai_bj(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a + i*self.Np, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ia_bj(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (i + a*self.Nh, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_bj_ai(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b + j*self.Np, a + i*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ai_jb(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a + i*self.Np, j + b*self.Nh)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ba_ij(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b + a*self.Np, i + j*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_ab_ji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a + b*self.Np, j + i*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_ba_ji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b + a*self.Np, j + i*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_i_jab(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (i, j + a*self.Nh+ b*self.Nh*self.Np)), shape=(self.Nh, self.Nh*self.Np**2)).tocsr()
def perm_ind_a_bji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (a, b + j*self.Np+ i*self.Nh*self.Np)), shape=(self.Np, self.Np*self.Nh**2)).tocsr()
def perm_ind_b_aji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,b,i,j = rows%self.Np, rows//self.Np,cols%self.Nh, cols//self.Nh
return coo_matrix((M.data, (b, a + j*self.Np+ i*self.Nh*self.Np)), shape=(self.Np, self.Np*self.Nh**2)).tocsr()
def perm_ind_ij_ab2ai_bj(self,M):
#Sparse permutations
#print M.shape
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,j,a,b = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a + i*self.Np, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ij_ba2iab_j(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,j,b,a = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (i + a*self.Nh + b*self.Nh*self.Np, j)), shape=(self.Np*self.Nh*self.Np, self.Nh)).tocsr()
def perm_ind_ij_ba2bji_a(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,j,b,a = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (b + j*self.Np + i*self.Np*self.Nh, a)), shape=(self.Np*self.Nh**2, self.Np)).tocsr()
#def perm_ind_ai_bj2ab_ij(self,M):
# #Sparse permutations
# cols, rows = M.indices, self.unpack_indptr(M.indptr)
# a,i,b,j = rows%self.Np, rows//self.Np,cols%self.Np, cols//self.Np
# return coo_matrix((M.data, (a + b*self.Np,i + j*self.Nh)), shape=(self.Np**2, self.Nh**2)).tocsr()
def perm_ind_ai_bj2a_bji(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,i,b,j = rows%self.Np, rows//self.Np,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a, b + j*self.Np + i*self.Np*self.Nh)), shape=(self.Np, self.Np*self.Nh**2)).tocsr()
def perm_ind_ib_aj2ai_bj(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i,b,a,j = rows%self.Nh, rows//self.Nh,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a + i*self.Np, b + j*self.Np)), shape=(self.Np*self.Nh, self.Np*self.Nh)).tocsr()
def perm_ind_ai_bj2ab_ij(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a,i,b,j = rows%self.Np, rows//self.Np,cols%self.Np, cols//self.Np
return coo_matrix((M.data, (a+ b*self.Np, i + j*self.Nh)), shape=(self.Np**2,self.Nh**2)).tocsr()
def perm_ind_a_bij2ab_ij(self,M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
a = rows
b = cols%self.Np
i = ((cols-b)/self.Np)%self.Nh
j = ((cols-b)/self.Np)//self.Nh
return coo_matrix((M.data, (a+ b*self.Np, i + j*self.Nh)), shape=(self.Np**2,self.Nh**2)).tocsr()
def perm_ind_i_jab2ab_ij(self, M):
#Sparse permutations
cols, rows = M.indices, self.unpack_indptr(M.indptr)
i = rows
j = cols%self.Nh
a = ((cols-j)/self.Nh)%self.Np
b = ((cols-j)/self.Nh)//self.Np
return coo_matrix((M.data, (a+ b*self.Np, i + j*self.Nh)), shape=(self.Np**2,self.Nh**2)).tocsr()
##############################################
##
## Contributions to the CCD amplitude
## As in S-B, the contributions is defined as linear L (t) and quadratic Q (tt)
## The CCD amplitude equation then reads
## Tpphh = (v + L1 + L2 + L3 + Q1 + Q2 + Q3 + Q4)/eps
##
##############################################
def sL1(self):
self.L1 = self.Vpppp.dot(self.Tpphh)
def sL2(self):
self.L2 = (self.Vhhhh.T.dot(self.Tpphh.T)).T
def sL3(self):
self.L3 = self.TL3()
def sQ1(self):
self.Q1 = ((self.Vhhpp.dot(self.Tpphh)).T.dot(self.Tpphh.T)).T
def sQ2(self):
self.Q2 = self.TQ2(self.Tpphh, self.Vhhpp)
def sQ3(self):
self.Q3 = self.TQ3(self.Tpphh, self.Vhhpp)
def sQ4(self):
self.Q4 = self.TQ4(self.Tpphh, self.Vhhpp)#[a+b*Np, i + j*Nh]
def TL3(self):
#The L3 Contribution
self.TL3_ = self.perm_ind_ai_bj(self.Tpphh)
L3_ = (self.VL3.T.dot(self.TL3_.T)).T
return self.perm_ind_ai_bj2ab_ij(L3_)
def TQ2(self,T,V):
#The Q2 contrubution
TQ21 = self.perm_ind_ai_bj(self.Tpphh)
TQ22 = self.perm_ind_bj_ai(self.Tpphh)
Q2_ = (self.VQ2.dot(TQ22).T.dot(TQ21.T)).T
return self.perm_ind_ai_bj2ab_ij(Q2_)
def TQ3(self,T,V):
#The Q3-contrubution
TQ31 = self.perm_ind_i_jab(self.Tpphh)
Q3_ = (self.VQ3.dot(TQ31).T.dot(TQ31.T)).T
return self.perm_ind_i_jab2ab_ij(Q3_)
def TQ4(self,T,V):
#The Q4 contribution
TQ41 = self.perm_ind_a_bji(self.Tpphh)
Q4_ = (self.VQ4.dot(TQ41).T.dot(TQ41.T)).T
return self.perm_ind_a_bij2ab_ij(Q4_)
class optimV():
def __init__(self, bs):
self.bs = bs
self.Np = bs.nstates-bs.nparticles
self.Nh = bs.nparticles
self.Ns = bs.nstates
self.Nm = self.bs.Nm #Max possible momentum
self.Nm2 = self.Nm**2
self.Nm3 = self.Nm**3
self.Vpppp = zeros((self.Np**2, self.Np**2))
self.Vhhhh = zeros((self.Nh**2, self.Nh**2))
self.Vhhpp = zeros((self.Nh**2, self.Np**2))
self.Vhpph = zeros((self.Nh*self.Np, self.Nh*self.Np))
#self.Vhhhh = zeros((self.Nh**2, self.Nh**2))
self.setup_pppp()
self.setup_hhhh()
self.setup_hhpp()
self.setup_hpph()
#setup hp
#seutp ph
def ident(self,v):
#A unique identifying integer for the momentum combinations
return v[0] + v[1]*self.Nm + v[2]*self.Nm2 + v[3]*self.Nm3
def setup_pppp(self):
t0 = clock()
Np = self.Np
combs_pp = 20000*ones((Np**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Np**2))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+self.Nh][1:5]+self.bs.states[q+self.Nh][1:5]
iv = self.ident(v)
combs_pp[p + q*Np, :] = iv #this one should not be zero, as most elements in array is already zero, or?
idents[p+q*Np] = iv
spectrum = unique(idents)
combs_pp[combs_pp!=combs_pp.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_pp = combs_pp
t1 = clock()
print "Time spent determining unique sortings:", t1-t0
self.setup_Vpppp()
def setup_Vpppp(self):
for P in range(self.Np**2):
for Q in range(P,self.Np**2):
if self.combs_pp[P,Q] != 20000:
a,b = P%self.Np, P//self.Np
c,d = Q%self.Np, Q//self.Np
#if self.ident(self.bs.states[a][1:5]+self.bs.states[b][1:5])==self.ident(self.bs.states[c][1:5]+self.bs.states[d][1:5]):
#if product(self.bs.states[a+Nh][1:5]+self.bs.states[b+Nh][1:5])==product(self.bs.states[c+Nh][1:5]+self.bs.states[d+Nh][1:5]):
val = self.bs.v(a+self.Nh,b+self.Nh,c+self.Nh,d+self.Nh)
self.Vpppp[P,Q] = val
self.Vpppp[Q,P] = val
def setup_hhhh(self):
Nh = self.Nh
combs_hh = 20000*ones((Nh**2,Nh**2), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
idents[p+q*Nh] = iv
spectrum = unique(idents)
combs_hh[combs_hh!=combs_hh.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_hh = combs_hh
self.setup_Vhhhh()
def setup_Vhhhh(self):
for P in range(self.Nh**2):
for Q in range(P,self.Nh**2):
if self.combs_pp[P,Q] != 20000:
i,j = P%self.Nh, P//self.Nh
k,l = Q%self.Nh, Q//self.Nh
#if self.ident(self.bs.states[a][1:5]+self.bs.states[b][1:5])==self.ident(self.bs.states[c][1:5]+self.bs.states[d][1:5]):
#if product(self.bs.states[a+Nh][1:5]+self.bs.states[b+Nh][1:5])==product(self.bs.states[c+Nh][1:5]+self.bs.states[d+Nh][1:5]):
val = self.bs.v(i,j,k,l)
self.Vpppp[P,Q] = val
self.Vpppp[Q,P] = val
def setup_hhpp(self):
Nh = self.Nh
Np = self.Np
combs_hh = 20000*ones((Nh**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
combs_pp = 20000*ones((Nh**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh*Np))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+Nh][1:5]+self.bs.states[Nh + q][1:5]
iv = self.ident(v)
combs_pp[:,p + q*Np] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Np] = iv
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Np] = iv
#spectrum = unique(idents)
combs_hh[combs_pp!=combs_hh]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_hp = combs_hh
self.setup_Vhhpp()
def setup_Vhhpp(self):
for P in range(self.Nh**2):
for Q in range(self.Np**2):
if self.combs_hp[P,Q] != 20000:
#Run trough common setup routine here
i,j = P%self.Nh, P//self.Nh
a,b = Q%self.Np, Q//self.Np
val = self.bs.v(i,j,a+self.Nh,b+self.Nh)
self.Vhhpp[P,Q] = val
#self.Vpppp[Q,P] = val
self.Vpphh = self.Vhhpp.T
def setup_hpph(self):
Nh = self.Nh
Np = self.Np
combs_hp = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
combs_ph = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Np):
v = self.bs.states[p][1:5]+self.bs.states[q+Nh][1:5]
iv = self.ident(v)
combs_hp[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
combs_ph[:,q + p*Np] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Nh] = iv
#spectrum = unique(idents)
combs_hp[combs_hp!=combs_ph]=20000 #identify each pair of quantum numbers sharing the same added momentum
self.combs_hpph = combs_hp
self.setup_Vhpph()
def setup_Vhpph(self):
for P in range(self.Nh*self.Np):
for Q in range(self.Np*self.Nh):
if self.combs_hpph[P,Q] != 20000:
i,a = P%self.Nh, P//self.Nh
b,j = Q%self.Np, Q//self.Np
#if self.ident(self.bs.states[a][1:5]+self.bs.states[b][1:5])==self.ident(self.bs.states[c][1:5]+self.bs.states[d][1:5]):
#if product(self.bs.states[a+Nh][1:5]+self.bs.states[b+Nh][1:5])==product(self.bs.states[c+Nh][1:5]+self.bs.states[d+Nh][1:5]):
val = self.bs.v(i,a,b,j)
self.Vhpph[P,Q] = val
#self.Vhpph[Q,P] = val
class blocks():
def __init__(self, bs):
self.bs = bs
self.Np = bs.nstates-bs.nparticles
self.Nh = bs.nparticles
self.Ns = bs.nstates
self.Nm = self.bs.Nm #Max possible momentum
self.Vhhhh = zeros((self.Nh**2, self.Nh**2))
self.Vhhpp = zeros((self.Nh**2, self.Np**2))
self.Vphhp = zeros((self.Nh*self.Np, self.Nh*self.Np))
self.Vhpph = zeros((self.Nh*self.Np, self.Nh*self.Np))
self.Vpppp = zeros((self.Np**2, self.Np**2))
self.Vpphh = zeros((self.Np**2, self.Nh**2))
self.Tpphh = zeros((self.Np**2, self.Nh**2))
self.Epphh = zeros((self.Np**2, self.Nh**2))
#self.setup_matrices_optimized()
#self.Tpphh = random.uniform(0,1,(self.Np**2, self.Nh**2))
self.setup_pppp()
self.setup_hhhh()
self.setup_hhpp()
self.setup_hpph()
def ident(self,v):
#A unique identifying integer for the momentum combinations
return v[0] + v[1]*self.bs.Nm + v[2]*self.bs.Nm**2 + v[3]*self.bs.Nm**3
def setup_pppp(self):
Np = self.Np
combs_pp = 20000*ones((Np**2,Np**2), dtype = int) #arbitrary large number since identifier will include zeros
idents = zeros((Np**2))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+self.Nh][1:5]+self.bs.states[q+self.Nh][1:5]
iv = self.ident(v)
combs_pp[p + q*Np, :] = iv #this one should not be zero, as most elements in array is already zero, or?
idents[p+q*Np] = iv
combs_pp[combs_pp!=combs_pp.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
t = where(combs_pp!=2000)
a = self.bs.states[t[0]%Np + self.Nh].T
b = self.bs.states[t[0]//Np + self.Nh].T
c = self.bs.states[t[1]%Np + self.Nh].T
d = self.bs.states[t[1]//Np + self.Nh].T
data = self.bs.V(a,b,c,d)
#print data[data!=0]
self.Vpppp = coo_matrix((data, (t[0], t[1])), shape=(self.Np**2, self.Np**2)).tocsr()
def setup_hhhh(self):
Np = self.Np
Nh = self.Nh
combs_hh = 20000*ones((Nh**2,Nh**2), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
#idents[p+q*Nh] = iv
combs_hh[combs_hh!=combs_hh.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
t = where(combs_hh!=2000)
a = self.bs.states[t[0]%Nh ].T
b = self.bs.states[t[0]//Nh].T
c = self.bs.states[t[1]%Nh ].T
d = self.bs.states[t[1]//Nh].T
data = self.bs.V(a,b,c,d)
#print data[data!=0]
self.Vhhhh = coo_matrix((data, (t[0], t[1])), shape=(self.Nh**2, self.Nh**2)).tocsr()
def setup_hpph(self):
Np = self.Np
Nh = self.Nh
combs_hp = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
combs_ph = 20000*ones((Nh*Np,Nh*Np), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Np):
v = self.bs.states[p][1:5]+self.bs.states[q+Nh][1:5]
iv = self.ident(v)
combs_hp[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
combs_ph[:,q + p*Np ] = iv
#idents[p+q*Nh] = iv
combs_hp[combs_hp!=combs_ph.T]=20000 #identify each pair of quantum numbers sharing the same added momentum
t = where(combs_hp!=2000)
i = self.bs.states[t[0]%Nh ].T
a = self.bs.states[t[0]//Nh + Nh].T
b = self.bs.states[t[1]%Np + Nh].T
j = self.bs.states[t[1]//Np].T
data = self.bs.V(i,a,b,j)
#print data[data!=0]
self.Vhpph = coo_matrix((data, (t[0], t[1])), shape=(self.Nh*Np, self.Nh*Np)).tocsr()
self.Vphhp = self.Vhpph.T
def setup_hhpp(self):
Np = self.Np
Nh = self.Nh
combs_hh = 20000*ones((Nh*Nh,Np*Np), dtype = int) #arbitrary large number since identifier will include zeros
combs_pp = 20000*ones((Nh*Nh,Np*Np), dtype = int) #arbitrary large number since identifier will include zeros
#idents = zeros((Nh**2))
for p in range(Nh):
for q in range(Nh):
v = self.bs.states[p][1:5]+self.bs.states[q][1:5]
iv = self.ident(v)
combs_hh[p + q*Nh, :] = iv #this one should not be zero, as most elements in array is already zero, or?
#combs_ph[:,q + p*Np ] = iv
#idents[p+q*Nh] = iv
#idents = zeros((Nh**2))
for p in range(Np):
for q in range(Np):
v = self.bs.states[p+ Nh][1:5]+self.bs.states[q+Nh][1:5]
iv = self.ident(v)
combs_pp[:,p + q*Np] = iv #this one should not be zero, as most elements in array is already zero, or?
#combs_ph[:,q + p*Np ] = iv
#idents[p+q*Nh] = iv
combs_hh[combs_hh!=combs_pp]=20000 #identify each pair of quantum numbers sharing the same added momentum
t = where(combs_hh!=2000)
i = self.bs.states[t[0]%Nh ].T
j = self.bs.states[t[0]//Nh].T
a = self.bs.states[t[1]%Np + Nh].T
b = self.bs.states[t[1]//Np + Nh].T
data = self.bs.V(i,j,a,b)
#print data[data!=0]
self.Vhhpp = coo_matrix((data, (t[0], t[1])), shape=(self.Nh**2, self.Np**2)).tocsr()
self.Vpphh = self.Vhhpp.T
def compare(Z1,Z2):
Nx = len(Z1)
Ny = len(Z1[0])
EQ = True
NE = 0
toter = 0
er = 0
try:
for i in range(Nx):
for e in range(Ny):
if Z1[i,e]!=Z2[i,e]:
#print Z1[i,e],Z2[i,e]
er = abs(Z1[i,e]-Z2[i,e])
if er>toter:
toter = er
NE = 1
except:
print "NOT EQUAL, total failure"
NE = 1
return NE, toter
def Vpppp_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for a in range(Np):
for b in range(Np):
for c in range(Np):
for d in range(Np):
if Z1[a + b*Np, c+ d*Np] != bs.v(a+Nh,b+Nh,c+Nh,d+Nh):
print a,b,c,d, Z1[a + b*Np, c+ d*Np], bs.v(a+Nh,b+Nh,c+Nh,d+Nh)
def Vhpph_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for i in range(Nh):
for a in range(Np):
for b in range(Np):
for j in range(Nh):
if Z1[i + a*Nh, b+ j*Np] != bs.v(i,a+Nh,b+Nh,j):
print i,a,b,j, Z1[i + a*Nh, b+ j*Np], bs.v(i,a+Nh,b+Nh,j)
def Vphhp_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for i in range(Nh):
for a in range(Np):
for b in range(Np):
for j in range(Nh):
if Z1[a + i*Np, j+ b*Nh] != bs.v(a+Nh,i,j,b+Nh):
print i,a,b,j, Z1[a + i*Np, j+ b*Nh], bs.v(a+Nh,i,j,b+Nh)
def Vhhpp_check(Z1, bs):
Np = bs.nstates-bs.nparticles
Nh = bs.nparticles
for i in range(Nh):
for a in range(Np):
for b in range(Np):
for j in range(Nh):
if Z1[i + j*Nh, a+ b*Np] != bs.v(i,j,a+Nh,b+Nh):
print i,j,a,b, Z1[i + j*Nh, a+ b*Np], bs.v(i,j,a+Nh,b+Nh)
t0 = clock()
tb = electronbasis(2,1.0,14)
t1 = clock()
print "Time spent on initializing basis:", t1-t0
print "====="
print "Number of states :", tb.nstates
print "Number of particles:", tb.nparticles
print "====="
t0 = clock()
Q = CCD(tb)
t1 = clock()
print "Time spent on initializing solver:", t1-t0
#B = optimV(tb)
t0 = clock()
"""
B = blocks(tb)
t2 = clock()
B.setup_hhpp()
print "Time spent initializing vectorized interaction:", t2-t0
Vhhpp_check(B.Vhhpp.toarray(), tb)
"""
#Q.Vpppp = B.Vpppp
for i in range(20):
Q.advance()
"""
print "pppp:", compare(Q.Vpppp.toarray(), B.Vpppp.toarray())
print compare(Q.Vhhpp.toarray(), B.Vhhpp)
print compare(Q.Vpphh.toarray(), B.Vhhpp.T)
print compare(Q.Vhpph.toarray(), B.Vhpph)
#Q.Vpppp = csr_matrix(B.Vpppp)
figure(1)
imshow(B.Vpppp.toarray())
show()
figure(2)
imshow(Q.Vpppp.toarray())
show()
""" | cc0-1.0 | -4,270,726,599,347,849,700 | 39.542596 | 147 | 0.487121 | false |
foauth/oauth-proxy | setuser.py | 1 | 1376 | import getpass
import os
import sys
from werkzeug.datastructures import MultiDict
import models
import forms
# Make sure the database gets installed properly
models.db.create_all()
values = MultiDict()
form = forms.SetUser(values)
values['email'] = sys.argv[1] if len(sys.argv) > 1 else raw_input('%s: ' % form.email.label.text)
form = forms.SetUser(values)
form.validate()
if form.email.errors:
sys.exit('\n'.join(' ! %s' % e for e in form.email.errors))
if models.User.query.filter_by(email=form.email.data).count():
print '%s already exists, setting the password' % form.email.data
values['password'] = getpass.getpass('%s: ' % form.password.label.text)
form = forms.SetUser(values)
form.validate()
if form.password.errors:
sys.exit('\n'.join(' ! %s' % e for e in form.password.errors))
values['retype'] = getpass.getpass('%s: ' % form.retype.label.text)
form = forms.SetUser(values)
form.validate()
if form.retype.errors:
sys.exit('\n'.join(' ! %s' % e for e in form.retype.errors))
user = models.User.query.filter_by(email=form.email.data).first()
if user:
user.set_password(form.password.data)
msg = 'Updated password for %s' % user.email
else:
user = models.User(email=form.email.data, password=form.password.data)
msg = 'Created account for %s' % user.email
models.db.session.add(user)
models.db.session.commit()
print msg
| bsd-3-clause | -4,924,099,926,810,548,000 | 26.52 | 97 | 0.704942 | false |
nimadini/Teammate | main.py | 1 | 2620 | __author__ = 'stanley'
import webapp2
from handlers.dashboard.dashboard import DashboardHandler
from handlers.home.home import HomeHandler
from handlers.about import AboutHandler
from handlers.home.upload import UploadHandler
from handlers.home.upload_url import UploadURLHandler
from handlers.home.education import EducationHandler
from handlers.home.reference import ReferenceHandler
from handlers.home.references import ReferencesHandler
from handlers.home.resume_current_url import ResumeCurrentURLHandler
from handlers.home.work_experience import WorkExperience
from handlers.home.skills import Skills
from handlers.registration.registration import Registration
from handlers.dashboard.statistics import Statistics
from handlers.home.honors_and_awards import HonorsAndAwards
from handlers.home.sample_project import SampleProject
from handlers.home.language import LanguageHandler
from handlers.home.terms import TermsHandler
from handlers.follow import FollowHandler
from handlers.unfollow import UnfollowHandler
from handlers.message import MessageHandler
from handlers.main import Main
from handlers.stat import StatHandler
from handlers.auto_complete import AutocompleteHandler
app = webapp2.WSGIApplication([('/', Main),
('/home', HomeHandler),
('/dashboard', DashboardHandler),
('/education', EducationHandler),
('/references', ReferencesHandler),
('/upload', UploadHandler),
('/upload_url', UploadURLHandler),
('/reference', ReferenceHandler),
('/resume_current_url', ResumeCurrentURLHandler),
('/workexperience', WorkExperience),
('/skills', Skills),
('/statistics', Statistics),
('/project', SampleProject),
('/honor', HonorsAndAwards),
('/terms', TermsHandler),
('/registration', Registration),
('/languages', LanguageHandler),
('/follow', FollowHandler),
('/unfollow', UnfollowHandler),
('/message', MessageHandler),
('/stat', StatHandler),
('/autocomplete', AutocompleteHandler),
('/about', AboutHandler)], debug=True) | apache-2.0 | 8,140,410,365,557,745,000 | 51.42 | 80 | 0.597328 | false |
cloudbase/coriolis | coriolis/osmorphing/osdetect/oracle.py | 1 | 1061 | # Copyright 2020 Cloudbase Solutions Srl
# All Rights Reserved.
import re
from coriolis import constants
from coriolis.osmorphing.osdetect import base
ORACLE_DISTRO_IDENTIFIER = "Oracle Linux"
class OracleOSDetectTools(base.BaseLinuxOSDetectTools):
def detect_os(self):
info = {}
oracle_release_path = "etc/oracle-release"
if self._test_path(oracle_release_path):
release_info = self._read_file(
oracle_release_path).decode().splitlines()
if release_info:
m = re.match(r"^(.*) release ([0-9].*)$",
release_info[0].strip())
if m:
distro, version = m.groups()
info = {
"os_type": constants.OS_TYPE_LINUX,
"distribution_name": ORACLE_DISTRO_IDENTIFIER,
"release_version": version,
"friendly_release_name": "%s Version %s" % (
distro, version)}
return info
| agpl-3.0 | -3,441,065,869,275,441,700 | 32.15625 | 70 | 0.526861 | false |
gensmusic/test | l/python/test/json-to-object/json-to-object.py | 1 | 1287 | #!/usr/bin/python
#coding:utf-8
j = '{"action": "print", "method": "onData", "data": {"key1" : 1, "key2":"value2"} }'
import json
class Payload(object):
def __init__(self, j):
self.__dict__ = json.loads(j)
p = Payload(j)
print '-' * 30
print dir(p)
print '-' * 30
print p.action
print p.method
print p.data
print type(p.data)
data = dict(p.data)
print 'new data:', data
j2 = '{ "identity" : "dafei", "alert" : "you have message", "badge":1, "payload": { "k1":"v1", "k2" : "v2"} }'
p2 = Payload(j2)
print dir(p2)
print type(p2.payload)
print p2.payload
print '-' *50
class ParseBase(object):
"""docstring for ParseBase"""
def __init__(self):
super(ParseBase, self).__init__()
self.http_status_code = 0
def parse(self, j):
dict_data = json.loads(j)
for key in self.__dict__:
print 'key:{}'.format(key)
if not key.startswith('http'):
value = dict_data.get(key)
self.__dict__[key] = value
class Http(ParseBase):
"""docstring for Http"""
def __init__(self):
super(Http, self).__init__()
self.name = None
self.id = None
h = Http()
h.parse('{ "name" : "大飞", "id":1 }')
print dir(h)
print h.http_status_code
print h.name
print h.id
| gpl-2.0 | -2,800,019,684,156,221,000 | 21.508772 | 111 | 0.554949 | false |
intfrr/SoCo | soco/data_structures.py | 1 | 38123 | # -*- coding: utf-8 -*-
# pylint: disable=star-args, too-many-arguments, fixme
""" This module contains classes for handling DIDL-Lite metadata.
This is the XML schema used by Sonos for carrying metadata representing many
items such as tracks, playlists, composers, albums etc.
"""
# It tries to follow the class hierarchy provided by the DIDL-Lite schema
# described in the UPnP Spec, especially that for the ContentDirectory Service
# Although Sonos uses ContentDirectory v1, the document for v2 is more helpful:
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
from __future__ import unicode_literals
import sys
import warnings
warnings.simplefilter('always', DeprecationWarning)
import textwrap
from .xml import XML, ns_tag
from .exceptions import DIDLMetadataError
from .utils import really_unicode
###############################################################################
# MISC HELPER FUNCTIONS #
###############################################################################
def to_didl_string(*args):
""" Convert any number of DIDLObjects to a unicode xml string.
Args:
*args (DidlObject): One or more DidlObject (or subclass) instances
Returns:
str: A unicode string of the form <DIDL-Lite ...>...</DIDL-Lite>
representing the instances
"""
didl = XML.Element(
'DIDL-Lite',
{
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
for arg in args:
didl.append(arg.to_element())
if sys.version_info[0] == 2:
return XML.tostring(didl)
else:
return XML.tostring(didl, encoding='unicode')
def from_didl_string(string):
""" Convert a unicode xml string to a list of DIDLObjects.
Arg:
string (str): A unicode string containing an xml representation of one
or more DIDL-Lite items (in the form <DIDL-Lite ...>
...</DIDL-Lite> )
Returns:
list: A list of one or more instances of DIDLObject or a subclass
"""
items = []
root = XML.fromstring(string.encode('utf-8'))
for elt in root:
if elt.tag.endswith('item') or elt.tag.endswith('container'):
item_class = elt.findtext(ns_tag('upnp', 'class'))
try:
cls = _DIDL_CLASS_TO_CLASS[item_class]
except KeyError:
raise DIDLMetadataError("Unknown UPnP class: %s" % item_class)
items.append(cls.from_element(elt))
else:
# <desc> elements are allowed as an immediate child of <DIDL-Lite>
# according to the spec, but I have not seen one there in Sonos, so
# we treat them as illegal. May need to fix this if this
# causes problems.
raise DIDLMetadataError("Illegal child of DIDL element: <%s>"
% elt.tag)
return items
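# Illustrative round trip between DidlObject instances and DIDL-Lite XML.
# This is a sketch only; ``track`` stands for any DidlObject subclass
# instance and is not taken from a real Sonos response:
#
#   didl_xml = to_didl_string(track)      # unicode <DIDL-Lite ...> markup
#   items = from_didl_string(didl_xml)    # -> [DidlMusicTrack(...), ...]
#   items[0].title == track.title         # -> True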
###############################################################################
# DIDL RESOURCE #
###############################################################################
class DidlResource(object):
""" Identifies a resource, typically some type of a binary asset, such as
a song.
A 'res' element contains a uri that identifies the resource.
"""
# Adapted from a class taken from the Python Brisa project - MIT licence.
# pylint: disable=too-many-instance-attributes
def __init__(self, uri, protocol_info, import_uri=None, size=None,
duration=None, bitrate=None, sample_frequency=None,
bits_per_sample=None, nr_audio_channels=None, resolution=None,
color_depth=None, protection=None):
""" Constructor for the Resource class.
Args:
uri (str): value of the res tag, typically a URI. It MUST be
                properly escaped URIs as described in RFC 2396
protocol_info (str): A string in the form a:b:c:d that
identifies the streaming or transport protocol for
transmitting the resource. A value is required. For more
information see section 2.5.2 at
http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
import_uri (str, optional): uri locator for resource update
size (int, optional): size in bytes
duration (str, optional): duration of the playback of the res
at normal speed (H*:MM:SS:F* or H*:MM:SS:F0/F1)
bitrate (int, optional): bitrate in bytes/second
sample_frequency (int, optional): sample frequency in Hz
bits_per_sample (int, optional): bits per sample
nr_audio_channels (int, optional): number of audio channels
resolution (str, optional): resolution of the resource (X*Y)
color_depth (int, optional): color depth in bits
protection (str, optional): statement of protection type
"""
# Of these attributes, only uri, protocol_info and duration have been
# spotted 'in the wild'
self.uri = uri
# Protocol info is in the form a:b:c:d - see
# sec 2.5.2 at
# http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
self.protocol_info = protocol_info
self.import_uri = import_uri
self.size = size
self.duration = duration
self.bitrate = bitrate
self.sample_frequency = sample_frequency
self.bits_per_sample = bits_per_sample
self.nr_audio_channels = nr_audio_channels
self.resolution = resolution
self.color_depth = color_depth
self.protection = protection
@classmethod
def from_element(cls, element):
""" Set the resource properties from a <res> element.
Arg:
element (Element): An ElementTree Element
"""
def _int_helper(name):
"""Try to convert the name attribute to an int, or None."""
result = element.get(name)
if result is not None:
try:
return int(result)
except ValueError:
raise ValueError(
'Could not convert {0} to an integer'.format(name))
else:
return None
content = {}
# required
content['protocol_info'] = element.get('protocolInfo')
if content['protocol_info'] is None:
raise Exception('Could not create Resource from Element: '
'protocolInfo not found (required).')
# Optional
content['import_uri'] = element.get('importUri')
content['size'] = _int_helper('size')
content['duration'] = element.get('duration')
content['bitrate'] = _int_helper('bitrate')
content['sample_frequency'] = _int_helper('sampleFrequency')
content['bits_per_sample'] = _int_helper('bitsPerSample')
content['nr_audio_channels'] = _int_helper('nrAudioChannels')
content['resolution'] = element.get('resolution')
content['color_depth'] = _int_helper('colorDepth')
content['protection'] = element.get('protection')
content['uri'] = element.text
return cls(**content)
def __repr__(self):
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
self.uri,
hex(id(self)))
def __str__(self):
return self.__repr__()
def to_element(self):
""" Return an ElementTree Element based on this resource."""
if not self.protocol_info:
raise Exception('Could not create Element for this resource: '
'protocolInfo not set (required).')
root = XML.Element('res')
# Required
root.attrib['protocolInfo'] = self.protocol_info
# Optional
if self.import_uri is not None:
root.attrib['importUri'] = self.import_uri
if self.size is not None:
root.attrib['size'] = str(self.size)
if self.duration is not None:
root.attrib['duration'] = self.duration
if self.bitrate is not None:
root.attrib['bitrate'] = str(self.bitrate)
if self.sample_frequency is not None:
root.attrib['sampleFrequency'] = str(self.sample_frequency)
if self.bits_per_sample is not None:
root.attrib['bitsPerSample'] = str(self.bits_per_sample)
if self.nr_audio_channels is not None:
root.attrib['nrAudioChannels'] = str(self.nr_audio_channels)
if self.resolution is not None:
root.attrib['resolution'] = self.resolution
if self.color_depth is not None:
root.attrib['colorDepth'] = str(self.color_depth)
if self.protection is not None:
root.attrib['protection'] = self.protection
root.text = self.uri
return root
def to_dict(self, remove_nones=False):
"""Return a dictionary representation of the DidlResource
Args:
remove_nones (bool): Optionally remove dictionary elements when
their value is None.
"""
content = {
'uri': self.uri,
'protocol_info': self.protocol_info,
'import_uri': self.import_uri,
'size': self.size,
'duration': self.duration,
'bitrate': self.bitrate,
'sample_frequency': self.sample_frequency,
'bits_per_sample': self.bits_per_sample,
'nr_audio_channels': self.nr_audio_channels,
'resolution': self.resolution,
'color_depth': self.color_depth,
'protection': self.protection,
}
if remove_nones:
# delete any elements that have a value of None to optimize size
# of the returned structure
nones = [k for k in content if content[k] is None]
for k in nones:
del content[k]
return content
@classmethod
def from_dict(cls, content):
"""Create an instance from a dict.
An alternative constructor. Equivalent to DidlResource(**content).
Arg:
content (dict): Dict containing metadata information. Required and
valid arguments are the same as for the ``__init__`` method.
"""
return cls(**content)
def __eq__(self, resource):
"""Compare with another ``resource``.
Returns:
(bool): True if items are equal, else False
"""
if not isinstance(resource, DidlResource):
return False
return self.to_dict() == resource.to_dict()
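# Minimal usage sketch for DidlResource. The uri and protocol_info values
# below are invented placeholders, not real Sonos data:
#
#   res = DidlResource(
#       uri='x-file-cifs://server/share/track.mp3',
#       protocol_info='x-file-cifs:*:audio/mpeg:*')
#   res_elt = res.to_element()                 # ElementTree <res> element
#   DidlResource.from_element(res_elt) == res  # -> True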
###############################################################################
# BASE OBJECTS #
###############################################################################
# a mapping which will be used to look up the relevant class from the
# DIDL item class
_DIDL_CLASS_TO_CLASS = {}
class DidlMetaClass(type):
"""Meta class for all Didl objects."""
def __new__(mcs, name, bases, attrs):
"""Create a new instance.
Args:
name: Name of the class
bases: Base classes (tuple)
attrs: Attributes defined for the class
"""
new_cls = super(DidlMetaClass, mcs).__new__(mcs, name, bases, attrs)
# Register all subclasses with the global _DIDL_CLASS_TO_CLASS mapping
item_class = attrs.get('item_class', None)
if item_class is not None:
_DIDL_CLASS_TO_CLASS[item_class] = new_cls
return new_cls
# Py2/3 compatible way of declaring the metaclass
class DidlObject(DidlMetaClass(str('DidlMetaClass'), (object,), {})):
"""Abstract base class for all DIDL-Lite items.
You should not need to instantiate this.
Attributes:
item_class (str): The DIDL Lite class for this object
tag (str): The XML element tag name used for this instance
_translation (dict): A dict used to translate between instance
attribute names and XML tags/namespaces. It also serves to define
the allowed tags/attributes for this instance. Overridden and
extended by subclasses.
"""
item_class = 'object'
tag = 'item'
# key: attribute_name: (ns, tag)
_translation = {
'creator': ('dc', 'creator'),
'write_status': ('upnp', 'writeStatus'),
}
def __init__(self, title, parent_id, item_id, restricted=True,
resources=None, desc='RINCON_AssociatedZPUDN', **kwargs):
r"""Construct and initialize a DidlObject.
Args:
title (str): The title for the item
parent_id (str): The parent ID for the item
item_id (str): The ID for the item
restricted (bool): Whether the item can be modified
resources (list): A list of resources for this object
desc (str): A didl descriptor, default RINCON_AssociatedZPUDN. This
is not the same as "description"! It is used for identifying
the relevant music service
**kwargs: Extra metadata. What is allowed depends on the
_translation class attribute, which in turn depends on the DIDL
class
"""
# All didl objects *must* have a title, a parent_id and an item_id
# so we specify these as required args in the constructor signature
# to ensure that we get them. Other didl object properties are
# optional, so can be passed as kwargs.
# The content of _translation is adapted from the list in table C at
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
# Not all properties referred to there are catered for, since Sonos
# does not use some of them.
# pylint: disable=super-on-old-class
super(DidlObject, self).__init__()
self.title = title
self.parent_id = parent_id
self.item_id = item_id
# Restricted is a compulsory attribute, but is almost always True for
# Sonos. (Only seen it 'false' when browsing favorites)
self.restricted = restricted
# Resources is multi-valued, and dealt with separately
self.resources = [] if resources is None else resources
# According to the spec, there may be one or more desc values. Sonos
# only seems to use one, so we won't bother with a list
self.desc = desc
for key, value in kwargs.items():
# For each attribute, check to see if this class allows it
if key not in self._translation:
raise ValueError(
'The key \'{0}\' is not allowed as an argument. Only '
'these keys are allowed: parent_id, item_id, title, '
'restricted, resources, desc'
' {1}'.format(key, ', '.join(self._translation.keys())))
# It is an allowed attribute. Set it as an attribute on self, so
# that it can be accessed as Classname.attribute in the normal
# way.
setattr(self, key, value)
@classmethod
def from_element(cls, element):
"""Create an instance of this class from an ElementTree xml Element.
An alternative constructor. The element must be a DIDL-Lite <item> or
<container> element, and must be properly namespaced.
Arg:
xml (Element): An :py:class:`xml.etree.ElementTree.Element` object.
"""
# Check we have the right sort of element. tag can be an empty string
# which indicates that any tag is allowed (see eg the musicAlbum DIDL
# class)
if not element.tag.endswith(cls.tag):
raise DIDLMetadataError(
"Wrong element. Expected '<{0}>',"
" got '<{1}>'".format(cls.tag, element.tag))
# and that the upnp matches what we are expecting
item_class = element.find(ns_tag('upnp', 'class')).text
if item_class != cls.item_class:
raise DIDLMetadataError(
"UPnP class is incorrect. Expected '{0}',"
" got '{1}'".format(cls.item_class, item_class))
# parent_id, item_id and restricted are stored as attibutes on the
# element
item_id = really_unicode(element.get('id', None))
if item_id is None:
raise DIDLMetadataError("Missing id attribute")
parent_id = really_unicode(element.get('parentID', None))
if parent_id is None:
raise DIDLMetadataError("Missing parentID attribute")
restricted = element.get('restricted', None)
if restricted is None:
raise DIDLMetadataError("Missing restricted attribute")
restricted = True if restricted in [1, 'true', 'True'] else False
# There must be a title. According to spec, it should be the first
# child, but Sonos does not abide by this
title_elt = element.find(ns_tag('dc', 'title'))
if title_elt is None:
raise DIDLMetadataError(
"Missing title element")
title = really_unicode(title_elt.text)
# Deal with any resource elements
resources = []
for res_elt in element.findall(ns_tag('', 'res')):
resources.append(
DidlResource.from_element(res_elt))
# and the desc element (There is only one in Sonos)
desc = element.findtext(ns_tag('', 'desc'))
# Get values of the elements listed in _translation and add them to
# the content dict
content = {}
for key, value in cls._translation.items():
result = element.findtext(ns_tag(*value))
if result is not None:
# We store info as unicode internally.
content[key] = really_unicode(result)
# Convert type for original track number
if content.get('original_track_number') is not None:
content['original_track_number'] = \
int(content['original_track_number'])
# Now pass the content dict we have just built to the main
# constructor, as kwargs, to create the object
return cls(title=title, parent_id=parent_id, item_id=item_id,
restricted=restricted, resources=resources, desc=desc,
**content)
@classmethod
def from_dict(cls, content):
"""Create an instance from a dict.
An alternative constructor. Equivalent to DidlObject(**content).
Arg:
            content (dict): Dict containing metadata information. Required and
valid arguments are the same as for the ``__init__`` method.
"""
# Do we really need this constructor? Could use DidlObject(**content)
# instead.
return cls(**content)
def __eq__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are equal, else False
"""
if not isinstance(playable_item, DidlObject):
return False
return self.to_dict() == playable_item.to_dict()
def __ne__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are unequal, else False
"""
if not isinstance(playable_item, DidlObject):
return True
return self.to_dict() != playable_item.to_dict()
def __repr__(self):
"""Return the repr value for the item.
The repr is of the form::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set,
or ``str(content)``. The output is also cleared of non-ascii
characters.
"""
# 40 originates from terminal width (78) - (15) for address part and
# (19) for the longest class name and a little left for buffer
if self.title is not None:
middle = self.title.encode('ascii', 'replace')[0:40]
else:
            middle = str(self.to_dict()).encode('ascii', 'replace')[0:40]
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
middle,
hex(id(self)))
def __str__(self):
"""Return the str value for the item::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set, or
``str(content)``. The output is also cleared of non-ascii characters.
"""
return self.__repr__()
def to_dict(self):
"""Return the dict representation of the instance."""
content = {}
# Get the value of each attribute listed in _translation, and add it
# to the content dict
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
# also add parent_id, item_id, restricted, title and resources because
# they are not listed in _translation
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = self.resources
content['desc'] = self.desc
return content
def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Arg:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
An ElementTree Element
.. code :: xml
<DIDL-Lite ..NS_INFO..>
<item id="...self.item_id..."
parentID="...cls.parent_id..." restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
RINCON_AssociatedZPUDN
</desc>
</item>
</DIDL-Lite>
"""
elt_attrib = {}
if include_namespaces:
elt_attrib.update({
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
elt_attrib.update({
'parentID': self.parent_id,
'restricted': 'true' if self.restricted else 'false',
'id': self.item_id
})
elt = XML.Element(self.tag, elt_attrib)
# Add the title, which should always come first, according to the spec
XML.SubElement(elt, 'dc:title').text = self.title
# Add in any resources
for resource in self.resources:
elt.append(resource.to_element())
# Add the rest of the metadata attributes (i.e all those listed in
# _translation) as sub-elements of the item element.
for key, value in self._translation.items():
if hasattr(self, key):
# Some attributes have a namespace of '', which means they
# are in the default namespace. We need to handle those
# carefully
tag = "%s:%s" % value if value[0] else "%s" % value[1]
XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
# Now add in the item class
XML.SubElement(elt, 'upnp:class').text = self.item_class
# And the desc element
desc_attrib = {'id': 'cdudn', 'nameSpace':
'urn:schemas-rinconnetworks-com:metadata-1-0/'}
desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
desc_elt.text = self.desc
return elt
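# Sketch of serialising a DidlObject. The ids used here are placeholders
# chosen purely for illustration:
#
#   obj = DidlObject(title='Example', parent_id='A:ALBUM', item_id='A:ALBUM/1')
#   elt = obj.to_element(include_namespaces=True)  # ElementTree <item> element
#   obj.to_dict()                                  # plain dict of the metadata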
###############################################################################
# OBJECT.ITEM HIERARCHY #
###############################################################################
class DidlItem(DidlObject):
"""A basic content directory item."""
# The spec allows for an option 'refID' attribute, but we do not handle it
item_class = 'object.item'
# _translation = DidlObject._translation.update({ ...})
# does not work, but doing it in two steps does
_translation = DidlObject._translation.copy()
_translation.update(
{
'stream_content': ('r', 'streamContent'),
'radio_show': ('r', 'radioShowMd'),
'album_art_uri': ('upnp', 'albumArtURI'),
}
)
class DidlAudioItem(DidlItem):
"""An audio item."""
item_class = 'object.item.audioItem'
_translation = DidlItem._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'description': ('dc', 'description'),
'long_description': ('upnp', 'longDescription'),
'publisher': ('dc', 'publisher'),
'language': ('dc', 'language'),
'relation': ('dc', 'relation'),
'rights': ('dc', 'rights'),
}
)
# Browsing Sonos Favorites produces some odd looking DIDL-Lite. The object
# class is 'object.itemobject.item.sonos-favorite', which is probably a typo
# in Sonos' code somewhere.
# Here is an example:
# <?xml version="1.0" ?>
# <DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
# xmlns:dc="http://purl.org/dc/elements/1.1/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">
# <item id="FV:2/13" parentID="FV:2" restricted="false">
# <dc:title>Shake It Off</dc:title>
# <upnp:class>object.itemobject.item.sonos-favorite</upnp:class>
# <r:ordinal>4</r:ordinal>
# <res protocolInfo="sonos.com-spotify:*:audio/x-spotify:*">
# x-sonos-spotify:spotify%3atrack%3a7n.......?sid=9&flags=32</res>
# <upnp:albumArtURI>http://o.scd.....</upnp:albumArtURI>
# <r:type>instantPlay</r:type>
# <r:description>By Taylor Swift</r:description>
# <r:resMD><DIDL-Lite xmlns:dc="
# http://purl.org/dc/elements/1.1/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
# <item id="00030020spotify%3atrack%3a7n9Q6b...74uCtajkddPt"
# parentID="0006006ctoplist%2ftracks%2fregion%2fGB"
# restricted="true"><dc:title>Shake It Off
# </dc:title><upnp:class>object.item.audioItem.musicTrack
# </upnp:class><desc id="cdudn"
# nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
# SA_RINCON2311_XXXXX</desc>
# </item>
# </DIDL-Lite>
# </r:resMD>
# </item>
# </DIDL-Lite>
# Note the r:ordinal, r:type; r:description, r:resMD elements which are not
# seen (?) anywhere else
# We're ignoring this for the moment!
class DidlMusicTrack(DidlAudioItem):
"""Class that represents a music library track. """
item_class = 'object.item.audioItem.musicTrack'
# name: (ns, tag)
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'album': ('upnp', 'album'),
'original_track_number': ('upnp', 'originalTrackNumber'),
'playlist': ('upnp', 'playlist'),
'contributor': ('dc', 'contributor'),
'date': ('dc', 'date'),
}
)
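# Sketch of a concrete track item; the ids and uri below are invented for
# the example and carry no meaning to Sonos:
#
#   track = DidlMusicTrack(
#       title='Example Track', parent_id='A:TRACKS', item_id='A:TRACKS/1',
#       resources=[DidlResource(uri='x-file-cifs://server/track.mp3',
#                               protocol_info='x-file-cifs:*:audio/mpeg:*')])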
class DidlAudioBroadcast(DidlAudioItem):
"""Class that represents an audio broadcast."""
item_class = 'object.item.audioItem.audioBroadcast'
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'region': ('upnp', 'region'),
'radio_call_sign': ('upnp', 'radioCallSign'),
'radio_station_id': ('upnp', 'radioStationID'),
'channel_nr': ('upnp', 'channelNr'),
}
)
class DidlAudioBroadcastFavorite(DidlAudioBroadcast):
"""Class that represents an audio broadcast sonos favorite."""
# Note: The sonos-favorite part of the class spec obviously isn't part of
# the DIDL spec, so just assume that it has the same definition as the
# regular object.item.audioItem.audioBroadcast
item_class = 'object.item.audioItem.audioBroadcast.sonos-favorite'
###############################################################################
# OBJECT.CONTAINER HIERARCHY #
###############################################################################
class DidlContainer(DidlObject):
"""Class that represents a music library container. """
item_class = 'object.container'
tag = 'container'
# We do not implement createClass or searchClass. Not used by Sonos??
# TODO: handle the 'childCount' element.
class DidlAlbum(DidlContainer):
"""A content directory album."""
item_class = 'object.container.album'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'description': ('dc', 'description'),
'long_description': ('upnp', 'longDescription'),
'publisher': ('dc', 'publisher'),
'contributor': ('dc', 'contributor'),
'date': ('dc', 'date'),
'relation': ('dc', 'relation'),
'rights': ('dc', 'rights'),
}
)
class DidlMusicAlbum(DidlAlbum):
"""Class that represents a music library album. """
item_class = 'object.container.album.musicAlbum'
# According to the spec, all musicAlbums should be represented in
# XML by a <container> tag. Sonos sometimes uses <container> and
# sometimes uses <item>. Set the tag type to '' to indicate that
# either is allowed.
tag = ''
# name: (ns, tag)
# pylint: disable=protected-access
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'genre': ('upnp', 'genre'),
'producer': ('upnp', 'producer'),
'toc': ('upnp', 'toc'),
'album_art_uri': ('upnp', 'albumArtURI'),
}
)
class DidlMusicAlbumFavorite(DidlAlbum):
"""Class that represents a Sonos favorite music library album.
This class is not part of the DIDL spec and is Sonos specific.
"""
item_class = 'object.container.album.musicAlbum.sonos-favorite'
# Despite the fact that the item derives from object.container, it's
# XML does not include a <container> tag, but an <item> tag. This seems
# to be an error by Sonos.
tag = 'item'
class DidlMusicAlbumCompilation(DidlAlbum):
"""Class that represents a Sonos favorite music library compilation.
This class is not part of the DIDL spec and is Sonos specific.
"""
# These classes appear when browsing the library and Sonos has been set
# to group albums using compilations.
# See https://github.com/SoCo/SoCo/issues/280
item_class = 'object.container.album.musicAlbum.compilation'
tag = 'container'
class DidlPerson(DidlContainer):
"""A content directory class representing a person."""
item_class = 'object.container.person'
_translation = DidlContainer._translation.copy()
_translation.update(
{
'language': ('dc', 'language'),
}
)
class DidlComposer(DidlPerson):
"""Class that represents a music library composer."""
# Not in the DIDL-Lite spec. Sonos specific??
item_class = 'object.container.person.composer'
class DidlMusicArtist(DidlPerson):
"""Class that represents a music library artist."""
item_class = 'object.container.person.musicArtist'
# name: (ns, tag)
_translation = DidlPerson._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'artist_discography_uri': ('upnp', 'artistDiscographyURI'),
}
)
class DidlAlbumList(DidlContainer):
"""Class that represents a music library album list."""
# This does not appear (that I can find) in the DIDL-Lite specs.
# Presumably Sonos specific
item_class = 'object.container.albumlist'
class DidlPlaylistContainer(DidlContainer):
"""Class that represents a music library play list."""
item_class = 'object.container.playlistContainer'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'genre': ('upnp', 'genre'),
'long_description': ('upnp', 'longDescription'),
'producer': ('dc', 'producer'),
'contributor': ('dc', 'contributor'),
'description': ('dc', 'description'),
'date': ('dc', 'date'),
'language': ('dc', 'language'),
'rights': ('dc', 'rights'),
}
)
class DidlSameArtist(DidlPlaylistContainer):
"""Class that represents all tracks by a single artist.
This type is returned by browsing an artist or a composer
"""
# Not in the DIDL-Lite spec. Sonos specific?
item_class = 'object.container.playlistContainer.sameArtist'
class DidlGenre(DidlContainer):
"""A content directory class representing a general genre."""
item_class = 'object.container.genre'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'long_description': ('upnp', 'longDescription'),
'description': ('dc', 'description'),
}
)
class DidlMusicGenre(DidlGenre):
"""Class that represents a music genre."""
item_class = 'object.container.genre.musicGenre'
###############################################################################
# SPECIAL LISTS #
###############################################################################
class ListOfMusicInfoItems(list):
"""Abstract container class for a list of music information items."""
def __init__(self, items, number_returned, total_matches, update_id):
super(ListOfMusicInfoItems, self).__init__(items)
self._metadata = {
'item_list': list(items),
'number_returned': number_returned,
'total_matches': total_matches,
'update_id': update_id,
}
def __getitem__(self, key):
"""Legacy get metadata by string key or list item(s) by index.
DEPRECATION: This overriding form of __getitem__ will be removed in
the 3rd release after 0.8. The metadata can be fetched via the named
attributes
"""
if key in self._metadata:
if key == 'item_list':
message = """
Calling [\'item_list\'] on search results to obtain the objects
is no longer necessary, since the object returned from searches
now is a list. This deprecated way of getting the items will
be removed from the third release after 0.8."""
else:
message = """
Getting metadata items by indexing the search result like a
dictionary [\'{0}\'] is deprecated. Please use the named
attribute {1}.{0} instead. The deprecated way of retrieving the
metadata will be removed from the third release after
0.8""".format(key, self.__class__.__name__)
message = textwrap.dedent(message).replace('\n', ' ').lstrip()
warnings.warn(message, DeprecationWarning, stacklevel=2)
return self._metadata[key]
else:
return super(ListOfMusicInfoItems, self).__getitem__(key)
@property
def number_returned(self):
"""The number of returned matches."""
return self._metadata['number_returned']
@property
def total_matches(self):
"""The number of total matches."""
return self._metadata['total_matches']
@property
def update_id(self):
"""The update ID."""
return self._metadata['update_id']
class SearchResult(ListOfMusicInfoItems):
"""Container class that represents a search or browse result.
(browse is just a special case of search)
"""
def __init__(self, items, search_type, number_returned,
total_matches, update_id):
super(SearchResult, self).__init__(
items, number_returned, total_matches, update_id
)
self._metadata['search_type'] = search_type
def __repr__(self):
return '{0}(items={1}, search_type=\'{2}\')'.format(
self.__class__.__name__,
super(SearchResult, self).__repr__(),
self.search_type)
@property
def search_type(self):
"""The search type."""
return self._metadata['search_type']
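# Sketch of consuming a SearchResult (``result`` is assumed to come from a
# browse or search call); it behaves like a list with extra attributes:
#
#   for item in result:
#       print(item.title)
#   result.total_matches, result.number_returned, result.search_type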
class Queue(ListOfMusicInfoItems):
"""Container class that represents a queue."""
def __init__(self, items, number_returned, total_matches, update_id):
super(Queue, self).__init__(
items, number_returned, total_matches, update_id
)
def __repr__(self):
return '{0}(items={1})'.format(
self.__class__.__name__,
super(Queue, self).__repr__(),
)
| mit | 8,021,834,686,697,248,000 | 35.48134 | 81 | 0.572069 | false |
yeyanchao/calibre | src/calibre/gui2/library/views.py | 1 | 39819 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import os, itertools, operator
from functools import partial
from future_builtins import map
from collections import OrderedDict
from PyQt4.Qt import (QTableView, Qt, QAbstractItemView, QMenu, pyqtSignal,
QModelIndex, QIcon, QItemSelection, QMimeData, QDrag, QApplication,
QPoint, QPixmap, QUrl, QImage, QPainter, QColor, QRect)
from calibre.gui2.library.delegates import (RatingDelegate, PubDateDelegate,
TextDelegate, DateDelegate, CompleteDelegate, CcTextDelegate,
CcBoolDelegate, CcCommentsDelegate, CcDateDelegate, CcTemplateDelegate,
CcEnumDelegate, CcNumberDelegate, LanguagesDelegate)
from calibre.gui2.library.models import BooksModel, DeviceBooksModel
from calibre.utils.config import tweaks, prefs
from calibre.gui2 import error_dialog, gprefs
from calibre.gui2.library import DEFAULT_SORT
from calibre.constants import filesystem_encoding
from calibre import force_unicode
class PreserveViewState(object): # {{{
'''
Save the set of selected books at enter time. If at exit time there are no
selected books, restore the previous selection, the previous current index
    and don't affect the scroll position.
'''
def __init__(self, view, preserve_hpos=True, preserve_vpos=True,
require_selected_ids=True):
self.view = view
self.require_selected_ids = require_selected_ids
self.selected_ids = set()
self.current_id = None
self.preserve_hpos = preserve_hpos
self.preserve_vpos = preserve_vpos
self.vscroll = self.hscroll = 0
def __enter__(self):
try:
self.selected_ids = self.view.get_selected_ids()
self.current_id = self.view.current_id
self.vscroll = self.view.verticalScrollBar().value()
self.hscroll = self.view.horizontalScrollBar().value()
except:
import traceback
traceback.print_exc()
def __exit__(self, *args):
if self.selected_ids or not self.require_selected_ids:
if self.current_id is not None:
self.view.current_id = self.current_id
if self.selected_ids:
self.view.select_rows(self.selected_ids, using_ids=True,
scroll=False, change_current=self.current_id is None)
if self.preserve_vpos:
self.view.verticalScrollBar().setValue(self.vscroll)
if self.preserve_hpos:
self.view.horizontalScrollBar().setValue(self.hscroll)
@dynamic_property
def state(self):
def fget(self):
self.__enter__()
return {x:getattr(self, x) for x in ('selected_ids', 'current_id',
'vscroll', 'hscroll')}
def fset(self, state):
for k, v in state.iteritems(): setattr(self, k, v)
self.__exit__()
return property(fget=fget, fset=fset)
# }}}
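# Illustrative use of the class above as a context manager (``view`` is
# assumed to be a BooksView, which exposes it as ``view.preserve_state``):
#
#   with view.preserve_state():
#       view.model().resort()
# Selection, current row and scroll positions are restored on exit.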
class BooksView(QTableView): # {{{
files_dropped = pyqtSignal(object)
add_column_signal = pyqtSignal()
def viewportEvent(self, event):
if (event.type() == event.ToolTip and not gprefs['book_list_tooltips']):
return False
return QTableView.viewportEvent(self, event)
def __init__(self, parent, modelcls=BooksModel, use_edit_metadata_dialog=True):
QTableView.__init__(self, parent)
if not tweaks['horizontal_scrolling_per_column']:
self.setHorizontalScrollMode(self.ScrollPerPixel)
self.setEditTriggers(self.EditKeyPressed)
if tweaks['doubleclick_on_library_view'] == 'edit_cell':
self.setEditTriggers(self.DoubleClicked|self.editTriggers())
elif tweaks['doubleclick_on_library_view'] == 'open_viewer':
self.setEditTriggers(self.SelectedClicked|self.editTriggers())
self.doubleClicked.connect(parent.iactions['View'].view_triggered)
elif tweaks['doubleclick_on_library_view'] == 'edit_metadata':
# Must not enable single-click to edit, or the field will remain
# open in edit mode underneath the edit metadata dialog
if use_edit_metadata_dialog:
self.doubleClicked.connect(
partial(parent.iactions['Edit Metadata'].edit_metadata,
checked=False))
else:
self.setEditTriggers(self.DoubleClicked|self.editTriggers())
self.drag_allowed = True
self.setDragEnabled(True)
self.setDragDropOverwriteMode(False)
self.setDragDropMode(self.DragDrop)
self.drag_start_pos = None
self.setAlternatingRowColors(True)
self.setSelectionBehavior(self.SelectRows)
self.setShowGrid(False)
self.setWordWrap(False)
self.rating_delegate = RatingDelegate(self)
self.timestamp_delegate = DateDelegate(self)
self.pubdate_delegate = PubDateDelegate(self)
self.last_modified_delegate = DateDelegate(self,
tweak_name='gui_last_modified_display_format')
self.languages_delegate = LanguagesDelegate(self)
self.tags_delegate = CompleteDelegate(self, ',', 'all_tag_names')
self.authors_delegate = CompleteDelegate(self, '&', 'all_author_names', True)
self.cc_names_delegate = CompleteDelegate(self, '&', 'all_custom', True)
self.series_delegate = TextDelegate(self)
self.publisher_delegate = TextDelegate(self)
self.text_delegate = TextDelegate(self)
self.cc_text_delegate = CcTextDelegate(self)
self.cc_enum_delegate = CcEnumDelegate(self)
self.cc_bool_delegate = CcBoolDelegate(self)
self.cc_comments_delegate = CcCommentsDelegate(self)
self.cc_template_delegate = CcTemplateDelegate(self)
self.cc_number_delegate = CcNumberDelegate(self)
self.display_parent = parent
self._model = modelcls(self)
self.setModel(self._model)
self.setSelectionBehavior(QAbstractItemView.SelectRows)
self.setSortingEnabled(True)
self.selectionModel().currentRowChanged.connect(self._model.current_changed)
self.preserve_state = partial(PreserveViewState, self)
# {{{ Column Header setup
self.can_add_columns = True
self.was_restored = False
self.column_header = self.horizontalHeader()
self.column_header.setMovable(True)
self.column_header.sectionMoved.connect(self.save_state)
self.column_header.setContextMenuPolicy(Qt.CustomContextMenu)
self.column_header.customContextMenuRequested.connect(self.show_column_header_context_menu)
self.column_header.sectionResized.connect(self.column_resized, Qt.QueuedConnection)
# }}}
self._model.database_changed.connect(self.database_changed)
hv = self.verticalHeader()
hv.setClickable(True)
hv.setCursor(Qt.PointingHandCursor)
self.selected_ids = []
self._model.about_to_be_sorted.connect(self.about_to_be_sorted)
self._model.sorting_done.connect(self.sorting_done,
type=Qt.QueuedConnection)
# Column Header Context Menu {{{
def column_header_context_handler(self, action=None, column=None):
if not action or not column:
return
try:
idx = self.column_map.index(column)
except:
return
h = self.column_header
if action == 'hide':
h.setSectionHidden(idx, True)
elif action == 'show':
h.setSectionHidden(idx, False)
if h.sectionSize(idx) < 3:
sz = h.sectionSizeHint(idx)
h.resizeSection(idx, sz)
elif action == 'ascending':
self.sortByColumn(idx, Qt.AscendingOrder)
elif action == 'descending':
self.sortByColumn(idx, Qt.DescendingOrder)
elif action == 'defaults':
self.apply_state(self.get_default_state())
elif action == 'addcustcol':
self.add_column_signal.emit()
elif action.startswith('align_'):
alignment = action.partition('_')[-1]
self._model.change_alignment(column, alignment)
self.save_state()
def show_column_header_context_menu(self, pos):
idx = self.column_header.logicalIndexAt(pos)
if idx > -1 and idx < len(self.column_map):
col = self.column_map[idx]
name = unicode(self.model().headerData(idx, Qt.Horizontal,
Qt.DisplayRole).toString())
self.column_header_context_menu = QMenu(self)
if col != 'ondevice':
self.column_header_context_menu.addAction(_('Hide column %s') %
name,
partial(self.column_header_context_handler, action='hide',
column=col))
m = self.column_header_context_menu.addMenu(
_('Sort on %s') % name)
a = m.addAction(_('Ascending'),
partial(self.column_header_context_handler,
action='ascending', column=col))
d = m.addAction(_('Descending'),
partial(self.column_header_context_handler,
action='descending', column=col))
if self._model.sorted_on[0] == col:
ac = a if self._model.sorted_on[1] else d
ac.setCheckable(True)
ac.setChecked(True)
if col not in ('ondevice', 'inlibrary') and \
(not self.model().is_custom_column(col) or \
self.model().custom_columns[col]['datatype'] not in ('bool',
)):
m = self.column_header_context_menu.addMenu(
_('Change text alignment for %s') % name)
al = self._model.alignment_map.get(col, 'left')
for x, t in (('left', _('Left')), ('right', _('Right')), ('center',
_('Center'))):
a = m.addAction(t,
partial(self.column_header_context_handler,
action='align_'+x, column=col))
if al == x:
a.setCheckable(True)
a.setChecked(True)
hidden_cols = [self.column_map[i] for i in
range(self.column_header.count()) if
self.column_header.isSectionHidden(i)]
try:
hidden_cols.remove('ondevice')
except:
pass
if hidden_cols:
self.column_header_context_menu.addSeparator()
m = self.column_header_context_menu.addMenu(_('Show column'))
for col in hidden_cols:
hidx = self.column_map.index(col)
name = unicode(self.model().headerData(hidx, Qt.Horizontal,
Qt.DisplayRole).toString())
m.addAction(name,
partial(self.column_header_context_handler,
action='show', column=col))
self.column_header_context_menu.addSeparator()
self.column_header_context_menu.addAction(
_('Shrink column if it is too wide to fit'),
partial(self.resize_column_to_fit, column=self.column_map[idx]))
self.column_header_context_menu.addAction(
_('Restore default layout'),
partial(self.column_header_context_handler,
action='defaults', column=col))
if self.can_add_columns:
self.column_header_context_menu.addAction(
QIcon(I('column.png')),
_('Add your own columns'),
partial(self.column_header_context_handler,
action='addcustcol', column=col))
self.column_header_context_menu.popup(self.column_header.mapToGlobal(pos))
# }}}
# Sorting {{{
def about_to_be_sorted(self, idc):
selected_rows = [r.row() for r in self.selectionModel().selectedRows()]
self.selected_ids = [idc(r) for r in selected_rows]
def sorting_done(self, indexc):
pos = self.horizontalScrollBar().value()
self.select_rows(self.selected_ids, using_ids=True, change_current=True,
scroll=True)
self.selected_ids = []
self.horizontalScrollBar().setValue(pos)
def sort_by_named_field(self, field, order, reset=True):
if field in self.column_map:
idx = self.column_map.index(field)
if order:
self.sortByColumn(idx, Qt.AscendingOrder)
else:
self.sortByColumn(idx, Qt.DescendingOrder)
else:
self._model.sort_by_named_field(field, order, reset)
def multisort(self, fields, reset=True, only_if_different=False):
if len(fields) == 0:
return
sh = self.cleanup_sort_history(self._model.sort_history,
ignore_column_map=True)
if only_if_different and len(sh) >= len(fields):
ret=True
for i,t in enumerate(fields):
if t[0] != sh[i][0]:
ret = False
break
if ret:
return
for n,d in reversed(fields):
if n in self._model.db.field_metadata.keys():
sh.insert(0, (n, d))
sh = self.cleanup_sort_history(sh, ignore_column_map=True)
self._model.sort_history = [tuple(x) for x in sh]
self._model.resort(reset=reset)
col = fields[0][0]
dir = Qt.AscendingOrder if fields[0][1] else Qt.DescendingOrder
if col in self.column_map:
col = self.column_map.index(col)
hdrs = self.horizontalHeader()
try:
hdrs.setSortIndicator(col, dir)
except:
pass
# }}}
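    # Illustrative calls of the sorting helpers above (``view`` is a
    # BooksView; the field names must exist in the library's field metadata):
    #
    #   view.sort_by_named_field('authors', True)              # ascending
    #   view.multisort([('series', True), ('series_index', True)])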
# Ondevice column {{{
def set_ondevice_column_visibility(self):
m = self._model
self.column_header.setSectionHidden(m.column_map.index('ondevice'),
not m.device_connected)
def set_device_connected(self, is_connected):
self._model.set_device_connected(is_connected)
self.set_ondevice_column_visibility()
# }}}
# Save/Restore State {{{
def get_state(self):
h = self.column_header
cm = self.column_map
state = {}
state['hidden_columns'] = [cm[i] for i in range(h.count())
if h.isSectionHidden(i) and cm[i] != 'ondevice']
state['last_modified_injected'] = True
state['languages_injected'] = True
state['sort_history'] = \
self.cleanup_sort_history(self.model().sort_history)
state['column_positions'] = {}
state['column_sizes'] = {}
state['column_alignment'] = self._model.alignment_map
for i in range(h.count()):
name = cm[i]
state['column_positions'][name] = h.visualIndex(i)
if name != 'ondevice':
state['column_sizes'][name] = h.sectionSize(i)
return state
def write_state(self, state):
db = getattr(self.model(), 'db', None)
name = unicode(self.objectName())
if name and db is not None:
db.prefs.set(name + ' books view state', state)
def save_state(self):
# Only save if we have been initialized (set_database called)
if len(self.column_map) > 0 and self.was_restored:
state = self.get_state()
self.write_state(state)
def cleanup_sort_history(self, sort_history, ignore_column_map=False):
history = []
for col, order in sort_history:
if not isinstance(order, bool):
continue
if col == 'date':
col = 'timestamp'
if ignore_column_map or col in self.column_map:
if (not history or history[-1][0] != col):
history.append([col, order])
return history
def apply_sort_history(self, saved_history, max_sort_levels=3):
if not saved_history:
return
for col, order in reversed(self.cleanup_sort_history(
saved_history)[:max_sort_levels]):
self.sortByColumn(self.column_map.index(col),
Qt.AscendingOrder if order else Qt.DescendingOrder)
def apply_state(self, state, max_sort_levels=3):
h = self.column_header
cmap = {}
hidden = state.get('hidden_columns', [])
for i, c in enumerate(self.column_map):
cmap[c] = i
if c != 'ondevice':
h.setSectionHidden(i, c in hidden)
positions = state.get('column_positions', {})
pmap = {}
for col, pos in positions.items():
if col in cmap:
pmap[pos] = col
for pos in sorted(pmap.keys()):
col = pmap[pos]
idx = cmap[col]
current_pos = h.visualIndex(idx)
if current_pos != pos:
h.moveSection(current_pos, pos)
sizes = state.get('column_sizes', {})
for col, size in sizes.items():
if col in cmap:
sz = sizes[col]
if sz < 3:
sz = h.sectionSizeHint(cmap[col])
h.resizeSection(cmap[col], sz)
self.apply_sort_history(state.get('sort_history', None),
max_sort_levels=max_sort_levels)
for col, alignment in state.get('column_alignment', {}).items():
self._model.change_alignment(col, alignment)
for i in range(h.count()):
if not h.isSectionHidden(i) and h.sectionSize(i) < 3:
sz = h.sectionSizeHint(i)
h.resizeSection(i, sz)
def get_default_state(self):
old_state = {
'hidden_columns': ['last_modified', 'languages'],
'sort_history':[DEFAULT_SORT],
'column_positions': {},
'column_sizes': {},
'column_alignment': {
'size':'center',
'timestamp':'center',
'pubdate':'center'},
'last_modified_injected': True,
'languages_injected': True,
}
h = self.column_header
cm = self.column_map
for i in range(h.count()):
name = cm[i]
old_state['column_positions'][name] = i
if name != 'ondevice':
old_state['column_sizes'][name] = \
min(350, max(self.sizeHintForColumn(i),
h.sectionSizeHint(i)))
if name in ('timestamp', 'last_modified'):
old_state['column_sizes'][name] += 12
return old_state
def get_old_state(self):
ans = None
name = unicode(self.objectName())
if name:
name += ' books view state'
db = getattr(self.model(), 'db', None)
if db is not None:
ans = db.prefs.get(name, None)
if ans is None:
ans = gprefs.get(name, None)
try:
del gprefs[name]
except:
pass
if ans is not None:
db.prefs[name] = ans
else:
injected = False
if not ans.get('last_modified_injected', False):
injected = True
ans['last_modified_injected'] = True
hc = ans.get('hidden_columns', [])
if 'last_modified' not in hc:
hc.append('last_modified')
if not ans.get('languages_injected', False):
injected = True
ans['languages_injected'] = True
hc = ans.get('hidden_columns', [])
if 'languages' not in hc:
hc.append('languages')
if injected:
db.prefs[name] = ans
return ans
def restore_state(self):
old_state = self.get_old_state()
if old_state is None:
old_state = self.get_default_state()
max_levels = 3
if tweaks['sort_columns_at_startup'] is not None:
sh = []
try:
for c,d in tweaks['sort_columns_at_startup']:
if not isinstance(d, bool):
d = True if d == 0 else False
sh.append((c, d))
except:
# Ignore invalid tweak values as users seem to often get them
# wrong
print('Ignoring invalid sort_columns_at_startup tweak, with error:')
import traceback
traceback.print_exc()
old_state['sort_history'] = sh
max_levels = max(3, len(sh))
self.column_header.blockSignals(True)
self.apply_state(old_state, max_sort_levels=max_levels)
self.column_header.blockSignals(False)
# Resize all rows to have the correct height
if self.model().rowCount(QModelIndex()) > 0:
self.resizeRowToContents(0)
self.verticalHeader().setDefaultSectionSize(self.rowHeight(0))
self.was_restored = True
def resize_column_to_fit(self, column):
col = self.column_map.index(column)
self.column_resized(col, self.columnWidth(col), self.columnWidth(col))
def column_resized(self, col, old_size, new_size):
# arbitrary: scroll bar + header + some
max_width = self.width() - (self.verticalScrollBar().width() +
self.verticalHeader().width() + 10)
if max_width < 200:
max_width = 200
if new_size > max_width:
self.column_header.blockSignals(True)
self.setColumnWidth(col, max_width)
self.column_header.blockSignals(False)
# }}}
# Initialization/Delegate Setup {{{
def set_database(self, db):
self.save_state()
self._model.set_database(db)
self.tags_delegate.set_database(db)
self.cc_names_delegate.set_database(db)
self.authors_delegate.set_database(db)
self.series_delegate.set_auto_complete_function(db.all_series)
self.publisher_delegate.set_auto_complete_function(db.all_publishers)
def database_changed(self, db):
for i in range(self.model().columnCount(None)):
if self.itemDelegateForColumn(i) in (self.rating_delegate,
self.timestamp_delegate, self.pubdate_delegate,
self.last_modified_delegate, self.languages_delegate):
self.setItemDelegateForColumn(i, self.itemDelegate())
cm = self.column_map
for colhead in cm:
if self._model.is_custom_column(colhead):
cc = self._model.custom_columns[colhead]
if cc['datatype'] == 'datetime':
delegate = CcDateDelegate(self)
delegate.set_format(cc['display'].get('date_format',''))
self.setItemDelegateForColumn(cm.index(colhead), delegate)
elif cc['datatype'] == 'comments':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_comments_delegate)
elif cc['datatype'] == 'text':
if cc['is_multiple']:
if cc['display'].get('is_names', False):
self.setItemDelegateForColumn(cm.index(colhead),
self.cc_names_delegate)
else:
self.setItemDelegateForColumn(cm.index(colhead),
self.tags_delegate)
else:
self.setItemDelegateForColumn(cm.index(colhead), self.cc_text_delegate)
elif cc['datatype'] == 'series':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_text_delegate)
elif cc['datatype'] in ('int', 'float'):
self.setItemDelegateForColumn(cm.index(colhead), self.cc_number_delegate)
elif cc['datatype'] == 'bool':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_bool_delegate)
elif cc['datatype'] == 'rating':
self.setItemDelegateForColumn(cm.index(colhead), self.rating_delegate)
elif cc['datatype'] == 'composite':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_template_delegate)
elif cc['datatype'] == 'enumeration':
self.setItemDelegateForColumn(cm.index(colhead), self.cc_enum_delegate)
else:
dattr = colhead+'_delegate'
delegate = colhead if hasattr(self, dattr) else 'text'
self.setItemDelegateForColumn(cm.index(colhead), getattr(self,
delegate+'_delegate'))
self.restore_state()
self.set_ondevice_column_visibility()
#}}}
# Context Menu {{{
def set_context_menu(self, menu, edit_collections_action):
self.setContextMenuPolicy(Qt.DefaultContextMenu)
self.context_menu = menu
self.edit_collections_action = edit_collections_action
def contextMenuEvent(self, event):
self.context_menu.popup(event.globalPos())
event.accept()
# }}}
# Drag 'n Drop {{{
@classmethod
def paths_from_event(cls, event):
'''
Accept a drop event and return a list of paths that can be read from
and represent files with extensions.
'''
md = event.mimeData()
if md.hasFormat('text/uri-list') and not \
md.hasFormat('application/calibre+from_library'):
urls = [unicode(u.toLocalFile()) for u in md.urls()]
return [u for u in urls if os.path.splitext(u)[1] and
os.path.exists(u)]
def drag_icon(self, cover, multiple):
cover = cover.scaledToHeight(120, Qt.SmoothTransformation)
if multiple:
base_width = cover.width()
base_height = cover.height()
base = QImage(base_width+21, base_height+21,
QImage.Format_ARGB32_Premultiplied)
base.fill(QColor(255, 255, 255, 0).rgba())
p = QPainter(base)
rect = QRect(20, 0, base_width, base_height)
p.fillRect(rect, QColor('white'))
p.drawRect(rect)
rect.moveLeft(10)
rect.moveTop(10)
p.fillRect(rect, QColor('white'))
p.drawRect(rect)
rect.moveLeft(0)
rect.moveTop(20)
p.fillRect(rect, QColor('white'))
p.save()
p.setCompositionMode(p.CompositionMode_SourceAtop)
p.drawImage(rect.topLeft(), cover)
p.restore()
p.drawRect(rect)
p.end()
cover = base
return QPixmap.fromImage(cover)
def drag_data(self):
m = self.model()
db = m.db
rows = self.selectionModel().selectedRows()
selected = list(map(m.id, rows))
ids = ' '.join(map(str, selected))
md = QMimeData()
md.setData('application/calibre+from_library', ids)
fmt = prefs['output_format']
def url_for_id(i):
try:
ans = db.format_path(i, fmt, index_is_id=True)
except:
ans = None
if ans is None:
fmts = db.formats(i, index_is_id=True)
if fmts:
fmts = fmts.split(',')
else:
fmts = []
for f in fmts:
try:
ans = db.format_path(i, f, index_is_id=True)
except:
ans = None
if ans is None:
ans = db.abspath(i, index_is_id=True)
return QUrl.fromLocalFile(ans)
md.setUrls([url_for_id(i) for i in selected])
drag = QDrag(self)
col = self.selectionModel().currentIndex().column()
md.column_name = self.column_map[col]
drag.setMimeData(md)
cover = self.drag_icon(m.cover(self.currentIndex().row()),
len(selected) > 1)
drag.setHotSpot(QPoint(-15, -15))
drag.setPixmap(cover)
return drag
def event_has_mods(self, event=None):
mods = event.modifiers() if event is not None else \
QApplication.keyboardModifiers()
return mods & Qt.ControlModifier or mods & Qt.ShiftModifier
def mousePressEvent(self, event):
ep = event.pos()
if self.indexAt(ep) in self.selectionModel().selectedIndexes() and \
event.button() == Qt.LeftButton and not self.event_has_mods():
self.drag_start_pos = ep
return QTableView.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if not self.drag_allowed:
return
if self.drag_start_pos is None:
return QTableView.mouseMoveEvent(self, event)
if self.event_has_mods():
self.drag_start_pos = None
return
if not (event.buttons() & Qt.LeftButton) or \
(event.pos() - self.drag_start_pos).manhattanLength() \
< QApplication.startDragDistance():
return
index = self.indexAt(event.pos())
if not index.isValid():
return
drag = self.drag_data()
drag.exec_(Qt.CopyAction)
self.drag_start_pos = None
def dragEnterEvent(self, event):
if int(event.possibleActions() & Qt.CopyAction) + \
int(event.possibleActions() & Qt.MoveAction) == 0:
return
paths = self.paths_from_event(event)
if paths:
event.acceptProposedAction()
def dragMoveEvent(self, event):
event.acceptProposedAction()
def dropEvent(self, event):
paths = self.paths_from_event(event)
event.setDropAction(Qt.CopyAction)
event.accept()
self.files_dropped.emit(paths)
# }}}
@property
def column_map(self):
return self._model.column_map
def refresh_book_details(self):
idx = self.currentIndex()
if idx.isValid():
self._model.current_changed(idx, idx)
def scrollContentsBy(self, dx, dy):
# Needed as Qt bug causes headerview to not always update when scrolling
QTableView.scrollContentsBy(self, dx, dy)
if dy != 0:
self.column_header.update()
def scroll_to_row(self, row):
if row > -1:
h = self.horizontalHeader()
for i in range(h.count()):
if not h.isSectionHidden(i) and h.sectionViewportPosition(i) >= 0:
self.scrollTo(self.model().index(row, i), self.PositionAtCenter)
break
def set_current_row(self, row, select=True):
if row > -1 and row < self.model().rowCount(QModelIndex()):
h = self.horizontalHeader()
logical_indices = list(range(h.count()))
logical_indices = [x for x in logical_indices if not
h.isSectionHidden(x)]
pairs = [(x, h.visualIndex(x)) for x in logical_indices if
h.visualIndex(x) > -1]
if not pairs:
pairs = [(0, 0)]
pairs.sort(cmp=lambda x,y:cmp(x[1], y[1]))
i = pairs[0][0]
index = self.model().index(row, i)
self.setCurrentIndex(index)
if select:
sm = self.selectionModel()
sm.select(index, sm.ClearAndSelect|sm.Rows)
def ids_to_rows(self, ids):
row_map = OrderedDict()
ids = frozenset(ids)
m = self.model()
for row in xrange(m.rowCount(QModelIndex())):
if len(row_map) >= len(ids): break
c = m.id(row)
if c in ids:
row_map[c] = row
return row_map
def select_rows(self, identifiers, using_ids=True, change_current=True,
scroll=True):
'''
Select rows identified by identifiers. identifiers can be a set of ids,
row numbers or QModelIndexes.
'''
rows = set([x.row() if hasattr(x, 'row') else x for x in
identifiers])
if using_ids:
rows = set([])
identifiers = set(identifiers)
m = self.model()
for row in xrange(m.rowCount(QModelIndex())):
if m.id(row) in identifiers:
rows.add(row)
rows = list(sorted(rows))
if rows:
row = rows[0]
if change_current:
self.set_current_row(row, select=False)
if scroll:
self.scroll_to_row(row)
sm = self.selectionModel()
sel = QItemSelection()
m = self.model()
max_col = m.columnCount(QModelIndex()) - 1
# Create a range based selector for each set of contiguous rows
# as supplying selectors for each individual row causes very poor
# performance if a large number of rows has to be selected.
for k, g in itertools.groupby(enumerate(rows), lambda (i,x):i-x):
group = list(map(operator.itemgetter(1), g))
sel.merge(QItemSelection(m.index(min(group), 0),
m.index(max(group), max_col)), sm.Select)
sm.select(sel, sm.ClearAndSelect)
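    # Illustrative call, assuming ``ids`` is a set of database book ids:
    #
    #   view.select_rows(ids, using_ids=True, scroll=True)
    #   view.get_selected_ids()   # -> list of the ids now selected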
def get_selected_ids(self):
ans = []
m = self.model()
for idx in self.selectedIndexes():
r = idx.row()
i = m.id(r)
if i not in ans:
ans.append(i)
return ans
@dynamic_property
def current_id(self):
def fget(self):
try:
return self.model().id(self.currentIndex())
except:
pass
return None
def fset(self, val):
if val is None: return
m = self.model()
for row in xrange(m.rowCount(QModelIndex())):
if m.id(row) == val:
self.set_current_row(row, select=False)
break
return property(fget=fget, fset=fset)
@property
def next_id(self):
'''
Return the id of the 'next' row (i.e. the first unselected row after
the current row).
'''
ci = self.currentIndex()
if not ci.isValid():
return None
selected_rows = frozenset([i.row() for i in self.selectedIndexes() if
i.isValid()])
column = ci.column()
for i in xrange(ci.row()+1, self.row_count()):
if i in selected_rows: continue
try:
return self.model().id(self.model().index(i, column))
except:
pass
# No unselected rows after the current row, look before
for i in xrange(ci.row()-1, -1, -1):
if i in selected_rows: continue
try:
return self.model().id(self.model().index(i, column))
except:
pass
return None
def close(self):
self._model.close()
def set_editable(self, editable, supports_backloading):
self._model.set_editable(editable)
def move_highlighted_row(self, forward):
rows = self.selectionModel().selectedRows()
if len(rows) > 0:
current_row = rows[0].row()
else:
current_row = None
id_to_select = self._model.get_next_highlighted_id(current_row, forward)
if id_to_select is not None:
self.select_rows([id_to_select], using_ids=True)
def search_proxy(self, txt):
self._model.search(txt)
id_to_select = self._model.get_current_highlighted_id()
if id_to_select is not None:
self.select_rows([id_to_select], using_ids=True)
elif self._model.highlight_only:
self.clearSelection()
self.setFocus(Qt.OtherFocusReason)
def connect_to_search_box(self, sb, search_done):
sb.search.connect(self.search_proxy)
self._search_done = search_done
self._model.searched.connect(self.search_done)
def connect_to_book_display(self, bd):
self._model.new_bookdisplay_data.connect(bd)
def search_done(self, ok):
self._search_done(self, ok)
def row_count(self):
return self._model.count()
# }}}
class DeviceBooksView(BooksView): # {{{
def __init__(self, parent):
BooksView.__init__(self, parent, DeviceBooksModel,
use_edit_metadata_dialog=False)
self.can_add_columns = False
self.columns_resized = False
self.resize_on_select = False
self.rating_delegate = None
for i in range(10):
self.setItemDelegateForColumn(i, TextDelegate(self))
self.setDragDropMode(self.NoDragDrop)
self.setAcceptDrops(False)
def drag_data(self):
m = self.model()
rows = self.selectionModel().selectedRows()
paths = [force_unicode(p, enc=filesystem_encoding) for p in m.paths(rows) if p]
md = QMimeData()
md.setData('application/calibre+from_device', 'dummy')
md.setUrls([QUrl.fromLocalFile(p) for p in paths])
drag = QDrag(self)
drag.setMimeData(md)
cover = self.drag_icon(m.cover(self.currentIndex().row()), len(paths) >
1)
drag.setHotSpot(QPoint(-15, -15))
drag.setPixmap(cover)
return drag
def contextMenuEvent(self, event):
edit_collections = callable(getattr(self._model.db, 'supports_collections', None)) and \
self._model.db.supports_collections() and \
prefs['manage_device_metadata'] == 'manual'
self.edit_collections_action.setVisible(edit_collections)
self.context_menu.popup(event.globalPos())
event.accept()
def get_old_state(self):
ans = None
name = unicode(self.objectName())
if name:
name += ' books view state'
ans = gprefs.get(name, None)
return ans
def write_state(self, state):
name = unicode(self.objectName())
if name:
gprefs.set(name + ' books view state', state)
def set_database(self, db):
self._model.set_database(db)
self.restore_state()
def resizeColumnsToContents(self):
QTableView.resizeColumnsToContents(self)
self.columns_resized = True
def connect_dirtied_signal(self, slot):
self._model.booklist_dirtied.connect(slot)
def connect_upload_collections_signal(self, func=None, oncard=None):
self._model.upload_collections.connect(partial(func, view=self, oncard=oncard))
def dropEvent(self, *args):
error_dialog(self, _('Not allowed'),
_('Dropping onto a device is not supported. First add the book to the calibre library.')).exec_()
def set_editable(self, editable, supports_backloading):
self._model.set_editable(editable)
self.drag_allowed = supports_backloading
# }}}
| gpl-3.0 | 4,215,792,900,672,243,700 | 38.230542 | 105 | 0.555815 | false |
dradux/tracker | web/migrations/versions/95ecf01d9cb4_add_test_result_status_items.py | 1 | 1277 | """add test_result_status items
Revision ID: 95ecf01d9cb4
Revises: ea71f73f5460
Create Date: 2017-03-29 19:41:26.581925
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '95ecf01d9cb4'
down_revision = 'ea71f73f5460'
branch_labels = None
depends_on = None
def upgrade():
#~ op.bulk_insert('test_result_status',
#~ [
#~ {'status': 'Created'},
#~ {'status': 'Completed'},
#~ {'status': 'Failed'}
#~ ]
#~ )
op.execute("INSERT INTO test_result_status (status) VALUES ('Created')")
op.execute("INSERT INTO test_result_status (status) VALUES ('Completed')")
op.execute("INSERT INTO test_result_status (status) VALUES ('Failed')")
op.execute("UPDATE test_result SET status_id=(SELECT id FROM test_result_status where status='Created') WHERE test_passed is null")
op.execute("UPDATE test_result SET status_id=(SELECT id FROM test_result_status where status='Completed') WHERE test_passed=true")
op.execute("UPDATE test_result SET status_id=(SELECT id FROM test_result_status where status='Failed') WHERE test_passed=false")
def downgrade():
op.execute("delete from test_result_status where status in('Created', 'Completed', 'Failed')")
| gpl-3.0 | 2,511,160,009,906,113,000 | 33.513514 | 135 | 0.680501 | false |
openstack/tempest | tempest/api/identity/v3/test_access_rules.py | 1 | 3331 | # Copyright 2019 SUSE LLC
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
class AccessRulesV3Test(base.BaseIdentityV3Test):
@classmethod
def skip_checks(cls):
super(AccessRulesV3Test, cls).skip_checks()
if not CONF.identity_feature_enabled.access_rules:
raise cls.skipException("Application credential access rules are "
"not available in this environment")
@classmethod
def resource_setup(cls):
super(AccessRulesV3Test, cls).resource_setup()
cls.user_id = cls.os_primary.credentials.user_id
cls.project_id = cls.os_primary.credentials.project_id
def setUp(self):
super(AccessRulesV3Test, self).setUp()
ac = self.non_admin_app_creds_client
access_rules = [
{
"path": "/v2.1/servers/*/ips",
"method": "GET",
"service": "compute"
}
]
self.app_cred = ac.create_application_credential(
self.user_id,
name=data_utils.rand_name('application_credential'),
access_rules=access_rules
)['application_credential']
@decorators.idempotent_id('2354c498-5119-4ba5-9f0d-44f16f78fb0e')
def test_list_access_rules(self):
ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
self.assertEqual(1, len(ar['access_rules']))
@decorators.idempotent_id('795dd507-ca1e-40e9-ba90-ff0a08689ba4')
def test_show_access_rule(self):
access_rule_id = self.app_cred['access_rules'][0]['id']
self.non_admin_access_rules_client.show_access_rule(
self.user_id, access_rule_id)
@decorators.idempotent_id('278757e9-e193-4bf8-adf2-0b0a229a17d0')
def test_delete_access_rule(self):
access_rule_id = self.app_cred['access_rules'][0]['id']
app_cred_id = self.app_cred['id']
self.assertRaises(
lib_exc.Forbidden,
self.non_admin_access_rules_client.delete_access_rule,
self.user_id,
access_rule_id)
self.non_admin_app_creds_client.delete_application_credential(
self.user_id, app_cred_id)
ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
self.assertEqual(1, len(ar['access_rules']))
self.non_admin_access_rules_client.delete_access_rule(
self.user_id, access_rule_id)
ar = self.non_admin_access_rules_client.list_access_rules(self.user_id)
self.assertEqual(0, len(ar['access_rules']))
| apache-2.0 | -4,020,664,339,231,408,000 | 38.654762 | 79 | 0.652056 | false |
convexopt/gpkit | gpkit/tests/t_tools.py | 1 | 5666 | """Tests for tools module"""
import unittest
import numpy as np
from numpy import log
from gpkit import Variable, VectorVariable, Model, NomialArray
from gpkit.tools.autosweep import BinarySweepTree
from gpkit.tools.tools import te_exp_minus1, te_secant, te_tangent
from gpkit.small_scripts import mag
from gpkit import parse_variables
def assert_logtol(first, second, logtol=1e-6):
"Asserts that the logs of two arrays have a given abstol"
np.testing.assert_allclose(log(mag(first)), log(mag(second)),
atol=logtol, rtol=0)
class OnlyVectorParse(Model):
"""
Variables of length 3
---------------------
x [-] just another variable
"""
def setup(self):
exec parse_variables(OnlyVectorParse.__doc__) # pylint: disable=exec-used
class Fuselage(Model):
"""The thing that carries the fuel, engine, and payload
Variables
---------
f [-] Fineness
g 9.81 [m/s^2] Standard gravity
k [-] Form factor
l [ft] Length
mfac 2.0 [-] Weight margin factor
R [ft] Radius
rhocfrp 1.6 [g/cm^3] Density of CFRP
rhofuel 6.01 [lbf/gallon] Density of 100LL fuel
S [ft^2] Wetted area
t 0.024 [in] Minimum skin thickness
Vol [ft^3] Volume
W [lbf] Weight
Upper Unbounded
---------------
k, W
"""
# pylint: disable=undefined-variable, exec-used, invalid-name
def setup(self, Wfueltot):
exec parse_variables(self.__doc__)
return [
f == l/R/2,
k >= 1 + 60/f**3 + f/400,
3*(S/np.pi)**1.6075 >= 2*(l*R*2)**1.6075 + (2*R)**(2*1.6075),
Vol <= 4*np.pi/3*(l/2)*R**2,
Vol >= Wfueltot/rhofuel,
W/mfac >= S*rhocfrp*t*g,
]
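# Rough usage sketch (illustration only, not part of the original tests): the
# Fuselage model above is normally combined with an objective and solved, e.g.
#
#   Wfueltot = Variable("Wfueltot", 5, "lbf")
#   fuselage = Fuselage(Wfueltot)
#   sol = Model(fuselage.W, [fuselage]).solve(verbosity=0)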
class TestTools(unittest.TestCase):
"""TestCase for math models"""
def test_vector_only_parse(self):
# pylint: disable=no-member
m = OnlyVectorParse()
self.assertTrue(hasattr(m, "x"))
self.assertIsInstance(m.x, NomialArray)
self.assertEqual(len(m.x), 3)
def test_parse_variables(self):
Fuselage(Variable("Wfueltot", 5, "lbf"))
def test_binary_sweep_tree(self):
bst0 = BinarySweepTree([1, 2], [{"cost": 1}, {"cost": 8}], None, None)
assert_logtol(bst0.sample_at([1, 1.5, 2])["cost"], [1, 3.375, 8], 1e-3)
bst0.add_split(1.5, {"cost": 4})
assert_logtol(bst0.sample_at([1, 1.25, 1.5, 1.75, 2])["cost"],
[1, 2.144, 4, 5.799, 8], 1e-3)
def test_dual_objective(self):
L = Variable("L")
W = Variable("W")
eqns = [L >= 1, W >= 1,
L*W == 10]
N = 4
ws = Variable("w_{CO}", ("sweep", np.linspace(1./N, 1-1./N, N)), "-")
w_s = Variable("v_{CO}", lambda c: 1-c[ws], "-")
obj = ws*(L+W) + w_s*(W**-1 * L**-3)
m = Model(obj, eqns)
sol = m.solve(verbosity=0)
a = sol["cost"]
b = np.array([1.58856898, 2.6410391, 3.69348122, 4.74591386])
self.assertTrue((abs(a-b)/(a+b+1e-7) < 1e-7).all())
def test_te_exp_minus1(self):
"""Test Taylor expansion of e^x - 1"""
x = Variable('x')
self.assertEqual(te_exp_minus1(x, 1), x)
self.assertEqual(te_exp_minus1(x, 3), x + x**2/2. + x**3/6.)
self.assertEqual(te_exp_minus1(x, 0), 0)
# make sure x was not modified
self.assertEqual(x, Variable('x'))
# try for VectorVariable too
y = VectorVariable(3, 'y')
self.assertEqual(te_exp_minus1(y, 1), y)
self.assertEqual(te_exp_minus1(y, 3), y + y**2/2. + y**3/6.)
self.assertEqual(te_exp_minus1(y, 0), 0)
# make sure y was not modified
self.assertEqual(y, VectorVariable(3, 'y'))
def test_te_secant(self):
"Test Taylor expansion of secant(var)"
x = Variable('x')
self.assertEqual(te_secant(x, 1), 1 + x**2/2.)
a = te_secant(x, 2)
b = 1 + x**2/2. + 5*x**4/24.
self.assertTrue(all([abs(val) <= 1e-10
for val in (a.hmap - b.hmap).values()])) # pylint:disable=no-member
self.assertEqual(te_secant(x, 0), 1)
# make sure x was not modified
self.assertEqual(x, Variable('x'))
# try for VectorVariable too
y = VectorVariable(3, 'y')
self.assertEqual(te_secant(y, 1), 1 + y**2/2.)
self.assertEqual(te_secant(y, 2), 1 + y**2/2. + 5*y**4/24.)
self.assertEqual(te_secant(y, 0), 1)
# make sure y was not modified
self.assertEqual(y, VectorVariable(3, 'y'))
def test_te_tangent(self):
"Test Taylor expansion of tangent(var)"
x = Variable('x')
self.assertEqual(te_tangent(x, 1), x)
self.assertEqual(te_tangent(x, 3), x + x**3/3. + 2*x**5/15.)
self.assertEqual(te_tangent(x, 0), 0)
# make sure x was not modified
self.assertEqual(x, Variable('x'))
# try for VectorVariable too
y = VectorVariable(3, 'y')
self.assertEqual(te_tangent(y, 1), y)
self.assertEqual(te_tangent(y, 3), y + y**3/3. + 2*y**5/15.)
self.assertEqual(te_tangent(y, 0), 0)
# make sure y was not modified
self.assertEqual(y, VectorVariable(3, 'y'))
TESTS = [TestTools]
if __name__ == '__main__':
# pylint: disable=wrong-import-position
from gpkit.tests.helpers import run_tests
run_tests(TESTS)
| mit | -906,842,295,716,805,000 | 34.63522 | 97 | 0.53018 | false |
noemu/script.example-master | default.py | 1 | 7991 | # https://docs.python.org/2.7/
import os
import sys
import urllib
import urlparse
# http://mirrors.kodi.tv/docs/python-docs/
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
# http://docs.python-requests.org/en/latest/
import requests
from threading import Thread
import time
class PlayerWindow(xbmcgui.WindowXML):
LABEL_ARTIST = 802
LABEL_TITEL = 801
LABEL_ALBUM = 803
IMG_ALBUM = 800
SLIDER_VOL = 815
BUTTON_SHUFFLE = 817
BUTTON_SHUFFLE_ACT = 818
BUTTON_REPEAT = 819
BUTTON_REPEAT_ACT = 819
BUTTON_BACK = 809
BUTTON_PLAY = 811
BUTTON_PAUSE = 812
BUTTON_FOR = 813
BUTTON_VOL_UP = 816
BUTTON_VOL_DOWN = 814
def __init__(self, *args, **kwargs):
self.isRunning = True
self.volume = 100
    def onAction(self, action):
ACTION_PREVIOUS_MENU = 10
ACTION_NAV_BACK = 92
ACTION_UP = 3
ACTION_DOWN = 4
ACTION_LEFT = 1
ACTION_RIGHT = 2
ACTION_MIDDLE = 7
ACTION_PAUSE = 12
ACTION_STOP = 13
ACTION_NEXT_ITEM = 14
ACTION_PREV_ITEM = 15
ACTION_FORWARD = 16
ACTION_REWIND = 17
ACTION_PLAYER_FORWARD = 77
ACTION_PLAYER_REWIND = 78
ACTION_PLAYER_PLAY = 79
ACTION_VOLUME_UP = 88
ACTION_VOLUME_DOWN = 89
ACTION_MUTE = 91
ACTION_PAGE_UP = 5
ACTION_PAGE_DOWN = 6
#ids = str(action.getId())
#xbmc.log(ids)
if (action == ACTION_PREVIOUS_MENU) or (action == ACTION_NAV_BACK):
xbmcgui.Window(10000).setProperty("spotify-closed-by-user","true")
self.isRunning = False
self.close()
if (action == ACTION_LEFT) or (action == ACTION_RIGHT):
self.volSlider = self.getControl(self.SLIDER_VOL)
volume = self.volSlider.getPercent()
setVol(volume)
if(action == ACTION_PLAYER_PLAY) or (action == ACTION_PAUSE):
if(self.playing):
getSite(pause)
else:
getSite(play)
if (action == ACTION_VOLUME_UP):
self.volume = self.volume + 3
if(self.volume > 100):
self.volume = 100
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (action == ACTION_VOLUME_DOWN):
self.volume = self.volume- 3
if(self.volume < 0):
self.volume = 0
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (action == ACTION_FORWARD) or (action == ACTION_PLAYER_FORWARD) or (action == ACTION_NEXT_ITEM) or (action == ACTION_PAGE_UP):
getSite(next)
if (action == ACTION_REWIND) or (action == ACTION_PLAYER_REWIND) or (action == ACTION_PREV_ITEM) or (action == ACTION_PAGE_DOWN):
getSite(prev)
if(action == ACTION_STOP):
getSite(pause)
def onClick(self, controlID):
if (controlID == self.BUTTON_PAUSE) or (controlID == self.BUTTON_PLAY):
if(self.playing):
getSite(pause)
else:
getSite(play)
if (controlID == self.BUTTON_VOL_UP):
self.volume = self.volume + 3
if(self.volume > 100):
self.volume = 100
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (controlID == self.BUTTON_VOL_DOWN):
self.volume = self.volume- 3
if(self.volume < 0):
self.volume = 0
setVol(self.volume)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.volSlider.setPercent(self.volume)
if (controlID == self.BUTTON_FOR):
getSite(next)
if (controlID == self.BUTTON_BACK):
getSite(prev)
def updateLabels(self, information):
self.albumCover = self.getControl(self.IMG_ALBUM)
self.titleLabel = self.getControl(self.LABEL_TITEL)
self.artistLabel = self.getControl(self.LABEL_ARTIST)
self.albumName = self.getControl(self.LABEL_ALBUM)
self.volSlider = self.getControl(self.SLIDER_VOL)
self.playing = information['playing']
self.titleLabel.setLabel(information['track_name'])
self.albumName.setLabel(information['album_name'])
self.artistLabel.setLabel( information['artist_name'])
self.albumCover.setImage(information['cover_url'])
self.volume = int(information['volume'])/655.35
self.volSlider.setPercent(self.volume)
self.getControl(self.BUTTON_PLAY).setVisible(not self.playing)
self.getControl(self.BUTTON_SHUFFLE).setVisible(not information['shuffle'])
self.getControl(self.BUTTON_REPEAT).setVisible(not information['repeat'])
def getSite(url):
    # Note: network errors from requests.get() (timeouts, connection failures)
    # are not handled here and will propagate to the caller.
    rq = requests.get(url)
    return rq
def getInfo():
information = getSite(info).json()
statusInfo = getSite(status).json()
playing = statusInfo['playing']
shuffleInfo = statusInfo['shuffle']
repeatInfo = statusInfo['repeat']
coverURL = "http://o.scdn.co/160/"+information['cover_uri'].split(':')[-1]
information['cover_url'] = coverURL
information['playing'] = playing
information['shuffle'] = shuffleInfo
information['repeat'] = repeatInfo
return information
def downloadCover(url):
urllib.urlretrieve(url,'/tmp/spotAlCov.png')
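# Note on the scaling in setVol() below (assumption based on how this web API
# is used here): the /api/playback/volume endpoint expects a raw value in the
# 0-65535 range, so a 0-100 percentage is multiplied by 655.35.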
def setVol(value):
value = int(round(value* 655.35))
jsonPost = {'value': value}
requests.post(volume,data=jsonPost)
def updateInfo(name,window):
screensaverDelay = 30
screensaverCount = 0
updateInterval = 2
while True:
try:
window.getControl(800)
break
except Exception:
xbmc.log("Error: can't find Window, try again")
time.sleep(1) # maybe fix for can't find window id's
while window.isRunning and (not xbmc.abortRequested):
information = getInfo()
window.updateLabels(information)
time.sleep(updateInterval)
screensaverCount = screensaverCount + updateInterval
if(screensaverCount>screensaverDelay) and information['playing']:
#wakeup from screensaver by simulating a button activity
json_query = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Input.ContextMenu", "id": 1}')
screensaverCount = 0
def main():
pw = PlayerWindow("player.xml",CWD)
#xbmcgui.Window( 10000 )
t1 = Thread(target=updateInfo,args=("1",pw))
t1.setDaemon( True)
t1.start()
xbmcgui.Window(10000).setProperty("spotify-showing", "true")
pw.doModal()
xbmcgui.Window(10000).clearProperty("spotify-showing")
del t1
del pw
if __name__ == '__main__':
page = 'http://127.0.0.1:4000'
apiPlayback = '/api/playback'
play = page+apiPlayback+'/play'
pause = page+apiPlayback+'/pause'
prev = page+apiPlayback+'/prev'
next = page+apiPlayback+'/next'
volume = page+apiPlayback+'/volume'
shuffle = page+apiPlayback+'/shuffle'
repeat = page+apiPlayback+'/repeat'
info = page+'/api/info/metadata'
status = page+'/api/info/status'
ADDON = xbmcaddon.Addon(id='plugin.audio.example')
CWD = ADDON.getAddonInfo('path').decode("utf-8")
main()
| gpl-2.0 | -3,525,701,916,214,955,000 | 27.641577 | 137 | 0.568014 | false |
Endika/manufacture | mrp_operations_time_control/models/operation_time.py | 1 | 2588 | # -*- coding: utf-8 -*-
# © 2015 Avanzosc
# © 2015 Pedro M. Baeza
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import api, models, fields
class MrpProductionWorkcenterLine(models.Model):
_inherit = 'mrp.production.workcenter.line'
operation_time_lines = fields.One2many(
'operation.time.line', 'operation_time', string='Operation Time Lines')
def _create_operation_line(self):
self.env['operation.time.line'].create({
'start_date': fields.Datetime.now(),
'operation_time': self.id,
'user': self.env.uid})
def _write_end_date_operation_line(self):
self.operation_time_lines[-1].end_date = fields.Datetime.now()
@api.multi
def action_start_working(self):
result = super(MrpProductionWorkcenterLine,
self).action_start_working()
self._create_operation_line()
return result
@api.multi
def action_pause(self):
result = super(MrpProductionWorkcenterLine, self).action_pause()
self._write_end_date_operation_line()
return result
@api.multi
def action_resume(self):
result = super(MrpProductionWorkcenterLine, self).action_resume()
self._create_operation_line()
return result
@api.multi
def action_done(self):
not_paused_records = self.filtered(lambda x: x.state != 'pause')
result = super(MrpProductionWorkcenterLine, self).action_done()
not_paused_records._write_end_date_operation_line()
return result
class OperationTimeLine(models.Model):
_name = 'operation.time.line'
_rec_name = 'operation_time'
def _default_user(self):
return self.env.uid
start_date = fields.Datetime(string='Start Date')
end_date = fields.Datetime(string='End Date')
operation_time = fields.Many2one('mrp.production.workcenter.line')
uptime = fields.Float(
string='Machine up time', compute='_compute_uptime', store=True,
digits=(12, 6))
production = fields.Many2one(
'mrp.production', related='operation_time.production_id',
string='Production', store=True)
user = fields.Many2one('res.users', string='User', default=_default_user)
@api.one
@api.depends('start_date', 'end_date')
def _compute_uptime(self):
if self.end_date and self.start_date:
timedelta = fields.Datetime.from_string(self.end_date) - \
fields.Datetime.from_string(self.start_date)
self.uptime = timedelta.total_seconds() / 3600.
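    # Worked example (illustration only): a line that starts at 10:00:00 and
    # ends at 11:30:00 gives timedelta.total_seconds() == 5400, so uptime == 1.5.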
| agpl-3.0 | -6,874,118,255,651,219,000 | 33.48 | 79 | 0.643852 | false |
Clinical-Genomics/scout | tests/utils/test_scout_requests.py | 1 | 14457 | """Tests for scout requests"""
import tempfile
import zlib
from urllib.error import HTTPError
import pytest
import requests
import responses
from scout.utils import scout_requests
def test_get_request_json_error():
"""Test the function that sends a GET request that returns an error"""
# GIVEN A URL that returns error
url = "http://bar"
resp_dict = scout_requests.get_request_json(url)
# THEN the response should return an error message
assert "An error occurred" in resp_dict["message"]
@responses.activate
def test_get_request_json():
"""Test the function that sends a GET request and returns the response content as json"""
# GIVEN a URL that returns a success response
url = "http://bar"
responses.add(responses.GET, url, json={"foo": "bar"}, status=200)
headers = {"X-Auth-Token": "XYZ"}
resp_dict = scout_requests.get_request_json(url, headers)
# Response should contain the expected data
assert resp_dict["status_code"] == 200
assert resp_dict["content"] == {"foo": "bar"}
def test_post_request_json_error():
"""Test function that sends a POST request to a URL that returns error"""
url = "http://bar"
data = {"param": "FOO"}
resp_dict = scout_requests.post_request_json(url, data)
assert "An error occurred while sending a POST request to url" in resp_dict["message"]
@responses.activate
def test_post_request_json():
"""Test the function that sends a POST request and returns the response content as json"""
# GIVEN a URL that returns a success response
url = "http://bar"
responses.add(responses.POST, url, json={"foo": "bar"}, status=200)
data = {"param": "FOO"}
headers = {"Content-type": "application/json; charset=utf-8", "Accept": "text/json"}
resp_dict = scout_requests.post_request_json(url, data, headers)
# Response should contain the expected data
assert resp_dict["status_code"] == 200
assert resp_dict["content"] == {"foo": "bar"}
def test_delete_request_json_error():
"""Test function that sends a DELETE request to a URL that returns error"""
# GIVEN A URL that returns error
url = "http://bar"
resp_dict = scout_requests.delete_request_json(url)
# THEN the response should return an error message
assert "An error occurred" in resp_dict["message"]
@responses.activate
def test_delete_request_json():
"""Test the function that sends a DELETE request and returns the response content as json"""
# GIVEN a URL that returns a success response
url = "http://bar"
responses.add(responses.DELETE, url, json={"foo": "bar"}, status=200)
headers = {"X-Auth-Token": "XYZ"}
resp_dict = scout_requests.delete_request_json(url, headers)
# Response should contain the expected data
assert resp_dict["status_code"] == 200
assert resp_dict["content"] == {"foo": "bar"}
def test_get_request_bad_url():
"""Test function that accepts an url and returns decoded data from it"""
# test function with a url that is not valid
url = "fakeyurl"
with pytest.raises(requests.exceptions.MissingSchema):
# function should raise error
assert scout_requests.get_request(url)
@responses.activate
def test_get_request_bad_request():
"""Test functions that accepts an url and returns decoded data from it"""
# GIVEN an URL
url = "http://www.badurl.com"
responses.add(
responses.GET,
url,
status=404,
)
# WHEN requesting
with pytest.raises(requests.exceptions.HTTPError):
response = scout_requests.get_request(url)
    # THEN assert that an HTTPError is raised
assert response.status_code == 404
@responses.activate
def test_send_request_timeout():
    """This test will trigger a timeout error."""
    # GIVEN a URL and a request that times out
url = "http://www.badurl.com"
responses.add(
responses.GET,
url,
body=requests.exceptions.Timeout(),
)
# WHEN requesting
with pytest.raises(requests.exceptions.Timeout):
# THEN assert that the a Timeout is raised
scout_requests.get_request(url)
@responses.activate
def test_get_request():
"""Test functions that accepts an url and returns decoded data from it"""
# GIVEN an URL
url = "http://www.github.com"
responses.add(
responses.GET,
url,
status=200,
)
# WHEN requesting
response = scout_requests.get_request(url)
    # THEN assert that the response is correct
assert response.status_code == 200
@responses.activate
def test_fetch_resource():
"""Test fetch resource"""
# GIVEN an URL
url = "http://www.github.com"
content = "Some things\n That are not so\ninteresting"
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_resource(url)
# THEN assert that a list of lines are returned
assert isinstance(data, list)
@responses.activate
def test_fetch_resource_gzipped(variant_clinical_file):
"""Test fetch resource"""
# GIVEN an URL
url = "http://www.github.com/things.txt.gz"
with open(variant_clinical_file, "rb") as zipped_file:
content = zipped_file.read()
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_resource(url)
# THEN assert that a list of lines are returned
assert isinstance(data, list)
# THEN assert that the vcf header is there
assert "##fileformat" in data[0]
@responses.activate
def test_fetch_hpo(hpo_terms_file):
"""Test fetch resource"""
# GIVEN an URL
url = "https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/hp.obo"
with open(hpo_terms_file, "r") as hpo_file:
content = hpo_file.read()
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_hpo_terms()
# THEN assert that the HPO header is there
assert "format-version" in data[0]
@responses.activate
def test_fetch_genes_to_hpo_to_disease(hpo_genes_file):
"""Test fetch resource"""
# GIVEN an URL
url = scout_requests.HPO_URL.format("genes_to_phenotype.txt")
with open(hpo_genes_file, "r") as hpo_file:
content = hpo_file.read()
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_genes_to_hpo_to_disease()
# THEN assert that the HPO header is there
assert "#Format: entrez" in data[0]
@responses.activate
def test_fetch_hpo_to_genes_to_disease(phenotype_to_genes_file):
"""Test fetch resource"""
# GIVEN an URL
url = scout_requests.HPO_URL.format("phenotype_to_genes.txt")
with open(phenotype_to_genes_file, "r") as hpo_file:
content = hpo_file.read()
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_hpo_to_genes_to_disease()
# THEN assert that the HPO header is there
assert "#Format: HPO-id" in data[0]
@responses.activate
def test_fetch_hpo_files(phenotype_to_genes_file, hpo_genes_file):
"""Test fetch hpo files"""
# GIVEN URLs two hpo files
url_1 = scout_requests.HPO_URL.format("phenotype_to_genes.txt")
url_2 = scout_requests.HPO_URL.format("genes_to_phenotype.txt")
with open(phenotype_to_genes_file, "r") as hpo_file:
content = hpo_file.read()
responses.add(
responses.GET,
url_1,
body=content,
status=200,
)
with open(hpo_genes_file, "r") as hpo_file:
content = hpo_file.read()
responses.add(
responses.GET,
url_2,
body=content,
status=200,
)
# WHEN fetching all hpo files
res = scout_requests.fetch_hpo_files(genes_to_phenotype=True, phenotype_to_genes=True)
# THEN assert that the HPO header is there
assert isinstance(res, dict)
def test_fetch_hgnc(hgnc_file, mocker):
"""Test fetch hgnc"""
# GIVEN file with hgnc info
mocker.patch.object(scout_requests.urllib.request, "urlopen")
with open(hgnc_file, "rb") as hgnc_handle:
hgnc_info = hgnc_handle.read()
with tempfile.TemporaryFile() as temp:
temp.write(hgnc_info)
temp.seek(0)
scout_requests.urllib.request.urlopen.return_value = temp
# WHEN fetching the resource
data = scout_requests.fetch_hgnc()
# THEN assert that the HGNC header is there
assert "hgnc_id\tsymbol" in data[0]
def test_fetch_exac_constraint(exac_file, mocker):
"""Test fetch exac constraint file"""
    # GIVEN a file with ExAC constraint info
mocker.patch.object(scout_requests.urllib.request, "urlopen")
with open(exac_file, "rb") as exac_handle:
exac_info = exac_handle.read()
with tempfile.TemporaryFile() as temp:
temp.write(exac_info)
temp.seek(0)
scout_requests.urllib.request.urlopen.return_value = temp
# WHEN fetching the resource
data = scout_requests.fetch_exac_constraint()
# THEN assert that the exac header is there
assert "transcript\tgene" in data[0]
@responses.activate
def test_fetch_exac_constraint_failed_ftp(variant_clinical_file, mocker):
"""Test fetch exac constraint file when ftp request fails"""
    # GIVEN a mocked call that raises a HTTPError when fetching from ftp
mocker.patch.object(scout_requests.urllib.request, "urlopen")
url = (
"https://storage.googleapis.com/gnomad-public/legacy/exacv1_downloads/release0.3.1"
"/manuscript_data/forweb_cleaned_exac_r03_march16_z_data_pLI.txt.gz"
)
scout_requests.urllib.request.urlopen.return_value = HTTPError(
url, 500, "Internal Error", {}, None
)
# GIVEN a gzipped file
with open(variant_clinical_file, "rb") as zipped_file:
content = zipped_file.read()
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_exac_constraint()
# THEN some content is returned
assert len(data) > 10
@responses.activate
def test_fetch_mim_files_mim2genes(phenotype_to_genes_file):
"""Test fetch resource"""
# GIVEN an URL
url = "https://omim.org/static/omim/data/mim2gene.txt"
with open(phenotype_to_genes_file, "r") as hpo_file:
content = hpo_file.read()
responses.add(
responses.GET,
url,
body=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_mim_files(api_key=None, mim2genes=True)
# THEN assert that the HPO header is there
assert isinstance(data, dict)
def test_fetch_ensembl_biomart(mocker):
"""Test fetch resource"""
# GIVEN a mock
mocker.patch.object(scout_requests, "EnsemblBiomartClient")
attributes = [
"chromosome_name",
"start_position",
]
# WHEN fetching the resource
client = scout_requests.fetch_ensembl_biomart(attributes=attributes, filters=None)
# THEN assert that a result is returned
assert client
def test_fetch_ensembl_genes(mocker):
"""Test fetch resource"""
# GIVEN a mock
mocker.patch.object(scout_requests, "EnsemblBiomartClient")
# WHEN fetching the resource
client = scout_requests.fetch_ensembl_genes()
# THEN assert that a result is returned
assert client
def test_fetch_ensembl_transcripts(mocker):
"""Test fetch resource"""
# GIVEN a mock
mocker.patch.object(scout_requests, "EnsemblBiomartClient")
# WHEN fetching the resource
client = scout_requests.fetch_ensembl_transcripts()
# THEN assert that a result is returned
assert client
def test_fetch_ensembl_exons(mocker):
"""Test fetch resource"""
# GIVEN a mock
mocker.patch.object(scout_requests, "EnsemblBiomartClient")
# WHEN fetching the resource
client = scout_requests.fetch_ensembl_exons()
# THEN assert that a result is returned
assert client
@responses.activate
def test_fetch_resource_json():
"""Test fetch resource"""
# GIVEN an URL
url = "http://www.github.com"
content = [{"first": "second"}]
responses.add(
responses.GET,
url,
json=content,
status=200,
)
# WHEN fetching the resource
data = scout_requests.fetch_resource(url, json=True)
# THEN assert that a list of lines are returned
assert isinstance(data, list)
assert data[0]["first"] == "second"
@responses.activate
def test_fetch_refseq_version(refseq_response):
"""Test utils service from entrez that retrieves refseq version"""
# GIVEN a refseq accession number
refseq_acc = "NM_020533"
# GIVEN the base url
base_url = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nuccore&"
"term={}&idtype=acc"
)
url = base_url.format(refseq_acc)
responses.add(
responses.GET,
url,
body=refseq_response,
status=200,
)
# WHEN fetching complete refseq version for accession that has version
refseq_version = scout_requests.fetch_refseq_version(refseq_acc)
# THEN assert that the refseq identifier is the same
assert refseq_acc in refseq_version
# THEN assert that there is a version that is a digit
version_n = refseq_version.split(".")[1]
assert version_n.isdigit()
@responses.activate
def test_fetch_refseq_version_non_existing(refseq_response_non_existing):
"""Test to fetch version for non existing transcript"""
# GIVEN a accession without refseq version
refseq_acc = "NM_000000"
# GIVEN the base url
base_url = (
"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=nuccore&"
"term={}&idtype=acc"
)
url = base_url.format(refseq_acc)
responses.add(
responses.GET,
url,
body=refseq_response_non_existing,
status=200,
)
refseq_version = scout_requests.fetch_refseq_version(refseq_acc)
# THEN assert that the same ref seq was returned
assert refseq_version == refseq_acc
| bsd-3-clause | 7,195,936,272,157,844,000 | 27.347059 | 97 | 0.657605 | false |
rikima/spark | python/pyspark/context.py | 1 | 45234 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
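    # Usage sketch (illustrative, not part of the upstream docstring):
    #
    #   sc = SparkContext.getOrCreate()     # reuses or creates the context
    #   same = SparkContext.getOrCreate()   # returns the same singleton
    #   assert sc is same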
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
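    # Usage sketch (illustrative): system properties must be set before the
    # first SparkContext is created, e.g.
    #
    #   SparkContext.setSystemProperty("spark.executor.memory", "2g")
    #   sc = SparkContext("local", "app")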
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
jrdd = self._serialize_to_jvm(c, numSlices, serializer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, parallelism, serializer):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
objects are written to a file and loaded through textFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
return readRDDFromFile(self._jsc, tempFile.name, parallelism)
finally:
            # readRDDFromFile eagerly reads the file so we can delete right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
        .. note:: Small files are preferred; large files are also allowed, but
            may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
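    # Usage sketch (illustrative; the path is an assumption): reading fixed
    # 8-byte records and unpacking each as a little-endian double.
    #
    #   import struct
    #   rdd = sc.binaryRecords("/data/doubles.bin", recordLength=8)
    #   doubles = rdd.map(lambda rec: struct.unpack("<d", rec)[0])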
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
        :param path: path to sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
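    # Usage sketch (illustrative; the path is an assumption):
    #
    #   rdd = sc.sequenceFile("hdfs:///data/pairs.seq",
    #                         keyClass="org.apache.hadoop.io.Text",
    #                         valueClass="org.apache.hadoop.io.LongWritable")
    #   pairs = rdd.collect()   # list of (key, value) tuples on the driver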
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
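    # Usage sketch showing the dict-to-Configuration mechanism described above
    # (hypothetical path; the exact input-dir key depends on the Hadoop version):
    # >>> conf = {"mapred.input.dir": "hdfs:///data/logs"}
    # >>> rdd = sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
    # ...                    "org.apache.hadoop.io.LongWritable",
    # ...                    "org.apache.hadoop.io.Text",
    # ...                    conf=conf)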
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
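    # Usage sketch: ship a lookup table to the executors once and read it inside tasks.
    # >>> lookup = sc.broadcast({"a": 1, "b": 2})
    # >>> sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).collect()
    # [1, 2, 1]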
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
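    # Usage sketch: the default integer AccumulatorParam is chosen automatically.
    # >>> counter = sc.accumulator(0)
    # >>> sc.parallelize([1, 2, 3, 4]).foreach(lambda x: counter.add(x))
    # >>> counter.value
    # 10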
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
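    # Usage sketch (hypothetical archive and module name; anything importable from
    # deps.zip becomes available to tasks after this call):
    # >>> sc.addPyFile("/tmp/deps.zip")
    # >>> sc.parallelize([1]).map(lambda x: __import__("mymodule").__name__).collect()
    # ['mymodule']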
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
        directory must be an HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
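    # Usage sketch: route subsequent jobs from this thread to a fair-scheduler pool.
    # >>> sc.setLocalProperty("spark.scheduler.pool", "reporting")
    # >>> sc.getLocalProperty("spark.scheduler.pool")
    # u'reporting'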
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 | -2,844,523,567,311,684,600 | 41.713881 | 99 | 0.611376 | false |
etienne-gauvin/music-player-core | examples/demo-console-player.py | 1 | 3812 | #!/usr/bin/env python
# MusicPlayer, https://github.com/albertz/music-player
# Copyright (c) 2012, Albert Zeyer, www.az2000.de
# All rights reserved.
# This code is under the 2-clause BSD license, see License.txt in the root directory of this project.
import sys, os, random, fnmatch
# Our parent path might contain a self-built musicplayer module. Use that one.
sys.path = [os.path.abspath((os.path.dirname(__file__) or ".") + "/..")] + sys.path
import musicplayer
print "Module:", musicplayer.__file__
# ffmpeg log levels: {0:panic, 8:fatal, 16:error, 24:warning, 32:info, 40:verbose}
musicplayer.setFfmpegLogLevel(20)
try:
import better_exchook
better_exchook.install()
except ImportError: pass  # doesn't matter
try:
import faulthandler
faulthandler.enable(all_threads=True)
except ImportError:
print "note: module faulthandler not available"
class Song:
def __init__(self, fn):
self.url = fn
self.f = open(fn)
def __eq__(self, other):
return self.url == other.url
def readPacket(self, bufSize):
s = self.f.read(bufSize)
#print "readPacket", self, bufSize, len(s)
return s
def seekRaw(self, offset, whence):
r = self.f.seek(offset, whence)
#print "seekRaw", self, offset, whence, r, self.f.tell()
return self.f.tell()
files = []
def getFiles(path):
for f in sorted(os.listdir(path), key=lambda k: random.random()):
f = os.path.join(path, f)
if os.path.isdir(f): getFiles(f) # recurse
if len(files) > 1000: break # break if we have enough
if fnmatch.fnmatch(f, '*.mp3'): files.append(f)
getFiles(os.path.expanduser("~/Music"))
random.shuffle(files) # shuffle some more
files = sys.argv[1:] + files
assert files, "give me some files or fill-up ~/Music"
i = 0
def songs():
global i, files
while True:
yield Song(files[i])
i += 1
if i >= len(files): i = 0
def peekSongs(n):
nexti = i + 1
if nexti >= len(files): nexti = 0
return map(Song, (files[nexti:] + files[:nexti])[:n])
player = musicplayer.createPlayer()
player.outSamplerate = 48000
player.queue = songs()
player.peekQueue = peekSongs
player.playing = True
def formatTime(t):
if t is None: return "?"
mins = long(t // 60)
t -= mins * 60
hours = mins // 60
mins -= hours * 60
if hours: return "%02i:%02i:%02.0f" % (hours,mins,t)
return "%02i:%02.0f" % (mins,t)
import termios
def prepareStdin():
fd = sys.stdin.fileno()
if os.isatty(fd):
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
# http://www.unixguide.net/unix/programming/3.6.2.shtml
new[6][termios.VMIN] = 0
new[6][termios.VTIME] = 1
termios.tcsetattr(fd, termios.TCSANOW, new)
termios.tcsendbreak(fd, 0)
import atexit
atexit.register(lambda: termios.tcsetattr(fd, termios.TCSANOW, old))
print "Console control:"
print " <space>: play / pause"
print " <left>/<right>: seek back/forward by 10 secs"
print " <return>: next song"
print " <q>: quit"
def getchar():
fd = sys.stdin.fileno()
ch = os.read(fd, 7)
return ch
prepareStdin()
while True:
sys.stdout.write("\r\033[K") # clear line
if player.playing: sys.stdout.write("playing, ")
else: sys.stdout.write("paused, ")
curSong = player.curSong
if curSong:
url = os.path.basename(curSong.url)
if len(url) > 40: url = url[:37] + "..."
sys.stdout.write(
url + " : " +
formatTime(player.curSongPos) + " / " +
formatTime(player.curSongLen))
else:
sys.stdout.write("no song")
ch = getchar()
if ch == "\x1b[D": # left
player.seekRel(-10)
elif ch == "\x1b[C": #right
player.seekRel(10)
elif ch == "\x1b[A": #up
pass
elif ch == "\x1b[B": #down
pass
elif ch == "\n": # return
player.nextSong()
elif ch == " ":
player.playing = not player.playing
elif ch == "q":
print
sys.exit(0)
sys.stdout.flush()
| bsd-2-clause | 4,915,947,774,121,647,000 | 24.245033 | 101 | 0.658447 | false |
tmct/adventOfCode2016 | problems/11/State.py | 1 | 8576 | number_of_levels = 4
from itertools import cycle, islice, combinations
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
class State:
def __init__(self, lift_level, chip_levels, generator_levels):
self.lift_level = lift_level
self.chip_levels = chip_levels
self.generator_levels = generator_levels
self._lift_level_gens = None
self._lift_level_chips = None
self._is_safe = None
@property
def is_safe(self):
if self._is_safe is None:
levels_with_generators = self.get_generator_levels()
unpowered_chips = self.get_unpowered_chips()
unpowered_chip_levels = self.get_unpowered_chip_levels(unpowered_chips)
self._is_safe = levels_with_generators.isdisjoint(unpowered_chip_levels)
return self._is_safe
def get_unpowered_chip_levels(self, unpowered_chips):
return {chip_level for index, chip_level in enumerate(self.chip_levels) if
unpowered_chips[index]}
def get_unpowered_chips(self):
return [chip_level != generator_level for chip_level, generator_level in
zip(self.chip_levels, self.generator_levels)]
def get_generator_levels(self):
return {level_index for level_index in range(number_of_levels) if
level_index in self.generator_levels}
def adjacent_safe_states(self):
self._lift_level_gens = {generator for generator, level in enumerate(self.generator_levels) if
level == self.lift_level}
self._lift_level_chips = {chip for chip, level in enumerate(self.chip_levels) if level == self.lift_level}
# iterate over up/down and taking all things on this level, one or two from all, and must be safe
# up first
return {state for state in self.adjacent_states if state.is_safe}
@property
def adjacent_states(self):
# iterate over up/down and taking all things on this level, one or two from all, and must be safe
# up first
return set(self.adjacent_up_states() + self.adjacent_down_states()) # todo remove most sets
def adjacent_up_states(self):
next_lift_level = self.lift_level + 1
if next_lift_level == number_of_levels:
return []
return self.raise_single_chip_states(self._lift_level_chips) + self.raise_single_gen_states(
self._lift_level_gens) + self.raise_double_chip_states(
self._lift_level_chips) + self.raise_double_gen_states(
self._lift_level_gens) + self.raise_chip_and_gen_states(self._lift_level_chips, self._lift_level_gens)
def raise_single_chip_states(self, chips):
return [self.raise_chip(chip) for chip in chips]
def raise_double_chip_states(self, chips):
return [self.raise_two_chips(chip1, chip2) for chip1, chip2 in combinations(chips, 2)]
def raise_double_gen_states(self, gens):
return [self.raise_two_gens(gen1, gen2) for gen1, gen2 in combinations(gens, 2)]
def raise_two_gens(self, gen1, gen2):
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen1] += 1
new_generator_levels[gen2] += 1
return State(self.lift_level + 1,
self.chip_levels,
tuple(new_generator_levels))
def raise_two_chips(self, chip1, chip2):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip1] += 1
new_chip_levels[chip2] += 1
return State(self.lift_level + 1,
tuple(new_chip_levels),
self.generator_levels)
def raise_single_gen_states(self, gens):
return [self.raise_generator(generator) for generator in gens]
def raise_chip_and_gen_states(self, chips, gens):
return [self.raise_chip_and_gen(chip, gen) for gen in gens for chip in chips]
def raise_chip(self, chip):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] += 1
return State(self.lift_level + 1,
tuple(new_chip_levels),
self.generator_levels)
def raise_generator(self, generator):
new_generator_levels = list(self.generator_levels)
new_generator_levels[generator] += 1
return State(self.lift_level + 1,
self.chip_levels,
tuple(new_generator_levels))
def raise_chip_and_gen(self, chip, gen):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] += 1
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen] += 1
return State(self.lift_level + 1,
tuple(new_chip_levels),
tuple(new_generator_levels))
def __repr__(self):
res = ''
for level in range(number_of_levels):
res += str(level + 1) + ' '
lift_char = '.'
if self.lift_level == number_of_levels - level - 1:
lift_char = 'E'
res += lift_char + ' '
for value in roundrobin(self.generator_levels, self.chip_levels):
char = '.'
if value == number_of_levels - level - 1:
char = '*'
res += char + ' '
res += '\n'
return res
def adjacent_down_states(self):
next_lift_level = self.lift_level - 1
if next_lift_level == -1:
return []
return self.lower_single_chip_states(self._lift_level_chips) + self.lower_single_gen_states(
self._lift_level_gens) + self.lower_double_chip_states(
self._lift_level_chips) + self.lower_double_gen_states(
self._lift_level_gens) + self.lower_chip_and_gen_states(self._lift_level_chips, self._lift_level_gens)
def lower_single_chip_states(self, chips):
return [self.lower_chip(chip) for chip in chips]
def lower_double_chip_states(self, chips):
return [self.lower_two_chips(chip1, chip2) for chip1, chip2 in combinations(chips, 2)]
def lower_double_gen_states(self, gens):
return [self.lower_two_gens(gen1, gen2) for gen1, gen2 in combinations(gens, 2)]
def lower_two_gens(self, gen1, gen2):
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen1] -= 1
new_generator_levels[gen2] -= 1
return State(self.lift_level - 1,
self.chip_levels,
tuple(new_generator_levels))
def lower_two_chips(self, chip1, chip2):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip1] -= 1
new_chip_levels[chip2] -= 1
return State(self.lift_level - 1,
tuple(new_chip_levels),
self.generator_levels)
def lower_single_gen_states(self, gens):
return [self.lower_generator(generator) for generator in gens]
def lower_chip_and_gen_states(self, chips, gens):
return [self.lower_chip_and_gen(chip, gen) for gen in gens for chip in chips]
def lower_chip(self, chip):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] -= 1
return State(self.lift_level - 1,
tuple(new_chip_levels),
self.generator_levels)
def lower_generator(self, generator):
new_generator_levels = list(self.generator_levels)
new_generator_levels[generator] -= 1
return State(self.lift_level - 1,
self.chip_levels,
tuple(new_generator_levels))
def lower_chip_and_gen(self, chip, gen):
new_chip_levels = list(self.chip_levels)
new_chip_levels[chip] -= 1
new_generator_levels = list(self.generator_levels)
new_generator_levels[gen] -= 1
return State(self.lift_level - 1,
tuple(new_chip_levels),
tuple(new_generator_levels))
def __key(self):
return self.lift_level, tuple(sorted(zip(self.chip_levels, self.generator_levels)))
def __eq__(self, other):
return self.__key() == other.__key()
def __hash__(self):
return hash(self.__key())
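# Usage sketch (hypothetical single chip/generator pair, both on the ground floor):
# start = State(0, (0,), (0,))
# start.is_safe                  # True: the chip sits next to its own generator
# start.adjacent_safe_states()   # states reachable with one lift ride that stay safe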
| mit | 1,454,731,120,773,989,400 | 39.074766 | 114 | 0.596082 | false |
reider-roque/crypto-challenges | cryptopals/set-1-basics/chal-1/hextobase64_test.py | 1 | 1596 | import unittest
from base64 import b64encode
from binascii import unhexlify
from hextobase64 import hextobase64
# Run tests with python -m unittest file.py
class HexToBase64Tests(unittest.TestCase):
def test_HexToBase64_HexInUppercase_Succeeds(self):
test_input = "0xAB12CD"
expected_result = b64encode(unhexlify(b"ab12cd")).decode("utf-8")
self.assertEqual(expected_result, hextobase64(test_input))
def test_HexToBase64_HexStartsWith0x_Succeeds(self):
test_input = "0xab12cd"
expected_result = b64encode(unhexlify(b"ab12cd")).decode("utf-8")
self.assertEqual(expected_result, hextobase64(test_input))
def test_HexToBase64_HexStartsWithout0x_Succeeds(self):
test_input = "ab12cd"
expected_result = b64encode(unhexlify(b"ab12cd")).decode("utf-8")
self.assertEqual(expected_result, hextobase64(test_input))
def test_HexToBase64_HexContainsNonHexChars_ExceptionThrown(self):
test_input = "ab12cdk!"
with self.assertRaises(ValueError):
hextobase64(test_input)
def test_HexToBase64_HexStrOddLength_ExceptionThrown(self):
test_input = "ab12cd4"
with self.assertRaises(ValueError):
hextobase64(test_input)
def test_HexToBase64_CryptoPalsControlTest_Succeeds(self):
test_input = "49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d"
expected_result = "SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t"
self.assertEqual(expected_result, hextobase64(test_input)) | mit | -3,997,586,875,885,445,000 | 42.162162 | 119 | 0.735589 | false |
mfxox/ILCC | ILCC/utility.py | 1 | 31390 | # coding=utf-8
'''
Created on 3/20/2017 8:58:57 PM by Wang Weimin
@author: wangwm
'''
import os
from pcd_corners_est import exact_full_marker_data
import numpy as np
from pcd_corners_est import generate_grid_coords
import matplotlib.pyplot as plt
import matplotlib
import vtk
import config
from ast import literal_eval as make_tuple
import cPickle
import cv2
from LM_opt import xyz2angle, voxel2pixel
import transforms3d
from matplotlib.pyplot import cm
import ast
from sklearn.decomposition import PCA
import matplotlib.path as mplPath
import warnings
params = config.default_params()
marker_size = make_tuple(params["pattern_size"])
(H, W) = make_tuple(params['image_res'])
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['text.latex.unicode'] = True
plt.style.use("ggplot")
axis_font = {'fontname': 'Arial', 'size': '35'}
def draw_one_grd_vtk(ls): # arr:[a,b,c,d],a:orig, b, point1, c,point 2, d,color
source = vtk.vtkPlaneSource()
source.SetOrigin(ls[0])
source.SetPoint1(ls[1])
source.SetPoint2(ls[2])
source.Update()
# source.SetPoint1(0, 0, 0)
# source.SetPoint2(4, 3, 0)
# mapper
mapper = vtk.vtkPolyDataMapper()
color = vtk.vtkUnsignedCharArray()
color.SetName("colors")
color.SetNumberOfComponents(3)
# color_tup = np.random.randint(1, 255, 3)
color.SetNumberOfTuples(source.GetOutput().GetNumberOfCells())
for i in xrange(source.GetOutput().GetNumberOfCells()):
color_tup = np.array([255, 255, 255]) * ls[3]
color.InsertTuple(i, color_tup)
source.GetOutput().GetCellData().SetScalars(color)
mapper.SetInputConnection(source.GetOutputPort())
# actor
actor = vtk.vtkActor()
actor.SetMapper(mapper)
# assign actor to the renderer
# ren.AddActor(actor)
return actor
# Generate the color list of the point cloud for different color styles. intens_rg: color by reflectance intensity (red: high, blue: low),
# intens: color by reflectance intensity (white: high, black: low), autumn: matplotlib autumn color map, cool: matplotlib cool color map
def gen_color_tup_for_vis(color_style="intens_rg", xyzi_arr=None):
assert xyzi_arr is not None, "The array of the point cloud must be not None"
a = xyzi_arr[:, params['intensity_col_ind']].min()
b = xyzi_arr[:, params['intensity_col_ind']].max()
color_ls = []
if color_style == "intens_rg":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a) / (b - a) * 255
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = np.array([tmp[k], 0, 255 - xyzi_arr[k, params['intensity_col_ind']]]).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "intens":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a) / (b - a) * 255
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = np.repeat(tmp[k], 3).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "autumn":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a).astype(np.float32) / (b - a)
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = (np.array(plt.cm.autumn(1 - tmp[k]))[:3] * 255).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "cool":
tmp = (xyzi_arr[:, params['intensity_col_ind']] - a).astype(np.float32) / (b - a)
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = (np.array(plt.cm.cool(tmp[k]))[:3] * 255).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
elif color_style == "monochrome":
# color = (np.random.randint(0, 255, 3)).tolist()
color = [46, 204, 113]
for k in xrange(xyzi_arr.shape[0]):
color_ls.append(color)
return color_ls
elif color_style == "by_height":
low_height = xyzi_arr[:, 2].min()
high_height = xyzi_arr[:, 2].max()
tmp = 0.0 + 0.7 * (xyzi_arr[:, 2] - low_height) / (high_height - low_height)
for k in xrange(xyzi_arr.shape[0]):
rgb_tuple = (np.array(plt.cm.hsv(tmp[k]))[:3] * 255).astype(np.int32)
color_ls.append(rgb_tuple)
return color_ls
else:
raise ValueError('Input color type is not correct!')
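# Usage sketch (assumes `arr` is an array read from one of the pcd csv files, with at
# least params['intensity_col_ind'] + 1 columns; the path below is only illustrative):
# arr = np.genfromtxt("pcd/0001.csv", delimiter=",", skip_header=1)
# colors = gen_color_tup_for_vis(color_style="autumn", xyzi_arr=arr)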
# visualize 3D points with specified color style
def vis_3D_points(full_lidar_arr, color_style="intens_rg"):
all_rows = full_lidar_arr.shape[0]
Colors = vtk.vtkUnsignedCharArray()
Colors.SetNumberOfComponents(3)
Colors.SetName("Colors")
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
tuple_ls = gen_color_tup_for_vis(color_style, xyzi_arr=full_lidar_arr)
for k in xrange(all_rows):
point = full_lidar_arr[k, :3]
id = Points.InsertNextPoint(point[0], point[1], point[2])
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(id)
rgb_tuple = tuple_ls[k]
if vtk.VTK_MAJOR_VERSION >= 7:
Colors.InsertNextTuple(rgb_tuple)
else:
Colors.InsertNextTupleValue(rgb_tuple)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
polydata.SetVerts(Vertices)
polydata.GetPointData().SetScalars(Colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION < 6:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetColorModeToDefault()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(8)
return actor
# visualize 3D points with specified color array
def vis_pcd_color_arr(array_data, color_arr=[46, 204, 113]):
all_rows = array_data.shape[0]
Colors = vtk.vtkUnsignedCharArray()
Colors.SetNumberOfComponents(3)
Colors.SetName("Colors")
Points = vtk.vtkPoints()
Vertices = vtk.vtkCellArray()
for k in xrange(all_rows):
point = array_data[k, :]
id = Points.InsertNextPoint(point[0], point[1], point[2])
Vertices.InsertNextCell(1)
Vertices.InsertCellPoint(id)
if vtk.VTK_MAJOR_VERSION >= 7:
Colors.InsertNextTuple(color_arr)
else:
Colors.InsertNextTupleValue(color_arr)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
polydata.SetVerts(Vertices)
polydata.GetPointData().SetScalars(Colors)
polydata.Modified()
mapper = vtk.vtkPolyDataMapper()
if vtk.VTK_MAJOR_VERSION <= 5:
mapper.SetInput(polydata)
else:
mapper.SetInputData(polydata)
mapper.SetColorModeToDefault()
actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetPointSize(10)
return actor
# visualize with actor:
def vis_with_renderer(renderer):
# Renderer
# renderer.SetBackground(.2, .3, .4)
renderer.SetBackground(1, 1, 1)
renderer.ResetCamera()
transform = vtk.vtkTransform()
transform.Translate(1.0, 0.0, 0.0)
axes = vtk.vtkAxesActor()
renderer.AddActor(axes)
# Render Window
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
# Interactor
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
def get_camera_info(obj, ev):
if renderWindowInteractor.GetKeyCode() == "s":
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renderWindow)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName("screenshot.png")
if vtk.VTK_MAJOR_VERSION == 5:
writer.SetInput(w2if.GetOutput())
else:
writer.SetInputData(w2if.GetOutput())
writer.Write()
print "screenshot saved"
style = vtk.vtkInteractorStyleSwitch()
renderWindowInteractor.SetInteractorStyle(style)
# style.SetCurrentStyleToTrackballActor()
style.SetCurrentStyleToTrackballCamera()
# Begin Interaction
renderWindowInteractor.AddObserver(vtk.vtkCommand.KeyPressEvent, get_camera_info, 1)
renderWindow.Render()
renderWindowInteractor.Start()
def proj_pcd_2_pix(pcd_arr):
if params["camera_type"] == "panoramic":
angs_ls = map(xyz2angle, pcd_arr.tolist())
pix_ls = (np.array(map(voxel2pixel, angs_ls))).tolist()
elif params['camera_type'] == "perspective":
intrinsic_paras_tuple = make_tuple(params['instrinsic_para'])
intrinsic_paras = np.array(intrinsic_paras_tuple).reshape(3, 3)
cam_coord_pcd = pcd_arr.copy()
pcd_to_pix = (np.dot(intrinsic_paras, cam_coord_pcd.T)).T
proj_pts = (pcd_to_pix / pcd_to_pix[:, 2].reshape(-1, 1))[:, :2].astype(np.int16)
pix_ls = proj_pts.tolist()
else:
raise Exception("Camera type not correctly defined!")
return pix_ls
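# Usage sketch (assumes `pcd_arr` holds x, y, z columns already expressed in the frame
# expected by the configured camera model):
# pix_ls = proj_pcd_2_pix(pcd_arr[:, :3])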
def remove_occlusion_of_chessboard(pcd_arr, corners_in_pcd_arr):
occlu_thres = 0.1
pcd_ls = pcd_arr.tolist()
pix_ls = proj_pcd_2_pix(pcd_arr)
ind_ls = []
pca = PCA(n_components=3)
pca.fit(corners_in_pcd_arr)
transed_corners_in_pcd_arr = np.dot(pca.components_, corners_in_pcd_arr.T).T
center = transed_corners_in_pcd_arr.mean(axis=0)
bound = np.dot(pca.components_.T,
(np.array(
[[-0.3, -0.225, 0], [-0.3, 0.225, 0], [0.3, 0.225, 0], [0.3, -0.225, 0]]) * 1.05 + center).T).T
if params["camera_type"] == "panoramic":
bound_on_image = np.fliplr(np.array(map(voxel2pixel, map(xyz2angle, bound.tolist()))))
elif params['camera_type'] == "perspective":
intrinsic_paras_tuple = make_tuple(params['instrinsic_para'])
intrinsic_paras = np.array(intrinsic_paras_tuple).reshape(3, 3)
pcd_to_pix = (np.dot(intrinsic_paras, bound.T)).T
inds = np.where(pcd_arr[:, 2] > 0)
pcd_ls = pcd_arr[inds].tolist()
pix_ls = np.array(pix_ls)[inds].tolist()
print "before removal: ", len(pcd_ls)
proj_pts = (pcd_to_pix / pcd_to_pix[:, 2].reshape(-1, 1))[:, :2].astype(np.int16)
bound_on_image = np.fliplr(proj_pts)
# bound_on_image = proj_pts
# print bound_on_image
else:
raise Exception("Camera type not correctly defined!")
polygon_path = mplPath.Path(bound_on_image.tolist())
for i in xrange(len(pcd_ls)):
pix = list(reversed(pix_ls[i]))
# print pix
if polygon_path.contains_point(pix):
point_2_board_dis = abs(np.dot(pca.components_[2], pcd_ls[i] - corners_in_pcd_arr.mean(axis=0)))
# print point_2_board_dis
# print pix_ls[i]
if point_2_board_dis <= occlu_thres:
if params["camera_type"] == "panoramic":
ind_ls.append(i)
elif params['camera_type'] == "perspective":
ind_ls.append(inds[0][i])
else:
raise Exception("Camera type not correctly defined!")
else:
if params["camera_type"] == "panoramic":
ind_ls.append(i)
elif params['camera_type'] == "perspective":
ind_ls.append(inds[0][i])
else:
raise Exception("Camera type not correctly defined!")
return np.array(ind_ls)
# visualize csv file of i-th point cloud
def vis_csv_pcd(ind=1, color_style="monochrome"):
pcd_arr = np.genfromtxt(
os.path.join(params['base_dir'], "pcd/" + str(ind).zfill(params["file_name_digits"])) + ".csv", delimiter=",",
skip_header=1)
# actor = vis_3D_points(pcd_arr, color_style="intens")
actor = vis_3D_points(pcd_arr, color_style=color_style)
renderer = vtk.vtkRenderer()
renderer.AddActor(actor)
vis_with_renderer(renderer)
def vis_segments(ind=1):
renderer = vtk.vtkRenderer()
seg_folder = os.path.join(params['base_dir'], "output/pcd_seg/" + str(ind).zfill(params["file_name_digits"])) + "/"
seg_list = os.listdir(seg_folder)
for seg in seg_list:
if seg.split(".")[-1] == "txt":
color_tup = (np.random.randint(1, 255, 3)).tolist()
points_ls = list()
jdcs_collection = cPickle.load(open(os.path.abspath(seg_folder + seg), 'rb'))
if len(jdcs_collection) > 0: # filter
for jdc in jdcs_collection:
points_ls.extend(jdc)
# print points_ls
actor = vis_pcd_color_arr(np.array(points_ls), color_tup)
renderer.AddActor(actor)
vis_with_renderer(renderer)
def vis_segments_only_chessboard_color(ind=1):
renderer = vtk.vtkRenderer()
seg_folder = os.path.join(params['base_dir'], "output/pcd_seg/" + str(ind).zfill(params["file_name_digits"])) + "/"
seg_list = os.listdir(seg_folder)
chessboard_file_name = \
cPickle.load(open(os.path.join(params['base_dir'], "output/pcd_seg/") + str(ind).zfill(
params["file_name_digits"]) + "_pcd_result.pkl", "r"))[
-1].split("/")[-1]
for seg in seg_list:
if seg.split(".")[-1] == "txt":
if seg == chessboard_file_name:
color_tup = np.array([0, 255, 0])
else:
color_tup = np.array([0, 0, 0])
points_ls = list()
jdcs_collection = cPickle.load(open(os.path.abspath(seg_folder + seg), 'rb'))
if len(jdcs_collection) > 0: # filter
for jdc in jdcs_collection:
points_ls.extend(jdc)
# print points_ls
actor = vis_pcd_color_arr(np.array(points_ls), color_tup)
renderer.AddActor(actor)
vis_with_renderer(renderer)
def cal_theorical_number_points(dis):
h_res = np.deg2rad(0.16) # rad
v_res = np.deg2rad(1.33) # rad
h_len = dis * h_res
v_len = 2 * dis * np.sin(v_res / 2)
w = 0.45
l = 0.6
return (l // v_len) * (w // h_len)
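# Rough worked example with the hard-coded resolutions above: at dis = 5 m,
# h_len ~= 5 * 0.0028 rad ~= 0.014 m and v_len ~= 0.116 m, so the 0.45 m x 0.6 m board
# collects about (0.6 // 0.116) * (0.45 // 0.014) = 5 * 32 = 160 returns.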
def vis_all_markers(ls=[1]):
import vtk
ren = vtk.vtkRenderer()
# ren.SetBackground(.2, .3, .4)
ren.SetBackground(.5, .6, .7)
for i in ls:
try:
pcd_result_file = os.path.join(params['base_dir'],
"output/pcd_seg/" + str(i).zfill(
params["file_name_digits"]) + "_pcd_result.pkl")
csv_path = os.path.join(params['base_dir'], "pcd/" + str(i).zfill(params["file_name_digits"]) + ".csv")
with open(os.path.abspath(pcd_result_file), "r") as f:
pcd_result_ls = cPickle.load(f)
assert pcd_result_ls is not None
marker_full_data_arr = exact_full_marker_data(csv_path, [pcd_result_ls[-1]])
marker_arr = marker_full_data_arr[:, :3]
# transformed_pcd = roate_with_rt(np.array(r_t), marker_arr)
if i % 4 == 0:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]), color_style="intens")
elif i % 4 == 1:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]), color_style="autumn")
elif i % 4 == 2:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]), color_style="cool")
else:
actor2 = vis_3D_points(
np.hstack([marker_arr + np.array([0, 0, 0]), marker_full_data_arr[:, 3:]]),
color_style="intens_rg")
ren.AddActor(actor2)
except:
print i, "-th pcd corners are not found!"
continue
transform2 = vtk.vtkTransform()
transform2.Translate(0.0, 0.0, 0.0)
axes2 = vtk.vtkAxesActor()
axes2.SetUserTransform(transform2)
ren.AddActor(axes2)
cubeAxesActor = vtk.vtkCubeAxesActor()
cubeAxesActor.SetBounds((-3, 3, -3, 3, -2, 2))
cubeAxesActor.SetCamera(ren.GetActiveCamera())
cubeAxesActor.GetTitleTextProperty(0).SetColor(1.0, 0.0, 0.0)
cubeAxesActor.GetLabelTextProperty(0).SetColor(1.0, 0.0, 0.0)
cubeAxesActor.GetTitleTextProperty(1).SetColor(0.0, 1.0, 0.0)
cubeAxesActor.GetLabelTextProperty(1).SetColor(0.0, 1.0, 0.0)
cubeAxesActor.GetTitleTextProperty(2).SetColor(0.0, 0.0, 1.0)
cubeAxesActor.GetLabelTextProperty(2).SetColor(0.0, 0.0, 1.0)
cubeAxesActor.DrawXGridlinesOn()
cubeAxesActor.DrawYGridlinesOn()
cubeAxesActor.DrawZGridlinesOn()
# if vtk.VTK_MAJOR_VERSION > 5:
# cubeAxesActor.SetGridLineLocation(vtk.VTK_GRID_LINES_FURTHEST)
cubeAxesActor.XAxisMinorTickVisibilityOff()
cubeAxesActor.YAxisMinorTickVisibilityOff()
cubeAxesActor.ZAxisMinorTickVisibilityOff()
# cubeAxesActor.GetProperty().SetColor(0, 255, 0)
cubeAxesActor.GetXAxesLinesProperty().SetColor(0, 255, 0)
cubeAxesActor.GetYAxesLinesProperty().SetColor(0, 255, 0)
cubeAxesActor.GetZAxesLinesProperty().SetColor(0, 255, 0)
ren.AddActor(cubeAxesActor)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
style = vtk.vtkInteractorStyleSwitch()
iren.SetInteractorStyle(style)
style.SetCurrentStyleToTrackballCamera()
def get_camera_info(obj, ev):
if iren.GetKeyCode() == "s":
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName("screenshot.png")
writer.SetInputData(w2if.GetOutput())
writer.Write()
print "screenshot saved"
# save to pdf
if iren.GetKeyCode() == "s":
exp = vtk.vtkGL2PSExporter()
exp.SetRenderWindow(renWin)
exp.SetFilePrefix("screenpdf")
exp.SetFileFormat(2)
exp.SetCompress(False)
exp.SetLandscape(False)
exp.SetSortToBSP()
# exp.SetSortToSimple() # less expensive sort algorithm
exp.DrawBackgroundOn()
exp.SetWrite3DPropsAsRasterImage(False)
iren.AddObserver(vtk.vtkCommand.KeyPressEvent, get_camera_info, 1)
iren.SetRenderWindow(renWin)
renWin.Render()
# ren.SetActiveCamera(camera)
iren.Initialize()
iren.Start()
def transform_grid(args):
corner_arr = args[0]
rot1 = args[1]
rot2 = args[2]
t1 = args[3]
t2 = args[4]
corners_in_pcd_arr = np.dot(np.dot(rot2.T, corner_arr.T).T - t2 + t1, rot1)
return corners_in_pcd_arr[0]
def vis_ested_pcd_corners(ind=1):
# pair_ind = 9
pcd_result_file = os.path.join(params['base_dir'],
"output/pcd_seg/" + str(ind).zfill(params["file_name_digits"]) + "_pcd_result.pkl")
csv_file = os.path.join(params['base_dir'], "pcd/" + str(ind).zfill(params["file_name_digits"]) + ".csv")
full_arr = np.genfromtxt(csv_file, delimiter=",", skip_header=1)
grid_coords = generate_grid_coords()
with open(os.path.abspath(pcd_result_file), "r") as f:
pcd_result_ls = cPickle.load(f)
assert pcd_result_ls is not None
rot1 = pcd_result_ls[0]
t1 = pcd_result_ls[1].reshape(1, 3)
rot2 = pcd_result_ls[2]
t2 = pcd_result_ls[3].reshape(1, 3)
trans_grid_ls = []
for coords in grid_coords:
args = [[coord, rot1, rot2, t1, t2] for coord in coords[:3]]
trans_coords = map(transform_grid, args)
trans_coords.append(coords[3])
trans_grid_ls.append(trans_coords)
ren = vtk.vtkRenderer()
ren.SetBackground(.2, .3, .4)
ren.SetBackground(0.90196079, 0.96078432, 0.59607846)
# ren.SetBackground(1., 1., 1.)
for i in xrange(len(trans_grid_ls)):
tmp_actor = draw_one_grd_vtk(trans_grid_ls[i])
tmp_actor.GetProperty().SetOpacity(0.5)
ren.AddActor(tmp_actor)
show_only_marker = True
if show_only_marker:
marker_full_data_arr = exact_full_marker_data(csv_file, [pcd_result_ls[-1]])
actor2 = vis_3D_points(marker_full_data_arr, color_style="intens_rg")
else:
actor2 = vis_3D_points(full_arr, color_style="intens_rg")
ren.AddActor(actor2)
transform2 = vtk.vtkTransform()
transform2.Translate(0.0, 0.0, 0.0)
axes2 = vtk.vtkAxesActor()
axes2.SetUserTransform(transform2)
ren.AddActor(axes2)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetWindowName(str(i).zfill(params["file_name_digits"]))
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
def get_camera_info(obj, ev):
if iren.GetKeyCode() == "s":
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(renWin)
w2if.Update()
writer = vtk.vtkPNGWriter()
writer.SetFileName("screenshot.png")
writer.SetInputData(w2if.GetOutput())
writer.Write()
print "screenshot saved"
style = vtk.vtkInteractorStyleSwitch()
iren.SetRenderWindow(renWin)
iren.SetInteractorStyle(style)
# style.SetCurrentStyleToTrackballActor()
style.SetCurrentStyleToTrackballCamera()
iren.AddObserver(vtk.vtkCommand.KeyPressEvent, get_camera_info, 1)
iren.Initialize()
renWin.Render()
renWin.SetWindowName(str(ind).zfill(params["file_name_digits"]))
iren.Start()
def draw_chessboard_model(marker_size=marker_size):
gird_coords = generate_grid_coords(x_res=marker_size[0], y_res=marker_size[1])
grid_ls = [(p[0]).flatten()[:2] for p in gird_coords]
corner_arr = np.transpose(np.array(grid_ls).reshape(marker_size[0], marker_size[1], 2)[1:, 1:], (1, 0, 2))
c = np.zeros([corner_arr.shape[0], corner_arr.shape[1], 3]).reshape(
corner_arr.shape[0] * corner_arr.shape[1], 3).astype(np.float32)
c[0] = np.array([0, 0, 1])
c[-1] = np.array([1, 0, 0])
s = np.zeros(corner_arr[:, :, 0].flatten().shape[0]) + 20
s[0] = 60
s[-1] = 60
plt.scatter(corner_arr[:, :, 0].flatten(), corner_arr[:, :, 1].flatten(), c=c, s=s)
plt.plot(corner_arr[:, :, 0].flatten(), corner_arr[:, :, 1].flatten())
plt.xlim(corner_arr[:, :, 0].min(), corner_arr[:, :, 0].max())
plt.ylim(corner_arr[:, :, 1].min(), corner_arr[:, :, 1].max())
plt.xlabel("x coordinates [cm]")
plt.ylabel("y coordinates [cm]")
# plt.axes().set_aspect('equal', 'datalim')
plt.axis('equal')
plt.show()
def convert_to_edge(file_name):
# gray = cv2.imread('lines.jpg')
gray = cv2.imread(file_name)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
img = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
return img
def find_latest(cali_file_ls):
number_ls = []
for file in cali_file_ls:
tmp_ls = file.split("_")
number_ls.append(ast.literal_eval(tmp_ls[0] + "." + tmp_ls[1]))
return cali_file_ls[np.array(number_ls).argmax()]
def back_project_pcd(img, pcd_arr, color_arr, r_t, i, hide_occlussion_by_marker):
# print pcd_arr
rot_mat = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], r_t[2]),
np.dot(transforms3d.axangles.axangle2mat([0, 1, 0], r_t[1]),
transforms3d.axangles.axangle2mat([1, 0, 0], r_t[0])))
transformed_pcd = np.dot(rot_mat, pcd_arr.T).T + r_t[3:]
transformed_pcd_ls = pcd_arr.tolist()
    if not hide_occlussion_by_marker:  # whether to remove occlusions caused by the chessboard
if params["camera_type"] == "panoramic":
pcd2angle_s = map(xyz2angle, transformed_pcd_ls)
proj_pts = np.array(map(voxel2pixel, pcd2angle_s))
point_s = 5
elif params['camera_type'] == "perspective":
intrinsic_paras_tuple = make_tuple(params['instrinsic_para'])
intrinsic_paras = np.array(intrinsic_paras_tuple).reshape(3, 3)
cam_coord_pcd = transformed_pcd.copy()
# print cam_coord_pcd
print "before filtering z: ", cam_coord_pcd.shape
# cam_coord_pcd = cam_coord_pcd[np.where(cam_coord_pcd[:, 2] < 0)]
# cam_coord_pcd = cam_coord_pcd[:20000, :]
# print cam_coord_pcd
inds = np.where(cam_coord_pcd[:, 2] > 0.2)
cam_coord_pcd = cam_coord_pcd[inds]
color_arr = color_arr[inds]
# print cam_coord_pcd
print "after filtering z: ", cam_coord_pcd.shape
pcd_to_pix = (np.dot(intrinsic_paras, cam_coord_pcd.T)).T
# pcd_to_pix = pcd_to_pix[np.where(pcd_to_pix[:, 2] > 0)]
inds = np.where(pcd_to_pix[:, 2] > 0)
pcd_to_pix = pcd_to_pix[inds]
color_arr = color_arr[inds]
proj_pts = (pcd_to_pix / pcd_to_pix[:, 2].reshape(-1, 1))[:, :2].astype(np.int16)
point_s = 3
# print proj_pts
#
# print proj_pts.shape
else:
raise Exception("Camera type not correctly defined!")
else:
if params["camera_type"] == "panoramic":
point_s = 5
elif params['camera_type'] == "perspective":
point_s = 3
else:
raise Exception("Camera type not correctly defined!")
chessboard_result_file_path = os.path.join(params['base_dir'], "output/pcd_seg/" + str(i).zfill(
params["file_name_digits"]) + "_pcd_result.pkl")
chessboard_result_file = cPickle.load(open(chessboard_result_file_path, "r"))
rot1 = chessboard_result_file[0]
t1 = chessboard_result_file[1].reshape(1, 3)
# print "rot1*rot1.T: ", np.dot(rot1, rot1.T)
rot2 = chessboard_result_file[2]
t2 = chessboard_result_file[3].reshape(1, 3)
corner_arr = chessboard_result_file[4].reshape(-1, 2)
num = corner_arr.shape[0]
corner_arr = np.hstack([corner_arr, np.zeros(num).reshape(num, 1)])
rot_mat = np.dot(transforms3d.axangles.axangle2mat([0, 0, 1], r_t[2]),
np.dot(transforms3d.axangles.axangle2mat([0, 1, 0], r_t[1]),
transforms3d.axangles.axangle2mat([1, 0, 0], r_t[0])))
trans_arr = np.zeros([4, 4])
trans_arr[:3, :3] = rot_mat
trans_arr[:3, 3] = np.array(r_t[3:])
trans_arr[3, 3] = 1
trans_matrix = np.asmatrix(trans_arr)
corners_in_pcd_arr = np.dot(np.dot(rot2.T, corner_arr.T).T - t2 + t1, rot1)
corners_in_pcd_arr = (trans_matrix[:3, :3] * np.asmatrix(corners_in_pcd_arr).T).T + trans_matrix[:3, 3].T
corners_in_pcd_arr = np.array(corners_in_pcd_arr)
# print "before removal: ", transformed_pcd.shape
inds = remove_occlusion_of_chessboard(transformed_pcd, corners_in_pcd_arr)
print "inds:", inds
proj_pts = np.array(proj_pcd_2_pix(transformed_pcd))[inds].astype(np.int32)
print "after removal: ", proj_pts.shape
color_arr = color_arr[inds]
print
print proj_pts.shape[0], proj_pts.min(axis=0), proj_pts.max(axis=0)
print
for i in xrange(proj_pts.shape[0]):
cv2.circle(img, (proj_pts[i][0], proj_pts[i][1]), point_s, tuple(color_arr[i].tolist()), -1)
return img
def vis_back_proj(ind=1, img_style="edge", pcd_style="intens", hide_occlussion_by_marker=False,
save_without_show=False):
imgfile = os.path.join(params['base_dir'],
"img/" + str(ind).zfill(params["file_name_digits"]) + "." + params['image_format'])
if img_style == "edge":
gray = cv2.imread(imgfile)
edges = cv2.Canny(gray, 50, 150, apertureSize=3)
img = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
elif img_style == "orig":
img = cv2.imread(imgfile)
else:
raise Exception("Please input the right image style")
csvfile = os.path.join(params['base_dir'], "pcd/" + str(ind).zfill(params["file_name_digits"]) + ".csv")
csv = np.genfromtxt(csvfile, delimiter=",", skip_header=1)
pcd = csv[:, :3]
dis_arr = np.linalg.norm(pcd, axis=1)
intens = csv[:, params['intensity_col_ind']]
filels = os.listdir(params['base_dir'])
cali_file_ls = []
for file in filels:
if file.find("cali_result.txt") > -1:
cali_file_ls.append(file)
if len(cali_file_ls) > 1:
        warnings.warn("More than one calibration file exists! Load the latest file.", UserWarning)
latest_cali = find_latest(cali_file_ls)
r_t = np.genfromtxt(os.path.join(params['base_dir'], latest_cali), delimiter=',')
print "Load ", latest_cali, " as the extrinsic calibration parameters!"
elif len(cali_file_ls) == 1:
r_t = np.genfromtxt(os.path.join(params['base_dir'], cali_file_ls[0]), delimiter=',')
print "Load ", cali_file_ls[0], " as the extrinsic calibration parameters!"
else:
raise Exception("No calibration file is found!")
if pcd_style == "intens":
pcd_color = np.fliplr((cm.jet(intens.astype(np.float32) / intens.max()) * 255).astype(np.int32)[:, :3])
elif pcd_style == "dis":
pcd_color = np.fliplr((cm.jet(dis_arr / 10) * 255).astype(np.int32)[:, :3])
else:
print "Please input the right pcd color style"
backproj_img = back_project_pcd(img, pcd, pcd_color, r_t, ind, hide_occlussion_by_marker)
if max(backproj_img.shape[0], backproj_img.shape[1]) > 1000:
resize_factor = 1000. / max(backproj_img.shape[0], backproj_img.shape[1])
resized_img_for_view = cv2.resize(backproj_img, (0, 0), fx=resize_factor, fy=resize_factor)
else:
resized_img_for_view = backproj_img
if save_without_show:
window_name = "ind: " + str(ind) + " img_style: " + img_style + " pcd_style: " + pcd_style + (
" hide_occlusion " if hide_occlussion_by_marker else "")
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.imshow(window_name, resized_img_for_view)
k = cv2.waitKey(0)
if k == 27: # wait for ESC key to exit
cv2.destroyAllWindows()
elif k == ord('s'): # wait for 's' key to save and exit
save_file_name = os.path.join(params['base_dir'],
str(ind).zfill(
params["file_name_digits"])) + "_" + img_style + "_" + pcd_style + (
"_hide_occlusion" if hide_occlussion_by_marker else "") + "." + params['image_format']
cv2.imwrite(save_file_name, img, [cv2.IMWRITE_JPEG_QUALITY, 70])
print "The image is saved to ", save_file_name
cv2.destroyAllWindows()
else:
save_file_name = os.path.join(params['base_dir'], str(ind).zfill(
params["file_name_digits"])) + "_" + img_style + "_" + pcd_style + (
"_hide_occlusion" if hide_occlussion_by_marker else "") + "." + params['image_format']
cv2.imwrite(save_file_name, img, [cv2.IMWRITE_JPEG_QUALITY, 70])
print "The image is saved to ", save_file_name
cv2.destroyAllWindows()
if __name__ == "__main__":
# vis_back_proj(ind=1, img_style="orig", pcd_style="dis", hide_occlussion_by_marker=True)
vis_back_proj(ind=1, img_style="edge", pcd_style="intens", hide_occlussion_by_marker=False)
# vis_all_markers(np.arange(1, 5).tolist())
# vis_all_markers([1])
# vis_segments_only_chessboard_color(1)
# vis_csv_pcd(ind=1)
# vis_segments(ind=1)
# vis_ested_pcd_corners(ind=1)
| bsd-2-clause | -3,670,802,329,565,017,000 | 37.468137 | 136 | 0.599618 | false |
melipelo/ejemplo | app.py | 1 | 3030 | #!/usr/bin/env python
# coding=utf-8
import requests
import urllib2
import json
import os
from flask import Flask
from flask import request
from flask import make_response
from bs4 import BeautifulSoup
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
result = req.get("result")
parameters = result.get("parameters")
if req.get("result").get("action") == "productos.sura":
cliente = parameters.get("tipo_cliente")
speech = "Buscando productos para " + cliente
elif req.get("result").get("action") == "producto.info":
producto = parameters.get("producto")
if (producto=="hogar"):
url = "https://www.sura.com/soluciones-personas/seguro-hogar.aspx"
r = urllib2.urlopen(url).read()
soup = BeautifulSoup(r, 'html.parser')
print soup
contenido = soup.find_all("div",class_="textRightColumn")
if (len(contenido)==0):
speech = "No encontrado"
else:
                speech = contenido[0].get_text(strip=True)  # extract plain text so the reply stays JSON-serializable
else:
speech = "Buscando informacion del producto " + producto
elif req.get("result").get("action") == "planes.salud":
url = "https://api.segurossura.com.co/public/v1/directory/products"
myResponse = requests.get(url)
if(myResponse.ok):
jData = json.loads(myResponse.text)
speech = "Seguros Sura Colombia ofrece los siguientes planes de salud: \n"
for plan in jData:
speech = speech + "\n" + plan["nombreField"].title()
elif req.get("result").get("action") == "info.especialistas":
producto = parameters.get("plan-salud")
ciudad = parameters.get("ciudad")
especialidad = parameters.get("especialidad")
url = "https://api.segurossura.com.co/public/v1/directory/search/" + producto + "/" + ciudad + "?speciality=" + especialidad + "&firstname=&secondname=&firstlastname=&secondlastname="
myResponse = requests.get(url)
if(myResponse.ok):
jData = json.loads(myResponse.text)
speech = "Los profesionales que coinciden con tu busqueda son: \n"
for medico in jData:
speech = speech + "\n" + medico["nombreField"] + "\n Direccion: " + medico["direccionField"].title() + "\n Telefono: " + medico["telefonoField"] + "\n"
elif req.get("result").get("action") == "coberturas.producto":
producto = parameters.get("productos")
speech = "Buscando coberturas del producto: " + producto
else:
speech =" "
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "apiai-onlinestore-shipping"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
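# Request sketch for local testing (assumes the service is running on port 5000):
# curl -X POST http://localhost:5000/webhook \
#      -H 'Content-Type: application/json' \
#      -d '{"result": {"action": "planes.salud", "parameters": {}}}'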
| apache-2.0 | 5,939,019,461,058,861,000 | 30.237113 | 185 | 0.642244 | false |
petewarden/tensorflow | tensorflow/python/keras/saving/saved_model/save_impl.py | 1 | 28457 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras SavedModel serialization.
TODO (kathywu): Move to layer_serialization.py. Some model-specific logic should
go to model_serialization.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import threading
import weakref
from tensorflow.python.eager import def_function
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.mixed_precision import autocast_variable
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.keras.saving.saved_model import constants
from tensorflow.python.keras.saving.saved_model import load as keras_load
from tensorflow.python.keras.saving.saved_model import serialized_attributes
from tensorflow.python.keras.saving.saved_model import utils
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.keras.utils import tf_inspect
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
from tensorflow.python.keras.utils.generic_utils import LazyLoader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
# To avoid circular dependencies between keras/engine and keras/saving,
# code in keras/saving must delay imports.
# TODO(b/134426265): Switch back to single-quotes to match the rest of the file
# once the issue with copybara is fixed.
# pylint:disable=g-inconsistent-quotes
base_layer = LazyLoader(
"base_layer", globals(),
"tensorflow.python.keras.engine.base_layer")
metrics = LazyLoader("metrics", globals(),
"tensorflow.python.keras.metrics")
input_layer = LazyLoader(
"input_layer", globals(),
"tensorflow.python.keras.engine.input_layer")
training_lib = LazyLoader(
"training_lib", globals(),
"tensorflow.python.keras.engine.training")
sequential_lib = LazyLoader(
"sequential_lib", globals(),
"tensorflow.python.keras.engine.sequential")
# pylint:enable=g-inconsistent-quotes
def should_skip_serialization(layer):
"""Skip serializing extra objects and functions if layer inputs aren't set."""
saved_model_input_spec_set = (isinstance(layer, training_lib.Model) and
layer._saved_model_inputs_spec is not None) # pylint: disable=protected-access
if not layer.built and not saved_model_input_spec_set:
logging.warning('Skipping full serialization of Keras layer {}, because '
'it is not built.'.format(layer))
return True
return False
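# Illustrative behaviour (not part of the original module): a freshly constructed,
# never-called layer such as tf.keras.layers.Dense(4) has built == False and is not a
# Model with _saved_model_inputs_spec set, so should_skip_serialization() returns True
# and only the warning above is logged.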
def wrap_layer_objects(layer, serialization_cache):
"""Returns extra trackable objects to attach to the serialized layer.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all checkpointable objects from a
SerializedAttributes object. See LayerAttributes and ModelAttributes for
entire list of objects
"""
# Wrap all regularization losses as tf.functions.
# First, generate list of all regularization losses in this layer and
# sublayers.
all_losses = layer._callable_losses[:] # pylint: disable=protected-access
for child_layer in utils.list_all_layers(layer):
all_losses.extend(child_layer._callable_losses) # pylint: disable=protected-access
# Next, wrap all loss functions as tf.functions. Use the serialization cache
# to store already-wrapped functions.
keras_loss_cache = serialization_cache.setdefault('keras_losses', {})
wrapped_loss_functions = []
for loss_fn in all_losses:
if loss_fn in keras_loss_cache:
wrapped_loss_functions.append(keras_loss_cache[loss_fn])
else:
wrapped_loss = _wrap_unconditional_loss(loss_fn, len(keras_loss_cache))
keras_loss_cache[loss_fn] = wrapped_loss
wrapped_loss_functions.append(wrapped_loss)
wrapped_layer_losses = [keras_loss_cache[fn]
for fn in layer._callable_losses[:]] # pylint: disable=protected-access
layer_metrics = data_structures.wrap_or_unwrap(
{m.name: m for m in layer._metrics}) # pylint: disable=protected-access
return dict(
variables=data_structures.wrap_or_unwrap(layer.variables),
trainable_variables=data_structures.wrap_or_unwrap(
layer.trainable_variables),
non_trainable_variables=data_structures.wrap_or_unwrap(
layer.non_trainable_variables),
layers=data_structures.wrap_or_unwrap(utils.list_all_layers(layer)),
metrics=data_structures.wrap_or_unwrap(layer.metrics),
regularization_losses=data_structures.wrap_or_unwrap(
wrapped_loss_functions),
layer_regularization_losses=data_structures.wrap_or_unwrap(
wrapped_layer_losses),
layer_metrics=layer_metrics)
# pylint: disable=protected-access
def wrap_layer_functions(layer, serialization_cache):
"""Returns dict of wrapped layer call function and losses in tf.functions.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
A dictionary containing all keras tf.functions to serialize. See
LayerAttributes and ModelAttributes for the list of all attributes.
"""
# Since Sequential models may be modified in place using model.add() or
# model.pop(), don't use saved functions.
if (isinstance(layer, keras_load.RevivedLayer) and
not isinstance(layer, sequential_lib.Sequential)):
return {fn_name: getattr(layer.keras_api, fn_name, None)
for fn_name in serialized_attributes.LayerAttributes.all_functions}
# Reset the losses of the layer and its children. The call function in each
# child layer is replaced with tf.functions.
original_fns = _replace_child_layer_functions(layer, serialization_cache)
original_losses = _reset_layer_losses(layer)
# Wrap all the layer call and activity regularizer functions.
# Use LayerCallCollection to ensure that all layer call functions (__call__,
# call with losses) are traced with the same inputs.
call_collection = LayerCallCollection(layer)
call_fn_with_losses = call_collection.add_function(
_wrap_call_and_conditional_losses(layer),
'{}_layer_call_and_return_conditional_losses'.format(layer.name))
call_fn = call_collection.add_function(
_extract_outputs_from_fn(layer, call_fn_with_losses),
'{}_layer_call_fn'.format(layer.name))
fns = {'call_and_return_conditional_losses': call_fn_with_losses,
'__call__': call_fn}
if layer._activity_regularizer is not None: # pylint: disable=protected-access
fns['activity_regularizer_fn'] = _wrap_activity_regularizer(layer)
fns['call_and_return_all_conditional_losses'] = (
call_collection.add_function(
_append_activity_regularizer_loss(layer,
call_fn_with_losses,
fns['activity_regularizer_fn']),
'{}_layer_call_and_return_all_conditional_losses'.format(layer.name)
))
else:
fns['activity_regularizer_fn'] = None
fns['call_and_return_all_conditional_losses'] = call_fn_with_losses
# Manually trigger traces before restoring the overwritten functions. The
# functions are traced within the layer call context to ensure that layer
# functions (e.g. add_loss) behave as though running in graph mode.
with tracing_scope():
call_collection.trace_with_input_signature()
with base_layer_utils.call_context().enter(
layer, inputs=None, build_graph=True, training=None, saving=True):
for fn in fns.values():
if fn is not None and fn.input_signature is not None:
if isinstance(fn, LayerCall):
fn = fn.wrapped_call
fn.get_concrete_function()
# Restore overwritten functions and losses
_restore_child_layer_functions(original_fns)
_restore_layer_losses(original_losses)
return fns
def default_save_signature(layer):
original_losses = _reset_layer_losses(layer)
fn = saving_utils.trace_model_call(layer)
fn.get_concrete_function()
_restore_layer_losses(original_losses)
return fn
def _replace_child_layer_functions(layer, serialization_cache):
"""Replaces functions in the children layers with wrapped tf.functions.
This step allows functions from parent layers to reference the wrapped
functions from their children layers instead of retracing the ops.
This function also resets all losses stored in the layer. These are stored in
the returned dictionary. Use `_restore_child_layer_functions` to restore
the original attributes.
Args:
layer: Keras Layer object.
serialization_cache: Dictionary shared between all objects during
serialization.
Returns:
Dictionary mapping layer objects -> original functions and losses:
{ Child layer 1: {
'losses': Original losses,
'call': Original call function
'_activity_regularizer': Original activity regularizer},
Child layer 2: ...
}
"""
# pylint: disable=protected-access
original_fns = {}
def replace_layer_functions(child_layer, serialized_fns):
"""Replaces layer call and activity regularizer with wrapped functions."""
original_fns[child_layer] = {
'call': child_layer.call,
'_activity_regularizer': child_layer._activity_regularizer
}
with trackable.no_automatic_dependency_tracking_scope(child_layer):
try:
child_layer._activity_regularizer = serialized_fns.get(
'activity_regularizer_fn')
except AttributeError:
# Some layers have an unsettable activity regularizer.
pass
child_layer.call = utils.use_wrapped_call(
child_layer,
serialized_fns['call_and_return_conditional_losses'],
default_training_value=False)
def replace_metric_functions(child_layer, serialized_fns):
"""Replaces metric functions with wrapped functions."""
original_fns[child_layer] = {
'__call__': child_layer.__call__,
'result': child_layer.result,
'update_state': child_layer.update_state
}
with trackable.no_automatic_dependency_tracking_scope(child_layer):
child_layer.__call__ = serialized_fns['__call__']
child_layer.result = serialized_fns['result']
child_layer.update_state = serialized_fns['update_state']
for child_layer in utils.list_all_layers(layer):
if isinstance(child_layer, input_layer.InputLayer):
continue
if child_layer not in serialization_cache[constants.KERAS_CACHE_KEY]:
serialized_functions = (
child_layer._trackable_saved_model_saver._get_serialized_attributes(
serialization_cache).functions)
else:
serialized_functions = (
serialization_cache[constants.KERAS_CACHE_KEY][child_layer].functions)
if not serialized_functions:
# This indicates either:
# - circular dependency, which means the current layer's functions
# should be wrapped first.
# - Child layer's inputs are not defined, so its functions have not been
# wrapped. In this case, no replacement is necessary so move on to the
# next child.
continue
if isinstance(child_layer, metrics.Metric):
replace_metric_functions(child_layer, serialized_functions)
else:
replace_layer_functions(child_layer, serialized_functions)
return original_fns
# pylint: enable=protected-access
def _restore_child_layer_functions(original_fns):
"""Restores attributes replaced with `_replace_child_layer_functions`."""
for child_layer, fns in original_fns.items():
with trackable.no_automatic_dependency_tracking_scope(child_layer):
for fn_name, fn in fns.items():
try:
setattr(child_layer, fn_name, fn) # pylint: disable=protected-access
except AttributeError:
pass # In the case of _activity_regularizer, setting the attribute
# may be disallowed.
# pylint: disable=protected-access
def _reset_layer_losses(parent_layer):
"""Resets losses of layer and its sublayers, and returns original losses."""
losses_dict = {}
for layer in utils.list_all_layers_and_sublayers(parent_layer):
losses_dict[layer] = {'losses': layer._losses[:],
'eager_losses': layer._eager_losses[:]}
with trackable.no_automatic_dependency_tracking_scope(layer):
layer._losses = []
layer._eager_losses = []
return losses_dict
def _restore_layer_losses(losses_dict):
for layer in losses_dict:
with trackable.no_automatic_dependency_tracking_scope(layer):
layer._losses = losses_dict[layer]['losses']
layer._eager_losses = losses_dict[layer]['eager_losses']
# pylint: enable=protected-access
class LayerTracingContext(threading.local):
def __init__(self):
super(LayerTracingContext, self).__init__()
self.enable_call_tracing = False
self.trace_queue = []
_thread_local_data = LayerTracingContext()
@tf_contextlib.contextmanager
def tracing_scope():
"""Enables tracing scope."""
# This enables the LayerCallCollection's tracing mechanism to trace all call
# functions in the collection.
previous_value = _thread_local_data.enable_call_tracing
previous_queue = _thread_local_data.trace_queue
try:
_thread_local_data.enable_call_tracing = True
_thread_local_data.trace_queue = []
yield
finally:
_thread_local_data.enable_call_tracing = previous_value
# Run traces from the queue.
for fn, args, kwargs, training in _thread_local_data.trace_queue:
if training is not None:
with K.deprecated_internal_learning_phase_scope(training):
fn.get_concrete_function(*args, **kwargs)
else:
fn.get_concrete_function(*args, **kwargs)
_thread_local_data.trace_queue = previous_queue
def add_trace_to_queue(fn, args, kwargs, training=None):
if tracing_enabled():
_thread_local_data.trace_queue.append(
(fn, args[:], kwargs.copy(), training))
def tracing_enabled():
"""Whether to add extra traces to the queue."""
return _thread_local_data.enable_call_tracing
class LayerCallCollection(object):
"""Groups wrapped layer call functions.
This is used to ensure that all layer call functions are traced with the same
inputs-
- call
- call_and_return_conditional_losses
- call_and_return_all_conditional_losses
"""
def __init__(self, layer):
self.layer = layer
self.layer_call_method = _get_layer_call_method(layer)
self._expects_training_arg = utils.layer_uses_training_bool(layer)
self._training_arg_index = utils.get_training_arg_index(
self.layer_call_method)
# If the layer call function has kwargs, then the traced function cannot
# have an input signature.
arg_spec = tf_inspect.getfullargspec(self.layer_call_method)
self._has_kwargs = bool(self._expects_training_arg or
arg_spec.defaults or
arg_spec.kwonlyargs or
arg_spec.varkw)
self._input_signature = self._generate_input_signature(layer)
self._functions = weakref.WeakValueDictionary()
# Get the input argument name from the args.
args = arg_spec.args
if tf_inspect.ismethod(self.layer_call_method):
args = args[1:]
self._input_arg_name = args[0] if args else 'inputs'
def _generate_input_signature(self, layer):
"""Inspects layer object and returns the inferred input signature.
Args:
layer: Layer object.
Returns:
List of possibly nested TensorSpecs of the layer call function inputs.
The list does not contain the `training` argument.
"""
if (isinstance(layer.call, def_function.Function) and
layer.call.input_signature is not None):
return layer.call.input_signature
elif isinstance(layer, training_lib.Model):
return saving_utils.model_input_signature(layer)
elif (layer.input_spec is not None and
layer._use_input_spec_as_call_signature): # pylint: disable=protected-access
def to_tensor_spec_or_none(x):
spec = input_spec.to_tensor_spec(x, layer._compute_dtype) # pylint: disable=protected-access
# If the shape is too general (e.g. multiple dimensions are allowed),
# return None so that separate functions can be generated for each
# inferred input signature.
# TODO(b/134962016): currently partial signatures are not supported.
if spec.shape == tensor_shape.TensorShape(None):
return None
return spec
input_signature = [nest.map_structure(
to_tensor_spec_or_none, layer.input_spec)]
return input_signature
else:
return None
def add_trace(self, *args, **kwargs):
"""Traces all functions with the same args and kwargs.
Args:
*args: Positional args passed to the original function.
**kwargs: Keyword args passed to the original function.
"""
args = list(args)
kwargs = kwargs.copy()
for fn in self._functions.values():
# TODO(kathywu): Replace arguments with broader shapes defined in the
# input signature.
if self._expects_training_arg:
def trace_with_training(value, fn=fn):
utils.set_training_arg(value, self._training_arg_index, args, kwargs)
add_trace_to_queue(fn, args, kwargs, value)
trace_with_training(True)
trace_with_training(False)
else:
add_trace_to_queue(fn, args, kwargs)
@property
def fn_input_signature(self):
"""Returns input signature for the wrapped layer call function."""
if self._has_kwargs:
# Input signatures may only describe tensor arguments and kwargs are not
# supported.
return None
if None in nest.flatten(self._input_signature):
# TODO(b/134962016): If input signature cannot be partially defined.
return None
return self._input_signature
def training_arg_was_passed(self, args, kwargs):
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
return (utils.get_training_arg(self._training_arg_index, args, kwargs)
is not None)
else:
return self.layer._call_arg_was_passed( # pylint: disable=protected-access
'training', args, kwargs, inputs_in_args=True)
def get_training_arg_value(self, args, kwargs):
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
return utils.get_training_arg(self._training_arg_index, args, kwargs)
else:
return self.layer._get_call_arg_value( # pylint: disable=protected-access
'training', args, kwargs, inputs_in_args=True)
def get_input_arg_value(self, args, kwargs):
return self.layer._get_call_arg_value( # pylint: disable=protected-access
self._input_arg_name, args, kwargs, inputs_in_args=True)
def _maybe_wrap_with_training_arg(self, call_fn):
"""Wraps call function with added training argument if necessary."""
if not self.layer._expects_training_arg and self._expects_training_arg: # pylint: disable=protected-access
# Add training arg to wrapper function.
arg_spec = tf_inspect.getfullargspec(call_fn)
args = arg_spec.args + ['training']
defaults = list(arg_spec.defaults or [])
defaults.append(False)
new_arg_spec = tf_inspect.FullArgSpec(
args=args,
varargs=arg_spec.varargs,
varkw=arg_spec.varkw,
defaults=defaults,
kwonlyargs=arg_spec.kwonlyargs,
kwonlydefaults=arg_spec.kwonlydefaults,
annotations=arg_spec.annotations)
# Set new training arg index
self._training_arg_index = len(args) - 1
if tf_inspect.ismethod(call_fn):
self._training_arg_index -= 1
def wrap_with_training_arg(*args, **kwargs):
# Remove the training value, since the original call_fn does not expect
# a training arg. Instead, the training value will be propagated using
# the call context created in LayerCall.
args = list(args)
kwargs = kwargs.copy()
utils.remove_training_arg(self._training_arg_index, args, kwargs)
return call_fn(*args, **kwargs)
return tf_decorator.make_decorator(
target=call_fn,
decorator_func=wrap_with_training_arg,
decorator_argspec=new_arg_spec)
return call_fn
def add_function(self, call_fn, name):
"""Adds a layer call function to the collection."""
fn = LayerCall(
self, self._maybe_wrap_with_training_arg(call_fn), name,
input_signature=self.fn_input_signature)
self._functions[name] = fn.wrapped_call
return fn
def trace_with_input_signature(self):
"""Trace with the layer/models inferred input signature if possible."""
if (None not in nest.flatten(self._input_signature) and self._has_kwargs):
# Manually add traces for layers that have keyword arguments and have
# a fully defined input signature.
self.add_trace(*self._input_signature)
def _filtered_inputs(inputs):
return list(filter(tf_utils.is_tensor_or_variable, nest.flatten(inputs)))
def layer_call_wrapper(call_collection, method, name):
"""Ensures layer losses are kept the same, and runs method in call context."""
# Create wrapper that deals with losses and call context.
def wrapper(*args, **kwargs):
"""Calls method within call context."""
layer = call_collection.layer
training = None
inputs = _filtered_inputs([args, kwargs])
# pylint: disable=protected-access
if (args or kwargs) and call_collection.training_arg_was_passed(
args, kwargs):
training = call_collection.get_training_arg_value(args, kwargs)
# pylint: enable=protected-access
original_losses = _reset_layer_losses(layer)
with base_layer_utils.call_context().enter(
layer, inputs=inputs, build_graph=False, training=training,
saving=True):
with autocast_variable.enable_auto_cast_variables(
layer._compute_dtype_object): # pylint: disable=protected-access
ret = method(*args, **kwargs)
_restore_layer_losses(original_losses)
return ret
# Rename to `name`, since tf.function doesn't have a name argument. Without
# this, all functions returned by this method will be named "call", which
# would be a nightmare to debug.
fn = tf_decorator.make_decorator(target=method, decorator_func=wrapper)
fn.__name__ = name
return fn
class LayerCall(object):
"""Function that triggers traces of other functions in the same collection."""
def __init__(self, call_collection, call_fn, name, input_signature):
"""Initializes a LayerCall object.
Args:
call_collection: a LayerCallCollection, which contains the other layer
call functions (e.g. call_with_conditional_losses, call). These
functions should be traced with the same arguments.
call_fn: A call function.
name: Name of the call function.
input_signature: Input signature of call_fn (can be None).
"""
self.call_collection = call_collection
self.input_signature = input_signature
self.wrapped_call = def_function.function(
layer_call_wrapper(call_collection, call_fn, name),
input_signature=input_signature)
self.original_layer_call = call_collection.layer_call_method
def _maybe_trace(self, args, kwargs):
# Trigger traces of other call functions + extra training-arg traces.
if tracing_enabled():
self.call_collection.add_trace(*args, **kwargs)
def __call__(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call(*args, **kwargs)
def get_concrete_function(self, *args, **kwargs):
self._maybe_trace(args, kwargs)
return self.wrapped_call.get_concrete_function(*args, **kwargs)
def _wrap_call_and_conditional_losses(layer):
"""Wraps call function that returns a tuple of (outputs, losses).
The losses returned are conditional on the inputs passed to the call function.
  Unconditional losses (e.g. weight regularization) are wrapped separately.
Args:
layer: a Keras layer object
Returns:
python call function that returns outputs and conditional losses -- excludes
activity regularizer
"""
# Create function that generates both outputs and losses
layer_call = _get_layer_call_method(layer)
def call_and_return_conditional_losses(*args, **kwargs):
"""Returns layer (call_output, conditional losses) tuple."""
call_output = layer_call(*args, **kwargs)
if version_utils.is_v1_layer_or_model(layer):
conditional_losses = layer.get_losses_for(
_filtered_inputs([args, kwargs]))
else:
conditional_losses = [
l for l in layer.losses if not hasattr(l, '_unconditional_loss')
]
return call_output, conditional_losses
return _create_call_fn_decorator(layer, call_and_return_conditional_losses)
def _extract_outputs_from_fn(layer, call_and_return_conditional_losses):
"""Returns a function that returns only call function outputs."""
if isinstance(layer, keras_load.RevivedLayer):
return layer.keras_api.__call__ # pylint: disable=protected-access
def call(inputs, *args, **kwargs):
return call_and_return_conditional_losses(inputs, *args, **kwargs)[0]
return _create_call_fn_decorator(layer, call)
def _append_activity_regularizer_loss(
layer, call_fn_with_losses, activity_regularizer_fn):
"""Appends activity regularizer loss to losses returned by the wrapped fn."""
def fn(inputs, *args, **kwargs):
outputs, losses = call_fn_with_losses(inputs, *args, **kwargs)
losses.append(activity_regularizer_fn(outputs))
return outputs, losses
return _create_call_fn_decorator(layer, fn)
def _create_call_fn_decorator(layer, wrapped_call):
call_fn = _get_layer_call_method(layer)
fn, arg_spec = utils.maybe_add_training_arg(
call_fn, wrapped_call, layer._expects_training_arg, # pylint: disable=protected-access
default_training_value=False)
return tf_decorator.make_decorator(
target=call_fn,
decorator_func=fn,
decorator_argspec=arg_spec)
def _wrap_unconditional_loss(loss_fn, index):
"""Wraps callable/unconditional loss, returning a serializable function."""
# Extract original loss function from partial function
fn = loss_fn.args[0] if isinstance(loss_fn, functools.partial) else loss_fn
if isinstance(fn, def_function.Function):
return fn
else:
return def_function.Function(
fn, 'loss_fn_{}'.format(index), input_signature=[])
def _wrap_activity_regularizer(layer):
"""Wraps the activity regularizer."""
# pylint: disable=protected-access
if isinstance(layer._activity_regularizer, def_function.Function):
return layer._activity_regularizer
return def_function.Function(
layer._activity_regularizer,
'{}_activity_regularizer'.format(layer.name),
input_signature=[
tensor_spec.TensorSpec(None, layer._compute_dtype or K.floatx())
])
# pylint: enable=protected-access
def _get_layer_call_method(layer):
if isinstance(layer.call, (def_function.Function)):
return layer.call.python_function
return layer.call
| apache-2.0 | -5,671,393,154,205,207,000 | 38.688982 | 111 | 0.697965 | false |
DavideCanton/Python3 | docs/docs.py | 1 | 1567 | __author__ = 'davide'
import pathlib
import string
import math
from collections import defaultdict
def compute_inverse(fdt, docs, terms):
ft = defaultdict(int)
for t in terms:
for d in docs:
ft[t] += fdt[t, d]
return ft
def index():
terms = set()
docs = []
fdt = defaultdict(int)
folder = pathlib.Path("D:/documenti prova")
for fp in folder.glob("*.txt"):
docs.append(fp.name)
with fp.open() as f:
for line in f:
for word in line.split():
word = word.strip(string.punctuation)
if word:
terms.add(word)
fdt[word, fp.name] += 1
ft = compute_inverse(fdt, docs, terms)
return terms, docs, fdt, ft
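# Worked example of the weighting used below (numbers are made up): with N = 10 indexed
# documents and a query term appearing in ft[t] = 2 of them, f1(t) = log(1 + 10/2)
# = log(6) ~= 1.79; a document containing that term fdt[t, d] = 3 times gets
# f2(t, d) = 1 + log(3) ~= 2.10, and the two weights are multiplied when scoring.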
if __name__ == "__main__":
terms, docs, fdt, ft = index()
N = len(docs)
q = input("Query>")
f1 = lambda t: math.log(1 + N / ft[t]) if ft[t] > 0 else 0
f2 = lambda t, d: 1 + math.log(fdt[t, d]) if fdt[t, d] > 0 else 0
qt = [x.strip(string.punctuation) for x in q.split()]
wqt = {t: f1(t) for t in qt}
wdt = {(t, d): f2(t, d) for t in qt for d in docs}
wd = math.sqrt(sum(wdt[t, d] ** 2 for t in qt for d in docs))
if abs(wd) < 1E-10:
sd = []
else:
        sd = [(d, sum(wdt[t, d] * wqt[t] for t in qt) / wd)
              for d in docs]
sd.sort(key=lambda t: -t[1])
for el in sd:
print(el)
for t in qt:
for d in docs:
print("{},{} => {}".format(t, d, fdt[t, d])) | gpl-3.0 | -8,411,056,482,351,363,000 | 24.290323 | 74 | 0.498405 | false |
lahwaacz/tvnamer | tests/test_anime_filenames.py | 1 | 1102 | #!/usr/bin/env python
"""Tests anime filename output
"""
from functional_runner import run_tvnamer, verify_out_data
from nose.plugins.attrib import attr
@attr("functional")
def test_group():
"""Anime filename [#100]
"""
out_data = run_tvnamer(
with_files = ['[Some Group] Scrubs - 01 [A1B2C3].avi'],
with_config = """
{
"batch": true,
"filename_anime_with_episode": "[%(group)s] %(seriesname)s - %(episode)s - %(episodename)s [%(crc)s]%(ext)s"
}
""")
expected_files = ['[Some Group] Scrubs - 01 - My First Day [A1B2C3].avi']
verify_out_data(out_data, expected_files)
@attr("functional")
def test_group_no_epname():
"""Anime filename, on episode with no name [#100]
"""
out_data = run_tvnamer(
with_files = ['[Some Group] Somefakeseries - 01 [A1B2C3].avi'],
with_config = """
{
"batch": true,
"filename_anime_without_episode": "[%(group)s] %(seriesname)s - %(episode)s [%(crc)s]%(ext)s"
}
""")
expected_files = ['[Some Group] Somefakeseries - 01 [A1B2C3].avi']
verify_out_data(out_data, expected_files)
| unlicense | 3,900,368,123,542,362,600 | 23.488889 | 112 | 0.608893 | false |
zookeepr/zookeepr | zk/model/ceiling.py | 1 | 3694 | """The application's model objects"""
import sqlalchemy as sa
from meta import Base
from pylons.controllers.util import abort
from beaker.cache import CacheManager
from role import Role
from person_role_map import person_role_map
from meta import Session
import datetime
import random
class Ceiling(Base):
"""Stores the details of product ceilings which are used to control the sale of items with a limited stock
"""
__tablename__ = 'ceiling'
id = sa.Column(sa.types.Integer, primary_key=True)
parent_id = sa.Column(sa.types.Integer, sa.ForeignKey('ceiling.id'), nullable=True)
name = sa.Column(sa.types.Text, nullable=False, unique=True)
max_sold = sa.Column(sa.types.Integer, nullable=True)
available_from = sa.Column(sa.types.DateTime, nullable=True)
available_until = sa.Column(sa.types.DateTime, nullable=True)
cache = CacheManager()
# relations
parent = sa.orm.relation(lambda: Ceiling, backref='children', remote_side=[id])
def qty_sold(self):
qty = 0
for p in self.products:
qty += p.qty_sold()
return qty
def qty_invoiced(self, date=True):
        # date (bool): if True, only count invoiced items that are not overdue
@self.cache.cache(self.id, expire=600)
def cached(self, date=True):
qty = 0
for p in self.products:
qty += p.qty_invoiced(date)
return qty
return cached(self, date)
def qty_free(self):
qty = 0
for p in self.products:
qty += p.qty_free()
return qty
def percent_sold(self):
if self.max_sold == None:
return 0
else:
percent = float(self.qty_sold()) / float(self.max_sold)
return int(percent * 100)
def percent_invoiced(self):
if self.max_sold == None:
return 0
else:
percent = float(self.qty_invoiced()) / float(self.max_sold)
return int(percent * 100)
def remaining(self):
return self.max_sold - self.qty_sold()
def soldout(self):
if self.max_sold != None:
return self.qty_invoiced() >= self.max_sold
return False
def enough_left(self, qty):
if self.max_sold != None:
return (self.qty_invoiced() + qty) > self.max_sold
return False
def available(self, stock=True, qty=0):
        # stock (bool): if True, treat a sold-out ceiling as unavailable
if stock and self.soldout():
return False
elif qty > 0 and self.enough_left(qty):
return False
elif self.available_from is not None and self.available_from >= datetime.datetime.now():
return False
elif self.available_until is not None and self.available_until <= datetime.datetime.now():
return False
elif self.parent is not None and self.parent != self and self.parent.available():
return False
else:
return True
def can_i_sell(self, qty):
if not self.soldout() and self.remaining() > qty:
return True
else:
return False
def __repr__(self):
return '<Ceiling id=%r name=%r max_sold=%r available_from=%r, available_until=%r' % (self.id, self.name, self.max_sold, self.available_from, self.available_until)
@classmethod
def find_all(cls):
return Session.query(Ceiling).order_by(Ceiling.name).all()
@classmethod
def find_by_id(cls, id):
return Session.query(Ceiling).filter_by(id=id).first()
@classmethod
def find_by_name(cls, name):
return Session.query(Ceiling).filter_by(name=name).first()
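# Illustrative usage (a sketch only; "Earlybird" is a made-up ceiling name):
#
#     ceiling = Ceiling.find_by_name("Earlybird")
#     if ceiling is not None and ceiling.available(qty=2):
#         ...  # safe to invoice two more items
#
# available(qty=2) returns False when the ceiling is sold out, when two more invoiced
# items would push past max_sold, or when now falls outside the
# available_from/available_until window (or the parent-ceiling check fails).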
| gpl-2.0 | -5,095,891,693,858,206,000 | 29.783333 | 170 | 0.61072 | false |
ContinuumIO/dask | dask/array/utils.py | 2 | 11059 | import difflib
import functools
import math
import numbers
import os
import warnings
import numpy as np
from tlz import frequencies, concat
from .core import Array
from ..highlevelgraph import HighLevelGraph
from ..utils import has_keyword, ignoring, is_arraylike
try:
AxisError = np.AxisError
except AttributeError:
try:
np.array([0]).sum(axis=5)
except Exception as e:
AxisError = type(e)
def normalize_to_array(x):
if "cupy" in str(type(x)): # TODO: avoid explicit reference to cupy
return x.get()
else:
return x
def meta_from_array(x, ndim=None, dtype=None):
""" Normalize an array to appropriate meta object
Parameters
----------
x: array-like, callable
Either an object that looks sufficiently like a Numpy array,
or a callable that accepts shape and dtype keywords
ndim: int
Number of dimensions of the array
dtype: Numpy dtype
A valid input for ``np.dtype``
Returns
-------
array-like with zero elements of the correct dtype
"""
# If using x._meta, x must be a Dask Array, some libraries (e.g. zarr)
# implement a _meta attribute that are incompatible with Dask Array._meta
if hasattr(x, "_meta") and isinstance(x, Array):
x = x._meta
if dtype is None and x is None:
raise ValueError("You must specify the meta or dtype of the array")
if np.isscalar(x):
x = np.array(x)
if x is None:
x = np.ndarray
if isinstance(x, type):
x = x(shape=(0,) * (ndim or 0), dtype=dtype)
if (
not hasattr(x, "shape")
or not hasattr(x, "dtype")
or not isinstance(x.shape, tuple)
):
return x
if isinstance(x, list) or isinstance(x, tuple):
ndims = [
0
if isinstance(a, numbers.Number)
else a.ndim
if hasattr(a, "ndim")
else len(a)
for a in x
]
a = [a if nd == 0 else meta_from_array(a, nd) for a, nd in zip(x, ndims)]
return a if isinstance(x, list) else tuple(x)
if ndim is None:
ndim = x.ndim
try:
meta = x[tuple(slice(0, 0, None) for _ in range(x.ndim))]
if meta.ndim != ndim:
if ndim > x.ndim:
meta = meta[(Ellipsis,) + tuple(None for _ in range(ndim - meta.ndim))]
meta = meta[tuple(slice(0, 0, None) for _ in range(meta.ndim))]
elif ndim == 0:
meta = meta.sum()
else:
meta = meta.reshape((0,) * ndim)
except Exception:
meta = np.empty((0,) * ndim, dtype=dtype or x.dtype)
if np.isscalar(meta):
meta = np.array(meta)
if dtype and meta.dtype != dtype:
meta = meta.astype(dtype)
return meta
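# Illustrative behaviour (a hedged sketch, not part of the original module):
# meta_from_array(np.ones((4, 4), dtype="f8")) returns an empty float64 ndarray of shape
# (0, 0); passing ndim=1 reshapes the zero-element meta to shape (0,), and dtype="i4"
# casts it, so downstream code can inspect dtype/ndim without touching real data.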
def compute_meta(func, _dtype, *args, **kwargs):
with np.errstate(all="ignore"), warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
args_meta = [meta_from_array(x) if is_arraylike(x) else x for x in args]
kwargs_meta = {
k: meta_from_array(v) if is_arraylike(v) else v for k, v in kwargs.items()
}
# todo: look for alternative to this, causes issues when using map_blocks()
# with np.vectorize, such as dask.array.routines._isnonzero_vec().
if isinstance(func, np.vectorize):
meta = func(*args_meta)
else:
try:
# some reduction functions need to know they are computing meta
if has_keyword(func, "computing_meta"):
kwargs_meta["computing_meta"] = True
meta = func(*args_meta, **kwargs_meta)
except TypeError as e:
if (
"unexpected keyword argument" in str(e)
or "is an invalid keyword for" in str(e)
or "Did not understand the following kwargs" in str(e)
):
raise
else:
return None
except Exception:
return None
if _dtype and getattr(meta, "dtype", None) != _dtype:
with ignoring(AttributeError):
meta = meta.astype(_dtype)
if np.isscalar(meta):
meta = np.array(meta)
return meta
def allclose(a, b, equal_nan=False, **kwargs):
a = normalize_to_array(a)
b = normalize_to_array(b)
if getattr(a, "dtype", None) != "O":
return np.allclose(a, b, equal_nan=equal_nan, **kwargs)
if equal_nan:
return a.shape == b.shape and all(
np.isnan(b) if np.isnan(a) else a == b for (a, b) in zip(a.flat, b.flat)
)
return (a == b).all()
def same_keys(a, b):
def key(k):
if isinstance(k, str):
return (k, -1, -1, -1)
else:
return k
return sorted(a.dask, key=key) == sorted(b.dask, key=key)
def _not_empty(x):
return x.shape and 0 not in x.shape
def _check_dsk(dsk):
""" Check that graph is well named and non-overlapping """
if not isinstance(dsk, HighLevelGraph):
return
assert all(isinstance(k, (tuple, str)) for k in dsk.layers)
freqs = frequencies(concat(dsk.dicts.values()))
non_one = {k: v for k, v in freqs.items() if v != 1}
assert not non_one, non_one
def assert_eq_shape(a, b, check_nan=True):
for aa, bb in zip(a, b):
if math.isnan(aa) or math.isnan(bb):
if check_nan:
assert math.isnan(aa) == math.isnan(bb)
else:
assert aa == bb
def _get_dt_meta_computed(x, check_shape=True, check_graph=True):
x_original = x
x_meta = None
x_computed = None
if isinstance(x, Array):
assert x.dtype is not None
adt = x.dtype
if check_graph:
_check_dsk(x.dask)
x_meta = getattr(x, "_meta", None)
x = x.compute(scheduler="sync")
x_computed = x
if hasattr(x, "todense"):
x = x.todense()
if not hasattr(x, "dtype"):
x = np.array(x, dtype="O")
if _not_empty(x):
assert x.dtype == x_original.dtype
if check_shape:
assert_eq_shape(x_original.shape, x.shape, check_nan=False)
else:
if not hasattr(x, "dtype"):
x = np.array(x, dtype="O")
adt = getattr(x, "dtype", None)
return x, adt, x_meta, x_computed
def assert_eq(a, b, check_shape=True, check_graph=True, check_meta=True, **kwargs):
a_original = a
b_original = b
a, adt, a_meta, a_computed = _get_dt_meta_computed(
a, check_shape=check_shape, check_graph=check_graph
)
b, bdt, b_meta, b_computed = _get_dt_meta_computed(
b, check_shape=check_shape, check_graph=check_graph
)
if str(adt) != str(bdt):
# Ignore check for matching length of flexible dtypes, since Array._meta
# can't encode that information
if adt.type == bdt.type and not (adt.type == np.bytes_ or adt.type == np.str_):
diff = difflib.ndiff(str(adt).splitlines(), str(bdt).splitlines())
raise AssertionError(
"string repr are different" + os.linesep + os.linesep.join(diff)
)
try:
assert a.shape == b.shape
if check_meta:
if hasattr(a, "_meta") and hasattr(b, "_meta"):
assert_eq(a._meta, b._meta)
if hasattr(a_original, "_meta"):
assert a_original._meta.ndim == a.ndim
if a_meta is not None:
assert type(a_original._meta) == type(a_meta)
if not (np.isscalar(a_meta) or np.isscalar(a_computed)):
assert type(a_meta) == type(a_computed)
if hasattr(b_original, "_meta"):
assert b_original._meta.ndim == b.ndim
if b_meta is not None:
assert type(b_original._meta) == type(b_meta)
if not (np.isscalar(b_meta) or np.isscalar(b_computed)):
assert type(b_meta) == type(b_computed)
assert allclose(a, b, **kwargs)
return True
except TypeError:
pass
c = a == b
if isinstance(c, np.ndarray):
assert c.all()
else:
assert c
return True
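# Illustrative usage (hedged; mirrors how the dask test-suite calls this helper):
#
#     import dask.array as da
#     assert_eq(da.ones((4, 4), chunks=2) + 1, np.ones((4, 4)) + 1)
#
# which checks the dtypes/meta agree, validates the graph keys, computes the dask side
# with the synchronous scheduler and finally compares the values via allclose().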
def safe_wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS):
"""Like functools.wraps, but safe to use even if wrapped is not a function.
Only needed on Python 2.
"""
if all(hasattr(wrapped, attr) for attr in assigned):
return functools.wraps(wrapped, assigned=assigned)
else:
return lambda x: x
def empty_like_safe(a, shape, **kwargs):
"""
Return np.empty_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.empty(shape, **kwargs).
"""
try:
return np.empty_like(a, shape=shape, **kwargs)
except TypeError:
return np.empty(shape, **kwargs)
def full_like_safe(a, fill_value, shape, **kwargs):
"""
Return np.full_like(a, fill_value, shape=shape, **kwargs) if the
shape argument is supported (requires NumPy >= 1.17), otherwise
falls back to using the old behavior, returning
np.full(shape, fill_value, **kwargs).
"""
try:
return np.full_like(a, fill_value, shape=shape, **kwargs)
except TypeError:
return np.full(shape, fill_value, **kwargs)
def ones_like_safe(a, shape, **kwargs):
"""
Return np.ones_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.ones(shape, **kwargs).
"""
try:
return np.ones_like(a, shape=shape, **kwargs)
except TypeError:
return np.ones(shape, **kwargs)
def zeros_like_safe(a, shape, **kwargs):
"""
Return np.zeros_like(a, shape=shape, **kwargs) if the shape argument
is supported (requires NumPy >= 1.17), otherwise falls back to
using the old behavior, returning np.zeros(shape, **kwargs).
"""
try:
return np.zeros_like(a, shape=shape, **kwargs)
except TypeError:
return np.zeros(shape, **kwargs)
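# Illustrative behaviour: on NumPy >= 1.17, zeros_like_safe(np.ones(3), shape=(2, 2))
# forwards to np.zeros_like(..., shape=(2, 2)); on older NumPy the TypeError branch
# falls back to np.zeros((2, 2), ...), so callers get a (2, 2) array of zeros either
# way (the empty/full/ones variants above behave analogously).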
def validate_axis(axis, ndim):
""" Validate an input to axis= keywords """
if isinstance(axis, (tuple, list)):
return tuple(validate_axis(ax, ndim) for ax in axis)
if not isinstance(axis, numbers.Integral):
raise TypeError("Axis value must be an integer, got %s" % axis)
if axis < -ndim or axis >= ndim:
raise AxisError(
"Axis %d is out of bounds for array of dimension %d" % (axis, ndim)
)
if axis < 0:
axis += ndim
return axis
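# Illustrative behaviour: validate_axis(-1, 3) returns 2, validate_axis((0, -1), 3)
# returns (0, 2), and validate_axis(3, 3) raises AxisError, mirroring NumPy's own axis
# normalization rules.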
def _is_nep18_active():
class A:
def __array_function__(self, *args, **kwargs):
return True
try:
return np.concatenate([A()])
except ValueError:
return False
IS_NEP18_ACTIVE = _is_nep18_active()
| bsd-3-clause | 7,372,364,514,582,665,000 | 29.465565 | 87 | 0.5694 | false |
twwd/MoodleDownloader | downloader.py | 1 | 9854 | #!/usr/bin/env python3
import argparse
import importlib
import os
import re
import sqlite3
from datetime import datetime
from urllib.parse import urljoin
import requests
import yaml
def load_plugin_class(plugin_class_str):
"""
dynamically load a class from a string
"""
class_data = plugin_class_str.split(".")
module_path = "plugins." + ".".join(class_data[:-1])
class_str = class_data[-1]
mod = importlib.import_module(module_path)
return getattr(mod, class_str)
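# Illustrative call (the plugin name is hypothetical, not part of this repository):
# load_plugin_class("moodle.MoodleSource") imports plugins.moodle and returns its
# MoodleSource class, which course_loop() below then instantiates as the source object.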
# print if verbose output is on
def log(msg):
if verbose_output:
print(msg)
def course_loop():
download_count = 0
skip_count = 0
# import config
try:
with open(os.path.join(os.path.dirname(__file__), 'data', 'config.yaml'), 'r', encoding='utf-8') as config_file:
            config = yaml.safe_load(config_file)
except FileNotFoundError:
print("Please provide a config file under data/config.yaml.")
return
# make the initial request to get the token
session = requests.Session()
# Loop through sources
for src_cfg in config:
# check if there are courses to download from
if 'courses' not in src_cfg or (source_part is not None and src_cfg['name'] not in source_part):
continue
log('\n\nSource: %s' % src_cfg['name'])
# load dynamically the source class
try:
src_class = load_plugin_class(src_cfg['class'])
src = src_class()
except AttributeError:
print('Class %s not found. Check your config file.' % src_cfg['class'])
continue
except ImportError:
print(
'Class %s not found. Check your config file' % src_cfg['class']
+ ' and ensure you have the class qualifier relative to the plugin directory.')
continue
# login
if 'login_url' in src_cfg and 'username' in src_cfg and 'password' in src_cfg:
src.login(session, src_cfg['login_url'], src_cfg['username'], src_cfg['password'])
# loop through courses
for course in src_cfg['courses']:
# check if only some courses should be checked
if course_part is not None and course['name'] not in course_part:
continue
log('\nCourse: %s\n' % course['name'])
if 'path' in course and course['path'] is not None:
course_url = urljoin(src_cfg['base_url'], course['path'])
elif 'param' in course and course['param'] is not None:
course_url = src.course_url(src_cfg['base_url'], course['param'])
else:
course_url = src_cfg['base_url']
# regex pattern for link text and file name
text_pattern = re.compile(course['pattern'])
filename_pattern = None
if 'filename_pattern' in course:
filename_pattern = re.compile(course['filename_pattern'])
# get all relevant links from the source site
links = src.link_list(session, course_url)
if links is None:
continue
for link in links:
if text_pattern.search(link[0]) is not None:
# request file http header
file_request = session.head(link[1], allow_redirects=True)
# get file name
if 'Content-Disposition' in file_request.headers:
file_disposition = file_request.headers['Content-Disposition']
file_name = file_disposition[
file_disposition.index('filename=') + 10:len(file_disposition) - 1].encode(
'latin-1').decode('utf8')
else:
# last part of the link (usually filename)
file_name = link[1].rsplit('/', 1)[-1]
# check extension
file_ext = os.path.splitext(file_name)[1]
if 'ext' in course and course['ext'] is not False:
if file_ext != course['ext'] or file_ext not in course['ext']:
continue
# check file name
if filename_pattern is not None and filename_pattern.search(file_name) is None:
continue
# get last modified date as timestamp
if 'Last-Modified' in file_request.headers:
file_last_modified = int(datetime.strptime(file_request.headers['Last-Modified'], '%a, %d %b %Y %H:%M:%S %Z').timestamp())
else:
print("No timestamp found for file %s" % file_name)
continue
# adjust file name
if 'rename' in course and course['rename'] is not False:
# find a number
num = re.search('\d{1,3}', link[0])
if num is None:
num = re.search('\d{1,3}', file_name)
if num is None:
num = file_last_modified
else:
num = num.group(0)
file_name = course['rename'].replace('%', str(num)) + file_ext
# remove trailing whitespaces
file_name = file_name.strip()
# the complete file path
file_path = os.path.join(course['local_folder'], file_name)
# fetch old timestamp from database
file_last_modified_old = c.execute(
'SELECT last_modified FROM file_modifications WHERE source=? AND course=? AND file_name=?',
(src_cfg['name'], course['name'], file_name)).fetchone()
# save file and timestamp in the database if it doesn't exists
if not simulate and file_last_modified_old is None:
c.execute(
'''
INSERT INTO file_modifications (source, course, file_name, file_path, last_modified)
VALUES (?,?,?,?,?)
''',
(src_cfg['name'], course['name'], file_name, file_path, file_last_modified))
# update timestamp if there's a newer version of the file
elif not simulate and file_last_modified > file_last_modified_old[0]:
c.execute(
'UPDATE file_modifications SET last_modified=? WHERE source=? AND course=? AND file_name=?',
(file_last_modified, src_cfg['name'], course['name'], file_name))
# otherwise skip saving
else:
skip_count += 1
# log(file_name + ' (skipped)')
continue
log(file_name + ' (new)')
if simulate:
conn.rollback()
continue
# request whole file
file_request = session.get(link[1])
# write file
try:
os.makedirs(os.path.dirname(file_path), exist_ok=True)
with open(file_path, 'wb') as f:
f.write(file_request.content)
download_count += 1
except FileNotFoundError:
print('Can\'t write file to %s' % file_path)
conn.rollback()
# save changes to the database
conn.commit()
# display count of downloaded files
log('\nDownloaded %i file(s), skipped %i file(s)' % (download_count, skip_count))
def clear_course():
if course_to_clear[0] == 'all':
c.execute("DELETE FROM file_modifications")
log('\nCleared all courses')
else:
c.execute("DELETE FROM file_modifications WHERE course=?", course_to_clear)
log('\nCleared course %s' % course_to_clear[0])
conn.commit()
# command line args
parser = argparse.ArgumentParser(
description='A simple script for downloading slides and exercises for university lectures.')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-c', '--course', action='append', help='specify a course which should be checked')
parser.add_argument('-s', '--source', action='append', help='specify a source which should be checked')
parser.add_argument('-sim', '--simulate', action='store_true', help='specify if the process should only be simulated')
parser.add_argument('--clear', action='append',
                    help='specify a course whose files should be deleted from the database (not from the file system). '
                         + 'Use keyword \'all\' to clear the whole database')
args = parser.parse_args()
verbose_output = args.verbose
simulate = args.simulate
course_part = args.course
source_part = args.source
course_to_clear = args.clear
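# Illustrative invocations (course/source names are made up):
#   python downloader.py -v                      # check every configured course, verbose
#   python downloader.py -c "Algorithms" -sim    # dry-run a single course, download nothing
#   python downloader.py --clear all             # drop all stored timestamps from the database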
# database for timestamps
conn = sqlite3.connect(os.path.join(os.path.dirname(__file__), 'data', 'file_modifications.db'))
c = conn.cursor()
# check if table exists otherwise create it
c.execute(
'''
CREATE TABLE IF NOT EXISTS file_modifications (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
source TEXT,
course TEXT,
file_name TEXT,
file_path TEXT,
last_modified INTEGER
);
''')
if simulate:
log("Simulation on")
if course_to_clear is not None:
clear_course()
else:
course_loop()
# close cursor
c.close()
| mit | 195,151,069,313,467,550 | 37.643137 | 146 | 0.531561 | false |
eliben/code-for-blog | 2017/continuations-trampolines/tracing.py | 1 | 1493 | # Tracing of function calls. Use @TraceCalls() as a decorator on functions.
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import sys
from functools import wraps
class TraceCalls(object):
""" Use as a decorator on functions that should be traced. Several
functions can be decorated - they will all be indented according
to their call depth.
"""
def __init__(self, stream=sys.stdout, indent_step=2, show_ret=False):
self.stream = stream
self.indent_step = indent_step
self.show_ret = show_ret
# This is a class attribute since we want to share the indentation
# level between different traced functions, in case they call
# each other.
TraceCalls.cur_indent = 0
def __call__(self, fn):
@wraps(fn)
def wrapper(*args, **kwargs):
indent = ' ' * TraceCalls.cur_indent
argstr = ', '.join(
[self._argrepr(a) for a in args] +
["%s=%s" % (a, repr(b)) for a, b in kwargs.items()])
self.stream.write('%s%s(%s)\n' % (indent, fn.__name__, argstr))
TraceCalls.cur_indent += self.indent_step
ret = fn(*args, **kwargs)
TraceCalls.cur_indent -= self.indent_step
if self.show_ret:
self.stream.write('%s--> %s\n' % (indent, ret))
return ret
return wrapper
def _argrepr(self, arg):
return repr(arg)
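# Illustrative usage (a minimal sketch, not part of the original module):
#
#     @TraceCalls(show_ret=True)
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(3)   # prints each nested fib(...) call, indented by depth, plus "--> result"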
| unlicense | 7,345,459,679,741,440,000 | 33.72093 | 75 | 0.58004 | false |
JulienDrecq/django-phonebook | phonebook/tests/test.py | 1 | 5797 | from django.test import TestCase
from phonebook.models import Contact
from phonebook.forms import LoginForm, ContactForm
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
class ContactTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="user_test", password="password_test")
def test_login(self):
"""Test login user"""
response = self.client.login(username=self.user.username, password='password_test')
self.assertTrue(response)
def test_login_fail(self):
"""Test fail login user"""
response = self.client.login(username=self.user.username, password='test_password')
self.assertFalse(response)
def test_login_form(self):
form_data = {'username': self.user.username, 'password': 'test_password'}
form = LoginForm(data=form_data)
self.assertEqual(form.is_valid(), True)
def test_login_page_form(self):
"""Test login user"""
response = self.client.post(reverse('phonebook_login_page'),
{'username': self.user.username, 'password': 'password_test'})
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_login_page_form_fail(self):
"""Test fail login user"""
response = self.client.post(reverse('phonebook_login_page'),
{'username': self.user.username, 'password': 'test_password'})
self.assertContains(response, 'Username or password is not correct.')
def test_contact_create(self):
"""Create a contact"""
contact = Contact.objects.create(firstname='Test firstname', lastname='Test lastname', email='[email protected]',
user_id=self.user)
self.assertIsNotNone(contact)
self.assertEquals(str(contact), "Test firstname Test lastname")
self.assertEquals(unicode(contact), "Test firstname Test lastname")
def test_contact_form(self):
form_data = {
'firstname': 'Test firstname',
'lastname': 'Test lastname',
'email': '[email protected]',
}
form = ContactForm(data=form_data)
self.assertEqual(form.is_valid(), True)
class ContactViewTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(username="user_test", password="password_test")
self.client.login(username=self.user.username, password='password_test')
self.contact = Contact.objects.create(firstname='Test firstname', lastname='Test lastname',
email='[email protected]', phone='0606060606', user_id=self.user)
self.assertIsNotNone(self.contact)
def test_call_login_redirect(self):
response = self.client.get(reverse('phonebook_login_page'))
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_call_view_lists_contacts(self):
response = self.client.get(reverse('phonebook_lists_contacts'))
self.assertEqual(response.status_code, 200)
def test_call_view_edit_contact(self):
response = self.client.get(reverse('phonebook_edit', kwargs={'contact_id': self.contact.id}))
self.assertEqual(response.status_code, 200)
def test_call_view_post_edit_contact(self):
response = self.client.post(reverse('phonebook_edit', kwargs={'contact_id': self.contact.id}),
{'firstname': self.contact.firstname, 'lastname': self.contact.lastname,
'email': self.contact.email})
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_call_view_post_new_contact(self):
        response = self.client.post(reverse('phonebook_new_contact'), {'firstname': 'Test new firstname',
'lastname': 'Test new lastname',
'email': '[email protected]'})
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_call_view_get_new_contact(self):
response = self.client.get(reverse('phonebook_new_contact'))
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_call_view_call(self):
response = self.client.get(reverse('phonebook_call', kwargs={'num': self.contact.phone}))
self.assertEqual(response.status_code, 200)
def test_call_view_exports_contacts(self):
response = self.client.get(reverse('phonebook_exports_contacts'))
self.assertEqual(response.status_code, 200)
def test_call_view_delete_contact(self):
response = self.client.get(reverse('phonebook_delete', kwargs={'contact_id': self.contact.id}))
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_call_logout(self):
response = self.client.get(reverse('phonebook_logout'))
self.assertRedirects(response, reverse('phonebook_login_page'))
def test_call_view_edit_contact_with_fail(self):
response = self.client.get(reverse('phonebook_edit', kwargs={'contact_id': 9999}))
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
def test_call_view_post_search_contact(self):
response = self.client.post(reverse('phonebook_search_contact'), {'query': '[email protected]'})
self.assertRedirects(response, reverse('phonebook_search_contact_query', kwargs={'query': '[email protected]'}))
def test_call_view_get_search_contact(self):
response = self.client.get(reverse('phonebook_search_contact'))
self.assertRedirects(response, reverse('phonebook_lists_contacts'))
| bsd-3-clause | 8,311,268,371,994,698,000 | 47.308333 | 118 | 0.642746 | false |