repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (string, 19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (string, 15 classes) | hash (int64) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool)
---|---|---|---|---|---|---|---|---|---|---|
daira/zcash | qa/rpc-tests/wallet_1941.py | 3 | 4053 | #!/usr/bin/env python3
# Copyright (c) 2016 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php .
# This is a regression test for #1941.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, initialize_chain_clean, \
initialize_datadir, start_nodes, start_node, connect_nodes_bi, \
bitcoind_processes, wait_and_assert_operationid_status, \
get_coinbase_address, DEFAULT_FEE
from decimal import Decimal
starttime = 1388534400
class Wallet1941RegressionTest (BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 1)
# Start nodes with -regtestshieldcoinbase to set fCoinbaseMustBeShielded to true.
def setup_network(self, split=False):
self.nodes = start_nodes(1, self.options.tmpdir, extra_args=[['-regtestshieldcoinbase','-debug=zrpc']] )
self.is_network_split=False
def add_second_node(self):
initialize_datadir(self.options.tmpdir, 1)
self.nodes.append(start_node(1, self.options.tmpdir, extra_args=['-regtestshieldcoinbase','-debug=zrpc']))
self.nodes[1].setmocktime(starttime + 9000)
connect_nodes_bi(self.nodes,0,1)
self.sync_all()
def restart_second_node(self, extra_args=[]):
self.nodes[1].stop()
bitcoind_processes[1].wait()
self.nodes[1] = start_node(1, self.options.tmpdir, extra_args=['-regtestshieldcoinbase','-debug=zrpc'] + extra_args)
self.nodes[1].setmocktime(starttime + 9000)
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
def run_test (self):
print("Mining blocks...")
self.nodes[0].setmocktime(starttime)
self.nodes[0].generate(101)
self.sync_all()
mytaddr = get_coinbase_address(self.nodes[0])
myzaddr = self.nodes[0].z_getnewaddress()
# Send 10 coins to our zaddr.
recipients = []
recipients.append({"address":myzaddr, "amount":Decimal('10.0') - DEFAULT_FEE})
myopid = self.nodes[0].z_sendmany(mytaddr, recipients)
wait_and_assert_operationid_status(self.nodes[0], myopid)
self.nodes[0].generate(1)
# Ensure the block times of the latest blocks exceed the variability
self.nodes[0].setmocktime(starttime + 3000)
self.nodes[0].generate(1)
self.nodes[0].setmocktime(starttime + 6000)
self.nodes[0].generate(1)
self.nodes[0].setmocktime(starttime + 9000)
self.nodes[0].generate(1)
self.sync_all()
# Confirm the balance on node 0.
resp = self.nodes[0].z_getbalance(myzaddr)
assert_equal(Decimal(resp), Decimal('10.0') - DEFAULT_FEE)
# Export the key for the zaddr from node 0.
key = self.nodes[0].z_exportkey(myzaddr)
# Start the new wallet
self.add_second_node()
self.nodes[1].getnewaddress()
self.nodes[1].z_getnewaddress()
self.nodes[1].generate(101)
self.sync_all()
# Import the key on node 1, only scanning the last few blocks.
# (uses 'true' to test boolean fallback)
self.nodes[1].z_importkey(key, 'true', self.nodes[1].getblockchaininfo()['blocks'] - 100)
# Confirm that the balance on node 1 is zero, as we have not
# rescanned over the older transactions
resp = self.nodes[1].z_getbalance(myzaddr)
assert_equal(Decimal(resp), 0)
# Re-import the key on node 1, scanning from before the transaction.
self.nodes[1].z_importkey(key, 'yes', self.nodes[1].getblockchaininfo()['blocks'] - 110)
# Confirm that the balance on node 1 is valid now (node 1 must
# have rescanned)
resp = self.nodes[1].z_getbalance(myzaddr)
assert_equal(Decimal(resp), Decimal('10.0') - DEFAULT_FEE)
if __name__ == '__main__':
Wallet1941RegressionTest().main()
| mit | -6,997,861,232,740,065,000 | 38.349515 | 124 | 0.655564 | false |
loanzen/falcon-resource-factory | tests/test_resource_factory.py | 1 | 4597 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
import falcon
import pytest
from falcon import testing
from falcon.cmd.print_routes import print_routes
from falcon_resource_factory import ResourceFactory
def detail_view(resource, req, res, **kwargs):
res.body = '{0} Called'.format(req.method)
def list_view(resource, req, res, **kwargs):
res.body = '{0} list Called'.format(req.method)
def resource_creator(detail_methods, list_methods,
detail_method_map=ResourceFactory.DETAIL_METHOD_MAP,
list_method_map=ResourceFactory.LIST_METHOD_MAP,
custom_views=None, params=None):
test_resource = type('TestResource', (), params or {})
for method in detail_methods:
func_name = detail_method_map[method]
setattr(test_resource, func_name, detail_view)
for method in list_methods:
func_name = list_method_map[method]
setattr(test_resource, func_name, list_view)
return test_resource()
@pytest.fixture()
def app():
return falcon.API()
@pytest.fixture
def client(app):
return testing.TestClient(app)
@pytest.fixture()
def resource_factory():
return ResourceFactory()
def _test_detail_routes(app, client, expected_params):
resource, method_map, params, uri_template = app._router.find('/items/5')
assert expected_params == params
assert resource.__class__.__name__ == 'TestResourceDetail'
assert hasattr(resource, 'on_get')
assert hasattr(resource, 'on_post')
response = client.simulate_get('/items/5')
assert response.content.lower() == b'get called'
response = client.simulate_post('/items/5')
assert response.content.lower() == b'post called'
def test_detail_routes(app, resource_factory, client):
res = resource_creator(['GET', 'POST'], [])
resource_factory.add_routes(app, '/items/', res)
expected_params = {'id': '5'}
_test_detail_routes(app, client, expected_params)
def test_detail_routes_custom_identifier(app, client):
resource_factory = ResourceFactory(detail_identifier='uuid')
res = resource_creator(['GET', 'POST'], [])
resource_factory.add_routes(app, '/items/', res)
expected_params = {'uuid': '5'}
_test_detail_routes(app, client, expected_params)
def test_detail_routes_custom_method_map(app, client):
detail_method_map = {
'GET': 'obj_get',
'POST': 'obj_post'
}
resource_factory = ResourceFactory(detail_method_map=detail_method_map)
res = resource_creator(detail_method_map.keys(), [],
detail_method_map=detail_method_map)
resource_factory.add_routes(app, '/items/', res)
expected_params = {'id': '5'}
_test_detail_routes(app, client, expected_params)
def _test_list_routes(app, client):
resource, method_map, params, uri_template = app._router.find('/items')
assert hasattr(resource, 'on_get')
assert hasattr(resource, 'on_put')
assert resource.__class__.__name__ == 'TestResourceList'
response = client.simulate_get('/items/')
assert response.content.lower() == b'get list called'
response = client.simulate_put('/items/')
assert response.content.lower() == b'put list called'
def test_list_routes(app, resource_factory, client):
res = resource_creator([], ['GET', 'PUT'])
resource_factory.add_routes(app, '/items/', res)
_test_list_routes(app, client)
def test_list_routes_custom_method_map(app, client):
list_method_map = {
'GET': 'obj_get_list',
'PUT': 'obj_put_list'
}
resource_factory = ResourceFactory(list_method_map=list_method_map)
res = resource_creator([], list_method_map.keys(),
list_method_map=list_method_map)
resource_factory.add_routes(app, '/items/', res)
_test_list_routes(app, client)
def test_generated_resources_has_params(app, resource_factory, client):
    const_params = {
'PARAM_1': '1',
'PARAM_2': '2',
}
hidden_params = {
'__x': 'hidden',
'func': lambda: None
}
    params = dict(const_params)
params.update(dict(hidden_params))
res = resource_creator(['GET'], ['GET'], params=params)
resource_factory.add_routes(app, '/items/', res)
list_resource, _, _, _ = app._router.find('/items')
list_resource_cls = list_resource.__class__
    for key, val in const_params.items():
assert getattr(list_resource_cls, key) == val
for key in hidden_params.keys():
assert not hasattr(list_resource_cls, key) | mit | 248,095,538,347,996,830 | 28.101266 | 77 | 0.644768 | false |
zsommers/bdo_chronicle | bdo_tools/nodes/urls.py | 1 | 1286 | from django.conf.urls import include, url
from django.views.generic import DetailView, ListView, TemplateView
from . import models
kingdoms_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Kingdom), name='detail'),
url(r'^$', ListView.as_view(model=models.Kingdom), name='list'),
]
territories_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Territory), name='detail'),
url(r'^$', ListView.as_view(model=models.Territory), name='list'),
]
nodes_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Node), name='detail'),
url(r'^$', ListView.as_view(model=models.Node), name='list'),
]
properties_patterns = [
url(r'^(?P<pk>[0-9]+)/$', DetailView.as_view(model=models.Property), name='detail'),
url(r'^$', ListView.as_view(model=models.Property), name='list'),
]
app_name = 'nodes'
urlpatterns = [
url(r'^kingdoms/', include(kingdoms_patterns, namespace='kingdoms')),
url(r'^territories/', include(territories_patterns, namespace='territories')),
url(r'^nodes/', include(nodes_patterns, namespace='nodes')),
url(r'^properties/', include(properties_patterns, namespace='properties')),
url(r'^$', TemplateView.as_view(template_name='nodes/main.html'), name='main'),
]
| mit | 9,142,507,298,452,070,000 | 37.969697 | 89 | 0.664075 | false |
travistang/late_fyt | proportional.py | 1 | 1578 | import numpy
import random
import sum_tree
class Experience(object):
def __init__(self, memory_size, batch_size, alpha):
self.tree = sum_tree.SumTree(memory_size)
self.memory_size = memory_size
self.batch_size = batch_size
self.alpha = alpha
def add(self, data, priority):
self.tree.add(data, priority**self.alpha)
def size(self):
return self.tree.filled_size()
def select(self, beta):
if self.tree.filled_size() < self.batch_size:
return None, None, None
out = []
indices = []
weights = []
priorities = []
for _ in range(self.batch_size):
r = random.random()
data, priority, index = self.tree.find(r)
priorities.append(priority)
weights.append((1./self.memory_size/priority)**-beta if priority > 1e-16 else 0)
indices.append(index)
out.append(data)
self.priority_update([index], [0]) # To avoid duplicating
self.priority_update(indices, priorities) # Revert priorities
return out, weights, indices
def priority_update(self, indices, priorities):
for i, p in zip(indices, priorities):
self.tree.val_update(i, p**self.alpha)
def reset_alpha(self, alpha):
self.alpha, old_alpha = alpha, self.alpha
priorities = [self.tree.get_val(i)**-old_alpha for i in range(self.tree.filled_size())]
self.priority_update(range(self.tree.filled_size()), priorities)
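# Minimal usage sketch (illustrative only; the transition tuple and new_priorities below
# are placeholders, not part of this module):
#   mem = Experience(memory_size=10000, batch_size=32, alpha=0.6)
#   mem.add((state, action, reward, next_state), priority=1.0)
#   batch, weights, indices = mem.select(beta=0.4)  # (None, None, None) until batch_size items are stored
#   mem.priority_update(indices, new_priorities)    # e.g. with updated TD-errors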
| mit | 7,641,482,424,519,402,000 | 30.56 | 95 | 0.584918 | false |
jorvis/biocode | gff/convert_genbank_to_gff3.py | 1 | 10340 | #!/usr/bin/env python3
"""
This is a script to convert GenBank flat files to GFF3 format with a specific focus on
initially maintaining as much structural annotation as possible, then expanding into
functional annotation support.
This is not guaranteed to convert all features, but warnings will be printed wherever possible
for features which aren't included.
Currently supported:
Structural features: gene, CDS, mRNA, tRNA, rRNA
Annotations: primary identifiers, gene product name
This is written to handle multi-entry GBK files
Caveats:
- Because the GBK flatfile format doesn't explicitly model parent/child features, this script
links them using the expected format convention of shared /locus_tag entries for each feature
of the gene graph (gene, mRNA, CDS)
- It has only been tested with prokaryotic (non-spliced) genes
Author: Joshua Orvis (jorvis AT gmail)
"""
import argparse
import sys
from collections import defaultdict
from Bio import SeqIO
from biocode import annotation, things, utils
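# Illustrative invocation (file names are placeholders; the options match the argparse
# definitions in main() below; omitting -o writes the GFF3 to STDOUT):
#   ./convert_genbank_to_gff3.py -i input.gbk -o output.gff3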
def main():
parser = argparse.ArgumentParser( description='Convert GenBank flat files to GFF3 format')
## output file to be written
parser.add_argument('-i', '--input_file', type=str, required=True, help='Path to an input GBK file' )
parser.add_argument('-o', '--output_file', type=str, required=False, help='Path to an output GFF file to be created' )
parser.add_argument('--with_fasta', dest='fasta', action='store_true', help='Include the FASTA section with genomic sequence at end of file. (default)' )
parser.add_argument('--no_fasta', dest='fasta', action='store_false' )
parser.set_defaults(fasta=True)
args = parser.parse_args()
## output will either be a file or STDOUT
ofh = sys.stdout
if args.output_file is not None:
ofh = open(args.output_file, 'wt')
ofh.write("##gff-version 3\n")
assemblies = dict()
current_assembly = None
current_gene = None
current_RNA = None
rna_count_by_gene = defaultdict(int)
exon_count_by_RNA = defaultdict(int)
seqs_pending_writes = False
features_skipped_count = 0
# each gb_record is a SeqRecord object
for gb_record in SeqIO.parse(open(args.input_file, "r"), "genbank"):
mol_id = gb_record.name
if mol_id not in assemblies:
assemblies[mol_id] = things.Assembly(id=mol_id)
if len(str(gb_record.seq)) > 0:
seqs_pending_writes = True
assemblies[mol_id].residues = str(gb_record.seq)
assemblies[mol_id].length = len(str(gb_record.seq))
current_assembly = assemblies[mol_id]
# each feat is a SeqFeature object
for feat in gb_record.features:
#print(feat)
fmin = int(feat.location.start)
fmax = int(feat.location.end)
if feat.location.strand == 1:
strand = '+'
elif feat.location.strand == -1:
strand = '-'
else:
raise Exception("ERROR: unstranded feature encountered: {0}".format(feat))
#print("{0} located at {1}-{2} strand:{3}".format( locus_tag, fmin, fmax, strand ) )
if feat.type == 'source':
continue
if feat.type == 'gene':
# print the previous gene (if there is one)
if current_gene is not None:
gene.print_as(fh=ofh, source='GenBank', format='gff3')
locus_tag = feat.qualifiers['locus_tag'][0]
gene = things.Gene(id=locus_tag, locus_tag=locus_tag)
gene.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
current_gene = gene
current_RNA = None
elif feat.type == 'mRNA':
locus_tag = feat.qualifiers['locus_tag'][0]
rna_count_by_gene[locus_tag] += 1
feat_id = "{0}.mRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
mRNA = things.mRNA(id=feat_id, parent=current_gene, locus_tag=locus_tag)
mRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
gene.add_mRNA(mRNA)
current_RNA = mRNA
if feat_id in exon_count_by_RNA:
raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
else:
exon_count_by_RNA[feat_id] = 0
elif feat.type == 'tRNA':
locus_tag = feat.qualifiers['locus_tag'][0]
rna_count_by_gene[locus_tag] += 1
feat_id = "{0}.tRNA.{1}".format(locus_tag, rna_count_by_gene[locus_tag])
if 'product' in feat.qualifiers:
anticodon = feat.qualifiers['product'][0]
else:
anticodon = None
tRNA = things.tRNA(id=feat_id, parent=current_gene, anticodon=anticodon)
tRNA.locate_on(target=current_assembly, fmin=fmin, fmax=fmax, strand=strand)
gene.add_tRNA(tRNA)
current_RNA = tRNA
if feat_id in exon_count_by_RNA:
raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
else:
exon_count_by_RNA[feat_id] = 0
elif feat.type == 'rRNA':
locus_tag = feat.qualifiers['locus_tag'][0]
rna_count_by_gene[locus_tag] += 1
feat_id = "{0}.rRNA.{1}".format(locus_tag, rna_count_by_gene[locus_tag])
if 'product' in feat.qualifiers:
product = feat.qualifiers['product'][0]
else:
product = None
annot = annotation.FunctionalAnnotation(product_name=product)
rRNA = things.rRNA(id=feat_id, parent=current_gene, annotation=annot)
rRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
gene.add_rRNA(rRNA)
current_RNA = rRNA
if feat_id in exon_count_by_RNA:
raise Exception( "ERROR: two different RNAs found with same ID: {0}".format(feat_id) )
else:
exon_count_by_RNA[feat_id] = 0
elif feat.type == 'CDS':
locus_tag = feat.qualifiers['locus_tag'][0]
# If processing a prokaryotic GBK, we'll encounter CDS before mRNA, so we have to
# manually make one
if current_RNA is None:
feat_id = "{0}.mRNA.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
mRNA = things.mRNA(id=feat_id, parent=current_gene)
mRNA.locate_on( target=current_assembly, fmin=fmin, fmax=fmax, strand=strand )
gene.add_mRNA(mRNA)
current_RNA = mRNA
if 'product' in feat.qualifiers:
product = feat.qualifiers['product'][0]
else:
product = None
if 'gene' in feat.qualifiers:
gene_symbol = feat.qualifiers['gene'][0]
else:
gene_symbol = None
annot = annotation.FunctionalAnnotation(product_name=product, gene_symbol=gene_symbol)
if 'db_xref' in feat.qualifiers:
for dbxref in feat.qualifiers['db_xref']:
annot.add_dbxref(dbxref)
polypeptide_id = "{0}.polypeptide.{1}".format( locus_tag, rna_count_by_gene[locus_tag] )
polypeptide = things.Polypeptide(id=polypeptide_id, parent=mRNA, annotation=annot)
mRNA.add_polypeptide(polypeptide)
exon_count_by_RNA[current_RNA.id] += 1
cds_id = "{0}.CDS.{1}".format( current_RNA.id, exon_count_by_RNA[current_RNA.id] )
current_CDS_phase = 0
for loc in feat.location.parts:
subfmin = int(loc.start)
subfmax = int(loc.end)
CDS = things.CDS(id=cds_id, parent=current_RNA)
CDS.locate_on( target=current_assembly, fmin=subfmin, fmax=subfmax, strand=strand, phase=current_CDS_phase )
current_RNA.add_CDS(CDS)
# calculate the starting phase for the next CDS feature (in case there is one)
# 0 + 6 = 0 TTGCAT
# 0 + 7 = 2 TTGCATG
# 1 + 6 = 1 TTGCAT
# 2 + 7 = 1 TTGCATG
# general: 3 - ((length - previous phase) % 3)
current_CDS_phase = 3 - (((subfmax - subfmin) - current_CDS_phase) % 3)
if current_CDS_phase == 3:
current_CDS_phase = 0
exon_id = "{0}.exon.{1}".format( current_RNA.id, exon_count_by_RNA[current_RNA.id] )
exon = things.Exon(id=exon_id, parent=current_RNA)
exon.locate_on( target=current_assembly, fmin=subfmin, fmax=subfmax, strand=strand )
current_RNA.add_exon(exon)
exon_count_by_RNA[current_RNA.id] += 1
else:
print("WARNING: The following feature was skipped:\n{0}".format(feat))
features_skipped_count += 1
# don't forget to do the last gene, if there were any
if current_gene is not None:
gene.print_as(fh=ofh, source='GenBank', format='gff3')
if args.fasta is True:
if seqs_pending_writes is True:
ofh.write("##FASTA\n")
for assembly_id in assemblies:
ofh.write(">{0}\n".format(assembly_id))
ofh.write("{0}\n".format(utils.wrapped_fasta(assemblies[assembly_id].residues)))
if features_skipped_count > 0:
print("Warning: {0} unsupported feature types were skipped".format(features_skipped_count))
if __name__ == '__main__':
main()
| mit | -5,754,837,827,644,957,000 | 40.693548 | 158 | 0.547872 | false |
jespino/coval | coval/international.py | 1 | 3349 | import string
import re
def isbn(isbn, strict=True):
    '''Validation of an ISBN (International Standard Book Number)'''
if not strict:
isbn = isbn.upper()
if isbn[0:4] == 'ISBN':
isbn = isbn[4:]
isbn = isbn.strip().replace("-", "").replace("/", "")
if len(isbn) == 10:
return isbn10(isbn)
elif len(isbn) == 13:
return isbn13(isbn)
else:
return False
# Extracted from Wikipedia's http://en.wikipedia.org/wiki/Isbn page
def isbn10(isbn, strict=True):
    '''Validation of an ISBN (International Standard Book Number) in ISBN-10 format'''
if not strict:
isbn = isbn.upper()
if isbn[0:4] == 'ISBN':
isbn = isbn[4:]
isbn = isbn.strip().replace("-", "").replace("/", "")
    if not re.match(r'^\d{10}$', isbn):
return False
total = sum([int(num)*weight for num, weight in
zip(isbn, reversed(range(1, 11)))])
return total%11==0
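# Worked example (well-known valid ISBN-10 "0306406152"): the weighted sum is
# 0*10 + 3*9 + 0*8 + 6*7 + 4*6 + 0*5 + 6*4 + 1*3 + 5*2 + 2*1 = 132, and 132 % 11 == 0,
# so isbn10("0306406152") returns True.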
# Extracted from Wikipedia's http://en.wikipedia.org/wiki/Isbn page
def isbn13(isbn, strict=True):
    '''Validation of an ISBN (International Standard Book Number) in ISBN-13 format'''
if not strict:
isbn = isbn.upper()
if isbn[0:4] == 'ISBN':
isbn = isbn[4:]
isbn = isbn.strip().replace("-", "").replace("/", "")
    if not re.match(r'^\d{13}$', isbn):
return False
total = sum([int(num)*weight for num, weight in zip(isbn, (1,3)*6)])
    ck = (10 - (total % 10)) % 10  # extra mod 10 so a check digit of 0 validates correctly
return ck == int(isbn[-1])
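# Worked example (well-known valid ISBN-13 "9780306406157"): the first 12 digits weighted
# 1,3,1,3,... sum to 93, so the check digit is (10 - 93 % 10) % 10 = 7, matching the last
# digit, and isbn13("9780306406157") returns True.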
def iban(iban, strict=True):
    '''Validation of an IBAN (International Bank Account Number)'''
country_code_length = {
'AD': 24, 'AE': 23, 'AL': 28, 'AT': 20, 'BA': 20, 'BE': 16, 'BG': 22,
'CH': 21, 'CY': 28, 'CZ': 24, 'DE': 22, 'DK': 18, 'EE': 20, 'ES': 24,
'FR': 27, 'FI': 18, 'GB': 22, 'GE': 22, 'GI': 23, 'GR': 27, 'HR': 21,
'HU': 28, 'IE': 22, 'IL': 23, 'IS': 26, 'IT': 27, 'KW': 30, 'LB': 28,
'LI': 21, 'LT': 20, 'LU': 20, 'LV': 21, 'MC': 27, 'ME': 22, 'MK': 19,
'MR': 27, 'MT': 31, 'MU': 30, 'NL': 18, 'NO': 15, 'PL': 28, 'PT': 25,
'RO': 24, 'RS': 22, 'SA': 24, 'SE': 24, 'SI': 19, 'SK': 24, 'SM': 27,
'TN': 24, 'TR': 26,
}
letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not strict:
iban = iban.strip().replace("-", "").replace("/", "")
iban = iban.upper()
if len(iban)<2 or not iban[0:2] in country_code_length.keys():
return False
if len(iban) != country_code_length[iban[0:2]]:
return False
iban = iban[4:]+iban[0:4]
iban_translated = ''
for char in iban:
if char in letters:
iban_translated += str(letters.index(char)+10)
elif char in '0123456789':
iban_translated += char
else:
return False
return (int(iban_translated) % 97) == 1
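# Illustrative check (uses the commonly cited example IBAN, assumed valid here):
#   iban('GB82WEST12345698765432')  # -> True: length matches 'GB' (22) and the mod-97 test passes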
def banknote_euro(banknote, strict=True):
'''Validation of a Euro banknote id'''
euro_country_codes = 'JKLMNPRSTUVWXYZ'
if not strict:
banknote = banknote.strip().replace("-", "").replace("/", "")
if len(banknote) != 12:
return False
if not banknote[0] in euro_country_codes:
return False
# Convert charater to ascii code
banknote = int(str(ord(banknote[0]))+banknote[1:])
return (int(banknote) % 9) == 0
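# Note on the check above: the leading country letter is replaced by its ASCII code via
# ord(), concatenated with the remaining digits, and the resulting integer must be
# divisible by 9 (equivalently, its digit sum is a multiple of 9).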
| bsd-3-clause | 4,280,021,965,208,855,600 | 30.299065 | 86 | 0.533294 | false |
kennethreitz/pipenv | pipenv/vendor/shellingham/posix/__init__.py | 1 | 2843 | import os
import re
from .._core import SHELL_NAMES, ShellDetectionFailure
from . import proc, ps
def _get_process_mapping():
"""Select a way to obtain process information from the system.
* `/proc` is used if supported.
* The system `ps` utility is used as a fallback option.
"""
for impl in (proc, ps):
try:
mapping = impl.get_process_mapping()
except EnvironmentError:
continue
return mapping
raise ShellDetectionFailure('compatible proc fs or ps utility is required')
def _iter_process_args(mapping, pid, max_depth):
"""Iterator to traverse up the tree, yielding each process's argument list.
"""
for _ in range(max_depth):
try:
proc = mapping[pid]
except KeyError: # We've reached the root process. Give up.
break
        if proc.args: # Presumably the process should always have a name?
yield proc.args
pid = proc.ppid # Go up one level.
def _get_login_shell(proc_cmd):
"""Form shell information from the SHELL environment variable if possible.
"""
login_shell = os.environ.get('SHELL', '')
if login_shell:
proc_cmd = login_shell
else:
proc_cmd = proc_cmd[1:]
return (os.path.basename(proc_cmd).lower(), proc_cmd)
_INTERPRETER_SHELL_NAMES = [
(re.compile(r'^python(\d+(\.\d+)?)?$'), {'xonsh'}),
]
def _get_interpreter_shell(proc_name, proc_args):
"""Get shell invoked via an interpreter.
Some shells are implemented on, and invoked with an interpreter, e.g. xonsh
is commonly executed with an executable Python script. This detects what
script the interpreter is actually running, and check whether that looks
like a shell.
    See sarugaku/shellingham#26 for rationale.
"""
for pattern, shell_names in _INTERPRETER_SHELL_NAMES:
if not pattern.match(proc_name):
continue
for arg in proc_args:
name = os.path.basename(arg).lower()
if os.path.isfile(arg) and name in shell_names:
return (name, arg)
return None
def _get_shell(cmd, *args):
if cmd.startswith('-'): # Login shell! Let's use this.
return _get_login_shell(cmd)
name = os.path.basename(cmd).lower()
if name in SHELL_NAMES: # Command looks like a shell.
return (name, cmd)
shell = _get_interpreter_shell(name, args)
if shell:
return shell
return None
def get_shell(pid=None, max_depth=6):
"""Get the shell that the supplied pid or os.getpid() is running in.
"""
pid = str(pid or os.getpid())
mapping = _get_process_mapping()
for proc_args in _iter_process_args(mapping, pid, max_depth):
shell = _get_shell(*proc_args)
if shell:
return shell
return None
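# Illustrative outcome (environment dependent): get_shell() typically returns a tuple
# such as ('bash', '/bin/bash'), or None if no known shell is found within max_depth
# ancestor processes.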
| mit | 1,781,591,116,275,053,000 | 29.569892 | 79 | 0.623285 | false |
MC911-MV-1s2016/lya-compiler-python | lyacompiler/lya_debug_source.py | 1 | 11212 | lya_source_dcl = """
dcl dcl1 int;
dcl dcl2, dcl3, dcl4, dcl5 char;
dcl dcl6, dcl7 int, dcl8 bool;
dcl dcl9 int = 5;
dcl dcl10, dcl11 int = 6;
dcl dcl12 int, dcl13, dcl14 int = 10;
dcl dcl15 int (2:5);
dcl dcl16 char (0:10);
dcl dcl17 bool(10:11);
dcl dcl18 dcl17 (1:2);
dcl dcl19 int (0:1) (1:2);
"""
lya_source_syn = """
syn syn1 = 1;
syn syn2, syn3, syn4 = 3;
syn syn5 int = 2;
syn syn6, syn7 int = 3;
syn syn8 = 10, syn9 = 12;
syn syn10, syn11 int = 13, syn12 = 20;
"""
lya_source_type = """
type type1 = int;
type type2 = char;
type type3 = bool;
type type4 = type3;
type type7, type8 = int;
type type9, type10, type11 = char;
type type12 = bool, type13 = type9;
type type14 = int, type15, type16 = char, type17, type18, type19 = char;
type type20 = ref int;
type type21 = ref ref type20;
type type22 = chars[20];
type type23 = array [int] char;
type type24 = array[1:2] bool;
type type25 = array[int, bool, char, mode1(1:4), int(3:5), 1:5] bool;
"""
lya_source_composite_mode = """
dcl cms1 chars [10];
dcl cma1 array [int] bool;
dcl cma2 array [bool, int] char;
"""
lya_source_procedure1 = """
power: proc (n int, r int) returns (int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure2 = """
power: proc (n int, r int) returns (int);
end;
"""
lya_source_procedure3 = """
power: proc (n int, r int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure4 = """
power: proc () returns (int);
dcl c int;
type t = bool;
end;
"""
lya_source_procedure5 = """
power: proc (n int, r int);
end;
"""
lya_source_procedure6 = """
power: proc () returns (int);
end;
"""
lya_source_procedure7 = """
power: proc ();
dcl c int;
end;
"""
lya_source_procedure8 = """
power: proc ();
end;
"""
lya_source_procedure9 = """
power: proc (n int loc, r, z int) returns (int loc);
dcl c, d int = 1;
type t = bool;
end;
"""
lya_source_if1 = """
label: if 1+2 then
exit label1;
else
exit label2;
fi;
"""
lya_source_if2 = """
if 1+2 then
exit label1;
exit label2;
fi;
"""
lya_source_if3 = """
if 1+2 then
else
exit label2;
exit label3;
fi;
"""
lya_source_if4 = """
if 1+2 then
else
fi;
"""
lya_source_if5 = """
if 1+2 then
exit label1;
elsif 1+2 then
exit label2;
exit label22;
else
exit lable3;
fi;
"""
lya_source_if6 = """
if 1+2 then
exit label1;
elsif 1+2 then
exit label2;
exit label22;
fi;
"""
lya_source_if7 = """
if 1+2 then
if 1+3 then
exit label1;
fi;
elsif 1+2 then
exit label2;
if 2+5 then
else
exit label22;
fi;
else
if 2+5 then
exit a1;
elsif 1+2 then
exit label22;
fi;
fi;
"""
lya_source_action1 = """
label1: ac1 = 10 + 10;
ac2 += 2;
ac3 -= 10;
ac4 *= 55;
ac5 /= 1;
ac5 %= 20;
ac6 &= 2;
"""
lya_source_expression = """
dcl var1 int=3+5-7*7/9%3;
dcl var2 int = 2 in 3;
dcl var3 bool = 5 && 3 || 1 == 2 & 2;
dcl var4 bool = if 2 then 3 else 5 fi;
dcl var2 int = var1 + 3;
"""
lya_source_action2 = """
exit label1;
result 1 + 2;
return;
return 2 + 1;
"""
lya_source_call1 = """
function();
function(1);
function(1, 2);
function(1+2, 2);
function(1,2,3/2);
"""
lya_source_call2 = """
num(1);
pred();
succ(1,2);
upper(1/2);
lower(2/3);
length();
read(100);
print(var2+2);
"""
lya_source_do1 = """
dcl var int = 3;
do od;
do var = 2; od;
do while 1; od;
do while 3; var = 32; od;
"""
lya_source_do2 = """
do for counter in int; od;
do for counter in bool; var3 = 12; od;
do for counter down in char; od;
do for counter in int while 3; var = 32; od;
do for counter = 3 to 8; od;
do for counter = 3 down to 8; od;
do for counter = 3 by 5 to 8; od;
do for counter = 3 by 5 down to 8; od;
"""
lya_source_do3 = """
dcl var int = 3;
do od;
do var = 2; od;
do while var; od;
do while 3; var = 32; od;
"""
test2_source = """dcl m int = 2, n int = 3;
p: proc (x int);
dcl s int;
s = m * x;
print("s = ", s);
end;
p(n);
print(m);"""
test3_source = """dcl m int = 2, n int = 3;
p: proc (x, y int, b bool) returns (int);
dcl s int = x;
if b then
s += y;
result s;
else
result y;
fi;
end;
dcl b bool;
read (b);
print (p(m, n, b));"""
test4_source = """dcl i int, b bool = true;
x:
do while b;
read (i);
if i <= 0 then
exit x;
fi;
print (i*i);
od;
print (0);"""
test5_source = """dcl i, soma int;
soma = 0;
do for i=1 to 10;
soma += i;
od;
print (soma);
"""
test6_source = """dcl i int;
dcl soma int = 0, b bool = true;
do for i=1 to 10 while b;
soma += i;
if soma > 100 then
b = false;
fi;
od;
print (soma);"""
test7_source = """dcl i,j int, r ref int;
p: proc(x int, y ref int) returns (int);
dcl b bool;
read(b);
if b then
y = -> i;
result y->;
else
y = r;
result r->;
fi;
end;
read(i);
r = -> i;
print(p(i,->j));"""
test8_source = """dcl i int, j,k int = 2;
p: proc(x int, y int loc) returns (int loc);
dcl z int = y;
y = x;
result k;
print(z); /* print 2 */
end;
i = p(3,j);
print(i, j); /* print 2,3 */"""
test9_source = """dcl a array[3:10] int;
dcl i,j int;
read(j);
a[3]=2*j;
do
for i = 4 to 10;
a[i] = 5+i;
od;
print(a[j]);"""
test10_source = """dcl x, y int;
p: proc (b bool) returns (int loc);
if b then
result x;
else
result y;
fi;
end;
dcl b bool = false;
p(b) = 20;
p(true) = 10;
print(x, y); // display 10, 20
"""
test11_source = """type vector = array[1:10] int;
dcl v vector, i int;
sum: proc (v vector) returns (int);
dcl s, i int;
i = 1;
s = 0;
do
while i<=10;
s = s + v[i];
i += 1;
od;
return s;
end;
do
for i = 1 to 10;
read(v[i]);
od;
print(sum(v));"""
syn_test_source = """syn sy1 = 20;
syn sy6 = sy1;
syn sy2 char = 'c';
syn sy3 bool = true;
syn sy4 int = 1 + sy1;"""
dcl_op_source = """dcl var1 int=3+5-7*7/9%3; dcl var2 int = 2 in 3;"""
dcl_op_source2 = """dcl var2, varx char;\ndcl var3, var4 int = 10;\ndcl var5 = 10 + 5 * (10 - 20);"""
test_rel_exp_source = """dcl m bool = false, n bool = false;
p: proc (x bool);
dcl s bool;
s = m >= x;
end;
p(n);"""
test_unary_op_source = """dcl m int = 2, n int = 3;
p: proc (x int);
dcl s bool;
s = !true;
end;
p(n);"""
test_elsif_source = """dcl m int = 2, n int = 3, y, s int, b bool = true;
if b then
s += y;
elsif b then
s = y;
else
s = 3;
fi;
print (s);"""
testret_source = """dcl m int = 2, n int = 3;
p: proc (x, y int, b bool) returns (int);
dcl s int = x;
if b then
s += y;
return s;
else
result y;
fi;
end;
dcl b bool = true;
read (b);
print (p(m, n, b));"""
typedef_source = """type my_int = int;
dcl x my_int = 2;
type vector = array[1:10] int;
dcl v vector;
type p_int = ref int;
dcl pi p_int;
print(x);
print(v);
print(pi);
type r_my_int = ref my_int;
dcl uou r_my_int;
print(uou);"""
printtest_source = """
dcl c chars[10] = "BANANA";
print("Oi", "tudo bem?");
print(c);"""
# The only variable exported from this module.
__all__ = ['lya_debug_source']
lya_gcd = """
gcd: proc (x int, y int) returns (int);
dcl g int;
g = y;
do
while x > 0;
g = x;
x = y - (y/x) * x;
y = g;
od;
return g;
end;
dcl a, b int;
print("give-me two integers separated by space:");
read(a);
read(b);
print ("GCD of ", a, b, " is ", gcd(a,b));"""
lya_gen_primes = """dcl n1, n2, i, j int, flag bool;
print("Enter 2 numbers (intervals) separated by space: ");
read(n1);
read(n2);
print("Prime numbers between ", n1, " and ", n2, " are:\n");
do
for i = n1 to n2;
flag = true;
loop: do
for j = 2 to i/2;
if i % j == 0 then
flag = false;
exit loop;
fi;
od;
if flag then
print(i, " ");
fi;
od;
"""
lya_bubble_sort = """dcl v array[0:100] int;
dcl n, c, d, swap int;
print("Enter number of elements: ");
read(n);
print("Enter ", n, " integers\n");
do
for c = 0 to n-1;
read(v[c]);
od;
do
for c = 0 to n-2;
do
for d = 0 to n-c-2;
// For decreasing order use "<"
if v[d] > v[d+1] then
swap = v[d];
v[d] = v[d+1];
v[d+1] = swap;
fi;
od;
od;
print("Sorted list in ascending order:\n");
do
for c = 0 to n-1;
print(v[c], " ");
od;
"""
lya_palindrome = """dcl n,t int, reverse int = 0;
print("Enter a number: ");
read(n);
t = n;
do
while t != 0;
reverse = reverse * 10;
reverse = reverse + t % 10;
t = t / 10;
od;
if n == reverse then
print(n, " is a palindrome number.\n");
else
print(n, " is not a palindrome number.\n");
fi;"""
lya_ref_example = """swapByRef: proc(x ref int, y ref int);
dcl t int = x->;
x-> = y->;
y-> = t;
end;
dcl i int = 10, j int = 20;
// declaring reference to int
dcl r ref int = ->i;
swapByRef( r, ->j );
print(i, j);"""
lya_fibo = """fibo: proc (n int, g int loc);
dcl h int;
if n < 0 then
print(g);
return;
else
h = g; fibo(n-1, h);
g = h; fibo(n-2, g);
fi;
print(n,g);
end;
dcl k int = 0;
fibo(3,k);
//fibo(-1,k);
"""
lya_armstrong = """power: proc (n int, r int) returns (int);
dcl c int, p int = 1;
do
for c = 1 to r;
p = p*n;
od;
return p;
end;
dcl n int, sum int = 0;
dcl temp, remainder int, digits int = 0;
print("Input an integer: ");
read(n);
temp = n;
do
while temp != 0;
digits += 1;
temp = temp / 10;
od;
temp = n;
do
while temp != 0;
remainder = temp % 10;
sum = sum + power(remainder, digits);
temp = temp / 10;
od;
if n == sum then
print(n, " is an Armstrong number.\n");
else
print(n, " is not an Armstrong number.\n");
fi;"""
lya_fat = """
fat: proc (n int) returns (int);
if n==0 then
return 1;
else
return n * fat (n-1);
fi;
end;
dcl x int;
print("give-me a positive integer:");
read(x);
print("fatorial of ", x, " = ", fat(x));"""
lya_int_stack = """syn top int = 10;
type stack = array [1:top+1] int;
push: proc (s stack loc, elem int);
if s[top+1] == top then
print("stack is full");
else
s[top+1] += 1;
s[s[top+1]] = elem;
fi;
end;
pop: proc (s stack loc) returns (int);
if s[top+1] == 0 then
print("empty stack");
result 0;
else
result s[s[top+1]];
s[top+1] -= 1;
fi;
end;
init: proc (s stack loc);
s[top+1] = 0;
end;
dcl q stack, v1, v2 int;
init(q);
read(v1, v2);
push(q,v1);
push(q,v2);
print(pop(q) + pop(q));"""
lya_debug_source = lya_bubble_sort
| bsd-3-clause | -97,219,455,865,958,500 | 16.382946 | 101 | 0.505619 | false |
Vytax/plugin.video.lrt.lt | liblrt.py | 1 | 11756 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import urllib
import urllib2
import sys
import simplejson as json
from StringIO import StringIO
import gzip
from HTMLParser import HTMLParser
htmlParser = HTMLParser()
LRT_URL = 'http://www.lrt.lt/'
VIDEOS_COUNT_PER_PAGE = 100
LATEST_NEWS_URL = LRT_URL + 'data-service/module/mediaNews/callback/top_media/startRow/%d/limit/' + str(VIDEOS_COUNT_PER_PAGE)
LATEST_VIDEOS_URL = LRT_URL + 'data-service/module/media/callback/latest_media/startRow/%d/order/dateZ/limit/' + str(VIDEOS_COUNT_PER_PAGE)
POPULAR_VIDEOS_URL = LRT_URL + 'data-service/module/media/callback/popular_media/startRow/%d/order/viewsZ/date/7/limit/' + str(VIDEOS_COUNT_PER_PAGE)
TVSHOW_VIDEOS_URL = LRT_URL + 'data-service/module/media/callback/popular_media/program/%d/startRow/%d/limit/' + str(VIDEOS_COUNT_PER_PAGE)
SEARCH_VIDEOS_URL = LRT_URL + 'data-service/module/media/callback/popular_media/order/dateZ/content/%s/startRow/%d/limit/' + str(VIDEOS_COUNT_PER_PAGE)
PLAYLISTS_URL = LRT_URL + 'data-service/module/play/callback/playlists_%d/category/%d/enable/true/count/0/limit/' + str(VIDEOS_COUNT_PER_PAGE)
PLAYLIST_URL = LRT_URL
PLAYLISTSGROUPS_URL = LRT_URL + 'mediateka/grojarasciai'
KIDS_VIDEOS_URL = LRT_URL + 'data-service/module/kids/callback/load_media/category/%s/age/%s/startRow/%d/limit/' + str(VIDEOS_COUNT_PER_PAGE)
reload(sys)
sys.setdefaultencoding('utf8')
import htmlentitydefs
class HTMLTextExtractor(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.result = [ ]
def handle_data(self, d):
self.result.append(d)
def handle_charref(self, number):
codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
self.result.append(unichr(codepoint))
def handle_entityref(self, name):
codepoint = htmlentitydefs.name2codepoint[name]
self.result.append(unichr(codepoint))
def get_text(self):
return u''.join(self.result)
def html_to_text(html):
s = HTMLTextExtractor()
s.feed(html)
return s.get_text()
def getURL(url):
request = urllib2.Request(url)
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
return f.read()
return response.read()
def getLiveURLs():
html = getURL(LRT_URL + 'pradzia');
aside = re.findall('<aside class="right-off-canvas-menu">(.*?)</aside>', html, re.DOTALL)
if not aside:
print "Error: getLiveURLs"
return
li_items = re.findall('<li.*?>(.*?)</li>', aside[0], re.DOTALL)
liveURLs = []
for li_item in li_items:
liveURL = {}
link = re.findall('<a class="now-channel-link.*?" href="(.*?)">', li_item, re.DOTALL)
if not link:
continue
link = link[0]
if link.startswith('http://'):
liveURL['url'] = link
else:
liveURL['url'] = LRT_URL + link
now = re.findall('<span class="now">(.*?)</span>', li_item, re.DOTALL)
if now:
liveURL['nowPlaying'] = html_to_text(now[0]).strip()
name = re.findall('<span class="channelName">(.*?)</span>', li_item, re.DOTALL)
if name:
                        liveURL['name'] = htmlParser.unescape(name[0]).replace('·','').strip()
else:
if now:
liveURL['name'] = liveURL['nowPlaying']
else:
liveURL['name'] = 'LRT'
contentType = re.findall('<span class="(video|audio)"></span>', li_item, re.DOTALL)
if contentType:
liveURL['contentType'] = contentType[0]
else:
liveURL['contentType'] = 'video'
if liveURL:
liveURLs.append(liveURL)
return liveURLs
def getVideoStreamURL(url):
result = {}
html = getURL(url)
html = html.replace('"+(md.device == \'mobile\'?\'640/360\':\'870/490\')', '/500/280/size16x9"')
source = re.findall('sources: [\[\s]*\{([^\}]*)\}', html, re.DOTALL)
if not source:
return result
source = re.sub(re.compile('\n[\s]*(\/\/[^\n]*)', re.DOTALL), '', source[0])
url_hash = url.split('#', 1)
if len(url_hash) == 2:
url_hash = url_hash[1]
else:
url_hash = '';
source = re.sub(re.compile('("[\+\s]*location.hash.substring\(1\))', re.DOTALL), url_hash + '"', source)
source = source.replace('"file"', 'file')
mfile = re.findall('file[:\s]*"(.*?)"' ,source, re.DOTALL)
result['url'] = mfile[0].replace('\/','/')
image = re.findall('image: "(.*?)"', html, re.DOTALL)
if image:
result['image'] = LRT_URL + image[0]
return result
def str_duration_to_int(duration):
if not duration:
return 0
parts = duration.split(':')
if not parts:
return 0
return int(parts[0])*3600+int(parts[1])*60+int(parts[2])
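# Example: str_duration_to_int("01:02:03") == 1*3600 + 2*60 + 3 == 3723 (expects "HH:MM:SS").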
def getLatestNews(startRow=0):
json = getLRTJSON(LATEST_NEWS_URL % startRow)
result = {}
result['startRow'] = json['startRow']
result['endRow'] = json['endRow']
result['totalRows'] = json['totalRows']
dataList = []
for data in json['data']:
d = {}
d['title'] = data['title']
if data['content']:
d['plot'] = data['content'].replace('\t','').strip()
else:
d['plot'] = ''
if data['category']:
d['genre'] = data['category']
else:
d['genre'] = ''
if data['date']:
d['aired'] = data['date']
if 'length' in data:
d['duration'] = str_duration_to_int(data['length'])
d['thumbnailURL'] = LRT_URL + 'mimages/News/images/' + str(data['newsId']) + '/500/280/size16x9'
d['url'] = LRT_URL + 'mediateka/irasas/' + str(data['id']) + '/lrt#wowzaplaystart=' + str(data['start']) + '&wowzaplayduration=' + str(data['end'])
dataList.append(d)
result['data'] = dataList
return result
def getLatestVideos(startRow=0):
json = getLRTJSON(LATEST_VIDEOS_URL % startRow)
return parseStandartJSON(json)
def getPopularVideos(startRow=0):
json = getLRTJSON(POPULAR_VIDEOS_URL % startRow)
return parseStandartJSON(json)
def getTVShowVideos(mediaId, startRow=0):
json = getLRTJSON(TVSHOW_VIDEOS_URL % (mediaId, startRow))
return parseStandartJSON(json)
def getSearchVideos(key, startRow=0):
json = getLRTJSON(SEARCH_VIDEOS_URL % (urllib.quote(key.encode("utf-8")), startRow))
return parseStandartJSON(json)
def parseStandartJSON(json):
result = {}
result['startRow'] = json['startRow']
result['endRow'] = json['endRow']
result['totalRows'] = json['totalRows']
dataList = []
for data in json['data']:
d = {}
d['title'] = data['title']
if data['content']:
d['plot'] = data['content'].replace('\t','').strip()
else:
d['plot'] = ''
if data['category']:
d['genre'] = data['category']
else:
d['genre'] = ''
if data['date']:
d['aired'] = data['date']
#d['duration'] = str_duration_to_int(data['end']) - str_duration_to_int(data['start'])
d['thumbnailURL'] = LRT_URL + '/mimages/Media/items/' + str(data['id']) + '/500/280'
d['url'] = LRT_URL + 'mediateka/irasas/' + str(data['id']) + '/lrt'
dataList.append(d)
result['data'] = dataList
return result
def getLRTJSON(url):
jsonData = getURL(url)
response = json.loads(jsonData)['response']
if not response:
return False
data = response['data']
if not data:
return False
return response
def getTVShowsList():
html = getURL(LRT_URL + 'mediateka/irasai')
tvList = []
select = re.findall('<select id="show"[^>]*>(.*?)</select>', html, re.DOTALL)
items = re.findall('<option value="(\d{2,10})">(.*?)</option>', select[0], re.DOTALL)
for item in items:
show = {'id': item[0], 'title': item[1]}
tvList.append(show)
return tvList
def getPlaylistsGroups():
html = getURL(PLAYLISTSGROUPS_URL)
items = re.findall('<div class="blockTop blockTopSimple beforefilter">(.*?)</div>', html, re.DOTALL)
tvList = []
for i, item in enumerate(items):
tvList.append({'id': i+1, 'title': item })
return tvList
def getPlaylists(mediaId, startRow=0):
json = getLRTJSON(PLAYLISTS_URL % (mediaId, mediaId))
if not json:
return []
tvList = []
for item in json['data']:
tv = {}
tv['id'] = item['id']
tv['title'] = item['title']
tv['date'] = item['date']
tv['thumbnailURL'] = LRT_URL + 'mimages/PlayList/items/' + str(item['id']) + '/500/280'
tvList.append(tv)
return tvList
def getPlaylist(mediaId):
html = getURL(LRT_URL + 'mediateka/grojarasciai/id/' + str(mediaId))
items = re.findall('<div class="playlist-scroll">.*?</div>', html, re.DOTALL)
if not items:
return []
tvList = []
items = re.findall('<img class="pl-rec-img" src="http://www.lrt.lt/mimages/Media/items/(\d+)/240/135/" alt="([^"]*)"/>', items[0], re.DOTALL)
for i, title in items:
tv = {}
tv['title'] = title
tv['url'] = LRT_URL + 'mediateka/irasas/' + i
tv['thumbnailURL'] = LRT_URL + 'mimages/Media/items/' + i + '/500/280'
tv['plot'] = ''
tv['genre'] = ''
tvList.append(tv)
return {'data': tvList, 'startRow': 1, 'totalRows': 1}
def getKidsAgeGroups():
html = getURL(LRT_URL + 'vaikams')
items = re.findall('<a class="[^"]*" href="http://www.lrt.lt/vaikams/([^"]*)"><br>([^<]*)</a>', html, re.DOTALL)
if not items:
return []
tvList = []
for i, item in enumerate(items):
tv = {}
tv['title'] = item[1]
tv['id'] = '%d:%s' % (i+1, item[0])
tvList.append(tv)
return tvList
def getKidsCategory(age, cat=None):
age = age.split(':')
url = LRT_URL + 'vaikams/' + age[1]
if cat:
cat = str(cat)
url = url + '/' + cat
html = getURL(url)
jsonData = re.findall('GLOBAL\.kidsCategories = (\{.*?)</script>', html, re.DOTALL)
if not jsonData:
return None
data = json.loads(jsonData[0])
parents = []
for i in data.keys():
v = data[i]
if v['parent']:
parents.append(v['parent'])
items = []
for i in data.keys():
v = data[i]
if v['parent'] == cat and age[0] in v['age']:
tv = {}
tv['title'] = v['name']
tv['id'] = int(v['id'])
tv['thumbnailURL'] = LRT_URL + v['image']
if v['id'] in parents:
tv['type'] = 'cat'
else:
tv['type'] = 'list'
items.append(tv)
return items
def getKidsVideoList(age, cat, startRow=0):
if not cat or not age:
return None
age = age.split(':')
jsonData = getLRTJSON(KIDS_VIDEOS_URL % (cat, age[0], startRow))
if not jsonData:
return None
result = {}
result['startRow'] = jsonData['startRow']
result['endRow'] = jsonData['endRow']
result['totalRows'] = jsonData['totalRows']
dataList = []
for data in jsonData['data']:
d = {}
d['title'] = data['title']
if data['content']:
d['plot'] = htmlParser.unescape(data['content']).replace('\t','').strip()
else:
d['plot'] = ''
if data['date']:
d['aired'] = data['date']
d['genre'] = ''
dataType = int(data['type'])
if dataType == 3:
img = data['image']
yid = re.findall('http:\/\/img\.youtube\.com\/vi\/([^\/]*)\/', img)
if yid:
yid = yid[0]
else:
continue
d['type'] = 'youtube'
d['youtubeID'] = yid
d['thumbnailURL'] = img
elif dataType == 4:
d['thumbnailURL'] = LRT_URL + data['image']
d['url'] = LRT_URL + 'vaikams/%s/%s/%s' % (age[1], cat, data['id'])
else:
continue
dataList.append(d)
result['data'] = dataList
return result
| gpl-2.0 | 9,193,241,455,403,997,000 | 24.171306 | 151 | 0.583581 | false |
filippobrizzi/soma | graph_sched/graphCreator.py | 1 | 5395 | import sys
import pargraph as par
import copy
import schedule as sched
import profiler as pro
import time
import multiprocessing
import itertools
import random
import threading
""" Usage: call with <pragma_xml_file> <executable_name> <profiling_iterations> <output: True/False> <execution_time> <deadline> <parallel|sequential> """
if __name__ == "__main__":
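    # Example invocation (illustrative file and argument values; the order follows the
    # sys.argv reads below):
    #   python graphCreator.py pragmas.xml ./profiled_program 5 True 10.0 2.5 parallel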
pragma_xml = sys.argv[1]
executable = sys.argv[2]
count = int(sys.argv[3])
output = sys.argv[4]
execution_time = float(sys.argv[5])
deadline = float(sys.argv[6])
multi = sys.argv[7]
    #runs the executable count times and aggregates the information in executable_profile.xml. The single profile outputs are saved as profile+iter.xml
profile_xml = pro.profileCreator(count, executable)
    #returns the nested dot graphs in code style (one for each function)
visual_nested_graphs = par.getNesGraph(pragma_xml, profile_xml)
#returns the graphs to be visualized and the object graphs in flow style (one for each function)
(visual_flow_graphs, flow_graphs) = par.getParalGraph(pragma_xml, profile_xml)
i = 0
for g in visual_nested_graphs:
g.write_pdf('graphs/%s_code.pdf'%flow_graphs[i].type)
g.write_dot('graphs/%s_code.dot'%flow_graphs[i].type)
i += 1
i = 0
for g in visual_flow_graphs:
g.write_pdf('graphs/%s_flow.pdf'%flow_graphs[i].type)
g.write_dot('graphs/%s_flow.dot'%flow_graphs[i].type)
i += 1
#creates the flow type graph --> flow.xml
par.dump_graphs(flow_graphs)
    #adding the profiling information to the original xml --> code.xml
pro.add_profile_xml(profile_xml, pragma_xml)
#creating the total graph with the call-tree
func_graph = par.create_complete_graph(visual_flow_graphs, profile_xml)
#creating the graphs with the function calls
func_graph.write_pdf('graphs/function_graphs.pdf')
func_graph.write_dot('graphs/function_graphs.dot')
#creating the expanded graph where the functions are inserted in the flow graph
exp_flows = copy.deepcopy(flow_graphs)
par.explode_graph(exp_flows)
main_flow = sched.get_main(exp_flows)
#creating a generator for the expanded graph
gen = sched.generate_task(main_flow)
#creating a new generator for the expanded graph
sched.make_white(main_flow)
#getting the number of physical cores of the machine profiled
max_flows = sched.get_core_num(profile_xml)
max_flows = 4
#getting cores of the actual machine, but the problem is multithreading
cores = multiprocessing.cpu_count()
if cores == 1:
cores = 2
#initializing all the lists for the parallel scheduling algorithm
tasks_list = []
task_list = []
flows_list = []
optimal_flow_list = []
p_list = []
queue_list = []
results = []
num_tasks = 0
#getting the number of tasks in the expanded graph and creating a list of task
for task in gen:
task_list.append(task)
num_tasks += 1
if output == 'True':
sched.make_white(main_flow)
par.scanGraph(main_flow)
    #starting the parallel or sequential search of the best solution with a timing constraint
if multi == 'parallel':
for core in range(cores):
tmp = []
optimal_flow_list.append(tmp)
tmp_2 = []
flows_list.append(tmp_2)
random.shuffle(task_list)
tasks_list.append(copy.deepcopy(task_list))
q = sched.Queue()
queue_list.append(q)
p_list.append(multiprocessing.Process(target = sched.get_optimal_flow, args = (flows_list[core], tasks_list[core], 0, optimal_flow_list[core], num_tasks, max_flows, execution_time, queue_list[core], )))
print "starting core: ",core
p_list[core].start()
#getting the results from the processes
for queue in queue_list:
t = queue.q.get()
results.append(t)
#joining all the processes
i = 0
for p in p_list:
p.join()
print "core ", i, " joined"
i += 1
#getting the best result
optimal_flow = results[0]
best = 0
for i in range(len(results)):
print "result:"
for flow in results[i]:
flow.dump()
if sched.get_cost(results[i]) < sched.get_cost(optimal_flow):
best = i
optimal_flow = results[best]
else:
optimal_flow = []
flow_list = []
execution_time += time.clock()
print "searching best schedule"
sched.get_optimal_flow_single(flow_list, task_list, 0, optimal_flow, num_tasks, max_flows, execution_time )
#printing the best result
print "solution:"
for flow in optimal_flow:
flow.dump("\t")
print "\ttime:",flow.time
#substitutes "for tasks" with splitted versions if present in the optimal flows
par.add_new_tasks(optimal_flow, main_flow)
sched.make_white(main_flow)
gen_ = sched.generate_task(main_flow)
t_list = []
for t in gen_:
t_list.append(t)
"""
print t.type," @ ", t.start_line, " has parents:"
for p in t.parent:
print "\t ",p.type," @ ", p.start_line
print "and children:"
for c in t.children:
print "\t ",c.type," @ ", c.start_line
print
"""
    #adds IDs to all the tasks to retrieve the flow to which they belong
par.add_flow_id(optimal_flow, t_list)
#sets arrival times and deadlines using a modified version of the chetto algorithm
sched.chetto(main_flow, deadline, optimal_flow)
#checks if the schedule is feasible and in case creates the schedule file
if sched.check_schedule(main_flow):
sched.create_schedule(main_flow, len(optimal_flow))
sched.make_white(main_flow)
#sched.print_schedule(main_flow)
else:
print "tasks not schedulable, try with more search time"
#prints extended info of the entire pragma graph
| gpl-3.0 | -2,984,676,818,941,451,300 | 27.544974 | 206 | 0.704912 | false |
jkyeung/XlsxWriter | xlsxwriter/test/comparison/test_types07.py | 1 | 2093 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'types07.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml', '[Content_Types].xml', 'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_write_nan_and_inf(self):
"""Test writing special numbers."""
workbook = Workbook(self.got_filename, {'nan_inf_to_errors': True})
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, float('nan'))
worksheet.write(1, 0, float('inf'))
worksheet.write(2, 0, float('-inf'))
workbook.close()
self.assertExcelEqual()
def test_write_nan_and_inf_write_number(self):
"""Test writing special numbers."""
workbook = Workbook(self.got_filename, {'nan_inf_to_errors': True})
worksheet = workbook.add_worksheet()
worksheet.write_number(0, 0, float('nan'))
worksheet.write_number(1, 0, float('inf'))
worksheet.write_number(2, 0, float('-inf'))
workbook.close()
self.assertExcelEqual()
def test_write_nan_and_inf_write_as_string(self):
"""Test writing special numbers."""
workbook = Workbook(self.got_filename, {'nan_inf_to_errors': True,
'strings_to_numbers': True})
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, 'nan')
worksheet.write(1, 0, 'inf')
worksheet.write(2, 0, '-inf')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -7,666,676,698,832,765,000 | 28.478873 | 101 | 0.580029 | false |
Byron/pgit | src/python/pgit/tests/cmd/test_submodule.py | 1 | 1935 | #-*-coding:utf-8-*-
"""
@package pgit.tests.cmd.test_submodule
@brief tests for pgit.cmd.submodule
@author Sebastian Thiel
@copyright [GNU Lesser General Public License](https://www.gnu.org/licenses/lgpl.html)
"""
__all__ = []
from bcmd import InputError
from ..lib import with_application
from pgit.tests.lib import *
from pgit.cmd.submodule import *
class TestSubmoduleCommand(TestCmdBase):
subcommand_name = 'submodule'
@with_application
def test_base(self):
        # in this test we are quite trusting and just call all known command args
        # and try to trigger arg-based exceptions. The underlying system
        # was tested in detail, so it's really just about running the command's code
# QUERY
#######
# invalid command raises
# self.failUnlessRaises(str, psm, ['somecommand'])
psm = self.cmd
# simple query - we have no submodules
out = psm('query')
# assert not out
# UPDATE
########
# invalid filter raises early
assert psm(SubmoduleCommand.OP_UPDATE, "doesntexist")[0] == SubmoduleCommand.ARGUMENT_ERROR
# dry-run does nothing
psm(SubmoduleCommand.OP_UPDATE, '-n')
# updates all without anything else
psm(SubmoduleCommand.OP_UPDATE)
# only update one, none-recursively, to the latest revision
psm(SubmoduleCommand.OP_UPDATE, '--non-recursive', '-l', 'gitpython')
# ADD
#####
# too many or too few args
assert psm(SubmoduleCommand.OP_ADD, "one")[0] == SubmoduleCommand.ARGUMENT_ERROR
assert psm(SubmoduleCommand.OP_ADD, ['arg']*4)[0] == SubmoduleCommand.ARGUMENT_ERROR
# all the write-tests don't actually work as this repository has no submodules anymore
# Also it doesn't make much sense to test the argparser, as it is working just fine.
| lgpl-3.0 | 508,672,822,010,695,230 | 31.79661 | 99 | 0.633592 | false |
DimaWoogy/convex_hull_bst | charts/run.py | 1 | 4349 | from matplotlib import pyplot as plt
from matplotlib.font_manager import FontProperties
from subprocess import call
import random
import sys
import math
from scipy.spatial import ConvexHull
from shapely import geometry
font = FontProperties()
font.set_family('Times New Roman')
font.set_size(12)
def generate_points_on_circle(size):
for i in range(size):
angle = random.uniform(0, 2 * math.pi)
yield (math.cos(angle), math.sin(angle))
def generate_points_in_circle(size):
for i in range(size):
angle = random.uniform(0, 2 * math.pi)
radius = random.random()
yield (radius * math.cos(angle), radius * math.sin(angle))
def generate_points_with_normal_dist(size):
return [(random.gauss(0, 1), random.gauss(0, 1)) for i in range(size)]
def triangle_area(triangle):
def distance(p1, p2):
return math.hypot(p1[0] - p2[0], p1[1] - p2[1])
a, b, c = triangle
first = distance(a, b)
second = distance(b, c)
third = distance(c, a)
p = 0.5 * (first + second + third)
return math.sqrt(p * (p - first) * (p - second) * (p - third))
def triangles_from_hull(points):
p = [points[i] for i in ConvexHull(points).vertices]
t = [(p[0], p[i], p[i + 1]) for i in range(1, len(p) - 1)]
w = [triangle_area(x) for x in t]
return t, w
def random_point_in_triangle(triangle):
a, b, c = triangle
r1, r2 = random.random(), random.random()
x = (1 - math.sqrt(r1)) * a[0] + (math.sqrt(r1) * (1 - r2)) * b[0] + \
(math.sqrt(r1) * r2) * c[0]
y = (1 - math.sqrt(r1)) * a[1] + (math.sqrt(r1) * (1 - r2)) * b[1] + \
(math.sqrt(r1) * r2) * c[1]
return x, y
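# Note: the sqrt(r1) mapping above is the standard barycentric sampling formula, so the
# returned points are uniformly distributed over the triangle's area rather than biased
# toward vertex a.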
def generate_points_with_percent_on_hull(size, percent):
points_on_hull = list(generate_points_on_circle(round(size * percent)))
triangles, weights = triangles_from_hull(points_on_hull)
in_hull_size = size - len(points_on_hull)
points_in_hull = [random_point_in_triangle(t) for t in random.choices(
triangles, weights=weights, k=in_hull_size)]
res = points_on_hull + points_in_hull
random.shuffle(res)
return res
def generate_input(sizes, genFunc):
with open("in.txt", 'w') as f:
f.write(str(len(sizes)) + '\n')
for size in sizes:
points = list(genFunc(size))
f.write(str(len(points)) + '\n')
for x, y in points:
f.write(str(x) + ' ' + str(y) + '\n')
def read_algo_names():
with open("algoNames.txt", 'r') as f:
return list(f.readlines())
def read_result():
with open("out.txt", 'r') as f:
return list(zip(*[list(map(float, line.split())) for line in f]))
def plot_and_save(x, values, names, labelx, labely, filename):
linestyles = ['k--', 'k-']
g = []
for i in range(len(values)):
g.append(plt.plot(x, values[i], linestyles[i])[0])
plt.yticks(fontname="Times New Roman", fontsize=10)
plt.xticks(fontname="Times New Roman", fontsize=10)
plt.legend(g, names, prop=font)
plt.xlabel(labelx, fontproperties=font)
plt.ylabel(labely, fontproperties=font)
plt.grid()
plt.savefig(filename, bbox_inches='tight')
def new_comparison(executable):
percentage_on_hull = [0.1, 0.5, 1, 1.5, 2, 2.5, 3]
print(percentage_on_hull)
points_num = [10000, 25000, 50000, 75000, 100000, 250000, 500000, 1000000]
print(points_num)
for n in points_num:
generate_input(percentage_on_hull,
lambda percent:
generate_points_with_percent_on_hull(n, percent / 100))
call([executable])
y = read_result()
print([f[1] / f[0] for f in zip(*y)])
plt.figure()
plot_and_save(percentage_on_hull, y, read_algo_names(),
                      'percent', 'time (sec)',
'comparison_' + str(n) + '.svg')
def classic_comparison(executable):
x = [1000, 2500, 5000, 7500, 10000, 25000, 50000, 75000, 100000]
plt.figure()
generate_input(x, generate_points_with_normal_dist)
call([executable])
plot_and_save(x, read_result(), read_algo_names(),
                  'number of points', 'time (sec)', 'comparison_gauss.svg')
if len(sys.argv) == 2:
new_comparison(sys.argv[1])
else:
print("Usage: run.py path_to_executable")
| mit | -5,708,612,539,971,205,000 | 29.146853 | 78 | 0.597773 | false |
z23han/Wrangling-MongoDB | Lesson_1_Data_Extraction_Fundamentals/parseCSV.py | 1 | 2010 | # Your task is to read the input DATAFILE line by line, and for the first 10 lines (not including the header)
# split each line on "," and then for each line, create a dictionary
# where the key is the header title of the field, and the value is the value of that field in the row.
# The function parse_file should return a list of dictionaries,
# each data line in the file being a single list entry.
# Field names and values should not contain extra whitespace, like spaces or newline characters.
# You can use the Python string method strip() to remove the extra whitespace.
# You have to parse only the first 10 data lines in this exercise,
# so the returned list should have 10 entries!
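# For example, with the header "Title,Released" and a data row
# "Please Please Me,22 March 1963", the corresponding dictionary entry would be
# {'Title': 'Please Please Me', 'Released': '22 March 1963'}.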
import os
DATADIR = ""
DATAFILE = "beatles-diskography.csv"
# -*- coding: utf-8 -*-
def parse_file(datafile):
data = []
with open(datafile, "r") as f:
cnt = 0
key_list = []
value_list = []
for line in f:
#print line
if cnt == 0:
key_list = line.strip().split(',')
else:
value_list = line.strip().split(',')
if cnt != 0:
data_dict = {}
for i in xrange(len(key_list)):
data_dict[key_list[i]] = value_list[i]
data.append(data_dict)
cnt += 1
if cnt > 10:
break
return data
def test():
# a simple test of your implemetation
datafile = os.path.join(DATADIR, DATAFILE)
d = parse_file(datafile)
firstline = {'Title': 'Please Please Me', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '22 March 1963', 'US Chart Position': '-', 'RIAA Certification': 'Platinum', 'BPI Certification': 'Gold'}
tenthline = {'Title': '', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '10 July 1964', 'US Chart Position': '-', 'RIAA Certification': '', 'BPI Certification': 'Gold'}
assert d[0] == firstline
assert d[9] == tenthline
test()
| agpl-3.0 | 410,967,701,874,879,040 | 37.653846 | 216 | 0.602488 | false |
City-of-Helsinki/smbackend | smbackend/urls.py | 1 | 1679 | from django.contrib import admin
from django.urls import include, re_path
from django.utils.translation import gettext_lazy as _
from munigeo.api import all_views as munigeo_views
from rest_framework import routers
from observations.api import views as observations_views
from observations.views import obtain_auth_token
from services import views
from services.api import all_views as services_views
from services.unit_redirect_viewset import UnitRedirectViewSet
from shortcutter import urls as shortcutter_urls
admin.site.site_header = _("Servicemap administration")
admin.site.index_title = _("Application management")
router = routers.DefaultRouter()
registered_api_views = set()
for view in services_views + munigeo_views + observations_views:
kwargs = {}
if view["name"] in registered_api_views:
continue
else:
registered_api_views.add(view["name"])
if "basename" in view:
kwargs["basename"] = view["basename"]
router.register(view["name"], view["class"], **kwargs)
urlpatterns = [
# Examples:
# url(r'^$', 'smbackend.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
# url(r'^', include(v1_api.urls)),
# url(r'^admin/', include(admin.site.urls)),
re_path(r"^admin/", admin.site.urls),
re_path(r"^open311/", views.post_service_request, name="services"),
re_path(r"^v2/", include(router.urls)),
re_path(r"^v2/api-token-auth/", obtain_auth_token, name="api-auth-token"),
re_path(r"^v2/redirect/unit/", UnitRedirectViewSet.as_view({"get": "list"})),
re_path(r"^v2/suggestion/", views.suggestion, name="suggestion"),
re_path(r"", include(shortcutter_urls)),
]
| agpl-3.0 | 5,467,478,504,102,424,000 | 35.5 | 81 | 0.699226 | false |
code-for-india/sahana_shelter_worldbank | models/zzz_1st_run.py | 1 | 13208 | # -*- coding: utf-8 -*-
# 1st-run initialisation
# Set settings.base.prepopulate to 0 in Production
# (to save 1x DAL hit every page).
pop_list = settings.get_base_prepopulate()
if pop_list == 0:
pop_list = []
else:
table = db[auth.settings.table_group_name]
# The query used here takes 2/3 the time of .count().
if db(table.id > 0).select(table.id, limitby=(0, 1)).first():
pop_list = []
if not isinstance(pop_list, (list, tuple)):
pop_list = [pop_list]
if len(pop_list) > 0:
# =========================================================================
# Populate default roles and permissions
#
# Allow debug
import sys
print >> sys.stdout, "Please be patient whilst the database is populated"
# Shortcuts
acl = auth.permission
sysroles = auth.S3_SYSTEM_ROLES
create_role = auth.s3_create_role
#update_acls = auth.s3_update_acls
# Do not remove or change order of these 5 definitions (System Roles):
create_role("Administrator",
"System Administrator - can access & make changes to any data",
uid=sysroles.ADMIN,
system=True, protected=True)
create_role("Authenticated",
"Authenticated - all logged-in users",
uid=sysroles.AUTHENTICATED,
protected=True)
create_role("Anonymous",
"Unauthenticated users",
# Allow unauthenticated users to view the list of organisations
# so they can select an organisation when registering
dict(t="org_organisation", uacl=acl.READ, entity="any"),
# Allow unauthenticated users to see the list of sites for an
# org when registering
dict(c="org", f="sites_for_org", uacl=acl.READ, entity="any"),
uid=sysroles.ANONYMOUS,
protected=True)
# Primarily for Security Policy 2
create_role("Editor",
"Editor - can access & make changes to any unprotected data",
uid=sysroles.EDITOR,
system=True, protected=True)
# MapAdmin
map_admin = create_role("MapAdmin",
"MapAdmin - allowed access to edit the MapService Catalogue",
dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
dict(c="gis", f="location", uacl=acl.ALL, oacl=acl.ALL),
uid=sysroles.MAP_ADMIN,
system=True, protected=True)
# OrgAdmin (policies 6, 7 and 8)
create_role("OrgAdmin",
"OrgAdmin - allowed to manage user roles for entity realms",
uid=sysroles.ORG_ADMIN,
system=True, protected=True)
# Enable shortcuts (needed by default.py)
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
ANONYMOUS = system_roles.ANONYMOUS
EDITOR = system_roles.EDITOR
MAP_ADMIN = system_roles.MAP_ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
# =========================================================================
# Configure Scheduled Tasks
#
has_module = settings.has_module
if has_module("msg"):
# Send Messages from Outbox
# SMS every minute
s3task.schedule_task("msg_process_outbox",
vars={"contact_method":"SMS"},
period=120, # seconds
timeout=120, # seconds
repeats=0 # unlimited
)
# Emails every 5 minutes
s3task.schedule_task("msg_process_outbox",
vars={"contact_method":"EMAIL"},
period=300, # seconds
timeout=300, # seconds
repeats=0 # unlimited
)
# Tweets every minute
#s3task.schedule_task("msg_process_outbox",
# vars={"contact_method":"TWITTER"},
# period=120, # seconds
# timeout=120, # seconds
# repeats=0 # unlimited
# )
# Subscription notifications
s3task.schedule_task("notify_check_subscriptions",
period=300,
timeout=300,
repeats=0)
# Daily maintenance
s3task.schedule_task("maintenance",
vars={"period":"daily"},
period=86400, # seconds, so 1/day
timeout=600, # seconds
repeats=0 # unlimited
)
# =========================================================================
# Import PrePopulate data
#
# Override authorization
auth.override = True
# Load all Models to ensure all DB tables present
s3db.load_all_models()
# Shortcuts
path_join = os.path.join
request_folder = request.folder
if settings.get_auth_opt_in_to_email():
table = db.pr_group
for team in settings.get_auth_opt_in_team_list():
table.insert(name = team, group_type = 5)
# Synchronisation
db.sync_config.insert() # Defaults are fine
# Person Registry
tablename = "pr_person"
# Add extra indexes on search fields
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
field = "first_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "middle_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "last_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# GIS
# Add extra index on search field
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
tablename = "gis_location"
field = "name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# Messaging Module
if has_module("msg"):
update_super = s3db.update_super
# To read inbound email, set username (email address), password, etc.
# here. Insert multiple records for multiple email sources.
table = db.msg_email_channel
id = table.insert(server = "imap.gmail.com",
protocol = "imap",
use_ssl = True,
port = 993,
username = "example-username",
password = "password",
delete_from_server = False
)
update_super(table, dict(id=id))
# Need entries for the Settings/1/Update URLs to work
table = db.msg_sms_outbound_gateway
id = table.insert(outgoing_sms_handler = "WEB_API")
update_super(table, dict(id=id))
table = db.msg_sms_modem_channel
id = table.insert(modem_baud = 115200)
update_super(table, dict(id=id))
table = db.msg_sms_webapi_channel
id = table.insert(to_variable = "to")
update_super(table, dict(id=id))
table = db.msg_sms_smtp_channel
id = table.insert(address="changeme")
update_super(table, dict(id=id))
table = db.msg_tropo_channel
id = table.insert(token_messaging = "")
update_super(table, dict(id=id))
table = db.msg_twitter_channel
id = table.insert(enabled = False)
update_super(table, dict(id=id))
# Budget Module
if has_module("budget"):
db.budget_parameter.insert() # Defaults are fine
# Climate Module
if has_module("climate"):
s3db.climate_first_run()
# CAP module
if has_module("cap"):
db.cap_alert.insert(template_title="Default", is_template=True)
# Incident Reporting System
if has_module("irs"):
# Categories visible to ends-users by default
table = db.irs_icategory
table.insert(code = "flood")
table.insert(code = "geophysical.landslide")
table.insert(code = "roadway.bridgeClosure")
table.insert(code = "roadway.roadwayClosure")
table.insert(code = "other.buildingCollapsed")
table.insert(code = "other.peopleTrapped")
table.insert(code = "other.powerFailure")
# Supply Module
if has_module("supply"):
db.supply_catalog.insert(name = settings.get_supply_catalog_default())
# Ensure DB population committed when running through shell
db.commit()
# =========================================================================
# PrePopulate import (from CSV)
#
# Create the bulk Importer object
bi = s3base.S3BulkImporter()
s3.import_role = bi.import_role
s3.import_user = bi.import_user
s3.import_image = bi.import_image
s3.import_remote_csv = bi.import_remote_csv
# Relax strict email-matching rule for import updates of person records
email_required = settings.get_pr_import_update_requires_email()
settings.pr.import_update_requires_email = False
# Additional settings for user table imports:
s3db.configure("auth_user",
onaccept = lambda form: auth.s3_approve_user(form.vars))
s3db.add_components("auth_user", auth_membership="user_id")
# Flag that Assets are being imported, not synced
s3.asset_import = True
# Allow population via shell scripts
if not request.env.request_method:
request.env.request_method = "GET"
grandTotalStart = datetime.datetime.now()
for pop_setting in pop_list:
start = datetime.datetime.now()
# Clear Tasklist
bi.tasks = []
# Import data specific to the prepopulate setting
if pop_setting == 1:
# Populate with the default data
path = path_join(request_folder,
"private",
"templates",
"default")
bi.perform_tasks(path)
else:
path = path_join(request_folder,
"private",
"templates",
pop_setting)
if os.path.exists(path):
bi.perform_tasks(path)
else:
print >> sys.stderr, "Unable to install data %s no valid directory found" % pop_setting
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
try:
# Python 2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Pre-populate task completed in %s mins" % duration
except AttributeError:
# older Python
print >> sys.stdout, "Pre-populate task completed in %s" % duration
bi.resultList = []
for errorLine in bi.errorList:
try:
print >> sys.stderr, errorLine
except:
s3_unicode = s3base.s3_unicode
_errorLine = ""
for i in range(0, len(errorLine)):
try:
                    _errorLine += s3_unicode(errorLine[i])
except:
pass
print >> sys.stderr, _errorLine
# Restore setting for strict email-matching
settings.pr.import_update_requires_email = email_required
# Restore Auth
auth.override = False
# Update Location Tree (disabled during prepop)
start = datetime.datetime.now()
gis.update_location_tree()
end = datetime.datetime.now()
print >> sys.stdout, "Location Tree update completed in %s" % (end - start)
# Countries are only editable by MapAdmin
db(db.gis_location.level == "L0").update(owned_by_group=map_admin)
if has_module("stats"):
# Populate stats_demographic_aggregate (disabled during prepop)
# - needs to be done after locations
start = datetime.datetime.now()
s3db.stats_demographic_rebuild_all_aggregates()
end = datetime.datetime.now()
print >> sys.stdout, "Demographic data aggregation completed in %s" % (end - start)
if has_module("vulnerability"):
# Populate vulnerability_aggregate (disabled during prepop)
# - needs to be done after locations
start = datetime.datetime.now()
s3db.vulnerability_rebuild_all_aggregates()
end = datetime.datetime.now()
print >> sys.stdout, "Vulnerability data aggregation completed in %s" % (end - start)
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
try:
# Python 2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Pre-populate completed in %s mins" % duration
except AttributeError:
# older Python
print >> sys.stdout, "Pre-populate completed in %s" % duration
# Restore view
response.view = "default/index.html"
# END =========================================================================
| mit | -2,316,289,051,526,580,700 | 36.310734 | 103 | 0.552998 | false |
metzzo/Paraphrase_Identification | distribFeat.py | 1 | 3395 | from __future__ import division
import pickle
import numpy
import math
from nltk.tokenize import RegexpTokenizer
from sklearn.decomposition import NMF, TruncatedSVD
import sentenceFeatures
# Obtain distributional features ((2 * K) in number)
# IMPORTANT: both training and test set must be present in sentences
# sentences is an array of tokenized sentences (matrix of words, basically)
# fullSent is the untokenized version
def distribFeat(fullSent, sentences, K):
paraphraseMap = pickle.load(open("paraphraseMap", "rb"))
notParaphrMap = pickle.load(open("notParaphrMap", "rb"))
n = len(sentences)
uniqWords = []
for s in sentences:
for word in s:
if word not in uniqWords:
uniqWords.append(word)
# M will hold TF-KLD score for each word for each sentence
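    # TF-KLD weighting: each word contributes the KL divergence between its
    # probability p of occurring in a paraphrase pair and its probability q of
    # occurring in a non-paraphrase pair,
    #   KL = p * log(p / q) + (1 - p) * log((1 - p) / (1 - q))
    # so discriminative words receive larger weights in M.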
M = numpy.zeros((len(uniqWords), n))
for word in uniqWords:
if word in paraphraseMap:
if word in notParaphrMap:
p = paraphraseMap[word]
np = 1 - p
q = notParaphrMap[word]
nq = 1 - q
kl = p * math.log(p/q) + np * math.log(np/nq)
else:
kl = 1
else:
kl = 0
for i in range(0,n):
if word in sentences[i]:
M[uniqWords.index(word)][i] += kl
# Step 2: Matrix factorization
#factory = TruncatedSVD(n_components = K)
factory = NMF(n_components = K, max_iter=2000)
W = factory.fit_transform(M) # M = W*H , returns W, which we don't need
H = factory.components_ # should be size K * n
print(M.shape)
print(W.shape)
print(H.shape)
#Step 3: obtain feature set for paraphrase pair
features = []
i = 0
while i < n:
feat = [0] * (K * 2)
for j in range(0, K):
feat[j] = H[j][i] + H[j][i + 1]
            feat[K + j] = abs(H[j][i] - H[j][i + 1])
#feat.extend(sentenceFeatures.compute(fullSent[i],fullSent[i+1]))
i += 2 # step to next pair of sentences
features.append(feat)
return features
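# A rough usage sketch (hypothetical data, assumes the pickled paraphraseMap and
# notParaphrMap files are present):
#
#     feats = distribFeat(['the cat sat', 'a cat sat'],
#                         [['the', 'cat', 'sat'], ['a', 'cat', 'sat']], 2)
#
# feats[0] then holds 2 * K values: the K column sums of H followed by the K
# absolute column differences for that sentence pair.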
def getData():
tokenizer = RegexpTokenizer(r'\w+')
f = open("msr_paraphrase_train.txt", "r")
f.readline()
sentences = []
sentencesWords = []
trainClass = [0] * 4076
for i in range(0,4076):
tokens = f.readline().strip().split('\t')
trainClass[i] = int(tokens[0])
#sentences.append(tokens[3].lower())
#sentences.append(tokens[4].lower())
sentencesWords.append(tokenizer.tokenize(tokens[3].lower()))
sentencesWords.append(tokenizer.tokenize(tokens[4].lower()))
f.close()
#trainFeat = distribFeat(sentences, sentencesWords, 500)
f = open("msr_paraphrase_test.txt", "r")
f.readline()
#sentences = []
#sentencesWords = []
testClass = [0] * 1725
for i in range(0,1725):
tokens = f.readline().strip().split('\t')
testClass[i] = int(tokens[0])
#sentences.append(tokens[3].lower())
#sentences.append(tokens[4].lower())
sentencesWords.append(tokenizer.tokenize(tokens[3].lower()))
sentencesWords.append(tokenizer.tokenize(tokens[4].lower()))
f.close()
allFeat = distribFeat(sentences, sentencesWords, 50)
print(len(allFeat))
trainFeat = allFeat[:4076]
testFeat = allFeat[4076:]
return trainFeat, trainClass, testFeat, testClass
| gpl-3.0 | -2,220,064,860,507,433,700 | 32.613861 | 75 | 0.599116 | false |
pereerro/schooly | timed_groups/__init__.py | 1 | 1182 | # -*- coding: utf-8 -*-
##############################################################################
#
# school module for OpenERP
# Copyright (C) 2010 Tecnoba S.L. (http://www.tecnoba.com)
# Pere Ramon Erro Mas <[email protected]> All Rights Reserved.
#
# This file is a part of school module
#
# school OpenERP module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# school OpenERP module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import timed_groups
import wizards
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,229,363,716,360,227,000 | 41.214286 | 82 | 0.624365 | false |
rackerlabs/deuce | deuce/tests/test_validation.py | 1 | 21839 | import hashlib
from unittest import TestCase
import uuid
from falcon import request
from stoplight import validate
from stoplight.exceptions import ValidationFailed
from deuce.transport import validation as v
from deuce.transport.wsgi import errors
class MockRequest(object):
pass
class InvalidSeparatorError(Exception):
"""Invalid Separator Error is raised whenever
    an invalid separator is set for joining query strings
in a url"""
def __init__(self, msg):
Exception.__init__(self, msg)
class TestRulesBase(TestCase):
@staticmethod
def build_request(params=None, separator='&'):
"""Build a request object to use for testing
:param params: list of tuples containing the name and value pairs
for parameters to add to the QUERY_STRING
"""
mock_env = {
'wsgi.errors': 'mock',
'wsgi.input': 'mock',
'REQUEST_METHOD': 'PUT',
'PATH_INFO': '/',
'SERVER_NAME': 'mock',
'SERVER_PORT': '8888',
'QUERY_STRING': None
}
if params is not None:
for param in params:
name = param[0]
value = param[1]
param_set = '{0}='.format(name)
if value is not None and len(value):
param_set = '{0}={1}'.format(name, value)
if mock_env['QUERY_STRING'] is None:
mock_env['QUERY_STRING'] = param_set
else:
if separator in ('&', ';'):
mock_env['QUERY_STRING'] = '{1}{0}{2}'.format(
separator, mock_env['QUERY_STRING'], param_set)
else:
                        raise InvalidSeparatorError('separator in query string '
                                                    'must be & or ;')
if mock_env['QUERY_STRING'] is None:
del mock_env['QUERY_STRING']
return request.Request(mock_env)
def cases_with_none_okay(self):
positive_cases = self.__class__.positive_cases[:]
positive_cases.append(None)
negative_cases = self.__class__.negative_cases[:]
while negative_cases.count(None):
negative_cases.remove(None)
while negative_cases.count(''):
negative_cases.remove('')
return (positive_cases, negative_cases)
class TestRequests(TestRulesBase):
def test_request(self):
positive_case = [TestRulesBase.build_request()]
negative_case = [MockRequest()]
for case in positive_case:
v.is_request(case)
for case in negative_case:
with self.assertRaises(ValidationFailed):
v.is_request(none_ok=True)(case)
class TestVaultRules(TestRulesBase):
positive_cases = [
'a',
'0',
'__vault_id____',
'-_-_-_-_-_-_-_-',
'snake_case_is_ok',
'So-are-hyphonated-names',
'a' * v.VAULT_ID_MAX_LEN
]
negative_cases = [
'', # empty case should raise
'.', '!', '@', '#', '$', '%',
'^', '&', '*', '[', ']', '/',
'@#$@#$@#^@%$@#@#@#$@!!!@$@$@',
'\\', 'a' * (v.VAULT_ID_MAX_LEN + 1),
None
]
@validate(vault_id=v.VaultGetRule)
def utilize_get_vault_id(self, vault_id):
return True
@validate(vault_id=v.VaultPutRule)
def utilize_put_vault_id(self, vault_id):
return True
@validate(req=v.RequestRule(v.VaultMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_vault_id(self):
for name in self.__class__.positive_cases:
v.val_vault_id(name)
for name in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_vault_id()(name)
def test_vault_get(self):
for p_case in self.__class__.positive_cases:
self.assertTrue(self.utilize_get_vault_id(p_case))
for case in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_vault_id(case)
def test_vault_put(self):
for p_case in self.__class__.positive_cases:
self.assertTrue(self.utilize_put_vault_id(p_case))
for case in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_vault_id(case)
def test_vault_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for vault_id in positive_cases:
vault_id_req = TestRulesBase.build_request(params=[('marker',
vault_id)])
self.assertTrue(self.utilize_request(vault_id_req))
# We currently skip the negative test for the VaultMarkerRule
# due to the nature of the negative cases for the Vault Name.
# Leaving the code in below should we figure out a good way to
# capture the data for the URL encoding.
#
# Note: It is not a failure of build_request()'s QUERY_STRING building
# but a miss-match between it, urllib.parse.urlencode(), and Falcon.
# Use of urllib.parse.urlencode() has other issues here as well.
#
# for vault_id in negative_cases:
# vault_id_req = TestRulesBase.build_request(params=[('marker',
# vault_id)])
# with self.assertRaises(errors.HTTPNotFound):
# self.utilize_request(vault_id_req, raiseme=True)
class TestMetadataBlockRules(TestRulesBase):
positive_cases = [
'da39a3ee5e6b4b0d3255bfef95601890afd80709',
'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa',
'ffffffffffffffffffffffffffffffffffffffff',
'a' * 40,
]
negative_cases = [
'',
'.',
'a', '0', 'f', 'F', 'z', '#', '$', '?',
'a39a3ee5e6b4b0d3255bfef95601890afd80709', # one char short
'da39a3ee5e6b4b0d3255bfef95601890afd80709a', # one char long
'DA39A3EE5E6B4B0D3255BFEF95601890AFD80709',
'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF',
'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 2,
'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 3,
'AaaAaaAaaaaAaAaaaAaaaaaaaAAAAaaaaAaaaaaa' * 4,
None
]
@validate(metadata_block_id=v.BlockGetRule)
def utilize_get_metadata_block_get(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPutRule)
def utilize_put_metadata_block_id(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPostRule)
def utilize_post_metadata_block_id(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockGetRuleNoneOk)
def utilize_get_metadata_block_get_none_okay(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPutRuleNoneOk)
def utilize_put_metadata_block_id_none_okay(self, metadata_block_id):
return True
@validate(metadata_block_id=v.BlockPostRuleNoneOk)
def utilize_post_metadata_block_id_none_okay(self, metadata_block_id):
return True
@validate(req=v.RequestRule(v.BlockMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_block_id(self):
for blockid in self.__class__.positive_cases:
v.val_block_id(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(v.ValidationFailed):
v.val_block_id()(blockid)
def test_get_block_id(self):
for blockid in self.__class__.positive_cases:
self.utilize_get_metadata_block_get(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_metadata_block_get(blockid)
def test_put_block_id(self):
for blockid in self.__class__.positive_cases:
self.utilize_put_metadata_block_id(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_metadata_block_id(blockid)
def test_get_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for blockid in positive_cases:
self.utilize_get_metadata_block_get_none_okay(blockid)
for blockid in negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_metadata_block_get_none_okay(blockid)
def test_put_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for blockid in positive_cases:
self.utilize_put_metadata_block_id_none_okay(blockid)
for blockid in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_metadata_block_id_none_okay(blockid)
def test_post_block_id(self):
for blockid in self.__class__.positive_cases:
self.utilize_post_metadata_block_id(blockid)
for blockid in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_post_metadata_block_id(blockid)
def test_post_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for blockid in positive_cases:
self.utilize_post_metadata_block_id_none_okay(blockid)
for blockid in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_post_metadata_block_id_none_okay(blockid)
def test_block_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for block_id in positive_cases:
block_id_req = TestRulesBase.build_request(params=[('marker',
block_id)])
self.assertTrue(self.utilize_request(block_id_req))
for block_id in negative_cases:
block_id_req = TestRulesBase.build_request(params=[('marker',
block_id)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(block_id_req, raiseme=True)
class TestStorageBlockRules(TestRulesBase):
positive_cases = [hashlib.sha1(bytes(i)).hexdigest() + '_' +
str(uuid.uuid4()) for i in range(0, 1000)]
negative_cases = [
'',
'fecfd28bbc9345891a66d7c1b8ff46e60192d'
'2840c3de7c4-5fe9-4b2e-b19a-9cf81364997b', # note no '_' between sha1
# and uuid
'e7bf692b-ec7b-40ad-b0d1-45ce6798fb6z', # note trailing z
str(uuid.uuid4()).upper(), # Force case sensitivity
None
]
@validate(storage_block_id=v.StorageBlockGetRule)
def utilize_get_storage_block_get(self, storage_block_id):
return True
@validate(storage_block_id=v.StorageBlockPutRule)
def utilize_put_storage_block_id(self, storage_block_id):
return True
@validate(storage_block_id=v.StorageBlockRuleGetNoneOk)
def utilize_get_storage_block_get_none_okay(self, storage_block_id):
return True
@validate(storage_block_id=v.StorageBlockRulePutNoneOk)
def utilize_put_storage_block_id_none_okay(self, storage_block_id):
return True
@validate(req=v.RequestRule(v.StorageBlockMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_storage_storage_block_id(self):
for storage_id in self.__class__.positive_cases:
v.val_storage_block_id(storage_id)
for storage_id in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_storage_block_id()(storage_id)
def test_get_storage_block_id(self):
for storage_id in self.__class__.positive_cases:
self.utilize_get_storage_block_get(storage_id)
for storage_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_storage_block_get(storage_id)
def test_put_storage_block_id(self):
for storage_id in self.__class__.positive_cases:
self.utilize_put_storage_block_id(storage_id)
for storage_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_storage_block_id(storage_id)
def test_get_storage_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for storage_id in positive_cases:
self.utilize_get_storage_block_get_none_okay(storage_id)
for storage_id in negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_get_storage_block_get_none_okay(storage_id)
def test_put_storage_block_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for storage_id in positive_cases:
self.utilize_put_storage_block_id_none_okay(storage_id)
for storage_id in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_put_storage_block_id_none_okay(storage_id)
def test_storage_block_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for storage_id in positive_cases:
storage_id_req = TestRulesBase.build_request(params=[('marker',
storage_id)])
self.assertTrue(self.utilize_request(storage_id_req))
for storage_id in negative_cases:
storage_id_req = TestRulesBase.build_request(params=[('marker',
storage_id)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(storage_id_req, raiseme=True)
class TestFileRules(TestRulesBase):
    # Let's try to append some UUIDs and check for failures
positive_cases = [str(uuid.uuid4()) for _ in range(0, 1000)]
negative_cases = [
'',
'e7bf692b-ec7b-40ad-b0d1-45ce6798fb6z', # note trailing z
str(uuid.uuid4()).upper(), # Force case sensitivity
None
]
@validate(file_id=v.FileGetRule)
def utilize_file_id_get(self, file_id):
return True
@validate(file_id=v.FilePutRule)
def utilize_file_id_put(self, file_id):
return True
@validate(file_id=v.FilePostRule)
def utilize_file_id_post(self, file_id):
return True
@validate(file_id=v.FileGetRuleNoneOk)
def utilize_file_id_get_none_okay(self, file_id):
return True
@validate(file_id=v.FilePutRuleNoneOk)
def utilize_file_id_put_none_okay(self, file_id):
return True
@validate(file_id=v.FilePostRuleNoneOk)
def utilize_file_id_post_none_okay(self, file_id):
return True
@validate(req=v.RequestRule(v.FileMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_file_id(self):
for fileid in self.__class__.positive_cases:
v.val_file_id(fileid)
for fileid in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_file_id()(fileid)
def test_get_file_id(self):
for file_id in self.__class__.positive_cases:
self.utilize_file_id_get(file_id)
for file_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_file_id_get(file_id)
def test_put_file_id(self):
for file_id in self.__class__.positive_cases:
self.utilize_file_id_put(file_id)
for file_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_put(file_id)
def test_post_file_id(self):
for file_id in self.__class__.positive_cases:
self.utilize_file_id_post(file_id)
for file_id in self.__class__.negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_post(file_id)
def test_get_file_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
self.utilize_file_id_get_none_okay(file_id)
for file_id in negative_cases:
with self.assertRaises(errors.HTTPNotFound):
self.utilize_file_id_get_none_okay(file_id)
def test_put_file_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
self.utilize_file_id_put_none_okay(file_id)
for file_id in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_put_none_okay(file_id)
def test_post_file_id_none_okay(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
self.utilize_file_id_post_none_okay(file_id)
for file_id in negative_cases:
with self.assertRaises(errors.HTTPBadRequestAPI):
self.utilize_file_id_post_none_okay(file_id)
def test_file_id_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for file_id in positive_cases:
file_id_req = TestRulesBase.build_request(params=[('marker',
file_id)])
self.assertTrue(self.utilize_request(file_id_req))
for file_id in negative_cases:
file_id_req = TestRulesBase.build_request(params=[('marker',
file_id)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(file_id_req, raiseme=True)
class TestOffsetRules(TestRulesBase):
positive_cases = [
'0', '1', '2', '3', '55', '100',
'101010', '99999999999999999999999999999'
]
negative_cases = [
'-1', '-23', 'O', 'zero', 'one', '-999', '1.0', '1.3',
'0.0000000000001',
None
]
@validate(req=v.RequestRule(v.OffsetMarkerRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_offset(self):
for offset in self.__class__.positive_cases:
v.val_offset()(offset)
for offset in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_offset()(offset)
def test_offset_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for offset in positive_cases:
offset_req = TestRulesBase.build_request(params=[('marker',
offset)])
self.assertTrue(self.utilize_request(offset_req))
for offset in negative_cases:
offset_req = TestRulesBase.build_request(params=[('marker',
offset)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(offset_req, raiseme=True)
class TestLimitRules(TestRulesBase):
positive_cases = [
'0', '100', '100000000', '100'
]
negative_cases = [
'-1', 'blah', None
]
@validate(req=v.RequestRule(v.LimitRule))
def utilize_request(self, req, raiseme=False):
if raiseme:
raise RuntimeError('QUERY_STRING: {0}'.format(req.query_string))
else:
return True
def test_limit(self):
for limit in self.__class__.positive_cases:
v.val_limit()(limit)
for limit in self.__class__.negative_cases:
with self.assertRaises(ValidationFailed):
v.val_limit()(limit)
v.val_limit(empty_ok=True)('')
v.val_limit(none_ok=True)(None)
with self.assertRaises(ValidationFailed):
v.val_limit()('')
with self.assertRaises(ValidationFailed):
v.val_limit()(None)
def test_limit_marker(self):
positive_cases, negative_cases = self.cases_with_none_okay()
for limit in positive_cases:
limit_req = TestRulesBase.build_request(params=[('limit',
limit)])
self.assertTrue(self.utilize_request(limit_req))
for limit in negative_cases:
limit_req = TestRulesBase.build_request(params=[('limit',
limit)])
with self.assertRaises(errors.HTTPNotFound):
self.utilize_request(limit_req, raiseme=True)
| apache-2.0 | 3,394,681,553,709,124,600 | 32.444104 | 79 | 0.589587 | false |
googleapis/python-aiplatform | google/cloud/aiplatform_v1beta1/services/migration_service/transports/grpc_asyncio.py | 1 | 13902 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.aiplatform_v1beta1.types import migration_service
from google.longrunning import operations_pb2 # type: ignore
from .base import MigrationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import MigrationServiceGrpcTransport
class MigrationServiceGrpcAsyncIOTransport(MigrationServiceTransport):
"""gRPC AsyncIO backend transport for MigrationService.
A service that migrates resources from automl.googleapis.com,
datalabeling.googleapis.com and ml.googleapis.com to Vertex AI.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
self_signed_jwt_kwargs = cls._get_self_signed_jwt_kwargs(host, scopes)
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
**self_signed_jwt_kwargs,
**kwargs,
)
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def search_migratable_resources(
self,
) -> Callable[
[migration_service.SearchMigratableResourcesRequest],
Awaitable[migration_service.SearchMigratableResourcesResponse],
]:
r"""Return a callable for the search migratable resources method over gRPC.
Searches all of the resources in
automl.googleapis.com, datalabeling.googleapis.com and
ml.googleapis.com that can be migrated to Vertex AI's
given location.
Returns:
Callable[[~.SearchMigratableResourcesRequest],
Awaitable[~.SearchMigratableResourcesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "search_migratable_resources" not in self._stubs:
self._stubs["search_migratable_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/SearchMigratableResources",
request_serializer=migration_service.SearchMigratableResourcesRequest.serialize,
response_deserializer=migration_service.SearchMigratableResourcesResponse.deserialize,
)
return self._stubs["search_migratable_resources"]
@property
def batch_migrate_resources(
self,
) -> Callable[
[migration_service.BatchMigrateResourcesRequest],
Awaitable[operations_pb2.Operation],
]:
r"""Return a callable for the batch migrate resources method over gRPC.
Batch migrates resources from ml.googleapis.com,
automl.googleapis.com, and datalabeling.googleapis.com
to Vertex AI.
Returns:
Callable[[~.BatchMigrateResourcesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "batch_migrate_resources" not in self._stubs:
self._stubs["batch_migrate_resources"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.MigrationService/BatchMigrateResources",
request_serializer=migration_service.BatchMigrateResourcesRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["batch_migrate_resources"]
__all__ = ("MigrationServiceGrpcAsyncIOTransport",)
| apache-2.0 | -2,154,680,416,297,731,300 | 43.700965 | 102 | 0.630557 | false |
liiight/notifiers | tests/providers/test_mailgun.py | 1 | 2725 | import pytest
import datetime
import time
from email import utils
from notifiers.exceptions import BadArguments
from notifiers.core import FAILURE_STATUS
provider = "mailgun"
class TestMailgun:
def test_mailgun_metadata(self, provider):
assert provider.metadata == {
"base_url": "https://api.mailgun.net/v3/{domain}/messages",
"name": "mailgun",
"site_url": "https://documentation.mailgun.com/",
}
@pytest.mark.parametrize(
"data, message",
[
({}, "to"),
({"to": "foo"}, "domain"),
({"to": "foo", "domain": "bla"}, "api_key"),
({"to": "foo", "domain": "bla", "api_key": "bla"}, "from"),
(
{"to": "foo", "domain": "bla", "api_key": "bla", "from": "bbb"},
"message",
),
],
)
def test_mailgun_missing_required(self, data, message, provider):
data["env_prefix"] = "test"
with pytest.raises(BadArguments, match=f"'{message}' is a required property"):
provider.notify(**data)
@pytest.mark.online
def test_mailgun_sanity(self, provider, test_message):
provider.notify(message=test_message, raise_on_errors=True)
@pytest.mark.online
def test_mailgun_all_options(self, provider, tmpdir, test_message):
dir_ = tmpdir.mkdir("sub")
file_1 = dir_.join("hello.txt")
file_1.write("content")
file_2 = dir_.join("world.txt")
file_2.write("content")
now = datetime.datetime.now() + datetime.timedelta(minutes=3)
rfc_2822 = utils.formatdate(time.mktime(now.timetuple()))
data = {
"message": test_message,
"html": f"<b>{now}</b>",
"subject": f"{now}",
"attachment": [file_1.strpath, file_2.strpath],
"inline": [file_1.strpath, file_2.strpath],
"tag": ["foo", "bar"],
"dkim": True,
"deliverytime": rfc_2822,
"testmode": False,
"tracking": True,
"tracking_clicks": "htmlonly",
"tracking_opens": True,
"require_tls": False,
"skip_verification": True,
"headers": {"foo": "bar"},
"data": {"foo": {"bar": "bla"}},
}
provider.notify(**data, raise_on_errors=True)
def test_mailgun_error_response(self, provider):
data = {
"api_key": "FOO",
"message": "bla",
"to": "[email protected]",
"domain": "foo",
"from": "[email protected]",
}
rsp = provider.notify(**data)
assert rsp.status == FAILURE_STATUS
assert "Forbidden" in rsp.errors
| mit | -4,442,736,360,810,114,600 | 32.231707 | 86 | 0.519266 | false |
davidnmurray/iris | lib/iris/io/__init__.py | 1 | 15278 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides an interface to manage URI scheme support in iris.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
import glob
import os.path
import types
import re
import collections
import iris.fileformats
import iris.fileformats.dot
import iris.cube
import iris.exceptions
# Saving routines, indexed by file extension.
class _SaversDict(dict):
"""A dictionary that can only have string keys with no overlap."""
def __setitem__(self, key, value):
if not isinstance(key, six.string_types):
raise ValueError("key is not a string")
if key in self:
raise ValueError("A saver already exists for", key)
for k in self.keys():
if k.endswith(key) or key.endswith(k):
raise ValueError("key %s conflicts with existing key %s" % (key, k))
dict.__setitem__(self, key, value)
_savers = _SaversDict()
def run_callback(callback, cube, field, filename):
"""
Runs the callback mechanism given the appropriate arguments.
Args:
* callback:
A function to add metadata from the originating field and/or URI which
obeys the following rules:
1. Function signature must be: ``(cube, field, filename)``.
2. Modifies the given cube inplace, unless a new cube is
returned by the function.
3. If the cube is to be rejected the callback must raise
an :class:`iris.exceptions.IgnoreCubeException`.
.. note::
It is possible that this function returns None for certain callbacks,
the caller of this function should handle this case.
"""
if callback is None:
return cube
# Call the callback function on the cube, generally the function will
# operate on the cube in place, but it is also possible that the function
# will return a completely new cube instance.
try:
result = callback(cube, field, filename)
except iris.exceptions.IgnoreCubeException:
result = None
else:
if result is None:
result = cube
elif not isinstance(result, iris.cube.Cube):
raise TypeError("Callback function returned an "
"unhandled data type.")
return result
def decode_uri(uri, default='file'):
r'''
Decodes a single URI into scheme and scheme-specific parts.
In addition to well-formed URIs, it also supports bare file paths.
Both Windows and UNIX style paths are accepted.
.. testsetup::
from iris.io import *
Examples:
>>> from iris.io import decode_uri
>>> print(decode_uri('http://www.thing.com:8080/resource?id=a:b'))
('http', '//www.thing.com:8080/resource?id=a:b')
>>> print(decode_uri('file:///data/local/dataZoo/...'))
('file', '///data/local/dataZoo/...')
>>> print(decode_uri('/data/local/dataZoo/...'))
('file', '/data/local/dataZoo/...')
>>> print(decode_uri('file:///C:\data\local\dataZoo\...'))
('file', '///C:\\data\\local\\dataZoo\\...')
>>> print(decode_uri('C:\data\local\dataZoo\...'))
('file', 'C:\\data\\local\\dataZoo\\...')
>>> print(decode_uri('dataZoo/...'))
('file', 'dataZoo/...')
'''
# make sure scheme has at least 2 letters to avoid windows drives
# put - last in the brackets so it refers to the character, not a range
# reference on valid schemes: http://tools.ietf.org/html/std66#section-3.1
match = re.match(r"^([a-zA-Z][a-zA-Z0-9+.-]+):(.+)", uri)
if match:
scheme = match.group(1)
part = match.group(2)
else:
# Catch bare UNIX and Windows paths
scheme = default
part = uri
return scheme, part
def expand_filespecs(file_specs):
"""
Find all matching file paths from a list of file-specs.
Args:
* file_specs (iterable of string):
File paths which may contain '~' elements or wildcards.
Returns:
A list of matching file paths. If any of the file-specs matches no
existing files, an exception is raised.
"""
# Remove any hostname component - currently unused
filenames = [os.path.expanduser(fn[2:] if fn.startswith('//') else fn)
for fn in file_specs]
# Try to expand all filenames as globs
glob_expanded = {fn : sorted(glob.glob(fn)) for fn in filenames}
# If any of the specs expanded to an empty list then raise an error
value_lists = glob_expanded.values()
if not all(value_lists):
raise IOError("One or more of the files specified did not exist %s." %
["%s expanded to %s" % (pattern, expanded if expanded else "empty")
for pattern, expanded in six.iteritems(glob_expanded)])
return sum(value_lists, [])
def load_files(filenames, callback, constraints=None):
"""
Takes a list of filenames which may also be globs, and optionally a
constraint set and a callback function, and returns a
generator of Cubes from the given files.
.. note::
Typically, this function should not be called directly; instead, the
intended interface for loading is :func:`iris.load`.
"""
all_file_paths = expand_filespecs(filenames)
# Create default dict mapping iris format handler to its associated filenames
handler_map = collections.defaultdict(list)
for fn in all_file_paths:
with open(fn, 'rb') as fh:
handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(os.path.basename(fn), fh)
handler_map[handling_format_spec].append(fn)
    # Call each iris format handler with the appropriate filenames
for handling_format_spec in sorted(handler_map):
fnames = handler_map[handling_format_spec]
if handling_format_spec.constraint_aware_handler:
for cube in handling_format_spec.handler(fnames, callback,
constraints):
yield cube
else:
for cube in handling_format_spec.handler(fnames, callback):
yield cube
def load_http(urls, callback):
"""
Takes a list of urls and a callback function, and returns a generator
of Cubes from the given URLs.
.. note::
Typically, this function should not be called directly; instead, the
intended interface for loading is :func:`iris.load`.
"""
# Create default dict mapping iris format handler to its associated filenames
handler_map = collections.defaultdict(list)
for url in urls:
handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(url, None)
handler_map[handling_format_spec].append(url)
# Call each iris format handler with the appropriate filenames
for handling_format_spec in sorted(handler_map):
fnames = handler_map[handling_format_spec]
for cube in handling_format_spec.handler(fnames, callback):
yield cube
def _dot_save(cube, target):
# A simple wrapper for `iris.fileformats.dot.save` which allows the
# saver to be registered without triggering the import of
# `iris.fileformats.dot`.
import iris.fileformats.dot
return iris.fileformats.dot.save(cube, target)
def _dot_save_png(cube, target, **kwargs):
# A simple wrapper for `iris.fileformats.dot.save_png` which allows the
# saver to be registered without triggering the import of
# `iris.fileformats.dot`.
import iris.fileformats.dot
return iris.fileformats.dot.save_png(cube, target, **kwargs)
def _grib_save(cube, target, append=False, **kwargs):
# A simple wrapper for `iris.fileformats.grib.save_grib2` which
# allows the saver to be registered without having `gribapi`
# installed.
try:
import gribapi
except ImportError:
raise RuntimeError('Unable to save GRIB file - the ECMWF '
'`gribapi` package is not installed.')
return iris.fileformats.grib.save_grib2(cube, target, append, **kwargs)
def _check_init_savers():
# TODO: Raise a ticket to resolve the cyclic import error that requires
# us to initialise this on first use. Probably merge io and fileformats.
if "pp" not in _savers:
_savers.update({"pp": iris.fileformats.pp.save,
"nc": iris.fileformats.netcdf.save,
"dot": _dot_save,
"dotpng": _dot_save_png,
"grib2": _grib_save})
def add_saver(file_extension, new_saver):
"""
Add a custom saver to the Iris session.
Args:
* file_extension - A string such as "pp" or "my_format".
* new_saver - A function of the form ``my_saver(cube, target)``.
See also :func:`iris.io.save`
"""
# Make sure it's a func with 2+ args
if not hasattr(new_saver, "__call__") or new_saver.__code__.co_argcount < 2:
raise ValueError("Saver routines must be callable with 2+ arguments.")
# Try to add this saver. Invalid keys will be rejected.
_savers[file_extension] = new_saver
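# Sketch of registering a custom saver; ``my_csv_saver`` is a hypothetical
# user-supplied function, not part of Iris:
#
#     def my_csv_saver(cube, target):
#         ...  # write `cube` to `target` in some custom text format
#
#     iris.io.add_saver("csv", my_csv_saver)
#     iris.save(some_cube, "output.csv")  # now resolved via find_saver below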
def find_saver(filespec):
"""
Find the saver function appropriate to the given filename or extension.
Args:
* filespec - A string such as "my_file.pp" or "PP".
Returns:
A save function or None.
Save functions can be passed to :func:`iris.io.save`.
"""
_check_init_savers()
matches = [ext for ext in _savers if filespec.lower().endswith('.' + ext) or
filespec.lower() == ext]
# Multiple matches could occur if one of the savers included a '.':
# e.g. _savers = {'.dot.png': dot_png_saver, '.png': png_saver}
if len(matches) > 1:
fmt = "Multiple savers found for %r: %s"
matches = ', '.join(map(repr, matches))
raise ValueError(fmt % (filespec, matches))
return _savers[matches[0]] if matches else None
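# For example, given the default mapping set up in _check_init_savers:
# find_saver("my_file.nc") returns iris.fileformats.netcdf.save,
# find_saver("PP") returns iris.fileformats.pp.save, and
# find_saver("unknown.xyz") returns None.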
def save(source, target, saver=None, **kwargs):
"""
Save one or more Cubes to file (or other writable).
Iris currently supports three file formats for saving, which it can
recognise by filename extension:
* netCDF - the Unidata network Common Data Format:
* see :func:`iris.fileformats.netcdf.save`
    * GRIB2 - the WMO GRIdded Binary data format:
        * see :func:`iris.fileformats.grib.save_grib2`
    * PP - the Met Office UM Post Processing Format:
        * see :func:`iris.fileformats.pp.save`
A custom saver can be provided to the function to write to a different
file format.
Args:
* source - A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or
sequence of cubes.
* target - A filename (or writable, depending on file format).
When given a filename or file, Iris can determine the
file format.
Kwargs:
* saver - Optional. Specifies the save function to use.
If omitted, Iris will attempt to determine the format.
This keyword can be used to implement a custom save
format. Function form must be:
``my_saver(cube, target)`` plus any custom keywords. It
is assumed that a saver will accept an ``append`` keyword
                   if its file format can handle multiple cubes. See also
:func:`iris.io.add_saver`.
All other keywords are passed through to the saver function; see the
relevant saver documentation for more information on keyword arguments.
Examples::
# Save a cube to PP
iris.save(my_cube, "myfile.pp")
# Save a cube list to a PP file, appending to the contents of the file
# if it already exists
iris.save(my_cube_list, "myfile.pp", append=True)
# Save a cube to netCDF, defaults to NETCDF4 file format
iris.save(my_cube, "myfile.nc")
        # Save a cube list to netCDF, using the NETCDF3_CLASSIC storage option
iris.save(my_cube_list, "myfile.nc", netcdf_format="NETCDF3_CLASSIC")
.. warning::
Saving a cube whose data has been loaded lazily
(if `cube.has_lazy_data()` returns `True`) to the same file it expects
to load data from will cause both the data in-memory and the data on
disk to be lost.
.. code-block:: python
cube = iris.load_cube('somefile.nc')
# The next line causes data loss in 'somefile.nc' and the cube.
iris.save(cube, 'somefile.nc')
In general, overwriting a file which is the source for any lazily loaded
data can result in corruption. Users should proceed with caution when
attempting to overwrite an existing file.
"""
# Determine format from filename
if isinstance(target, six.string_types) and saver is None:
saver = find_saver(target)
elif hasattr(target, 'name') and saver is None:
saver = find_saver(target.name)
elif isinstance(saver, six.string_types):
saver = find_saver(saver)
if saver is None:
raise ValueError("Cannot save; no saver")
# Single cube?
if isinstance(source, iris.cube.Cube):
saver(source, target, **kwargs)
# CubeList or sequence of cubes?
elif (isinstance(source, iris.cube.CubeList) or
(isinstance(source, (list, tuple)) and
all([isinstance(i, iris.cube.Cube) for i in source]))):
# Only allow cubelist saving for those fileformats that are capable.
        if 'iris.fileformats.netcdf' not in saver.__module__:
            # Make sure the saver accepts an append keyword
            if "append" not in saver.__code__.co_varnames:
raise ValueError("Cannot append cubes using saver function "
"'%s' in '%s'" %
(saver.__code__.co_name,
saver.__code__.co_filename))
# Force append=True for the tail cubes. Don't modify the incoming
# kwargs.
kwargs = kwargs.copy()
for i, cube in enumerate(source):
if i != 0:
kwargs['append'] = True
saver(cube, target, **kwargs)
# Netcdf saver.
else:
saver(source, target, **kwargs)
else:
raise ValueError("Cannot save; non Cube found in source")
| gpl-3.0 | -5,592,605,519,203,370,000 | 34.948235 | 99 | 0.624231 | false |
lcrees/callchain | callchain/lazy_auto/chainlet.py | 1 | 2446 | # -*- coding: utf-8 -*-
'''lazy auto-balancing chainlets'''
from appspace.keys import appifies
from twoq.lazy.mixins import AutoQMixin
from twoq.mixins.filtering import (
FilterMixin, CollectMixin, SetMixin, SliceMixin)
from twoq.mixins.ordering import RandomMixin, OrderMixin
from twoq.mixins.reducing import MathMixin, TruthMixin, ReduceMixin
from twoq.mixins.mapping import DelayMixin, CopyMixin, RepeatMixin, MapMixin
from callchain.chain import ChainletQ
from callchain.services.order import KRandom, KOrder
from callchain.services.reduce import KMath, KReduce, KTruth
from callchain.services.map import KDelay, KCopy, KRepeat, KMap
from callchain.services.filter import KCollect, KSet, KSlice, KFilter
__all__ = (
'mathchain', 'truthchain', 'reducechain', 'collectchain', 'setchain',
'slicechain', 'filterchain', 'delaychain', 'copychain', 'repeatchain',
'mapchain', 'randomchain', 'orderchain',
)
@appifies(KDelay)
class delaychain(ChainletQ, AutoQMixin, DelayMixin):
'''auto-balancing delayed mapping chainlet'''
@appifies(KCopy)
class copychain(ChainletQ, AutoQMixin, CopyMixin):
'''auto-balancing copy chainlet'''
@appifies(KRepeat)
class repeatchain(ChainletQ, AutoQMixin, RepeatMixin):
'''auto-balancing repeat chainlet'''
@appifies(KMap)
class mapchain(ChainletQ, AutoQMixin, MapMixin):
'''auto-balancing mapping chainlet'''
@appifies(KCollect)
class collectchain(ChainletQ, AutoQMixin, CollectMixin):
'''auto-balancing collecting chainlet'''
@appifies(KSet)
class setchain(ChainletQ, AutoQMixin, SetMixin):
    '''auto-balancing setting chainlet'''
@appifies(KSlice)
class slicechain(ChainletQ, AutoQMixin, SliceMixin):
'''auto-balancing slicing chainlet'''
@appifies(KFilter)
class filterchain(ChainletQ, AutoQMixin, FilterMixin):
'''auto-balancing filtering chainlet'''
@appifies(KRandom)
class randomchain(ChainletQ, AutoQMixin, RandomMixin):
'''auto-balancing randomizing chainlet'''
@appifies(KOrder)
class orderchain(ChainletQ, AutoQMixin, OrderMixin):
'''auto-balancing ordering chainlet'''
@appifies(KMath)
class mathchain(ChainletQ, AutoQMixin, MathMixin):
'''auto-balancing mathing chainlet'''
@appifies(KReduce)
class reducechain(ChainletQ, AutoQMixin, ReduceMixin):
'''auto-balancing reducing chainlet'''
@appifies(KTruth)
class truthchain(ChainletQ, AutoQMixin, TruthMixin):
'''auto-balancing truthing chainlet'''
| mit | 8,175,919,239,576,136,000 | 23.46 | 76 | 0.754702 | false |
huggingface/pytorch-transformers | src/transformers/models/reformer/modeling_reformer.py | 1 | 110832 | # coding=utf-8
# Copyright 2020 The Trax Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch REFORMER model. """
import sys
from collections import namedtuple
from dataclasses import dataclass
from functools import reduce
from operator import mul
from typing import List, Optional, Tuple
import numpy as np
import torch
from torch import nn
from torch.autograd.function import Function
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import (
DUMMY_INPUTS,
DUMMY_MASK,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput
from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward
from ...utils import logging
from .configuration_reformer import ReformerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "google/reformer-crime-and-punishment"
_CONFIG_FOR_DOC = "ReformerConfig"
_TOKENIZER_FOR_DOC = "ReformerTokenizer"
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/reformer-crime-and-punishment",
"google/reformer-enwik8",
# See all Reformer models at https://huggingface.co/models?filter=reformer
]
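# A minimal usage sketch (assumes `torch`, `transformers` and the
# `sentencepiece` dependency of ReformerTokenizer are installed; the
# checkpoint name is one of the checkpoints listed above):
#
#     from transformers import ReformerModel, ReformerTokenizer
#
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     model = ReformerModel.from_pretrained("google/reformer-crime-and-punishment")
#     inputs = tokenizer("Crime and Punishment", return_tensors="pt")
#     outputs = model(**inputs)
#     last_hidden_states = outputs.last_hidden_state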
# Define named tuples for nn.Modules here
LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"])
LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"])
AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"])
ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"])
ReformerBackwardOutput = namedtuple(
"ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"]
)
ReformerEncoderOutput = namedtuple(
"ReformerEncoderOutput",
["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"],
)
def _stable_argsort(vector, dim):
# this function scales the vector so that torch.argsort is stable.
# torch.argsort is not stable on its own
scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1)
scale_offset = scale_offset.expand(vector.shape)
scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim])
return torch.argsort(scaled_vector, dim=dim)
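# Worked example (hypothetical values): for a row of bucket ids [2, 1, 2, 1]
# along a dim of size 4, the scaled values are [2*4+0, 1*4+1, 2*4+2, 1*4+3]
# = [8, 5, 10, 7], so argsort returns [1, 3, 0, 2] -- equal bucket ids keep
# their original relative order, which plain torch.argsort does not guarantee.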
def _get_least_common_mult_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
def _get_min_chunk_len(config):
attn_types = config.attn_layers
attn_types_set = set(attn_types)
if len(attn_types_set) == 1 and attn_types[0] == "lsh":
return config.lsh_attn_chunk_length
elif len(attn_types_set) == 1 and attn_types[0] == "local":
return config.local_attn_chunk_length
elif len(attn_types_set) == 2 and attn_types_set == set(["lsh", "local"]):
return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select "
"attn layer types from ['lsh', 'local'] only."
)
class AxialPositionEmbeddings(nn.Module):
"""
Constructs axial position embeddings. Useful for very long input sequences to save memory and time.
"""
def __init__(self, config):
super().__init__()
self.axial_pos_shape = config.axial_pos_shape
self.axial_pos_embds_dim = config.axial_pos_embds_dim
self.dropout = config.hidden_dropout_prob
self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config)
self.weights = nn.ParameterList()
if sum(self.axial_pos_embds_dim) != config.hidden_size:
raise ValueError(
f"Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to "
f"config.hidden_size: {config.hidden_size}"
)
# create weights
for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim):
# create expanded shapes
ax_shape = [1] * len(self.axial_pos_shape)
ax_shape[axis] = self.axial_pos_shape[axis]
ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,)
# create tensor and init
self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32)))
def forward(self, position_ids):
# broadcast weights to correct shape
batch_size = position_ids.shape[0]
sequence_length = position_ids.shape[1]
broadcasted_weights = [
weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights
]
if self.training is True:
if reduce(mul, self.axial_pos_shape) != sequence_length:
raise ValueError(
f"If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to "
f"sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. "
f"You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} "
"or changing config.axial_pos_shape."
)
if self.dropout > 0:
weights = torch.cat(broadcasted_weights, dim=-1)
# permute weights so that 2D correctly drops dims 1 and 2
transposed_weights = weights.transpose(2, 1)
# drop entire matrix of last two dims (prev dims 1 and 2)
dropped_transposed_weights = nn.functional.dropout2d(
transposed_weights, p=self.dropout, training=self.training
)
dropped_weights = dropped_transposed_weights.transpose(2, 1)
position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1))
else:
position_encodings = torch.cat(
[torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights],
dim=-1,
)
else:
if reduce(mul, self.axial_pos_shape) < sequence_length:
raise ValueError(
f"Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to "
f"max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, "
f"{self.least_common_mult_chunk_length})."
)
# compute how many columns are needed
max_position_id = position_ids.max().item()
required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1])
# cut to columns that are needed
position_encodings = torch.cat(
[weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1
)
position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1]))
# select correct position encodings
position_encodings = torch.cat(
[
torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0)
for i in range(batch_size)
],
dim=0,
)
return position_encodings
class PositionEmbeddings(nn.Module):
"""Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`."""
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size)
def forward(self, position_ids):
position_embeddings = self.embedding(position_ids)
position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training)
return position_embeddings
class ReformerEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.hidden_dropout_prob
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = (
AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config)
)
def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0):
if input_ids is not None:
input_shape = input_ids.size()
device = input_ids.device
else:
input_shape = inputs_embeds.size()[:-1]
device = inputs_embeds.device
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(
start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
if position_ids.shape[-1] > self.max_position_embeddings:
raise ValueError(
f"Sequence Length: {position_ids.shape[-1]} has to be larger equal than "
f"config.max_position_embeddings {self.max_position_embeddings}."
)
# dropout
embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training)
# add positional embeddings
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
class EfficientAttentionMixin:
"""
A few utilities for nn.Modules in Reformer, to be used as a mixin.
"""
def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after):
"""
Used to implement attention between consecutive chunks.
Args:
vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...]
num_chunks_before: chunks before current chunk to include in attention
num_chunks_after: chunks after current chunk to include in attention
Returns:
tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after).
"""
if num_chunks_before == 0 and num_chunks_after == 0:
return vectors
slices = []
for i in range(-num_chunks_before, num_chunks_after + 1):
if i == 0:
slices.append(vectors)
else:
slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2))
return torch.cat(slices, dim=3)
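    # Illustration with assumed values: for chunks [c0, c1, c2] along dim 2,
    # num_chunks_before=1 and num_chunks_after=0, each output chunk j becomes
    # the concatenation [c_{j-1}, c_j] along the chunk-length dim (circularly,
    # so c0 is paired with c2); chunk j can then attend to itself and the
    # chunk before it.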
def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size):
"""
splits hidden_size dim into attn_head_size and num_attn_heads
"""
new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size)
x = x.view(*new_x_shape)
return x.transpose(2, 1)
def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size):
"""
merges attn_head_size dim and num_attn_heads dim into hidden_size
"""
x = x.permute(0, 2, 1, 3)
return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size))
def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None):
"""
splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims
"""
batch_size = vectors.shape[0]
split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2)
if len(vectors.shape) == 4:
return torch.reshape(vectors, split_dim_shape + (attn_head_size,))
elif len(vectors.shape) == 3:
return torch.reshape(vectors, split_dim_shape)
else:
raise ValueError(f"Input vector rank should be one of [3, 4], but is: {len(vectors.shape)}")
class LSHSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config):
super().__init__()
self.config = config
self.chunk_length = config.lsh_attn_chunk_length
self.num_hashes = config.num_hashes
self.num_buckets = config.num_buckets
self.num_chunks_before = config.lsh_num_chunks_before
self.num_chunks_after = config.lsh_num_chunks_after
self.hash_seed = config.hash_seed
self.is_decoder = config.is_decoder
self.max_position_embeddings = config.max_position_embeddings
self.dropout = config.lsh_attention_probs_dropout_prob
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
# projection matrices
self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
# save mask value here. Need fp32 and fp16 mask values
self.register_buffer("self_mask_value_float16", torch.tensor(-1e3))
self.register_buffer("self_mask_value_float32", torch.tensor(-1e5))
self.register_buffer("mask_value_float16", torch.tensor(-1e4))
self.register_buffer("mask_value_float32", torch.tensor(-1e9))
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
buckets=None,
past_buckets_states=None,
use_cache=False,
output_attentions=False,
**kwargs,
):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
# num hashes can optionally be overwritten by user
num_hashes = num_hashes if num_hashes is not None else self.num_hashes
do_cached_attention = use_cache and past_buckets_states[1] is not None
# check if cache shall be used and that hidden states are already cached
if do_cached_attention:
assert (
sequence_length == 1
), f"At the moment, auto-regressive language generation is only possible one word at a time. Make sure that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed."
past_buckets = past_buckets_states[0]
past_states = past_buckets_states[1]
# get query vector
query_vectors = self.query_key(hidden_states)
query_vectors = self._split_hidden_size_dim(
query_vectors, self.num_attention_heads, self.attention_head_size
)
if past_buckets is not None:
key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets(
query_vectors=query_vectors,
attention_mask=attention_mask,
num_hashes=num_hashes,
hidden_states=hidden_states,
past_states=past_states,
past_buckets=past_buckets,
)
query_key_vectors = self._query_per_attn_head(key_value_hidden_states)
value_vectors = self._value_per_attn_head(key_value_hidden_states)
# split key & value vectors by num hashes to apply
# self attention on each separately
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
num_hashes,
-1,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
num_hashes,
-1,
self.num_attention_heads,
self.attention_head_size,
)
# repeat query vectors across hash dimension
query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1)
else:
key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1)
query_key_vectors = self.query_key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
else:
# project hidden_states to query_key and value
query_vectors = None
query_key_vectors = self.query_key(hidden_states)
value_vectors = self.value(hidden_states)
# if query key is not already split
if not do_cached_attention or past_buckets is None:
query_key_vectors = self._split_hidden_size_dim(
query_key_vectors, self.num_attention_heads, self.attention_head_size
)
value_vectors = self._split_hidden_size_dim(
value_vectors, self.num_attention_heads, self.attention_head_size
)
# cache buckets for next incremental decoding
if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length:
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
# free memory
del hidden_states
assert (
query_key_vectors.shape[-1] == self.attention_head_size
), f"last dim of query_key_vectors is {query_key_vectors.shape[-1]} but should be {self.attention_head_size}."
assert (
value_vectors.shape[-1] == self.attention_head_size
), f"last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}."
do_standard_self_attention = (sequence_length <= self.chunk_length) or (
use_cache and past_buckets_states[1] is not None
)
# LSH attention only makes sense if chunked attention should be performed
if not do_standard_self_attention:
# set `num_buckets` on the fly, recommended way to do it
if self.num_buckets is None:
self._set_num_buckets(sequence_length)
# use cached buckets for backprop only
if buckets is None:
# hash query key vectors into buckets
buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask)
else:
# make sure buckets has correct shape for LSH attention
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length)
assert (
int(buckets.shape[-1]) == num_hashes * sequence_length
), f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}"
sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx(
sequence_length, buckets, num_hashes
)
            # make sure bucket idx is not longer than sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length
# cluster query key value vectors according to hashed buckets
query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes)
value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes)
query_key_vectors = self._split_seq_length_dim_to(
query_key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
elif do_cached_attention and past_buckets is not None:
# use max sequence length
sorted_bucket_idx_per_hash = sorted_bucket_idx
else:
# get sequence length indices
sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# scale key vectors
key_vectors = self._len_and_dim_norm(query_key_vectors)
# set query_vectors to query key vectors if LSH self attention
query_vectors = query_vectors if query_vectors is not None else query_key_vectors
# free memory
del query_key_vectors
# get attention probs
out_vectors, logits, attention_probs = self._attend(
query_vectors=query_vectors,
key_vectors=key_vectors,
value_vectors=value_vectors,
sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash,
attention_mask=attention_mask,
head_mask=head_mask,
do_standard_self_attention=do_standard_self_attention,
do_cached_attention=do_cached_attention,
)
# free memory
del key_vectors, value_vectors
# re-order out_vectors and logits
if not do_standard_self_attention:
# sort clusters back to correct ordering
out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx)
if not do_standard_self_attention or (do_cached_attention and past_buckets is not None):
# sum up all hash rounds
if num_hashes > 1:
out_vectors = self._split_seq_length_dim_to(
out_vectors,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
)
logits = self._split_seq_length_dim_to(
logits,
num_hashes,
sequence_length,
self.num_attention_heads,
self.attention_head_size,
).unsqueeze(-1)
probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True))
out_vectors = torch.sum(out_vectors * probs_vectors, dim=2)
# free memory
del probs_vectors
# free memory
del logits
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
), "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length, config.attention_head_size]`."
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
if buckets is not None:
buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1)
return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets)
def _query_per_attn_head(self, hidden_states):
per_head_query_key = self.query_key.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key)
return query_key_vectors
def _value_per_attn_head(self, hidden_states):
per_head_value = self.value.weight.reshape(
self.num_attention_heads, self.attention_head_size, self.hidden_size
).transpose(-2, -1)
# only relevant for inference and no bias => we can use einsum here
value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value)
return value_vectors
def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False):
batch_size = vectors.shape[0]
# See https://arxiv.org/pdf/1509.02897.pdf
# We sample a different random rotation for each round of hashing to
# decrease the probability of hash misses.
if isinstance(self.num_buckets, int):
assert (
self.num_buckets % 2 == 0
), f"There should be an even number of buckets, but `self.num_buckets`: {self.num_buckets}"
rotation_size = self.num_buckets
num_buckets = self.num_buckets
else:
# Factorize the hash if self.num_buckets is a list or tuple
rotation_size, num_buckets = 0, 1
for bucket_factor in self.num_buckets:
assert (
bucket_factor % 2 == 0
), f"The number of buckets should be even, but `num_bucket`: {bucket_factor}"
rotation_size = rotation_size + bucket_factor
num_buckets = num_buckets * bucket_factor
# remove gradient
vectors = vectors.detach()
if self.hash_seed is not None:
# for determinism
torch.manual_seed(self.hash_seed)
rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2)
# create a random self.attention_head_size x num_hashes x num_buckets/2
random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype)
# Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2
rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations)
if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1:
rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1)
buckets = torch.argmax(rotated_vectors, dim=-1)
else:
# Get the buckets for them and combine.
buckets, cur_sum, cur_product = None, 0, 1
for bucket_factor in self.num_buckets:
rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)]
cur_sum = cur_sum + bucket_factor // 2
rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1)
if buckets is None:
buckets = torch.argmax(rotated_vectors_factor, dim=-1)
else:
buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1))
cur_product = cur_product * bucket_factor
if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]):
# add an extra bucket for padding tokens only
num_buckets = num_buckets + 1
# assign padding tokens extra bucket
buckets_mask = attention_mask.to(torch.uint8)[:, None, None, :].expand(buckets.shape)
buckets = torch.where(
buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device)
)
elif increase_num_buckets:
num_buckets = num_buckets + 1
# buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len).
# Next we add offsets so that bucket numbers from different hashing rounds don't overlap.
offsets = torch.arange(num_hashes, device=vectors.device)
offsets = (offsets * num_buckets).view((1, 1, -1, 1))
# expand to batch size and num attention heads
offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:])
offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3)
return offset_buckets
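    # Sketch of a single hash round with assumed numbers: for num_buckets=4,
    # each head draws a random rotation of shape (attention_head_size, 2); a
    # vector is projected to two values r, concatenated with -r to give four
    # scores, and the argmax of those scores is its bucket id in [0, 4).
    # Nearby vectors tend to land in the same bucket (angular LSH), and the
    # per-round offsets added above keep bucket ids from different hash
    # rounds disjoint.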
def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes):
# no gradients are needed
with torch.no_grad():
# hash-based sort
sorted_bucket_idx = _stable_argsort(buckets, dim=-1)
# create simple indices to scatter to, to have undo sort
indices = (
torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device)
.view(1, 1, -1)
.expand(sorted_bucket_idx.shape)
)
# get undo sort
undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size())
undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices)
return sorted_bucket_idx, undo_sorted_bucket_idx
def _set_num_buckets(self, sequence_length):
# `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper
num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1
# make sure buckets are power of 2
num_buckets = 2 ** num_buckets_pow_2
# factorize `num_buckets` if `num_buckets` becomes too large
num_buckets_limit = 2 * max(
int((self.max_position_embeddings // self.chunk_length) ** (0.5)),
self.chunk_length,
)
if num_buckets > num_buckets_limit:
num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)]
logger.warning(f"config.num_buckets is not set. Setting config.num_buckets to {num_buckets}...")
# set num buckets in config to be properly saved
self.config.num_buckets = num_buckets
self.num_buckets = num_buckets
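    # Worked example with assumed values: sequence_length=4096 and
    # chunk_length=64 give 2 * (4096 // 64) = 128, so num_buckets=128. With
    # sequence_length=524288 (and max_position_embeddings of the same size)
    # the rule gives 16384, which exceeds the limit of 180, so it is
    # factorized into [128, 128] and handled by the list branch of
    # _hash_vectors.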
def _attend(
self,
query_vectors,
key_vectors,
value_vectors,
sorted_bucket_idx_per_hash,
attention_mask,
head_mask,
do_standard_self_attention,
do_cached_attention,
):
# look at previous and following chunks if chunked attention
if not do_standard_self_attention:
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
# get logits and dots
# (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft))
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# free memory
del query_vectors, key_vectors
# if chunked attention split bucket idxs to query and key
if not do_standard_self_attention:
query_bucket_idx = self._split_seq_length_dim_to(
sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads
)
key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after)
elif do_cached_attention and query_key_dots.ndim > 4:
key_value_bucket_idx = sorted_bucket_idx_per_hash
query_bucket_idx = (
key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max()
)
elif do_cached_attention and query_key_dots.ndim <= 4:
query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1]
key_value_bucket_idx = torch.arange(
query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device
)[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,))
else:
query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash
# get correct mask values depending on precision
if query_key_dots.dtype == torch.float16:
self_mask_value = self.self_mask_value_float16.half()
mask_value = self.mask_value_float16.half()
else:
self_mask_value = self.self_mask_value_float32
mask_value = self.mask_value_float32
if not do_cached_attention:
mask = self._compute_attn_mask(
query_bucket_idx,
key_value_bucket_idx,
attention_mask,
query_key_dots.shape,
do_standard_self_attention,
)
if mask is not None:
query_key_dots = torch.where(mask, query_key_dots, mask_value)
# free memory
del mask
# Self mask is ALWAYS applied.
# From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf):
# " While attention to the future is not allowed, typical implementations of the
# Transformer do allow a position to attend to itself.
# Such behavior is undesirable in a shared-QK formulation because the dot-product
# of a query vector with itself will almost always be greater than the dot product of a
# query vector with a vector at another position. We therefore modify the masking
# to forbid a token from attending to itself, except in situations
# where a token has no other valid attention targets (e.g. the first token in a sequence) "
self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to(
query_bucket_idx.device
)
# apply self_mask
query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value)
# free memory
del self_mask
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
# dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]`
attention_probs = torch.exp(query_key_dots - logits)
# free memory
del query_key_dots
# dropout
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# attend values
out_vectors = torch.matmul(attention_probs, value_vectors)
# free memory
del value_vectors
# merge chunk length
if out_vectors.ndim > 4:
logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1)
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
return out_vectors, logits, attention_probs
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention
):
# attention mask for LSH
if attention_mask is not None:
# if chunked attention, the attention mask has to correspond to LSH order
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
# expand attn_mask to fit with key_value_bucket_idx shape
attention_mask = attention_mask[:, None, :]
attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,))
# extract attention mask from LSH sorted key_indices
attention_mask = torch.gather(attention_mask, -1, key_indices)
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
def _get_relevant_hid_states_and_buckets(
self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets
):
# concat hidden states
hidden_states = torch.cat([past_states, hidden_states], dim=1)
# batch_size hidden
batch_size = hidden_states.shape[0]
sequence_length = hidden_states.shape[1]
# check if cached buckets include pad bucket
max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets)
# if pad bucket was cached => need to increase num buckets for caching
increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1
# retrieve query buckets
query_buckets = self._hash_vectors(
query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets
)
# concat buckets
concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1)
# hash-based sort
bucket_idx = _stable_argsort(concat_buckets, dim=-1)
# bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength
assert bucket_idx.shape == (
batch_size,
self.num_attention_heads,
num_hashes,
sequence_length,
), f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but has shape {bucket_idx.shape}."
# find indices of new bucket indices
relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero()
# expand relevant bucket indices to its chunks
relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length)
relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))]
# adapt bucket_idx for batch and hidden states for index select
bucket_idx_batch_offset = sequence_length * (
batch_size
* torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long)
// relevant_bucket_idx_chunk.shape[-1]
)
# add batch offset
relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset
hidden_states = hidden_states.reshape((-1, self.hidden_size))
# select all relevant hidden states
relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch)
# reshape hidden states and bucket_idx to correct output
relevant_hidden_states = relevant_hidden_states.reshape(
batch_size, self.num_attention_heads, -1, self.hidden_size
)
relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape(
batch_size, self.num_attention_heads, num_hashes, -1
)
assert (
relevant_hidden_states.shape[2]
== (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes
), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`, there are {relevant_hidden_states.shape[2]} `hidden_states`."
assert (
relevant_bucket_idx_chunk.shape[-1]
== (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length
), f"There should be {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`."
return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets
def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
# get relevant indices of where chunk starts and its size
start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length
total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)
# expand start indices and add correct chunk offset via arange
expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
chunk_sequence_indices = expanded_start_indices + torch.arange(
total_chunk_size, device=indices.device, dtype=torch.long
).unsqueeze(0).expand(indices.shape[0], total_chunk_size)
# make sure that circular logic holds via % seq len
chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length
# expand indices and set indices correctly
indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
indices[:, -1] = chunk_sequence_indices
return indices
def _len_and_dim_norm(self, vectors):
"""
length and attention head size dim normalization
"""
vectors = self._len_norm(vectors)
vectors = vectors * torch.rsqrt(
torch.tensor(self.attention_head_size, device=vectors.device, dtype=vectors.dtype)
)
return vectors
def _len_norm(self, x, epsilon=1e-6):
"""
length normalization
"""
variance = torch.mean(x ** 2, -1, keepdim=True)
norm_x = x * torch.rsqrt(variance + epsilon)
return norm_x
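    # Taken together, the key vectors are normalized as
    # x * rsqrt(mean(x**2, dim=-1) + eps) and then scaled by
    # 1 / sqrt(attention_head_size), i.e. an RMS-style length normalization
    # followed by the usual attention scaling.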
def _gather_by_expansion(self, vectors, idxs, num_hashes):
"""
expand dims of idxs and vectors for all hashes and gather
"""
expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
vectors = vectors.repeat(1, 1, num_hashes, 1)
return torch.gather(vectors, 2, expanded_idxs)
class ReverseSort(Function):
"""
After chunked attention is applied which sorted clusters, original ordering has to be restored. Since customized
backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted here.
"""
@staticmethod
def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx):
# save sorted_bucket_idx for backprop
with torch.no_grad():
ctx.sorted_bucket_idx = sorted_bucket_idx
# undo sort to have correct order for next layer
expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape)
out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices)
logits = torch.gather(logits, 2, undo_sorted_bucket_idx)
return out_vectors, logits
@staticmethod
def backward(ctx, grad_out_vectors, grad_logits):
# get parameters saved in ctx
sorted_bucket_idx = ctx.sorted_bucket_idx
expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape)
# reverse sort of forward
grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices)
grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx)
# return grad and `None` fillers for last 2 forward args
return grad_out_vectors, grad_logits, None, None
class LocalSelfAttention(nn.Module, EfficientAttentionMixin):
def __init__(self, config):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.chunk_length = config.local_attn_chunk_length
self.num_chunks_before = config.local_num_chunks_before
self.num_chunks_after = config.local_num_chunks_after
self.is_decoder = config.is_decoder
self.pad_token_id = config.pad_token_id
self.attention_head_size = config.attention_head_size
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.hidden_size = config.hidden_size
# projection matrices
self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False)
self.dropout = config.local_attention_probs_dropout_prob
# save mask value here
self.register_buffer("mask_value_float16", torch.tensor(-1e4))
self.register_buffer("mask_value_float32", torch.tensor(-1e9))
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
past_buckets_states=None,
use_cache=False,
output_attentions=False,
**kwargs,
):
sequence_length = hidden_states.shape[1]
batch_size = hidden_states.shape[0]
# check if cache shall be used and that hidden states are already cached
if use_cache and past_buckets_states[1] is not None:
assert (
past_buckets_states[0] is None
), "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching hidden_states_and_buckets."
key_value_hidden_states = self._retrieve_relevant_hidden_states(
past_buckets_states[1], self.chunk_length, self.num_chunks_before
)
key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1)
# only query vector for last token
query_vectors = self.query(hidden_states)
# compute key and value for relevant chunk
key_vectors = self.key(key_value_hidden_states)
value_vectors = self.value(key_value_hidden_states)
# free memory
del key_value_hidden_states
else:
# project hidden_states to query, key and value
query_vectors = self.query(hidden_states)
key_vectors = self.key(hidden_states)
value_vectors = self.value(hidden_states)
# split last dim into `config.num_attention_heads` and `config.attention_head_size`
query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size)
key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size)
value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size)
        assert (
            query_vectors.shape[-1] == self.attention_head_size
        ), f"last dim of query_vectors is {query_vectors.shape[-1]} but should be {self.attention_head_size}."
        assert (
            key_vectors.shape[-1] == self.attention_head_size
        ), f"last dim of key_vectors is {key_vectors.shape[-1]} but should be {self.attention_head_size}."
        assert (
            value_vectors.shape[-1] == self.attention_head_size
        ), f"last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}."
if self.chunk_length is None:
assert (
self.num_chunks_before == 0 and self.num_chunks_after == 0
), "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and `config.num_chunks_before` are set to 0."
# normalize key vectors
key_vectors = key_vectors / torch.sqrt(
torch.tensor(self.attention_head_size, device=key_vectors.device, dtype=key_vectors.dtype)
)
# get sequence length indices
indices = torch.arange(sequence_length, device=query_vectors.device).repeat(
batch_size, self.num_attention_heads, 1
)
# if one should do normal n^2 self-attention
do_standard_self_attention = sequence_length <= self.chunk_length
# if input should be chunked
if not do_standard_self_attention:
# chunk vectors
# B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size
query_vectors = self._split_seq_length_dim_to(
query_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
key_vectors = self._split_seq_length_dim_to(
key_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
value_vectors = self._split_seq_length_dim_to(
value_vectors,
-1,
self.chunk_length,
self.num_attention_heads,
self.attention_head_size,
)
# chunk indices
query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads)
# append chunks before and after
key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after)
value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after)
key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after)
else:
query_indices = key_indices = indices
# query-key matmul: QK^T
query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2))
# free memory
del query_vectors, key_vectors
mask = self._compute_attn_mask(
query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention
)
if mask is not None:
# get mask tensor depending on half precision or not
if query_key_dots.dtype == torch.float16:
mask_value = self.mask_value_float16.half()
else:
mask_value = self.mask_value_float32
query_key_dots = torch.where(mask, query_key_dots, mask_value)
# free memory
del mask
# softmax
logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True)
attention_probs = torch.exp(query_key_dots - logits)
# free memory
del logits
# dropout
attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
# attend values
out_vectors = torch.matmul(attention_probs, value_vectors)
# free memory
del value_vectors
# merge chunk length
if not do_standard_self_attention:
out_vectors = out_vectors.flatten(start_dim=2, end_dim=3)
assert out_vectors.shape == (
batch_size,
self.num_attention_heads,
sequence_length,
self.attention_head_size,
)
out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size)
if output_attentions is False:
attention_probs = ()
return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs)
def _compute_attn_mask(
self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention
):
# chunk attention mask and look before and after
if attention_mask is not None:
attention_mask = attention_mask.to(torch.uint8)[:, None, :]
if not do_standard_self_attention:
attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1)
attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after)
# create attn_mask
attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dots_shape)
# Causal mask
if self.is_decoder is True:
causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device)
# add attention mask if not None
if attention_mask is not None:
attention_mask = causal_mask * attention_mask
else:
attention_mask = causal_mask
return attention_mask
@staticmethod
def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before):
start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length
return previous_hidden_states[:, start_position:]
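    # Worked example with assumed numbers: with chunk_length=64,
    # num_chunks_before=1 and 150 cached positions, start_position is
    # ((150 // 64) - 1) * 64 = 64, so positions 64..149 are kept -- enough to
    # rebuild the current partial chunk plus the one chunk before it that
    # local attention is allowed to look at.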
class ReformerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
all_head_size = config.num_attention_heads * config.attention_head_size
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ReformerAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.layer_id = layer_id
self.attn_layers = config.attn_layers
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh":
self.self_attention = LSHSelfAttention(config)
elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local":
self.self_attention = LocalSelfAttention(config)
elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == set(["lsh", "local"]):
# get correct attn layers
if self.attn_layers[self.layer_id] == "lsh":
self.self_attention = LSHSelfAttention(config)
else:
self.self_attention = LocalSelfAttention(config)
else:
raise NotImplementedError(
f"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {self.attn_layers}. "
"Select attn layer types from ['lsh', 'local'] only."
)
self.output = ReformerSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_attentions=False,
buckets=None,
):
hidden_states = self.layer_norm(hidden_states)
# make sure cached hidden states is set to None for backward pass
if past_buckets_states is not None:
past_buckets_states_layer = past_buckets_states[self.layer_id]
else:
past_buckets_states_layer = None
        # use cached buckets for backprop if buckets is not None for LSHSelfAttention
self_attention_outputs = self.self_attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states_layer,
use_cache=use_cache,
output_attentions=output_attentions,
buckets=buckets,
)
# add buckets if necessary
if hasattr(self_attention_outputs, "buckets"):
buckets = self_attention_outputs.buckets
else:
buckets = None
# cache hidden states for future use
if use_cache:
if past_buckets_states[self.layer_id][0] is None:
# padded input should not be cached
past_buckets = (
buckets[:, :, :, :orig_sequence_length]
if (buckets is not None and orig_sequence_length > 1)
else buckets
)
else:
past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1)
if past_buckets_states[self.layer_id][1] is None:
# padded input should not be cached
past_states = hidden_states[:, :orig_sequence_length]
else:
past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1)
past_buckets_states[self.layer_id] = (past_buckets, past_states)
# compute attention feed forward output
attention_output = self.output(self_attention_outputs.hidden_states)
return AttentionOutput(
hidden_states=attention_output,
attention_probs=self_attention_outputs.attention_probs,
buckets=buckets,
)
class ReformerFeedForwardDense(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
if isinstance(config.hidden_act, str):
self.act_fn = ACT2FN[config.hidden_act]
else:
self.act_fn = config.hidden_act
self.dense = nn.Linear(config.hidden_size, config.feed_forward_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = self.act_fn(hidden_states)
return hidden_states
class ReformerFeedForwardOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.dense = nn.Linear(config.feed_forward_size, config.hidden_size)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return hidden_states
class ChunkReformerFeedForward(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dense = ReformerFeedForwardDense(config)
self.output = ReformerFeedForwardOutput(config)
def forward(self, attention_output):
return apply_chunking_to_forward(
self.forward_chunk,
self.chunk_size_feed_forward,
self.seq_len_dim,
attention_output,
)
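    # apply_chunking_to_forward slices `attention_output` along the sequence
    # dim (self.seq_len_dim) into pieces of config.chunk_size_feed_forward and
    # runs forward_chunk on each piece, trading compute for peak memory. A
    # sketch with assumed values: chunk_size_feed_forward=64 on a 4096-token
    # sequence runs the dense layers 64 tokens at a time; a value of 0
    # disables chunking.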
def forward_chunk(self, hidden_states):
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dense(hidden_states)
return self.output(hidden_states)
class ReformerLayer(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.attention = ReformerAttention(config, layer_id)
        # dropout requires the same seed
        # for the forward and the backward pass
self.attention_seed = None
self.feed_forward_seed = None
self.feed_forward = ChunkReformerFeedForward(config)
def _init_attention_seed(self):
"""
This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1
normal forward call and 1 forward call in backward to recalculate activations.
"""
# randomize seeds
# use cuda generator if available
if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
# GPU
device_idx = torch.cuda.current_device()
self.attention_seed = torch.cuda.default_generators[device_idx].seed()
else:
# CPU
self.attention_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.attention_seed)
def _init_feed_forward_seed(self):
"""
This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls:
1 normal forward call and 1 forward call in backward to recalculate activations.
"""
# randomize seeds
# use cuda generator if available
if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0:
# GPU
device_idx = torch.cuda.current_device()
self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed()
else:
# CPU
self.feed_forward_seed = int(torch.seed() % sys.maxsize)
torch.manual_seed(self.feed_forward_seed)
def forward(
self,
prev_attn_output,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_attentions=False,
):
with torch.no_grad():
# every forward pass we sample a different seed
# for dropout and save for forward fn in backward pass
# to have correct dropout
if self.training:
self._init_attention_seed()
attn_outputs = self.attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_attentions=output_attentions,
)
attn_output = attn_outputs.hidden_states
# Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0)
# Y_1 = X_1 + f(X_2)
attn_output = prev_attn_output + attn_output
# free memory
del prev_attn_output
# every forward pass we sample a different seed
# for dropout and save seed for forward fn in backward
# to have correct dropout
if self.training:
self._init_feed_forward_seed()
# Y_2 = X_2 + g(Y_1)
hidden_states = hidden_states + self.feed_forward(attn_output)
return ReformerOutput(
attn_output=attn_output,
hidden_states=hidden_states,
attention_probs=attn_outputs.attention_probs,
buckets=attn_outputs.buckets,
)
def backward_pass(
self,
next_attn_output,
hidden_states,
grad_attn_output,
grad_hidden_states,
attention_mask=None,
head_mask=None,
buckets=None,
):
        # Implements the backward pass for reversible ResNets.
        # A good blog post on how this works can be found here:
        # https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0 (see Fig. 6)
# This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
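        # With Y_1 = X_1 + f(X_2) and Y_2 = X_2 + g(Y_1), the layer inputs and their
        # gradients can be recovered from the outputs alone:
        #   X_2 = Y_2 - g(Y_1)    and    X_1 = Y_1 - f(X_2)
        # dX_1 is dY_1 plus the gradient flowing back through g (next_attn_output.grad),
        # dX_2 is dY_2 plus the gradient flowing back through f (hidden_states.grad),
        # which is exactly what the two autograd blocks below accumulate.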
assert (
self.training
), "If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the model into training mode."
with torch.enable_grad():
next_attn_output.requires_grad = True
# set seed to have correct dropout
torch.manual_seed(self.feed_forward_seed)
# g(Y_1)
res_hidden_states = self.feed_forward(next_attn_output)
res_hidden_states.backward(grad_hidden_states, retain_graph=True)
with torch.no_grad():
# X_2 = Y_2 - g(Y_1)
hidden_states = hidden_states - res_hidden_states
del res_hidden_states
grad_attn_output = grad_attn_output + next_attn_output.grad
next_attn_output.grad = None
with torch.enable_grad():
hidden_states.requires_grad = True
# set seed to have correct dropout
torch.manual_seed(self.attention_seed)
# f(X_2)
            # use cached buckets for backprop if buckets is not None (LSHSelfAttention)
output = self.attention(
hidden_states=hidden_states,
head_mask=head_mask,
attention_mask=attention_mask,
buckets=buckets,
).hidden_states
output.backward(grad_attn_output, retain_graph=True)
with torch.no_grad():
# X_1 = Y_1 - f(X_2)
attn_output = next_attn_output - output
del output, next_attn_output
grad_hidden_states = grad_hidden_states + hidden_states.grad
hidden_states.grad = None
hidden_states = hidden_states.detach()
return ReformerBackwardOutput(
attn_output=attn_output,
hidden_states=hidden_states,
grad_attn_output=grad_attn_output,
grad_hidden_states=grad_hidden_states,
)
class _ReversibleFunction(Function):
"""
    To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here.
    This makes sure that no memory-expensive activations are saved during the forward pass. This function is
    heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py
"""
@staticmethod
def forward(
ctx,
hidden_states,
layers,
attention_mask,
head_mask,
num_hashes,
all_hidden_states,
all_attentions,
past_buckets_states,
use_cache,
orig_sequence_length,
output_hidden_states,
output_attentions,
):
all_buckets = ()
# split duplicated tensor
hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1)
for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)):
if output_hidden_states is True:
all_hidden_states.append(hidden_states)
layer_outputs = layer(
prev_attn_output=attn_output,
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=layer_head_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_attentions=output_attentions,
)
attn_output = layer_outputs.attn_output
hidden_states = layer_outputs.hidden_states
all_buckets = all_buckets + (layer_outputs.buckets,)
if output_attentions:
all_attentions.append(layer_outputs.attention_probs)
# Add last layer
if output_hidden_states is True:
all_hidden_states.append(hidden_states)
# attach params to ctx for backward
ctx.save_for_backward(attn_output.detach(), hidden_states.detach())
ctx.layers = layers
ctx.all_buckets = all_buckets
ctx.head_mask = head_mask
ctx.attention_mask = attention_mask
# Concatenate 2 RevNet outputs
return torch.cat([attn_output, hidden_states], dim=-1)
@staticmethod
def backward(ctx, grad_hidden_states):
grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1)
# retrieve params from ctx for backward
attn_output, hidden_states = ctx.saved_tensors
# create tuple
output = ReformerBackwardOutput(
attn_output=attn_output,
hidden_states=hidden_states,
grad_attn_output=grad_attn_output,
grad_hidden_states=grad_hidden_states,
)
# free memory
del grad_attn_output, grad_hidden_states, attn_output, hidden_states
layers = ctx.layers
all_buckets = ctx.all_buckets
head_mask = ctx.head_mask
attention_mask = ctx.attention_mask
for idx, layer in enumerate(layers[::-1]):
# pop last buckets from stack
buckets = all_buckets[-1]
all_buckets = all_buckets[:-1]
# backprop
output = layer.backward_pass(
next_attn_output=output.attn_output,
hidden_states=output.hidden_states,
grad_attn_output=output.grad_attn_output,
grad_hidden_states=output.grad_hidden_states,
head_mask=head_mask[len(layers) - idx - 1],
attention_mask=attention_mask,
buckets=buckets,
)
assert all_buckets == (), "buckets have to be empty after backpropagation"
grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1)
# num of return vars has to match num of forward() args
# return gradient for hidden_states arg and None for other args
return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None
class ReformerEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.dropout = config.hidden_dropout_prob
self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)])
# Reformer is using Rev Nets, thus last layer outputs are concatenated and
# Layer Norm is done over 2 * hidden_size
self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
num_hashes=None,
past_buckets_states=None,
use_cache=False,
orig_sequence_length=None,
output_hidden_states=False,
output_attentions=False,
):
# hidden_states and attention lists to be filled if wished
all_hidden_states = []
all_attentions = []
# init cached hidden states if necessary
if past_buckets_states is None:
            past_buckets_states = [(None, None) for _ in range(len(self.layers))]
# concat same tensor for reversible ResNet
hidden_states = torch.cat([hidden_states, hidden_states], dim=-1)
hidden_states = _ReversibleFunction.apply(
hidden_states,
self.layers,
attention_mask,
head_mask,
num_hashes,
all_hidden_states,
all_attentions,
past_buckets_states,
use_cache,
orig_sequence_length,
output_hidden_states,
output_attentions,
)
# Apply layer norm to concatenated hidden states
hidden_states = self.layer_norm(hidden_states)
# Apply dropout
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
return ReformerEncoderOutput(
hidden_states=hidden_states,
all_hidden_states=all_hidden_states,
all_attentions=all_attentions,
past_buckets_states=past_buckets_states,
)
class ReformerOnlyLMHead(nn.Module):
def __init__(self, config):
super().__init__()
# Reformer is using Rev Nets, thus last layer outputs are concatenated and
# Layer Norm is done over 2 * hidden_size
self.seq_len_dim = 1
self.chunk_size_lm_head = config.chunk_size_lm_head
self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
def forward_chunk(self, hidden_states):
hidden_states = self.decoder(hidden_states)
return hidden_states
class ReformerPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ReformerConfig
base_model_prefix = "reformer"
@property
def dummy_inputs(self):
input_ids = torch.tensor(DUMMY_INPUTS)
input_mask = torch.tensor(DUMMY_MASK)
dummy_inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
return dummy_inputs
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, AxialPositionEmbeddings):
for weight in module.weights:
torch.nn.init.normal_(weight, std=self.config.axial_norm_std)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@dataclass
class ReformerModelOutput(ModelOutput):
"""
Output type of :class:`~transformers.ReformerModel`.
Args:
        last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, 2 * hidden_size)`):
            Sequence of hidden-states at the last layer of the model. The last dimension is ``2 * hidden_size``
            because Reformer uses reversible residual layers, whose two streams are concatenated before the final
            layer norm.
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor)` of length :obj:`config.n_layers`, with the
            first element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes,
            sequence_length)` and the second being the previous `hidden_states` of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to
speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
last_hidden_state: torch.FloatTensor
past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class ReformerModelWithLMHeadOutput(ModelOutput):
"""
Output type of :class:`~transformers.ReformerModelWithLMHead`.
Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Language modeling loss (for next-token prediction).
        logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`, returned when ``use_cache=True`` is passed or when ``config.use_cache=True``):
            List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor)` of length :obj:`config.n_layers`, with the
            first element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes,
            sequence_length)` and the second being the previous `hidden_states` of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
Contains precomputed buckets and hidden-states that can be used (see ``past_buckets_states`` input) to
speed up sequential decoding.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings and one for the output of each
layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
REFORMER_START_DOCSTRING = r"""
Reformer was proposed in `Reformer: The Efficient Transformer <https://arxiv.org/abs/2001.04451>`__ by Nikita
Kitaev, Łukasz Kaiser, Anselm Levskaya.
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.ReformerConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
REFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be
a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices
are automatically padded to be a multiple of the chunk length.
Indices can be obtained using :class:`~transformers.ReformerTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
num_hashes (:obj:`int`, `optional`):
The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
the default defined in :obj:`config.num_hashes`.
For more information, see :obj:`num_hashes` in :class:`~transformers.ReformerConfig`.
past_buckets_states (:obj:`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, `optional`):
            List of :obj:`Tuple(torch.LongTensor, torch.FloatTensor)` of length :obj:`config.n_layers`, with the
            first element being the previous `buckets` of shape :obj:`(batch_size, num_heads, num_hashes,
            sequence_length)` and the second being the previous `hidden_states` of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to speed
up sequential decoding.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Reformer Model transformer outputting raw hidden-states" "without any specific head on top.",
REFORMER_START_DOCSTRING,
)
class ReformerModel(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
assert (
self.config.num_hidden_layers > 0
), "`config.attn_layers` is empty. Select at least one attn layer form ['lsh', 'local']"
self.embeddings = ReformerEmbeddings(config)
self.encoder = ReformerEncoder(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=ReformerModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
past_buckets_states=None,
use_cache=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
use_cache = use_cache if use_cache is not None else self.config.use_cache
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size() # noqa: F841
device = input_ids.device
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1] # noqa: F841
device = inputs_embeds.device
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
assert (
len(input_shape) == 2
), f"`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {input_shape}"
if past_buckets_states is not None:
            assert not self.training, "`past_buckets_states` can only be used for inference, not for training."
# prepare head mask
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)
# original sequence length for padding
orig_sequence_length = input_shape[-1]
# if needs padding
least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
min_chunk_length = _get_min_chunk_len(self.config)
must_pad_to_match_chunk_length = (
input_shape[-1] % least_common_mult_chunk_length != 0
and input_shape[-1] > min_chunk_length
and past_buckets_states is None
)
if must_pad_to_match_chunk_length:
padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length
if self.training is True:
raise ValueError(
f"If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple "
f"chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length "
f"of {input_shape[-1] + padding_length}."
)
# pad input
input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(
input_ids,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
position_ids=position_ids,
input_shape=input_shape,
padding_length=padding_length,
padded_seq_length=least_common_mult_chunk_length,
device=device,
)
# start index for position encoding depends on incremental decoding
if past_buckets_states is not None:
start_idx_pos_encodings = past_buckets_states[0][1].shape[1]
else:
start_idx_pos_encodings = 0
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
start_idx_pos_encodings=start_idx_pos_encodings,
)
encoder_outputs = self.encoder(
hidden_states=embedding_output,
head_mask=head_mask,
attention_mask=attention_mask,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
orig_sequence_length=orig_sequence_length,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
)
sequence_output = encoder_outputs.hidden_states
# if padding was applied
if must_pad_to_match_chunk_length:
sequence_output = sequence_output[:, :orig_sequence_length]
past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None
hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None
attentions = encoder_outputs.all_attentions if output_attentions else None
if not return_dict:
return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None)
return ReformerModelOutput(
last_hidden_state=sequence_output,
past_buckets_states=past_buckets_states,
hidden_states=hidden_states,
attentions=attentions,
)
def _pad_to_mult_of_chunk_length(
self,
input_ids,
inputs_embeds=None,
attention_mask=None,
position_ids=None,
input_shape=None,
padding_length=None,
padded_seq_length=None,
device=None,
):
logger.info(
f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a "
f"multiple of `config.chunk_length`: {padded_seq_length}"
)
padded_input_ids = torch.full(
(input_shape[0], padding_length),
self.config.pad_token_id,
device=device,
dtype=torch.long,
)
# Extend `attention_mask`
if attention_mask is not None:
pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)
attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
else:
attention_mask = torch.cat(
[
torch.ones(input_shape, device=device, dtype=torch.uint8),
torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.uint8),
],
dim=-1,
)
# Extend `input_ids` with padding to match least common multiple chunk_length
if input_ids is not None:
input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
input_shape = input_ids.size()
# Pad position ids if given
if position_ids is not None:
padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device)
            padded_position_ids = padded_position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)
# Extend `inputs_embeds` with padding to match least common multiple chunk_length
if inputs_embeds is not None:
padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)
inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
input_shape = inputs_embeds.size()
return input_ids, inputs_embeds, attention_mask, position_ids, input_shape
@add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING)
class ReformerModelWithLMHead(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`."
assert (
"local" not in self.config.attn_layers or config.local_num_chunks_after == 0
), f"If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not {config.local_num_chunks_after}."
assert (
"lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0
), f"If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 1 and not {config.lsh_num_chunks_after}."
self.reformer = ReformerModel(config)
self.lm_head = ReformerOnlyLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=CausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
past_buckets_states=None,
use_cache=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
labels=None,
):
r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the language modeling loss. Indices should be in :obj:`[-100, 0, ...,
            config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only
            computed for labels in ``[0, ..., config.vocab_size - 1]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
past_buckets_states=past_buckets_states,
use_cache=use_cache,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = reformer_outputs[0]
logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1))
if not return_dict:
output = (logits,) + reformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return ReformerModelWithLMHeadOutput(
loss=loss,
logits=logits,
past_buckets_states=reformer_outputs.past_buckets_states,
hidden_states=reformer_outputs.hidden_states,
attentions=reformer_outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, use_cache=None, num_hashes=None, **kwargs):
# only last token for inputs_ids if past is defined in kwargs
if past is not None:
input_ids = input_ids[:, -1:]
inputs_dict = {
"input_ids": input_ids,
"past_buckets_states": past,
"use_cache": use_cache,
"num_hashes": num_hashes,
}
return inputs_dict
def _reorder_cache(self, past, beam_idx):
reord_past_buckets_states = []
for layer_past in past:
# buckets
if layer_past[0] is not None:
reord_buckets = layer_past[0].index_select(0, beam_idx)
else:
reord_buckets = None
# hidden states
reord_hidden_states = layer_past[1].index_select(0, beam_idx)
reord_past_buckets_states.append((reord_buckets, reord_hidden_states))
return reord_past_buckets_states
@add_start_docstrings("""Reformer Model with a `language modeling` head on top. """, REFORMER_START_DOCSTRING)
class ReformerForMaskedLM(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
assert (
not config.is_decoder
), "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional self-attention."
self.reformer = ReformerModel(config)
self.lm_head = ReformerOnlyLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
labels=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size - 1]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
            ignored (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size - 1]``
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
use_cache=False, # no causal mask
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = reformer_outputs[0]
logits = self.lm_head(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + reformer_outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=logits,
hidden_states=reformer_outputs.hidden_states,
attentions=reformer_outputs.attentions,
)
@add_start_docstrings(
"""
Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
REFORMER_START_DOCSTRING,
)
class ReformerForSequenceClassification(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.reformer = ReformerModel(config)
self.classifier = ReformerClassificationHead(config)
if config.is_decoder is True:
logger.warning("You might want to disable causal masking for sequence classification")
self.init_weights()
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
labels=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class ReformerClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states, **kwargs):
hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = torch.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
@add_start_docstrings(
"""
    Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA
    (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
REFORMER_START_DOCSTRING,
)
class ReformerForQuestionAnswering(ReformerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.reformer = ReformerModel(config)
# 2 * config.hidden_size because we use reversible residual layers
self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
position_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
num_hashes=None,
start_positions=None,
end_positions=None,
output_hidden_states=None,
output_attentions=None,
return_dict=None,
):
r"""
start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the
sequence are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
reformer_outputs = self.reformer(
input_ids,
position_ids=position_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
num_hashes=num_hashes,
use_cache=False, # no causal mask
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
sequence_output = reformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, splitting adds an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + reformer_outputs[1:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=reformer_outputs.hidden_states,
attentions=reformer_outputs.attentions,
)
| apache-2.0 | -5,121,016,691,501,299,000 | 41.824961 | 220 | 0.61832 | false |
darkfeline/mir.dlsite | setup.py | 1 | 1535 | # Copyright (C) 2016 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from setuptools import setup
def find_version(path):
with open(path) as f:
text = f.read()
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
text, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
setup(
name='mir.dlsite',
version=find_version('mir/dlsite/__init__.py'),
description='API for DLsite',
long_description='',
keywords='',
url='https://github.com/darkfeline/mir.dlsite',
author='Allen Li',
author_email='[email protected]',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
],
packages=['mir.dlsite'],
install_requires=[
'beautifulsoup4~=4.6',
'dataclasses==0.5',
'lxml~=4.0',
],
)
| apache-2.0 | 3,668,088,456,833,777,700 | 29.098039 | 74 | 0.636482 | false |
UCSD-PL/kraken | reflex/coq/bench-quark/test/quark/output/output.py | 1 | 6774 | #!/usr/bin/python
import sys
import os
import tempfile
import gobject
import gtk
import socket
import shm
import threading
import time
import struct
import cairo
import array
import cPickle as pickle
import message
import config
#gtk.gdk.threads_init()
def olog(str):
olog_nonl(str + "\n")
def olog_nonl(str):
sys.stderr.write("O: " + str)
sys.stderr.flush()
class UI:
shm_obj = None
sem_obj = None
cr = None
pixbuf = None
rectangle = None
def redraw(self) :
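        # Layout of the shared-memory segment written by the renderer, as unpacked
        # below: five native-order int32 header fields (encoded-image byte length,
        # x, y, width, height) followed by the encoded image bytes that are fed to
        # gtk.gdk.PixbufLoader.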
print "redraw-----"
if self.sem_obj != None:
self.thread_lock.acquire()
try :
try :
self.sem_obj.P()
try :
shm_obj = self.shm_obj
size = struct.unpack_from("i", shm_obj.read(4,4*0))[0]
x = struct.unpack_from("i", shm_obj.read(4,4*1))[0]
y = struct.unpack_from("i", shm_obj.read(4,4*2))[0]
width = struct.unpack_from("i", shm_obj.read(4,4*3))[0]
height = struct.unpack_from("i", shm_obj.read(4,4*4))[0]
pixbufloader = gtk.gdk.PixbufLoader()
pixbufloader.write(shm_obj.read(size,4*5))
pixbufloader.close()
pixbuf = pixbufloader.get_pixbuf()
# shm_obj = self.shm_obj
# size = struct.unpack_from("i", shm_obj.read(4,4*0))[0]
# x = struct.unpack_from("i", shm_obj.read(4,4*1))[0]
# y = struct.unpack_from("i", shm_obj.read(4,4*2))[0]
# width = struct.unpack_from("i", shm_obj.read(4,4*3))[0]
# height = struct.unpack_from("i", shm_obj.read(4,4*4))[0]
# pixels = pickle.loads(shm_obj.read(size,4*5))
# pixbuf = gtk.gdk.pixbuf_new_from_array(pixels, gtk.gdk.COLORSPACE_RGB,8)
finally :
self.sem_obj.V()
pass
#print pixbuf.get_width()
#print pixbuf.get_height()
#print x
#print y
pixbuf.copy_area(0, 0, pixbuf.get_width(), pixbuf.get_height(), self.pixbuf, x, y)
self.rectangle = (x,y,width,height)
self.win.queue_draw_area(x,y, pixbuf.get_width(), pixbuf.get_height())
except TypeError:
olog("unexpected error:" + str(sys.exc_info()[0]))
pass
except :
olog("unexpected general error:" + str(sys.exc_info()[0]))
pass
finally:
self.thread_lock.release()
pass
def window_destroyed(self, widget, data=None):
#olog("window is destroyed")
gtk.main_quit()
def expose(self, widget, event):
# Load Cairo drawing context.
self.thread_lock.acquire()
try :
if self.pixbuf <> None :
area = event.area
#olog("x,y,width,height = %d %d %d %d" % (area.x, area.y, area.width, area.height))
self.pixbuf.render_to_drawable(self.win.window, gtk.gdk.GC(self.win.window), area.x, area.y, area.x, area.y, area.width, area.height)
# if self.rectangle <> None:
# cr = widget.window.cairo_create()
# cr.set_line_width(1)
# cr.set_source_rgb(255, 0, 0)
# cr.rectangle(self.rectangle[0], self.rectangle[1], self.rectangle[2], self.rectangle[3])
# cr.stroke()
finally:
self.thread_lock.release()
def handle_input(self, source, condition):
#olog("handle_input:")
m = self.message_handler.recv()
if m[0] == message.RenderCompleted :
# load a new shared memory
#olog("display msg is received")
shmid = int(m[1])
if self.shm_obj <> None:
if self.shm_obj.shmid == shmid :
self.redraw()
else:
self.thread_lock.acquire()
try :
self.shm_obj.detach()
self.shm_obj = shm.memory(shmid)
self.sem_obj = shm.semaphore(shm.getsemid(shmid))
self.shm_obj.attach()
finally:
self.thread_lock.release()
else :
self.thread_lock.acquire()
try :
self.shm_obj = shm.memory(shmid)
self.sem_obj = shm.semaphore(shm.getsemid(shmid))
self.shm_obj.attach()
finally:
self.thread_lock.release()
else :
sys.stderr.write('invalid event type\n')
sys.exit(1)
gobject.io_add_watch(self.soc.fileno(), gobject.IO_IN, self.handle_input)
return False
def handle_hup(self, source, condition):
gtk.main_quit()
return False
def main(self):
self.thread_lock = threading.Lock()
self.shm_obj = None
self.sem_obj = None
self.message_handler = message.MessageHandler()
self.soc = self.message_handler.KCHAN
# socket.fromfd(int(sys.argv[1]), msg.FAMILY, msg.TYPE)
gobject.io_add_watch(self.soc.fileno(), gobject.IO_IN, self.handle_input)
gobject.io_add_watch(self.soc.fileno(), gobject.IO_HUP, self.handle_hup)
window = gtk.Window() #gtk.WINDOW_TOPLEVEL)
window.set_decorated(False)
window.set_app_paintable(True)
screen = window.get_screen()
rgba = screen.get_rgba_colormap()
window.set_colormap(rgba)
window.set_title("Quark Web Browser Output")
vsize = config.ydimension
window.set_default_size(1100, vsize - 200)
#window.set_keep_above(True)
window.set_decorated(False)
window.connect("destroy", self.window_destroyed)
window.connect('expose-event', self.expose)
#window.move(100,300)
window.move(63,143)
self.win = window
window.show_all()
(x,y,width,height,depth) = self.win.window.get_geometry()
#width = 4000
#height = 4000
#self.pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height)
self.pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, False, 8, width, height)
gtk.main()
def curr_tab(self):
return self.tabs[self.curr]
UI().main()
| gpl-2.0 | 2,311,913,955,336,338,400 | 35.419355 | 149 | 0.50059 | false |
Cinntax/home-assistant | homeassistant/components/fibaro/__init__.py | 1 | 18019 | """Support for the Fibaro devices."""
import logging
from collections import defaultdict
from typing import Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ARMED,
ATTR_BATTERY_LEVEL,
CONF_DEVICE_CLASS,
CONF_EXCLUDE,
CONF_ICON,
CONF_PASSWORD,
CONF_URL,
CONF_USERNAME,
CONF_WHITE_VALUE,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_ENERGY_KWH = "current_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
CONF_COLOR = "color"
CONF_DEVICE_CONFIG = "device_config"
CONF_DIMMING = "dimming"
CONF_GATEWAYS = "gateways"
CONF_PLUGINS = "plugins"
CONF_RESET_COLOR = "reset_color"
DOMAIN = "fibaro"
FIBARO_CONTROLLERS = "fibaro_controllers"
FIBARO_DEVICES = "fibaro_devices"
FIBARO_COMPONENTS = [
"binary_sensor",
"climate",
"cover",
"light",
"scene",
"sensor",
"switch",
]
FIBARO_TYPEMAP = {
"com.fibaro.multilevelSensor": "sensor",
"com.fibaro.binarySwitch": "switch",
"com.fibaro.multilevelSwitch": "switch",
"com.fibaro.FGD212": "light",
"com.fibaro.FGR": "cover",
"com.fibaro.doorSensor": "binary_sensor",
"com.fibaro.doorWindowSensor": "binary_sensor",
"com.fibaro.FGMS001": "binary_sensor",
"com.fibaro.heatDetector": "binary_sensor",
"com.fibaro.lifeDangerSensor": "binary_sensor",
"com.fibaro.smokeSensor": "binary_sensor",
"com.fibaro.remoteSwitch": "switch",
"com.fibaro.sensor": "sensor",
"com.fibaro.colorController": "light",
"com.fibaro.securitySensor": "binary_sensor",
"com.fibaro.hvac": "climate",
"com.fibaro.setpoint": "climate",
"com.fibaro.FGT001": "climate",
"com.fibaro.thermostatDanfoss": "climate",
}
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema(
{
vol.Optional(CONF_DIMMING): cv.boolean,
vol.Optional(CONF_COLOR): cv.boolean,
vol.Optional(CONF_WHITE_VALUE): cv.boolean,
vol.Optional(CONF_RESET_COLOR): cv.boolean,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Optional(CONF_ICON): cv.string,
}
)
FIBARO_ID_LIST_SCHEMA = vol.Schema([cv.string])
GATEWAY_CONFIG = vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_URL): cv.url,
vol.Optional(CONF_PLUGINS, default=False): cv.boolean,
vol.Optional(CONF_EXCLUDE, default=[]): FIBARO_ID_LIST_SCHEMA,
vol.Optional(CONF_DEVICE_CONFIG, default={}): vol.Schema(
{cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
),
},
extra=vol.ALLOW_EXTRA,
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Required(CONF_GATEWAYS): vol.All(cv.ensure_list, [GATEWAY_CONFIG])}
)
},
extra=vol.ALLOW_EXTRA,
)
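# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above; the URL,
# credentials and entity ids below are placeholders, not real values:
#
# fibaro:
#   gateways:
#     - url: http://192.168.1.2/api/
#       username: admin
#       password: YOUR_PASSWORD
#       plugins: false
#       exclude:
#         - living_room_lamp_23
#       device_config:
#         bedroom_dimmer_12:
#           dimming: true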
class FibaroController:
"""Initiate Fibaro Controller Class."""
def __init__(self, config):
"""Initialize the Fibaro controller."""
from fiblary3.client.v4.client import Client as FibaroClient
self._client = FibaroClient(
config[CONF_URL], config[CONF_USERNAME], config[CONF_PASSWORD]
)
self._scene_map = None
# Whether to import devices from plugins
self._import_plugins = config[CONF_PLUGINS]
self._device_config = config[CONF_DEVICE_CONFIG]
self._room_map = None # Mapping roomId to room object
self._device_map = None # Mapping deviceId to device object
self.fibaro_devices = None # List of devices by type
self._callbacks = {} # Update value callbacks by deviceId
self._state_handler = None # Fiblary's StateHandler object
self._excluded_devices = config[CONF_EXCLUDE]
self.hub_serial = None # Unique serial number of the hub
def connect(self):
"""Start the communication with the Fibaro controller."""
try:
login = self._client.login.get()
info = self._client.info.get()
self.hub_serial = slugify(info.serialNumber)
except AssertionError:
_LOGGER.error("Can't connect to Fibaro HC. " "Please check URL.")
return False
if login is None or login.status is False:
_LOGGER.error(
"Invalid login for Fibaro HC. " "Please check username and password"
)
return False
self._room_map = {room.id: room for room in self._client.rooms.list()}
self._read_devices()
self._read_scenes()
return True
def enable_state_handler(self):
"""Start StateHandler thread for monitoring updates."""
from fiblary3.client.v4.client import StateHandler
self._state_handler = StateHandler(self._client, self._on_state_change)
def disable_state_handler(self):
"""Stop StateHandler thread used for monitoring updates."""
self._state_handler.stop()
self._state_handler = None
def _on_state_change(self, state):
"""Handle change report received from the HomeCenter."""
callback_set = set()
for change in state.get("changes", []):
try:
dev_id = change.pop("id")
if dev_id not in self._device_map.keys():
continue
device = self._device_map[dev_id]
for property_name, value in change.items():
if property_name == "log":
if value and value != "transfer OK":
_LOGGER.debug("LOG %s: %s", device.friendly_name, value)
continue
if property_name == "logTemp":
continue
if property_name in device.properties:
device.properties[property_name] = value
_LOGGER.debug(
"<- %s.%s = %s", device.ha_id, property_name, str(value)
)
else:
_LOGGER.warning("%s.%s not found", device.ha_id, property_name)
if dev_id in self._callbacks:
callback_set.add(dev_id)
except (ValueError, KeyError):
pass
for item in callback_set:
self._callbacks[item]()
def register(self, device_id, callback):
"""Register device with a callback for updates."""
self._callbacks[device_id] = callback
def get_children(self, device_id):
"""Get a list of child devices."""
return [
device
for device in self._device_map.values()
if device.parentId == device_id
]
def get_siblings(self, device_id):
"""Get the siblings of a device."""
return self.get_children(self._device_map[device_id].parentId)
@staticmethod
def _map_device_to_type(device):
"""Map device to HA device type."""
# Use our lookup table to identify device type
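        # e.g. "com.fibaro.FGD212" -> "light", "com.fibaro.doorSensor" -> "binary_sensor" (see FIBARO_TYPEMAP)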
device_type = None
if "type" in device:
device_type = FIBARO_TYPEMAP.get(device.type)
if device_type is None and "baseType" in device:
device_type = FIBARO_TYPEMAP.get(device.baseType)
# We can also identify device type by its capabilities
if device_type is None:
if "setBrightness" in device.actions:
device_type = "light"
elif "turnOn" in device.actions:
device_type = "switch"
elif "open" in device.actions:
device_type = "cover"
elif "value" in device.properties:
if device.properties.value in ("true", "false"):
device_type = "binary_sensor"
else:
device_type = "sensor"
# Switches that control lights should show up as lights
if (
device_type == "switch"
and device.properties.get("isLight", "false") == "true"
):
device_type = "light"
return device_type
def _read_scenes(self):
scenes = self._client.scenes.list()
self._scene_map = {}
for device in scenes:
if not device.visible:
continue
device.fibaro_controller = self
if device.roomID == 0:
room_name = "Unknown"
else:
room_name = self._room_map[device.roomID].name
device.room_name = room_name
device.friendly_name = f"{room_name} {device.name}"
device.ha_id = "scene_{}_{}_{}".format(
slugify(room_name), slugify(device.name), device.id
)
device.unique_id_str = f"{self.hub_serial}.scene.{device.id}"
self._scene_map[device.id] = device
self.fibaro_devices["scene"].append(device)
def _read_devices(self):
"""Read and process the device list."""
devices = self._client.devices.list()
self._device_map = {}
self.fibaro_devices = defaultdict(list)
last_climate_parent = None
for device in devices:
try:
device.fibaro_controller = self
if device.roomID == 0:
room_name = "Unknown"
else:
room_name = self._room_map[device.roomID].name
device.room_name = room_name
device.friendly_name = room_name + " " + device.name
device.ha_id = "{}_{}_{}".format(
slugify(room_name), slugify(device.name), device.id
)
if (
device.enabled
and (
"isPlugin" not in device
or (not device.isPlugin or self._import_plugins)
)
and device.ha_id not in self._excluded_devices
):
device.mapped_type = self._map_device_to_type(device)
device.device_config = self._device_config.get(device.ha_id, {})
else:
device.mapped_type = None
dtype = device.mapped_type
if dtype:
device.unique_id_str = f"{self.hub_serial}.{device.id}"
self._device_map[device.id] = device
if dtype != "climate":
self.fibaro_devices[dtype].append(device)
else:
# if a sibling of this has been added, skip this one
# otherwise add the first visible device in the group
# which is a hack, but solves a problem with FGT having
# hidden compatibility devices before the real device
if last_climate_parent != device.parentId and device.visible:
self.fibaro_devices[dtype].append(device)
last_climate_parent = device.parentId
_LOGGER.debug(
"%s (%s, %s) -> %s %s",
device.ha_id,
device.type,
device.baseType,
dtype,
str(device),
)
except (KeyError, ValueError):
pass
def setup(hass, base_config):
"""Set up the Fibaro Component."""
gateways = base_config[DOMAIN][CONF_GATEWAYS]
hass.data[FIBARO_CONTROLLERS] = {}
def stop_fibaro(event):
"""Stop Fibaro Thread."""
_LOGGER.info("Shutting down Fibaro connection")
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.disable_state_handler()
hass.data[FIBARO_DEVICES] = {}
for component in FIBARO_COMPONENTS:
hass.data[FIBARO_DEVICES][component] = []
for gateway in gateways:
controller = FibaroController(gateway)
if controller.connect():
hass.data[FIBARO_CONTROLLERS][controller.hub_serial] = controller
for component in FIBARO_COMPONENTS:
hass.data[FIBARO_DEVICES][component].extend(
controller.fibaro_devices[component]
)
if hass.data[FIBARO_CONTROLLERS]:
for component in FIBARO_COMPONENTS:
discovery.load_platform(hass, component, DOMAIN, {}, base_config)
for controller in hass.data[FIBARO_CONTROLLERS].values():
controller.enable_state_handler()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_fibaro)
return True
return False
class FibaroDevice(Entity):
"""Representation of a Fibaro device entity."""
def __init__(self, fibaro_device):
"""Initialize the device."""
self.fibaro_device = fibaro_device
self.controller = fibaro_device.fibaro_controller
self._name = fibaro_device.friendly_name
self.ha_id = fibaro_device.ha_id
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.controller.register(self.fibaro_device.id, self._update_callback)
def _update_callback(self):
"""Update the state."""
self.schedule_update_ha_state(True)
@property
def level(self):
"""Get the level of Fibaro device."""
if "value" in self.fibaro_device.properties:
return self.fibaro_device.properties.value
return None
@property
def level2(self):
"""Get the tilt level of Fibaro device."""
if "value2" in self.fibaro_device.properties:
return self.fibaro_device.properties.value2
return None
def dont_know_message(self, action):
"""Make a warning in case we don't know how to perform an action."""
_LOGGER.warning(
"Not sure how to setValue: %s " "(available actions: %s)",
str(self.ha_id),
str(self.fibaro_device.actions),
)
def set_level(self, level):
"""Set the level of Fibaro device."""
self.action("setValue", level)
if "value" in self.fibaro_device.properties:
self.fibaro_device.properties.value = level
if "brightness" in self.fibaro_device.properties:
self.fibaro_device.properties.brightness = level
def set_level2(self, level):
"""Set the level2 of Fibaro device."""
self.action("setValue2", level)
if "value2" in self.fibaro_device.properties:
self.fibaro_device.properties.value2 = level
def call_turn_on(self):
"""Turn on the Fibaro device."""
self.action("turnOn")
def call_turn_off(self):
"""Turn off the Fibaro device."""
self.action("turnOff")
def call_set_color(self, red, green, blue, white):
"""Set the color of Fibaro device."""
red = int(max(0, min(255, red)))
green = int(max(0, min(255, green)))
blue = int(max(0, min(255, blue)))
white = int(max(0, min(255, white)))
color_str = f"{red},{green},{blue},{white}"
self.fibaro_device.properties.color = color_str
self.action("setColor", str(red), str(green), str(blue), str(white))
def action(self, cmd, *args):
"""Perform an action on the Fibaro HC."""
if cmd in self.fibaro_device.actions:
getattr(self.fibaro_device, cmd)(*args)
_LOGGER.debug("-> %s.%s%s called", str(self.ha_id), str(cmd), str(args))
else:
self.dont_know_message(cmd)
@property
def hidden(self) -> bool:
"""Return True if the entity should be hidden from UIs."""
return self.fibaro_device.visible is False
@property
def current_power_w(self):
"""Return the current power usage in W."""
if "power" in self.fibaro_device.properties:
power = self.fibaro_device.properties.power
if power:
return convert(power, float, 0.0)
else:
return None
@property
def current_binary_state(self):
"""Return the current binary state."""
if self.fibaro_device.properties.value == "false":
return False
if (
self.fibaro_device.properties.value == "true"
or int(self.fibaro_device.properties.value) > 0
):
return True
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self.fibaro_device.unique_id_str
@property
def name(self) -> Optional[str]:
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Get polling requirement from fibaro device."""
return False
def update(self):
"""Call to update state."""
pass
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
try:
if "battery" in self.fibaro_device.interfaces:
attr[ATTR_BATTERY_LEVEL] = int(
self.fibaro_device.properties.batteryLevel
)
if "fibaroAlarmArm" in self.fibaro_device.interfaces:
attr[ATTR_ARMED] = bool(self.fibaro_device.properties.armed)
if "power" in self.fibaro_device.interfaces:
attr[ATTR_CURRENT_POWER_W] = convert(
self.fibaro_device.properties.power, float, 0.0
)
if "energy" in self.fibaro_device.interfaces:
attr[ATTR_CURRENT_ENERGY_KWH] = convert(
self.fibaro_device.properties.energy, float, 0.0
)
except (ValueError, KeyError):
pass
attr["fibaro_id"] = self.fibaro_device.id
return attr
| apache-2.0 | 5,719,580,739,145,357,000 | 35.11022 | 87 | 0.568178 | false |
supermari0/ironic | ironic/tests/drivers/amt/test_management.py | 1 | 10486 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for AMT ManagementInterface
"""
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.amt import common as amt_common
from ironic.drivers.modules.amt import management as amt_mgmt
from ironic.drivers.modules.amt import resource_uris
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.drivers.drac import utils as test_utils
from ironic.tests.drivers import third_party_driver_mock_specs as mock_specs
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_amt_info()
CONF = cfg.CONF
@mock.patch.object(amt_common, 'pywsman', spec_set=mock_specs.PYWSMAN_SPEC)
class AMTManagementInternalMethodsTestCase(db_base.DbTestCase):
def setUp(self):
        super(AMTManagementInternalMethodsTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_amt')
self.node = obj_utils.create_test_node(self.context,
driver='fake_amt',
driver_info=INFO_DICT)
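    # The tests below mock the pywsman client: a SOAP reply whose
    # ReturnValue is '0' means the AMT call succeeded, a failing value such
    # as '2' makes the driver raise AMTFailure, and a None reply raises
    # AMTConnectFailure.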
def test__set_boot_device_order(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
amt_mgmt._set_boot_device_order(self.node, device)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'ChangeBootOrder')
def test__set_boot_device_order_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootConfigSetting
device = boot_devices.PXE
result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
self.assertRaises(exception.AMTFailure,
amt_mgmt._set_boot_device_order, self.node, device)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'ChangeBootOrder')
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
self.assertRaises(exception.AMTConnectFailure,
amt_mgmt._set_boot_device_order, self.node, device)
def test__enable_boot_config(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
amt_mgmt._enable_boot_config(self.node)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'SetBootConfigRole')
def test__enable_boot_config_fail(self, mock_client_pywsman):
namespace = resource_uris.CIM_BootService
result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
namespace)
mock_xml = test_utils.mock_wsman_root(result_xml)
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = mock_xml
self.assertRaises(exception.AMTFailure,
amt_mgmt._enable_boot_config, self.node)
mock_pywsman.invoke.assert_called_once_with(mock.ANY,
namespace, 'SetBootConfigRole')
mock_pywsman = mock_client_pywsman.Client.return_value
mock_pywsman.invoke.return_value = None
self.assertRaises(exception.AMTConnectFailure,
amt_mgmt._enable_boot_config, self.node)
class AMTManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(AMTManagementTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_amt')
self.info = INFO_DICT
self.node = obj_utils.create_test_node(self.context,
driver='fake_amt',
driver_info=self.info)
def test_get_properties(self):
expected = amt_common.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(amt_common, 'parse_driver_info', autospec=True)
def test_validate(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.management.validate(task)
mock_drvinfo.assert_called_once_with(task.node)
@mock.patch.object(amt_common, 'parse_driver_info', autospec=True)
def test_validate_fail(self, mock_drvinfo):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
mock_drvinfo.side_effect = exception.InvalidParameterValue('x')
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.validate,
task)
def test_get_supported_boot_devices(self):
expected = [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM]
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(
sorted(expected),
sorted(task.driver.management.get_supported_boot_devices()))
def test_set_boot_device_one_time(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, 'pxe')
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertFalse(
task.node.driver_internal_info["amt_boot_persistent"])
def test_set_boot_device_persistent(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, 'pxe',
persistent=True)
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
def test_set_boot_device_fail(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
task, 'fake-device')
@mock.patch.object(amt_mgmt, '_enable_boot_config', autospec=True)
@mock.patch.object(amt_mgmt, '_set_boot_device_order', autospec=True)
def test_ensure_next_boot_device_one_time(self, mock_sbdo, mock_ebc):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
device = boot_devices.PXE
task.node.driver_internal_info['amt_boot_device'] = 'pxe'
task.driver.management.ensure_next_boot_device(task.node, device)
self.assertEqual('disk',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
mock_sbdo.assert_called_once_with(task.node, device)
mock_ebc.assert_called_once_with(task.node)
@mock.patch.object(amt_mgmt, '_enable_boot_config', autospec=True)
@mock.patch.object(amt_mgmt, '_set_boot_device_order', autospec=True)
def test_ensure_next_boot_device_persistent(self, mock_sbdo, mock_ebc):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
device = boot_devices.PXE
task.node.driver_internal_info['amt_boot_device'] = 'pxe'
task.node.driver_internal_info['amt_boot_persistent'] = True
task.driver.management.ensure_next_boot_device(task.node, device)
self.assertEqual('pxe',
task.node.driver_internal_info["amt_boot_device"])
self.assertTrue(
task.node.driver_internal_info["amt_boot_persistent"])
mock_sbdo.assert_called_once_with(task.node, device)
mock_ebc.assert_called_once_with(task.node)
def test_get_boot_device(self):
expected = {'boot_device': boot_devices.DISK, 'persistent': True}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected,
task.driver.management.get_boot_device(task))
def test_get_sensor_data(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(NotImplementedError,
task.driver.management.get_sensors_data,
task)
| apache-2.0 | -8,218,640,313,690,177,000 | 45.8125 | 79 | 0.612722 | false |
jjgomera/pychemqt | tools/UI_Tables/plot.py | 1 | 72270 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.'''
###############################################################################
# Plot functionality for plugin:
# - PlotMEoS: Plot widget to show meos plot data, add context menu options
# - Plot2D: Dialog for select a special 2D plot
# - Plot3D: Dialog for define a 3D plot
# - EditPlot: Dialog to edit plot
# - AddLine: Dialog to add new isoline to plot
# - EditAxis: Dialog to configure axes plot properties
# - AxisWidget: Dialog to configure axes plot properties
# - calcIsoline: Isoline calculation procedure
# - get_points: Get point number to plot lines from Preferences
# - getLineFormat: get matplotlib line format from preferences
# - plotIsoline: plot isoline procedure
# - plot2D3D: general procedure for plotting 2D and 3D
# - _getunitTransform
###############################################################################
from functools import partial
import gzip
import json
from math import log10, atan, pi
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from numpy import concatenate, linspace, logspace, transpose, log, nan
from matplotlib.font_manager import FontProperties
from lib import meos, unidades, plot, config
from lib.thermo import ThermoAdvanced
from lib.utilities import formatLine
from UI.widgets import (Entrada_con_unidades, createAction, LineStyleCombo,
MarkerCombo, ColorSelector, InputFont, ClickableLabel)
from .library import calcPoint, getLimit, getClassFluid, getMethod
from .chooseFluid import Dialog_InfoFluid
# FIXME: Plot3D save/load support
class PlotMEoS(QtWidgets.QWidget):
"""Plot widget to show meos plot data, add context menu options"""
icon = os.path.join(config.IMAGE_PATH, "button", "plot.png")
mouseMove = QtCore.pyqtSignal(QtCore.QPointF)
def __init__(self, dim, toolbar=False, filename="", parent=None):
"""constructor
Input:
            dim: dimension of plot, 2 or 3
toolbar: boolean to add the matplotlib toolbar
filename: filename for data
"""
super(PlotMEoS, self).__init__(parent)
self.setWindowIcon(QtGui.QIcon(QtGui.QPixmap(self.icon)))
self.setMouseTracking(True)
self.parent = parent
self.dim = dim
self.filename = filename
self.notes = []
layout = QtWidgets.QVBoxLayout(self)
self.plot = plot.matplotlib(dim)
self.plot.lx = self.plot.ax.axhline(c="#888888", ls=":") # horiz line
self.plot.ly = self.plot.ax.axvline(c="#888888", ls=":") # vert line
self.plot.lx.set_visible(False)
self.plot.ly.set_visible(False)
layout.addWidget(self.plot)
self.toolbar = plot.NavigationToolbar2QT(self.plot, self.plot)
self.toolbar.setVisible(toolbar)
layout.addWidget(self.toolbar)
self.editAxesAction = createAction(
QtWidgets.QApplication.translate("pychemqt", "Edit &Axis"),
icon=os.environ["pychemqt"]+"/images/button/editor",
slot=self.editAxis, parent=self)
self.editAction = createAction(
QtWidgets.QApplication.translate("pychemqt", "Edit &Plot"),
slot=self.edit,
icon=os.environ["pychemqt"]+"/images/button/fit",
parent=self)
self.editMarginAction = createAction(
QtWidgets.QApplication.translate("pychemqt", "Edit &Margins"),
slot=self.toolbar.configure_subplots, parent=self)
self.saveAction = createAction(
QtWidgets.QApplication.translate("pychemqt", "&Save Plot"),
slot=self.toolbar.save_figure,
icon=os.environ["pychemqt"]+"/images/button/fileSave", parent=self)
self.toolbarVisibleAction = createAction(
QtWidgets.QApplication.translate("pychemqt", "Toggle &Toolbar"),
self.toolbar.setVisible, checkable=True, parent=self)
self.gridToggleAction = createAction(
QtWidgets.QApplication.translate("pychemqt", "Toggle &Grid"),
self.grid, checkable=True, parent=self)
grid = config.Preferences.getboolean("MEOS", "grid")
self.gridToggleAction.setChecked(grid)
# Widgets to show in the statusbar of mainwindow
self.statusWidget = []
self.statusPosition = QtWidgets.QLabel()
self.statusPosition.setFrameShape(QtWidgets.QFrame.WinPanel)
self.statusPosition.setFrameShadow(QtWidgets.QFrame.Sunken)
self.statusWidget.append(self.statusPosition)
# self.statusThermo = QtWidgets.QLabel()
self.statusThermo = ClickableLabel()
self.statusThermo.setFrameShape(QtWidgets.QFrame.WinPanel)
self.statusThermo.setFrameShadow(QtWidgets.QFrame.Sunken)
self.statusWidget.append(self.statusThermo)
self.statusThermo.clicked.connect(self.showFluid)
if dim == 2:
self.plot.fig.canvas.mpl_connect('button_press_event', self.click)
self.plot.fig.canvas.mpl_connect(
'motion_notify_event', self.updatePosition)
else:
self.editMarginAction.setEnabled(False)
def showFluid(self):
method = self.config["method"]
index = self.config["fluid"]
fluid = getClassFluid(method, index)
dlg = Dialog_InfoFluid(fluid.__class__)
dlg.exec_()
def mouseMoveEvent(self, event):
# print(event.globalPos())
QtWidgets.QWidget.mouseMoveEvent(self, event)
self.mouseMove.emit(event.globalPos())
def closeEvent(self, event):
self.parent.dirty[self.parent.idTab] = True
self.parent.saveControl()
def contextMenuEvent(self, event):
"""Create context menu"""
menuTable = QtWidgets.QMenu(
QtWidgets.QApplication.translate("pychemqt", "Tabulated data"))
menuTable.setIcon(
QtGui.QIcon(os.environ["pychemqt"]+"/images/button/table"))
for linea in self.plot.ax.lines:
action = createAction(
linea.get_label(),
slot=partial(self.table, linea), parent=self)
menuTable.addAction(action)
menu = QtWidgets.QMenu()
menu.addAction(self.editAxesAction)
menu.addAction(self.editAction)
menu.addAction(self.editMarginAction)
menu.addSeparator()
menu.addAction(self.saveAction)
menu.addAction(menuTable.menuAction())
menu.addSeparator()
menu.addAction(self.toolbarVisibleAction)
menu.addAction(self.gridToggleAction)
menu.exec_(event.globalPos())
if self.plot.ax._gridOn:
self.gridToggleAction.setChecked(True)
def grid(self, bool):
self.plot.ax.grid(bool)
self.plot.ax._gridOn = bool
self.plot.draw()
def edit(self):
dialog = EditPlot(self, self.parent)
dialog.exec_()
def editAxis(self):
dialog = EditAxis(self.plot, self.parent)
dialog.exec_()
def table(self, obj):
"""Export plot data to table
Input:
obj: object (Line2D instance) to show data
"""
xtxt = meos.propiedades[meos.keys.index(self.x)]
ytxt = meos.propiedades[meos.keys.index(self.y)]
xunit = meos.units[meos.keys.index(self.x)]
yunit = meos.units[meos.keys.index(self.y)]
HHeader = [xtxt+os.linesep+xunit.text(), ytxt+os.linesep+yunit.text()]
units = [xunit, yunit]
if self.dim == 3:
ztxt = meos.propiedades[meos.keys.index(self.z)]
zunit = meos.units[meos.keys.index(self.z)]
HHeader.append(ztxt+os.linesep+zunit.text())
units.append(zunit)
data = obj._verts3d
else:
data = obj.get_data(orig=True)
# Don't import at top level to avoid recursion import
from .table import TablaMEoS # noqa
tabla = TablaMEoS(self.dim, horizontalHeader=HHeader, units=units,
stretch=False, readOnly=True, parent=self.parent)
method = getMethod()
projectConfig = self.parent.currentConfig
index = projectConfig.getint("MEoS", "fluid")
tabla.Point = getClassFluid(method, index)
tabla.setData(list(map(list, transpose(data))))
tabla.verticalHeader().setContextMenuPolicy(
QtCore.Qt.CustomContextMenu)
self.parent.centralWidget().currentWidget().addSubWindow(tabla)
title = QtWidgets.QApplication.translate("pychemqt", "Table from") + \
" " + obj.get_label()
tabla.setWindowTitle(title)
wdg = self.parent.centralWidget().currentWidget().subWindowList()[-1]
wdg.setWindowIcon(QtGui.QIcon(QtGui.QPixmap(tabla.icon)))
self.parent.dirty[self.parent.idTab] = True
self.parent.saveControl()
tabla.show()
def _getData(self):
"""Get data from file"""
filenameHard = os.environ["pychemqt"]+"dat"+os.sep+"mEoS" + \
os.sep + self.filename+".gz"
filenameSoft = config.conf_dir+self.filename
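        # The user configuration directory is checked first (a local copy
        # saved by _saveData); otherwise the gzipped data file shipped with
        # pychemqt is read and cached locally for the next time.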
if os.path.isfile(filenameSoft):
print(filenameSoft)
with open(filenameSoft, "rb") as archivo:
data = json.load(archivo)
return data
elif os.path.isfile(filenameHard):
print(filenameHard)
with gzip.GzipFile(filenameHard, 'rb') as archivo:
data = json.load(archivo)
self._saveData(data)
return data
def _saveData(self, data):
"""Save changes in data to file"""
with open(config.conf_dir+self.filename, 'w') as file:
json.dump(data, file)
def click(self, event):
"""Update input and graph annotate when mouse click over chart"""
# Accept only left click
if event.button != 1:
return
units = {"x": unidades.Dimensionless,
"T": unidades.Temperature,
"P": unidades.Pressure,
"h": unidades.Enthalpy,
"u": unidades.Enthalpy,
"s": unidades.SpecificHeat,
"v": unidades.SpecificVolume,
"rho": unidades.Density}
if self.x in units and self.y in units:
x = units[self.x](event.xdata, "conf")
y = units[self.y](event.ydata, "conf")
method = self.config["method"]
index = self.config["fluid"]
fluid = getClassFluid(method, index)
kwargs = {self.x: x, self.y: y}
fluido = calcPoint(fluid, self.config, **kwargs)
Tmin, Tmax, Pmin, Pmax = getLimit(fluid, self.config)
if fluido and fluido.status and \
Tmin <= fluido.T <= Tmax and \
0 < fluido.P.kPa <= Pmax:
self.plot.lx.set_ydata(event.ydata)
self.plot.ly.set_xdata(event.xdata)
self.plot.lx.set_visible(True)
self.plot.ly.set_visible(True)
self.showPointData(fluido)
else:
self.plot.lx.set_visible(False)
self.plot.ly.set_visible(False)
self.clearPointData()
def showPointData(self, state):
self.clearPointData()
yi = 0.98
for key in ("T", "P", "x", "v", "rho", "h", "s", "u"):
self.notes.append(self.plot.ax.annotate(
"%s: %s" % (key, state.__getattribute__(key).str), (0.01, yi),
xycoords='axes fraction', size="small", va="center"))
yi -= 0.025
self.plot.draw()
def clearPointData(self):
while self.notes:
anotation = self.notes.pop()
anotation.remove()
self.plot.draw()
def updatePosition(self, point):
try:
x = point.xdata
y = point.ydata
except AttributeError:
x = None
if x is None:
self.statusPosition.setText("-, -")
else:
txt = "%s=%4g, %s=%4g" % (self.x, x, self.y, y)
self.statusPosition.setText(txt)
def writeToJSON(self, data):
"""Write instance parameter to file"""
data["filename"] = self.filename
data["windowTitle"] = self.windowTitle()
data["x"] = self.x
data["y"] = self.y
data["z"] = self.z
        # TODO: Add support for saving font properties
# Title format
title = {}
title["txt"] = self.plot.ax.get_title()
title["color"] = self.plot.ax.title.get_color()
title["family"] = self.plot.ax.title.get_fontfamily()
title["style"] = self.plot.ax.title.get_style()
title["weight"] = self.plot.ax.title.get_weight()
title["stretch"] = self.plot.ax.title.get_stretch()
title["size"] = self.plot.ax.title.get_size()
data["title"] = title
# xlabel format
xlabel = {}
xlabel["txt"] = self.plot.ax.get_xlabel()
xlabel["color"] = self.plot.ax.xaxis.get_label().get_color()
xlabel["family"] = self.plot.ax.xaxis.get_label().get_fontfamily()
xlabel["style"] = self.plot.ax.xaxis.get_label().get_style()
xlabel["weight"] = self.plot.ax.xaxis.get_label().get_weight()
xlabel["stretch"] = self.plot.ax.xaxis.get_label().get_stretch()
xlabel["size"] = self.plot.ax.xaxis.get_label().get_size()
data["xlabel"] = xlabel
        # ylabel format
ylabel = {}
ylabel["txt"] = self.plot.ax.get_ylabel()
ylabel["color"] = self.plot.ax.yaxis.get_label().get_color()
ylabel["family"] = self.plot.ax.yaxis.get_label().get_fontfamily()
ylabel["style"] = self.plot.ax.yaxis.get_label().get_style()
ylabel["weight"] = self.plot.ax.yaxis.get_label().get_weight()
ylabel["stretch"] = self.plot.ax.yaxis.get_label().get_stretch()
ylabel["size"] = self.plot.ax.yaxis.get_label().get_size()
data["ylabel"] = ylabel
        # zlabel format
zlabel = {}
if self.z:
zlabel["txt"] = self.plot.ax.get_zlabel()
zlabel["color"] = self.plot.ax.zaxis.get_label().get_color()
zlabel["family"] = self.plot.ax.zaxis.get_label().get_fontfamily()
zlabel["style"] = self.plot.ax.zaxis.get_label().get_style()
zlabel["weight"] = self.plot.ax.zaxis.get_label().get_weight()
zlabel["stretch"] = self.plot.ax.zaxis.get_label().get_stretch()
zlabel["size"] = self.plot.ax.zaxis.get_label().get_size()
data["zlabel"] = zlabel
data["grid"] = self.plot.ax._gridOn
data["xscale"] = self.plot.ax.get_xscale()
data["yscale"] = self.plot.ax.get_yscale()
xmin, xmax = self.plot.ax.get_xlim()
data["xmin"] = xmin
data["xmax"] = xmax
ymin, ymax = self.plot.ax.get_ylim()
data["ymin"] = ymin
data["ymax"] = ymax
if self.z:
zmin, zmax = self.plot.ax.get_zlim()
data["zmin"] = zmin
data["zmax"] = zmax
else:
data["zmin"] = None
data["zmax"] = None
data["marginleft"] = self.plot.fig.subplotpars.left
data["marginbottom"] = self.plot.fig.subplotpars.bottom
data["marginright"] = self.plot.fig.subplotpars.right
data["margintop"] = self.plot.fig.subplotpars.top
# Config
data["method"] = self.config["method"]
data["fluid"] = self.config["fluid"]
data["eq"] = self.config["eq"]
data["visco"] = self.config["visco"]
data["thermal"] = self.config["thermal"]
if self.config["method"] == "meos":
data["external_dependences"] = ""
elif self.config["method"] == "coolprop":
data["external_dependences"] = "CoolProp"
else:
data["external_dependences"] = "refprop"
# data
lines = {}
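        # Skip the first two lines, the crosshair helpers (lx, ly) created
        # in __init__; only the data lines are serialized.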
for line in self.plot.ax.lines[2:]:
dat = {}
dat["x"] = line.get_xdata().tolist()
dat["y"] = line.get_ydata().tolist()
dat["label"] = line.get_label()
# line style
dat["lw"] = line.get_lw()
dat["ls"] = line.get_ls()
dat["marker"] = line.get_marker()
dat["color"] = line.get_color()
dat["ms"] = line.get_ms()
dat["mfc"] = line.get_mfc()
dat["mew"] = line.get_mew()
dat["mec"] = line.get_mec()
dat["visible"] = line.get_visible()
dat["antialiased"] = line.get_antialiased()
# line text
            # Saturation and melting lines don't define it at plot creation
try:
text = {}
text["visible"] = line.text.get_visible()
text["txt"] = line.text.get_text()
text["rot"] = line.text.get_rotation()
text["pos"] = line.text.pos
text["family"] = line.text.get_fontfamily()
text["style"] = line.text.get_style()
text["weight"] = line.text.get_weight()
text["stretch"] = line.text.get_stretch()
text["size"] = line.text.get_size()
text["va"] = line.text.get_va()
except AttributeError:
text = {"visible": False, "txt": "", "pos": 50, "rot": 0,
"family": "sans-serif", "style": "normal",
"weight": "normal", "stretch": "normal",
"size": "small", "va": "center"}
dat["annotation"] = text
lines[line._label] = dat
data["lines"] = lines
@classmethod
def readFromJSON(cls, data, parent):
filename = data["filename"]
title = data["windowTitle"]
x = data["x"]
y = data["y"]
z = data["z"]
if z:
dim = 3
else:
dim = 2
grafico = PlotMEoS(dim=dim, parent=parent, filename=filename)
grafico.x = x
grafico.y = y
grafico.z = z
grafico.setWindowTitle(title)
title = data["title"]["txt"]
if title:
grafico.plot.ax.set_title(title)
grafico.plot.ax.title.set_color(data["title"]["color"])
grafico.plot.ax.title.set_family(data["title"]["family"])
grafico.plot.ax.title.set_style(data["title"]["style"])
grafico.plot.ax.title.set_weight(data["title"]["weight"])
grafico.plot.ax.title.set_stretch(data["title"]["stretch"])
grafico.plot.ax.title.set_size(data["title"]["size"])
xlabel = data["xlabel"]["txt"]
if xlabel:
grafico.plot.ax.set_xlabel(xlabel)
label = grafico.plot.ax.xaxis.get_label()
label.set_color(data["xlabel"]["color"])
label.set_family(data["xlabel"]["family"])
label.set_style(data["xlabel"]["style"])
label.set_weight(data["xlabel"]["weight"])
label.set_stretch(data["xlabel"]["stretch"])
label.set_size(data["xlabel"]["size"])
ylabel = data["ylabel"]["txt"]
if ylabel:
grafico.plot.ax.set_ylabel(ylabel)
label = grafico.plot.ax.yaxis.get_label()
label.set_color(data["ylabel"]["color"])
label.set_family(data["ylabel"]["family"])
label.set_style(data["ylabel"]["style"])
label.set_weight(data["ylabel"]["weight"])
label.set_stretch(data["ylabel"]["stretch"])
label.set_size(data["ylabel"]["size"])
if z:
zlabel = data["zlabel"]["txt"]
if zlabel:
grafico.plot.ax.set_zlabel(zlabel)
label = grafico.plot.ax.zaxis.get_label()
label.set_color(data["zlabel"]["color"])
label.set_family(data["zlabel"]["family"])
label.set_style(data["zlabel"]["style"])
label.set_weight(data["zlabel"]["weight"])
label.set_stretch(data["zlabel"]["stretch"])
label.set_size(data["zlabel"]["size"])
grafico.plot.ax._gridOn = data["grid"]
grafico.plot.ax.grid(data["grid"])
grafico.plot.ax.set_xlim(data["xmin"], data["xmax"])
grafico.plot.ax.set_ylim(data["ymin"], data["ymax"])
if z:
grafico.plot.ax.set_zlim(data["zmin"], data["zmax"])
for label, line in data["lines"].items():
x = line["x"]
y = line["y"]
format = {}
format["lw"] = line["lw"]
format["ls"] = line["ls"]
format["marker"] = line["marker"]
format["color"] = line["color"]
format["ms"] = line["ms"]
format["mfc"] = line["mfc"]
format["mew"] = line["mew"]
format["mec"] = line["mec"]
ln, = grafico.plot.ax.plot(x, y, label=label, **format)
ln.set_visible(line["visible"])
ln.set_antialiased(line["antialiased"])
txt = line["annotation"]["txt"]
rot = line["annotation"]["rot"]
pos = line["annotation"]["pos"]
i = int(len(x)*pos/100)
kw = {}
kw["ha"] = "center"
kw["rotation_mode"] = "anchor"
for key in ("va", "visible", "family", "style", "weight",
"stretch", "size"):
kw[key] = line["annotation"][key]
if i >= len(x):
i = len(x)-1
text = grafico.plot.ax.text(x[i], y[i], txt, rotation=rot, **kw)
            # Create a link between the line and its annotation text
ln.text = text
            # Save the position as a percentage so the data index does not
            # have to be recovered later
ln.text.pos = pos
grafico.plot.ax.set_xscale(data["xscale"])
grafico.plot.ax.set_yscale(data["yscale"])
# Load margins
left = data["marginleft"]
bottom = data["marginbottom"]
right = data["marginright"]
top = data["margintop"]
grafico.plot.fig.subplots_adjust(left, bottom, right, top)
# Load config
conf = {}
conf["method"] = data["method"]
conf["fluid"] = data["fluid"]
conf["eq"] = data["eq"]
conf["visco"] = data["visco"]
conf["thermal"] = data["thermal"]
grafico.config = conf
grafico.changeStatusThermo(conf)
return grafico
def changeStatusThermo(self, config):
fluid = getClassFluid(config["method"], config["fluid"])
txt = "%s (%s)" % (fluid.name, config["method"])
self.statusThermo.setText(txt)
class Plot2D(QtWidgets.QDialog):
"""Dialog for select a special 2D plot"""
def __init__(self, parent=None):
super(Plot2D, self).__init__(parent)
self.setWindowTitle(
QtWidgets.QApplication.translate("pychemqt", "Setup 2D Plot"))
layout = QtWidgets.QVBoxLayout(self)
group_Ejex = QtWidgets.QGroupBox(
QtWidgets.QApplication.translate("pychemqt", "Axis X"))
layout.addWidget(group_Ejex)
layout_GroupX = QtWidgets.QVBoxLayout(group_Ejex)
self.ejeX = QtWidgets.QComboBox()
layout_GroupX.addWidget(self.ejeX)
self.Xscale = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Logarithmic scale"))
layout_GroupX.addWidget(self.Xscale)
for prop in ThermoAdvanced.propertiesName():
self.ejeX.addItem(prop)
group_Ejey = QtWidgets.QGroupBox(
QtWidgets.QApplication.translate("pychemqt", "Axis Y"))
layout.addWidget(group_Ejey)
layout_GroupY = QtWidgets.QVBoxLayout(group_Ejey)
self.ejeY = QtWidgets.QComboBox()
layout_GroupY.addWidget(self.ejeY)
self.Yscale = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Logarithmic scale"))
layout_GroupY.addWidget(self.Yscale)
self.buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
layout.addWidget(self.buttonBox)
self.ejeXChanged(0)
self.ejeX.currentIndexChanged.connect(self.ejeXChanged)
def ejeXChanged(self, index):
"""Fill variables available in ejeY, all except the active in ejeX"""
# Save current status to restore
current = self.ejeY.currentIndex()
if current == -1:
current = 0
# Refill ejeY combo
self.ejeY.clear()
props = ThermoAdvanced.propertiesName()
del props[index]
for prop in props:
self.ejeY.addItem(prop)
        # Restore initial state
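        # The property chosen for X was removed from the list, so the index
        # of the previously selected Y property may have shifted by one;
        # compensate to keep it selected.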
if index == 0 and current == 0:
self.ejeY.setCurrentIndex(0)
elif index <= current:
self.ejeY.setCurrentIndex(current)
else:
self.ejeY.setCurrentIndex(current+1)
class Plot3D(QtWidgets.QDialog):
"""Dialog for configure a 3D plot"""
def __init__(self, parent=None):
super(Plot3D, self).__init__(parent)
self.setWindowTitle(
QtWidgets.QApplication.translate("pychemqt", "Setup 3D Plot"))
layout = QtWidgets.QGridLayout(self)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Axis X")), 1, 1)
self.ejeX = QtWidgets.QComboBox()
for prop in ThermoAdvanced.propertiesName():
self.ejeX.addItem(prop)
layout.addWidget(self.ejeX, 1, 2)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Axis Y")), 2, 1)
self.ejeY = QtWidgets.QComboBox()
layout.addWidget(self.ejeY, 2, 2)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Axis Z")), 3, 1)
self.ejeZ = QtWidgets.QComboBox()
layout.addWidget(self.ejeZ, 3, 2)
self.buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
layout.addWidget(self.buttonBox, 4, 1, 1, 2)
self.ejeX.currentIndexChanged.connect(self.ejeXChanged)
self.ejeY.currentIndexChanged.connect(self.ejeYChanged)
self.ejeXChanged(0)
def ejeXChanged(self, index):
"""Fill variables available in ejeY, all except the active in ejeX"""
# Save current status to restore
current = self.ejeY.currentIndex()
if current == -1:
current = 0
# Refill ejeY combo
self.ejeY.clear()
props = ThermoAdvanced.propertiesName()
del props[index]
for prop in props:
self.ejeY.addItem(prop)
        # Restore initial state
if index == 0 and current == 0:
self.ejeY.setCurrentIndex(0)
elif index <= current:
self.ejeY.setCurrentIndex(current)
else:
self.ejeY.setCurrentIndex(current+1)
def ejeYChanged(self, indY):
"""Fill variables available in ejeZ, all except the actives in other"""
# Save current status to restore
current = self.ejeZ.currentIndex()
if current == -1:
current = 0
        # Refill ejeZ combo
self.ejeZ.clear()
prop2 = ThermoAdvanced.propertiesName()[:]
indX = self.ejeX.currentIndex()
del prop2[indX]
del prop2[indY]
for prop in prop2:
self.ejeZ.addItem(prop)
        # Restore initial state
if indX == 0 and indY == 0 and current == 0:
self.ejeZ.setCurrentIndex(0)
elif indY <= current or indX <= current:
self.ejeZ.setCurrentIndex(current)
else:
self.ejeZ.setCurrentIndex(current+1)
class EditPlot(QtWidgets.QDialog):
"""Dialog to edit plot. This dialog let user change plot p"""
def __init__(self, plotMEoS, parent=None):
super(EditPlot, self).__init__(parent)
self.setWindowTitle(
QtWidgets.QApplication.translate("pychemqt", "Edit Plot"))
layout = QtWidgets.QGridLayout(self)
self.plotMEoS = plotMEoS
self.fig = plotMEoS.plot
self.parent = parent
self.semaforo = QtCore.QSemaphore(1)
self.lista = QtWidgets.QListWidget()
layout.addWidget(self.lista, 0, 1, 1, 3)
lytTitle = QtWidgets.QHBoxLayout()
label = QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Label"))
lytTitle.addWidget(label)
self.label = QtWidgets.QLineEdit()
lytTitle.addWidget(self.label)
layout.addLayout(lytTitle, 1, 1, 1, 3)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Line Width")), 2, 1)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Line Style")), 2, 2)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Color")), 2, 3)
self.Grosor = QtWidgets.QDoubleSpinBox()
self.Grosor.setAlignment(QtCore.Qt.AlignRight)
self.Grosor.setRange(0.1, 5)
self.Grosor.setDecimals(1)
self.Grosor.setSingleStep(0.1)
layout.addWidget(self.Grosor, 3, 1)
self.Linea = LineStyleCombo()
layout.addWidget(self.Linea, 3, 2)
self.ColorButton = ColorSelector()
layout.addWidget(self.ColorButton, 3, 3)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Marker")), 4, 1)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Marker Size")), 4, 2)
layout.addWidget(QtWidgets.QLabel(QtWidgets.QApplication.translate(
"pychemqt", "Marker Color")), 4, 3)
self.Marca = MarkerCombo()
layout.addWidget(self.Marca, 5, 1)
self.markerSize = QtWidgets.QDoubleSpinBox()
self.markerSize.setAlignment(QtCore.Qt.AlignRight)
self.markerSize.setDecimals(1)
self.markerSize.setSingleStep(0.1)
layout.addWidget(self.markerSize, 5, 2)
self.markerfacecolor = ColorSelector()
layout.addWidget(self.markerfacecolor, 5, 3)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Marker edge")), 7, 1)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Width")), 6, 2)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Color")), 6, 3)
self.markerEdgeSize = QtWidgets.QDoubleSpinBox()
self.markerEdgeSize.setAlignment(QtCore.Qt.AlignRight)
self.markerEdgeSize.setDecimals(1)
self.markerEdgeSize.setSingleStep(0.1)
layout.addWidget(self.markerEdgeSize, 7, 2)
self.markeredgecolor = ColorSelector()
layout.addWidget(self.markeredgecolor, 7, 3)
grpAnnotate = QtWidgets.QGroupBox(
QtWidgets.QApplication.translate("pychemqt", "Annotation"))
layout.addWidget(grpAnnotate, 8, 1, 1, 3)
lytAnnotation = QtWidgets.QGridLayout(grpAnnotate)
self.annotationVisible = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Visible"))
lytAnnotation.addWidget(self.annotationVisible, 1, 1, 1, 3)
lytTitle = QtWidgets.QHBoxLayout()
label = QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Label"))
lytTitle.addWidget(label)
# self.annotationLabel = QtWidgets.QLineEdit()
self.annotationLabel = InputFont()
lytTitle.addWidget(self.annotationLabel)
lytAnnotation.addLayout(lytTitle, 2, 1, 1, 3)
lytPosition = QtWidgets.QHBoxLayout()
lytPosition.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Location")))
self.labelAnnotationPos = Entrada_con_unidades(
int, value=50, width=40, frame=False, readOnly=True, suffix="%",
showNull=True)
self.labelAnnotationPos.setFixedWidth(40)
lytPosition.addWidget(self.labelAnnotationPos)
self.annotationPos = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.annotationPos.setRange(0, 100)
self.annotationPos.setValue(50)
self.annotationPos.valueChanged.connect(
partial(self._updateLabel, self.labelAnnotationPos))
lytPosition.addWidget(self.annotationPos)
lytAnnotation.addLayout(lytPosition, 3, 1, 1, 3)
lytAngle = QtWidgets.QHBoxLayout()
lytAngle.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Rotation")))
self.labelAnnotationRot = Entrada_con_unidades(
int, value=50, width=40, frame=False, readOnly=True, suffix="º",
showNull=True)
self.labelAnnotationRot.setFixedWidth(40)
lytAngle.addWidget(self.labelAnnotationRot)
self.annotationRot = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.annotationRot.setRange(0, 360)
self.annotationRot.setValue(0)
self.annotationRot.valueChanged.connect(
partial(self._updateLabel, self.labelAnnotationRot))
lytAngle.addWidget(self.annotationRot)
lytAnnotation.addLayout(lytAngle, 4, 1, 1, 3)
lytVA = QtWidgets.QHBoxLayout()
lytVA.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Aligment")))
self.annotationVA = QtWidgets.QComboBox()
alignment = [
QtWidgets.QApplication.translate("pychemqt", "Center"),
QtWidgets.QApplication.translate("pychemqt", "Top"),
QtWidgets.QApplication.translate("pychemqt", "Bottom"),
QtWidgets.QApplication.translate("pychemqt", "Baseline"),
QtWidgets.QApplication.translate("pychemqt", "Center baseline")]
for alig in alignment:
self.annotationVA.addItem(alig)
lytVA.addWidget(self.annotationVA)
lytVA.addItem(QtWidgets.QSpacerItem(
10, 10, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding))
lytAnnotation.addLayout(lytVA, 5, 1, 1, 3)
self.annotationVisible.stateChanged.connect(
self.annotationLabel.setEnabled)
self.annotationVisible.stateChanged.connect(
self.annotationPos.setEnabled)
self.annotationVisible.stateChanged.connect(
self.annotationRot.setEnabled)
self.visible = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Visible"))
layout.addWidget(self.visible, 13, 1, 1, 3)
self.antialiases = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Antialiases"))
layout.addWidget(self.antialiases, 14, 1, 1, 3)
layoutButton = QtWidgets.QHBoxLayout()
layout.addLayout(layoutButton, 15, 1, 1, 3)
self.botonAdd = QtWidgets.QPushButton(QtGui.QIcon(QtGui.QPixmap(
os.environ["pychemqt"] + "/images/button/add.png")), "")
self.botonAdd.clicked.connect(self.add)
layoutButton.addWidget(self.botonAdd)
self.botonRemove = QtWidgets.QPushButton(QtGui.QIcon(QtGui.QPixmap(
os.environ["pychemqt"] + "/images/button/remove.png")), "")
self.botonRemove.clicked.connect(self.remove)
layoutButton.addWidget(self.botonRemove)
self.buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Close)
self.buttonBox.rejected.connect(self.close)
layoutButton.addWidget(self.buttonBox)
for linea in self.fig.ax.lines[2:]:
self.lista.addItem(linea._label)
self.lista.currentRowChanged.connect(self.update)
self.lista.setCurrentRow(0)
self.label.textChanged.connect(partial(self.changeValue, "label"))
self.Grosor.valueChanged.connect(partial(self.changeValue, "lw"))
self.Linea.valueChanged.connect(partial(self.changeValue, "ls"))
self.Linea.currentIndexChanged.connect(self.ColorButton.setEnabled)
self.ColorButton.valueChanged.connect(
partial(self.changeValue, "color"))
self.Marca.valueChanged.connect(partial(self.changeValue, "marker"))
self.Marca.currentIndexChanged.connect(self.markerSize.setEnabled)
self.Marca.currentIndexChanged.connect(self.markerfacecolor.setEnabled)
self.Marca.currentIndexChanged.connect(self.markerEdgeSize.setEnabled)
self.Marca.currentIndexChanged.connect(self.markeredgecolor.setEnabled)
self.markerSize.valueChanged.connect(partial(self.changeValue, "ms"))
self.markerfacecolor.valueChanged.connect(
partial(self.changeValue, "mfc"))
self.markerEdgeSize.valueChanged.connect(
partial(self.changeValue, "mew"))
self.markeredgecolor.valueChanged.connect(
partial(self.changeValue, "mec"))
self.visible.toggled.connect(partial(self.changeValue, "visible"))
self.antialiases.toggled.connect(
partial(self.changeValue, "antialiases"))
self.annotationVisible.toggled.connect(
partial(self.changeValue, "textVisible"))
self.annotationLabel.textChanged.connect(
partial(self.changeValue, "textLabel"))
self.annotationLabel.colorChanged.connect(
partial(self.changeValue, "textcolor"))
self.annotationLabel.fontChanged.connect(
partial(self.changeValue, "textfont"))
self.annotationPos.valueChanged.connect(
partial(self.changeValue, "textPos"))
self.annotationRot.valueChanged.connect(
partial(self.changeValue, "textRot"))
self.annotationVA.currentIndexChanged.connect(
partial(self.changeValue, "textVA"))
def _updateLabel(self, label, value):
label.setValue(value)
def update(self, i):
"""Fill format widget with value of selected line"""
if self.semaforo.available() > 0:
self.semaforo.acquire(1)
line = self.fig.ax.lines[i+2]
self.label.setText(line.get_label())
self.Grosor.setValue(line.get_lw())
self.Linea.setCurrentValue(line.get_ls())
self.ColorButton.setColor(line.get_color())
self.Marca.setCurrentValue(line.get_marker())
self.markerSize.setValue(line.get_ms())
self.markerfacecolor.setColor(line.get_mfc())
self.markerEdgeSize.setValue(line.get_mew())
self.markeredgecolor.setColor(line.get_mec())
self.visible.setChecked(line.get_visible())
self.antialiases.setChecked(line.get_antialiased())
try:
self.annotationVisible.setChecked(line.text.get_visible())
self.annotationLabel.setText(line.text.get_text())
self.annotationPos.setValue(line.text.pos)
self.annotationRot.setValue(line.text.get_rotation())
va = ["center", "top", "bottom", "baseline", "center_baseline"]
self.annotationVA.setCurrentIndex(va.index(line.text.get_va()))
except AttributeError:
self.annotationVisible.setChecked(False)
self.semaforo.release(1)
def changeValue(self, key, value):
"""Update plot data"""
if self.semaforo.available() > 0:
self.semaforo.acquire(1)
linea = self.fig.ax.lines[self.lista.currentRow()+2]
func = {"label": linea.set_label,
"lw": linea.set_lw,
"ls": linea.set_ls,
"marker": linea.set_marker,
"color": linea.set_color,
"ms": linea.set_ms,
"mfc": linea.set_mfc,
"mew": linea.set_mew,
"mec": linea.set_mec,
"visible": linea.set_visible,
"antialiases": linea.set_antialiased,
"textVisible": linea.text.set_visible,
"textLabel": linea.text.set_text,
"textcolor": linea.text.set_color,
"textfont": linea.text.set_fontproperties,
"textPos": linea.text.set_position,
"textRot": linea.text.set_rotation,
"textVA": linea.text.set_va}
if key == "textPos":
linea.text.pos = value
xi = linea.get_xdata()
yi = linea.get_ydata()
i = int(len(xi)*value/100)
if i >= len(xi):
i = len(yi)-1
value = xi[i], yi[i]
elif key == "textVA":
va = ["center", "top", "bottom", "baseline", "center_baseline"]
value = va[value]
elif key == "textfont":
value = convertFont(value)
elif key in ("ls", "marker", "color", "mfc", "mec"):
value = str(value)
func[key](value)
if key == "label":
self.lista.currentItem().setText(value)
else:
self.fig.draw()
self.parent.dirty[self.parent.idTab] = True
self.parent.saveControl()
self.semaforo.release(1)
def add(self):
"""Add a isoline to plot"""
dialog = AddLine()
if dialog.exec_():
method = getMethod()
projectConfig = self.parent.currentConfig
points = get_points(config.Preferences)
self.parent.progressBar.setVisible(True)
index = projectConfig.getint("MEoS", "fluid")
fluid = getClassFluid(method, index)
prop = dialog.tipo.currentIndex()
value = dialog.input[prop].value
Tmin, Tmax, Pmin, Pmax = getLimit(fluid, projectConfig)
Pmax = Pmax*1000
T = concatenate([
linspace(Tmin, 0.9*fluid.Tc, points),
linspace(0.9*fluid.Tc, 0.99*fluid.Tc, points),
linspace(0.99*fluid.Tc, fluid.Tc, points),
linspace(fluid.Tc, 1.01*fluid.Tc, points),
linspace(1.01*fluid.Tc, 1.1*fluid.Tc, points),
linspace(1.1*fluid.Tc, Tmax, points)]).tolist()
P = concatenate([
logspace(log10(Pmin), log10(0.9*fluid.Pc), points),
linspace(0.9*fluid.Pc, 0.99*fluid.Pc, points),
linspace(0.99*fluid.Pc, fluid.Pc, points),
linspace(fluid.Pc, 1.01*fluid.Pc, points),
linspace(1.01*fluid.Pc, 1.1*fluid.Pc, points),
logspace(log10(1.1*fluid.Pc), log10(Pmax), points)]).tolist()
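            # Both grids concentrate points around the critical point; the
            # loop below removes the duplicated values where consecutive
            # segments share an endpoint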
for i in range(5, 0, -1):
del T[points*i]
del P[points*i]
if prop == 0:
                # Calculate isotherm line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isotherm line..."))
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "P", "T", P, value,
0, 0, 100, 1, self.parent.progressBar)
var = "T"
name = "Isotherm"
unit = unidades.Temperature
elif prop == 1:
# Calculate isobar line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isobar line..."))
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "T", "P", T, value,
0, 0, 100, 1, self.parent.progressBar)
var = "P"
name = "Isobar"
unit = unidades.Pressure
elif prop == 2:
# Calculate isoenthalpic line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isoenthalpic line..."))
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "P", "h", P, value,
0, 0, 100, 1, self.parent.progressBar)
var = "h"
name = "Isoenthalpic"
unit = unidades.Enthalpy
elif prop == 3:
# Calculate isoentropic line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isoentropic line..."))
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "T", "s", T, value,
0, 0, 100, 1, self.parent.progressBar)
var = "s"
name = "Isoentropic"
unit = unidades.SpecificHeat
elif prop == 4:
# Calculate isochor line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isochor line..."))
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "T", "v", T, value,
0, 0, 100, 1, self.parent.progressBar)
var = "v"
name = "Isochor"
unit = unidades.SpecificVolume
elif prop == 5:
# Calculate isodensity line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isodensity line..."))
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "T", "rho", T, value,
0, 0, 100, 1, self.parent.progressBar)
var = "rho"
name = "Isochor"
unit = unidades.Density
elif prop == 6:
# Calculate isoquality line
self.parent.statusBar().showMessage(
QtWidgets.QApplication.translate(
"pychemqt", "Adding isoquality line..."))
T = T[:3*points-2]
fluidos = calcIsoline(
fluid, self.parent.currentConfig, "T", "x", T, value,
0, 0, 100, 1, self.parent.progressBar)
var = "x"
name = "Isoquality"
unit = unidades.Dimensionless
line = {value: {}}
for x in ThermoAdvanced.propertiesKey():
dat_propiedad = []
for fluido in fluidos:
num = fluido.__getattribute__(x)
if isinstance(num, str):
dat_propiedad.append(num)
elif x in ("f", "fi"):
dat_propiedad.append(num[0])
elif num is not None:
dat_propiedad.append(num._data)
else:
dat_propiedad.append(None)
line[value][x] = dat_propiedad
style = getLineFormat(config.Preferences, name)
functionx = _getunitTransform(self.plotMEoS.x)
functiony = _getunitTransform(self.plotMEoS.y)
functionz = _getunitTransform(self.plotMEoS.z)
transform = (functionx, functiony, functionz)
ax = self.plotMEoS.x, self.plotMEoS.y, self.plotMEoS.z
plotIsoline(line, ax, var, unit, self.plotMEoS, transform, **style)
self.plotMEoS.plot.draw()
self.parent.progressBar.setVisible(False)
self.parent.dirty[self.parent.idTab] = True
self.parent.saveControl()
self.lista.addItem(self.fig.ax.lines[-1].get_label())
self.lista.setCurrentRow(self.lista.count()-1)
# Save new line to file
data = self.plotMEoS._getData()
if var not in data:
data[var] = {}
data[var][value] = line[value]
self.plotMEoS._saveData(data)
def remove(self):
"""Remove a line from plot"""
self.parent.statusBar().showMessage(QtWidgets.QApplication.translate(
"pychemqt", "Deleting line..."))
QtWidgets.QApplication.processEvents()
# Remove data from file
data = self.plotMEoS._getData()
txt = self.lista.currentItem().text().split()
var = txt[0]
units = {"T": unidades.Temperature,
"P": unidades.Pressure,
"v": unidades.SpecificVolume,
"rho": unidades.Density,
"h": unidades.Enthalpy,
"s": unidades.SpecificHeat,
"x": unidades.Dimensionless}
if var in units:
unit = units[var]
for key in data[var]:
str = unit(key).str
if str[1:] == " ".join(txt[2:]):
del data[var][key]
self.plotMEoS._saveData(data)
break
# Remove line to plot and update list element
index = self.lista.currentRow()
del self.fig.ax.lines[index+2]
if index == 0:
self.lista.setCurrentRow(1)
else:
self.lista.setCurrentRow(index-1)
self.lista.takeItem(index)
self.fig.draw()
self.parent.statusBar().clearMessage()
self.parent.dirty[self.parent.idTab] = True
self.parent.saveControl()
class AddLine(QtWidgets.QDialog):
"""Dialog to add new isoline to plot"""
lineas = [(QtWidgets.QApplication.translate("pychemqt", "Isotherm"),
unidades.Temperature, None),
(QtWidgets.QApplication.translate("pychemqt", "Isobar"),
unidades.Pressure, None),
(QtWidgets.QApplication.translate("pychemqt", "Isoenthalpic"),
unidades.Enthalpy, None),
(QtWidgets.QApplication.translate("pychemqt", "Isoentropic"),
unidades.SpecificHeat, "SpecificEntropy"),
(QtWidgets.QApplication.translate("pychemqt", "Isochor"),
unidades.SpecificVolume, None),
(QtWidgets.QApplication.translate("pychemqt", "Isodensity"),
unidades.Density, None),
(QtWidgets.QApplication.translate("pychemqt", "Isoquality"),
float, None)]
def __init__(self, parent=None):
super(AddLine, self).__init__(parent)
self.setWindowTitle(
QtWidgets.QApplication.translate("pychemqt", "Add Line to Plot"))
layout = QtWidgets.QGridLayout(self)
self.tipo = QtWidgets.QComboBox()
layout.addWidget(self.tipo, 1, 1, 1, 2)
layout.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Value")), 2, 1)
self.input = []
for title, unidad, magnitud in self.lineas:
self.input.append(Entrada_con_unidades(unidad, magnitud))
layout.addWidget(self.input[-1], 2, 2)
self.tipo.addItem(title)
self.buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
layout.addWidget(self.buttonBox, 10, 1, 1, 2)
self.isolineaChanged(0)
self.tipo.currentIndexChanged.connect(self.isolineaChanged)
def isolineaChanged(self, int):
"""Let show only the active inputs"""
for i in self.input:
i.setVisible(False)
self.input[int].setVisible(True)
class EditAxis(QtWidgets.QDialog):
"""Dialog to configure axes plot properties, label, margins, scales"""
def __init__(self, fig=None, parent=None):
super(EditAxis, self).__init__(parent)
self.setWindowTitle(
QtWidgets.QApplication.translate("pychemqt", "Edit Axis"))
layout = QtWidgets.QGridLayout(self)
self.fig = fig
lytTitle = QtWidgets.QHBoxLayout()
lb = QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Title"))
lb.setSizePolicy(
QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Maximum)
lytTitle.addWidget(lb)
self.title = InputFont()
lytTitle.addWidget(self.title)
layout.addLayout(lytTitle, 1, 1, 1, self.fig.dim)
self.axisX = AxisWidget("x", self)
layout.addWidget(self.axisX, 2, 1)
self.axisY = AxisWidget("y", self)
layout.addWidget(self.axisY, 2, 2)
if self.fig.dim == 3:
self.axisZ = AxisWidget("z", self)
layout.addWidget(self.axisZ, 2, 3)
self.axisX.scale.setEnabled(False)
self.axisY.scale.setEnabled(False)
self.axisZ.scale.setEnabled(False)
self.gridCheckbox = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Show Grid"))
layout.addWidget(self.gridCheckbox, 3, 1, 1, self.fig.dim)
layout.addItem(QtWidgets.QSpacerItem(
10, 10, QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding), 5, 1, 1, self.fig.dim)
self.buttonBox = QtWidgets.QDialogButtonBox(
QtWidgets.QDialogButtonBox.Close)
self.buttonBox.rejected.connect(self.reject)
layout.addWidget(self.buttonBox, 10, 1, 1, self.fig.dim)
if fig:
self.populate()
self.title.textChanged.connect(partial(self.update, "title"))
self.title.colorChanged.connect(partial(self.update, "titlecolor"))
self.title.fontChanged.connect(partial(self.update, "titlefont"))
self.axisX.label.textChanged.connect(partial(self.update, "xlabel"))
self.axisX.label.colorChanged.connect(
partial(self.update, "xlabelcolor"))
self.axisX.label.fontChanged.connect(
partial(self.update, "xlabelfont"))
self.axisY.label.textChanged.connect(partial(self.update, "ylabel"))
self.axisY.label.colorChanged.connect(
partial(self.update, "ylabelcolor"))
self.axisY.label.fontChanged.connect(
partial(self.update, "ylabelfont"))
self.gridCheckbox.toggled.connect(partial(self.update, "grid"))
self.axisX.scale.toggled.connect(partial(self.update, "xscale"))
self.axisY.scale.toggled.connect(partial(self.update, "yscale"))
self.axisX.min.valueChanged.connect(partial(self.update, "xmin"))
self.axisY.min.valueChanged.connect(partial(self.update, "ymin"))
self.axisX.max.valueChanged.connect(partial(self.update, "xmax"))
self.axisY.max.valueChanged.connect(partial(self.update, "ymax"))
if self.fig.dim == 3:
self.axisZ.label.textChanged.connect(
partial(self.update, "zlabel"))
self.axisZ.label.colorChanged.connect(
partial(self.update, "zlabelcolor"))
self.axisZ.label.fontChanged.connect(
partial(self.update, "zlabelfont"))
self.axisZ.min.valueChanged.connect(partial(self.update, "zmin"))
self.axisZ.max.valueChanged.connect(partial(self.update, "zmax"))
def populate(self):
"""Fill widget with plot parameters"""
self.title.setText(self.fig.ax.get_title())
self.title.setColor(QtGui.QColor(self.fig.ax.title.get_color()))
self.axisX.label.setText(self.fig.ax.get_xlabel())
xcolor = self.fig.ax.xaxis.get_label().get_color()
self.axisX.label.setColor(QtGui.QColor(xcolor))
self.axisY.label.setText(self.fig.ax.get_ylabel())
ycolor = self.fig.ax.yaxis.get_label().get_color()
self.axisY.label.setColor(QtGui.QColor(ycolor))
self.gridCheckbox.setChecked(self.fig.ax._gridOn)
self.axisX.scale.setChecked(self.fig.ax.get_xscale() == "log")
self.axisY.scale.setChecked(self.fig.ax.get_yscale() == "log")
xmin, xmax = self.fig.ax.get_xlim()
self.axisX.min.setValue(xmin)
self.axisX.max.setValue(xmax)
ymin, ymax = self.fig.ax.get_ylim()
self.axisY.min.setValue(ymin)
self.axisY.max.setValue(ymax)
if self.fig.dim == 3:
self.axisZ.label.setText(self.fig.ax.get_zlabel())
zcolor = self.fig.ax.zaxis.get_label().get_color()
self.axisZ.label.setColor(QtGui.QColor(zcolor))
zmin, zmax = self.fig.ax.get_zlim()
self.axisZ.min.setValue(zmin)
self.axisZ.max.setValue(zmax)
def update(self, key, value):
"""Update plot
Input:
key: plot parameter key to update
value: new value for key
"""
f = {"xlabel": self.fig.ax.set_xlabel,
"xlabelcolor": self.fig.ax.xaxis.get_label().set_color,
"xlabelfont": self.fig.ax.xaxis.get_label().set_fontproperties,
"ylabel": self.fig.ax.set_ylabel,
"ylabelcolor": self.fig.ax.yaxis.get_label().set_color,
"ylabelfont": self.fig.ax.yaxis.get_label().set_fontproperties,
"title": self.fig.ax.set_title,
"titlecolor": self.fig.ax.title.set_color,
"titlefont": self.fig.ax.title.set_fontproperties,
"xscale": self.fig.ax.set_xscale,
"yscale": self.fig.ax.set_yscale,
"grid": self.fig.ax.grid}
if self.fig.dim == 3:
f["zlabel"] = self.fig.ax.set_zlabel
f["zlabelcolor"] = self.fig.ax.zaxis.get_label().set_color
f["zlabelfont"] = self.fig.ax.zaxis.get_label().set_fontproperties
if key in ("xscale", "yscale"):
if value:
value = "log"
else:
value = "linear"
if key == "grid":
self.fig.ax._gridOn = value
if key in ("titlecolor", "xlabelcolor", "ylabelcolor"):
value = str(value)
if key in ("titlefont", "xlabelfont", "ylabelfont"):
value = convertFont(value)
if key in ("xmin", "xmax"):
xmin = self.axisX.min.value
xmax = self.axisX.max.value
self.fig.ax.set_xlim(xmin, xmax)
elif key in ("ymin", "ymax"):
ymin = self.axisY.min.value
ymax = self.axisY.max.value
self.fig.ax.set_ylim(ymin, ymax)
elif key in ("zmin", "zmax"):
ymin = self.axisZ.min.value
ymax = self.axisZ.max.value
self.fig.ax.set_zlim(ymin, ymax)
else:
f[key](value)
self.parent().dirty[self.parent().idTab] = True
self.parent().saveControl()
self.fig.draw()
def convertFont(qfont):
"""Convert qt QFont class properties to FontProperties to use in
matplotlib
Parameters
----------
qfont : QFont
QFont with properties to extract
Returns
-------
font : FontProperties
FontProperties instance to use in any matplotlib text instance
"""
family = str(qfont.family())
    # Matplotlib uses a 0-1000 weight scale, Qt only 0-100
weight = 10*qfont.weight()
if qfont.style() == 0:
style = "normal"
elif qfont.style() == 1:
style = "italic"
elif qfont.style() == 2:
style = "oblique"
else:
style = None
# print(family, style, qfont.stretch(), weight, qfont.pointSize())
font = FontProperties(family, style, None, qfont.stretch(),
weight, qfont.pointSize())
return font
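# --- Illustrative sketch (hypothetical helper, not from the original module) ---
# Shows the intended use of convertFont: take a QFont chosen in the UI (e.g. via
# the InputFont widget used above) and apply the converted FontProperties to a
# matplotlib text artist such as an axes title.
def _apply_qfont_demo(axes, qfont):
    prop = convertFont(qfont)
    axes.title.set_fontproperties(prop)
    return prop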
class AxisWidget(QtWidgets.QGroupBox):
"""Dialog to configure axes plot properties"""
def __init__(self, name, parent=None):
title = name+" "+QtWidgets.QApplication.translate("pychemqt", "Axis")
super(AxisWidget, self).__init__(title, parent)
lyt = QtWidgets.QGridLayout(self)
lyt.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "Label")), 1, 1)
self.label = InputFont()
lyt.addWidget(self.label, 1, 2)
self.scale = QtWidgets.QCheckBox(
QtWidgets.QApplication.translate("pychemqt", "Logarithmic scale"))
lyt.addWidget(self.scale, 2, 1, 1, 2)
lyt.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "from")), 3, 1)
self.min = Entrada_con_unidades(float, min=float("-inf"))
lyt.addWidget(self.min, 3, 2)
lyt.addWidget(QtWidgets.QLabel(
QtWidgets.QApplication.translate("pychemqt", "to")), 4, 1)
self.max = Entrada_con_unidades(float, min=float("-inf"))
lyt.addWidget(self.max, 4, 2)
def calcIsoline(f, conf, var, fix, vvar, vfix, ini, step, end, total, bar):
"""Procedure to calculate isoline. In isotherm and isobar add to calculate
point the saturated states in two-phases region"""
fluidos = []
fail = 0
N_points = get_points(config.Preferences)
fase = None
rhoo = 0
To = 0
for Ti in vvar:
kwargs = {var: Ti, fix: vfix, "rho0": rhoo, "T0": To}
print(kwargs)
fluido = calcPoint(f, conf, **kwargs)
avance = ini + end*step/total + \
end/total*(len(fluidos)+fail)/(len(vvar)+N_points)
bar.setValue(avance)
QtWidgets.QApplication.processEvents()
if fluido and fluido.status and (fluido.rho != rhoo or fluido.T != To):
fluidos.append(fluido)
# Save values of last point as initial guess for next calculation
if var not in ("T", "P") or fix not in ("T", "P"):
rhoo = fluido.rho
To = fluido.T
if var in ("T", "P") and fix in ("T", "P"):
if fase is None:
fase = fluido.x
if fase == fluido.x:
continue
print("Calculating two phase additional point")
if fluido.P < f.Pc and fluido.T < f.Tc:
if fase != fluido.x and fase <= 0:
xi = linspace(0, 1, N_points)
elif fase != fluido.x and fase >= 1:
xi = linspace(1, 0, N_points)
for x in xi:
print({fix: vfix, "x": x})
fluido_x = calcPoint(f, conf, **{fix: vfix, "x": x})
fluidos.insert(-1, fluido_x)
avance = ini + end*step/total + end/total * \
(len(fluidos)+fail)/(len(vvar)+N_points)
bar.setValue(avance)
fase = fluido.x
else:
fail += 1
return fluidos
def get_points(Preferences):
"""Get point number to plot lines from Preferences"""
definition = Preferences.getint("MEOS", "definition")
if definition == 1:
points = 10
elif definition == 2:
points = 25
elif definition == 3:
points = 50
elif definition == 4:
points = 100
else:
points = 5
return points
def getLineFormat(Preferences, name):
"""get matplotlib line format from preferences
Preferences: configparser instance with pycheqmt preferences
name: name of isoline"""
format = formatLine(Preferences, "MEOS", name)
# Anotation
if name != "saturation":
format["annotate"] = Preferences.getboolean("MEOS", name+"label")
format["pos"] = Preferences.getint("MEOS", name+"position")
format["unit"] = Preferences.getboolean("MEOS", name+"units")
format["variable"] = Preferences.getboolean("MEOS", name+"variable")
return format
def plotIsoline(data, axis, title, unidad, grafico, transform, **format):
"""Procedure to plot any isoline
Input:
data: section of property isoline of matrix data
axis: array with keys of three axis, z None in 2D plot
title: key of isoline type
unidad: unidades subclass with isoline unit
grafico: PlotMEoS instance to plot data
        transform: unit transform function to use the configured units in plots
format: any matplotlib plot kwargs
"""
x, y, z = axis
fx, fy, fz = transform
xscale = grafico.plot.ax.get_xscale()
yscale = grafico.plot.ax.get_yscale()
annotate = format.pop("annotate")
pos = format.pop("pos")
unit = format.pop("unit")
variable = format.pop("variable")
for key in sorted(data.keys()):
xi = list(map(fx, data[key][x]))
yi = list(map(fy, data[key][y]))
label = "%s =%s" % (title, unidad(key).str)
if z:
zi = list(map(fz, data[key][z]))
line, = grafico.plot.ax.plot(xi, yi, zi, label=label, **format)
else:
line, = grafico.plot.ax.plot(xi, yi, label=label, **format)
# Add annotate for isolines
if not z:
if variable and unit:
txt = label
elif variable:
txt = "%s =%s" % (title, unidad(key).config())
elif unit:
txt = unidad(key).str
else:
txt = unidad(key).config()
xmin, xmax = grafico.plot.ax.get_xlim()
ymin, ymax = grafico.plot.ax.get_ylim()
i = int(len(xi)*pos/100)
if i >= len(xi):
i = len(yi)-2
if pos > 50:
j = i-1
else:
j = i+1
if xscale == "log":
f_x = (log(xi[i])-log(xi[j]))/(log(xmax)-log(xmin))
else:
f_x = (xi[i]-xi[j])/(xmax-xmin)
if yscale == "log":
f_y = (log(yi[i])-log(yi[j]))/(log(ymax)-log(ymin))
else:
f_y = (yi[i]-yi[j])/(ymax-ymin)
rot = atan(f_y/f_x)*360/2/pi
kw = {}
kw["ha"] = "center"
kw["va"] = "center_baseline"
kw["rotation_mode"] = "anchor"
kw["rotation"] = rot
kw["size"] = "small"
text = grafico.plot.ax.text(xi[i], yi[i], txt, **kw)
line.text = text
line.text.pos = pos
if not annotate:
text.set_visible(False)
def plot2D3D(grafico, data, Preferences, x, y, z=None):
"""Plot procedure
Parameters:
grafico: plot
data: data to plot
Preferences: ConfigParser instance from mainwindow preferencesChanged
x: Key for x axis
y: Key for y axis
z: Key for z axis Optional for 3D plot"""
functionx = _getunitTransform(x)
functiony = _getunitTransform(y)
functionz = _getunitTransform(z)
transform = (functionx, functiony, functionz)
# Plot saturation lines
format = getLineFormat(Preferences, "saturation")
if x == "P" and y == "T":
satLines = QtWidgets.QApplication.translate(
"pychemqt", "Saturation Line"),
else:
satLines = [
QtWidgets.QApplication.translate(
"pychemqt", "Liquid Saturation Line"),
QtWidgets.QApplication.translate(
"pychemqt", "Vapor Saturation Line")]
for fase, label in enumerate(satLines):
xsat = list(map(functionx, data["saturation_%i" % fase][x]))
ysat = list(map(functiony, data["saturation_%i" % fase][y]))
if z:
zsat = list(map(functionz, data["saturation_%i" % fase][z]))
grafico.plot.ax.plot(xsat, ysat, zsat, label=label, **format)
else:
grafico.plot.ax.plot(xsat, ysat, label=label, **format)
# Plot melting and sublimation lines
if "melting" in data:
label = QtWidgets.QApplication.translate("pychemqt", "Melting Line")
xmel = list(map(functionx, data["melting"][x]))
ymel = list(map(functiony, data["melting"][y]))
if z:
zmel = list(map(functionz, data["melting"][z]))
grafico.plot.ax.plot(xmel, ymel, zmel, label=label, **format)
else:
grafico.plot.ax.plot(xmel, ymel, label=label, **format)
if "sublimation" in data:
xsub = list(map(functionx, data["sublimation"][x]))
ysub = list(map(functiony, data["sublimation"][y]))
label = QtWidgets.QApplication.translate(
"pychemqt", "Sublimation Line")
if z:
            zsub = list(map(functionz, data["sublimation"][z]))
            grafico.plot.ax.plot(xsub, ysub, zsub, label=label, **format)
else:
grafico.plot.ax.plot(xsub, ysub, label=label, **format)
# Plot quality isolines
if x not in ["P", "T"] or y not in ["P", "T"] or z:
format = getLineFormat(Preferences, "Isoquality")
plotIsoline(data["x"], (x, y, z), "x", unidades.Dimensionless, grafico,
transform, **format)
# Plot isotherm lines
if x != "T" and y != "T" or z:
format = getLineFormat(Preferences, "Isotherm")
plotIsoline(data["T"], (x, y, z), "T", unidades.Temperature, grafico,
transform, **format)
# Plot isobar lines
if x != "P" and y != "P" or z:
format = getLineFormat(Preferences, "Isobar")
plotIsoline(data["P"], (x, y, z), "P", unidades.Pressure, grafico,
transform, **format)
# Plot isochor lines
if x not in ["rho", "v"] and y not in ["rho", "v"] or z:
format = getLineFormat(Preferences, "Isochor")
plotIsoline(data["v"], (x, y, z), "v", unidades.SpecificVolume,
grafico, transform, **format)
# Plot isodensity lines
if "rho" in data:
plotIsoline(data["rho"], (x, y, z), "rho", unidades.Density,
grafico, transform, **format)
# Plot isoenthalpic lines
if x != "h" and y != "h" or z:
format = getLineFormat(Preferences, "Isoenthalpic")
plotIsoline(data["h"], (x, y, z), "h", unidades.Enthalpy, grafico,
transform, **format)
# Plot isoentropic lines
if x != "s" and y != "s" or z:
format = getLineFormat(Preferences, "Isoentropic")
plotIsoline(data["s"], (x, y, z), "s", unidades.SpecificHeat, grafico,
transform, **format)
def _getunitTransform(eje):
"""Return the axis unit transform function to map data to configurated unit
Parameters:
seq: list with axis property keys
"""
if not eje:
return None
elif eje == "T":
index = config.getMainWindowConfig().getint("Units", "Temperature")
func = [float, unidades.K2C, unidades.K2R, unidades.K2F, unidades.K2Re]
return func[index]
else:
unit = meos.units[meos.keys.index(eje)]
factor = unit(1.).config()
def f(val):
if val is not None and type(val) != str:
return val*factor
else:
return nan
return f
# return lambda val: val*factor if val is not None else nan
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
conf = config.getMainWindowConfig()
# SteamTables = AddPoint(conf)
# SteamTables=AddLine(None)
# SteamTables = Dialog(conf)
SteamTables = Plot3D()
SteamTables.show()
sys.exit(app.exec_())
| gpl-3.0 | 5,307,471,223,947,510,000 | 39.531127 | 79 | 0.575989 | false |
Flexget/Flexget | flexget/event.py | 1 | 3312 | """
Provides small event framework
"""
from typing import Callable, List, Dict, Any
from loguru import logger
logger = logger.bind(name='event')
class Event:
"""Represents one registered event."""
def __init__(self, name: str, func: Callable, priority: int = 128) -> None:
self.name = name
self.func = func
self.priority = priority
def __call__(self, *args, **kwargs):
return self.func(*args, **kwargs)
def __eq__(self, other):
return self.priority == other.priority
def __lt__(self, other):
return self.priority < other.priority
def __gt__(self, other):
return self.priority > other.priority
def __str__(self):
return f'<Event(name={self.name},func={self.func.__name__},priority={self.priority})>'
__repr__ = __str__
def __hash__(self):
return hash((self.name, self.func, self.priority))
_events: Dict[str, List[Event]] = {}
def event(name: str, priority: int = 128) -> Callable[[Callable], Callable]:
"""Register event to function with a decorator"""
def decorator(func: Callable) -> Callable:
add_event_handler(name, func, priority)
return func
return decorator
def get_events(name: str) -> List[Event]:
"""
:param String name: event name
:return: List of :class:`Event` for *name* ordered by priority
"""
if name not in _events:
raise KeyError('No such event %s' % name)
_events[name].sort(reverse=True)
return _events[name]
def add_event_handler(name: str, func: Callable, priority: int = 128) -> Event:
"""
:param string name: Event name
:param function func: Function that acts as event handler
:param priority: Priority for this hook
:return: Event created
:rtype: Event
:raises Exception: If *func* is already registered in an event
"""
events = _events.setdefault(name, [])
for event in events:
if event.func == func:
raise ValueError(
'%s has already been registered as event listener under name %s'
% (func.__name__, name)
)
logger.trace('registered function {} to event {}', func.__name__, name)
event = Event(name, func, priority)
events.append(event)
return event
def remove_event_handlers(name: str) -> None:
"""Removes all handlers for given event `name`."""
_events.pop(name, None)
def remove_event_handler(name: str, func: Callable) -> None:
"""Remove `func` from the handlers for event `name`."""
for e in list(_events.get(name, [])):
if e.func is func:
_events[name].remove(e)
def fire_event(name: str, *args, **kwargs) -> Any:
"""
Trigger an event with *name*. If event is not hooked by anything nothing happens. If a function that hooks an event
returns a value, it will replace the first argument when calling next function.
:param name: Name of event to be called
:param args: List of arguments passed to handler function
:param kwargs: Key Value arguments passed to handler function
"""
if name in _events:
for event in get_events(name):
result = event(*args, **kwargs)
if result is not None:
args = (result,) + args[1:]
return args and args[0]
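# --- Illustrative sketch (hypothetical, not from the original module) ---
# Minimal end-to-end use of the event framework above: register two handlers
# with different priorities and fire the event. The event name and handler
# functions are invented for the example.
if __name__ == '__main__':
    @event('demo_event', priority=200)
    def loud_handler(value):
        logger.info('loud handler saw {}', value)
        return value + 1  # replaces the first argument for later handlers
    @event('demo_event')  # default priority 128, runs after loud_handler
    def quiet_handler(value):
        logger.info('quiet handler saw {}', value)
    print(fire_event('demo_event', 1))  # handlers run in descending priority order; prints 2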
| mit | 3,774,398,002,613,047,300 | 28.571429 | 119 | 0.617452 | false |
initcrash/transdb | transdb/fields.py | 1 | 4827 | from django.db import models
from django.conf import settings
from django.utils.translation import get_language
from django.utils.translation import ugettext as _
from django.utils.encoding import force_unicode, smart_str, smart_unicode
from django.forms.fields import Field
from django.forms import ValidationError
from widgets import TransCharWidget, TransTextWidget
def get_default_language_name():
'''
Get language from default language specified by LANGUAGE_CODE in settings
Used in error messages
'''
lang_name = ''
for lang in settings.LANGUAGES:
if lang[0] == settings.LANGUAGE_CODE:
lang_name = lang[1]
break
return force_unicode(lang_name)
class TransDbValue(unicode):
'''
This class implements a unicode string, but with a hidden attribute raw_data.
When used as a string it returns the translation of the current language
raw_data attribute stores a dictionary with all translations
Also implements a method "get_in_language(language)" that returns the translation on any available language
'''
raw_data = {}
def get_in_language(self, language):
if self.raw_data and self.raw_data.has_key(language):
return self.raw_data[language]
else:
return u''
def set_in_language(self, language, value):
self.raw_data[language] = value
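# --- Illustrative sketch (hypothetical, not from the original module) ---
# Rough demonstration of the TransDbValue contract described in its docstring,
# kept in the same Python 2 style as this module; the language codes are arbitrary.
def _transdbvalue_demo():
    value = TransDbValue(u'hello')
    value.raw_data = {'en': u'hello', 'es': u'hola'}
    value.set_in_language('fr', u'bonjour')
    # unicode(value) -> u'hello'; missing languages fall back to u''
    return value.get_in_language('es'), value.get_in_language('de')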
class TransFormField(Field):
'''
forms field, used when ModelForm (or deprecated form_for_model/form_form_instance) is called
Also implements form validation in admin
'''
def clean(self, value):
if isinstance(value, dict) and self.required:
filled_value = [ v for v in value.values() if bool(v) ]
if not filled_value:
raise ValidationError, _("This field is required.")
return super(TransFormField, self).clean(value)
class TransField(models.Field):
'''
Model field to be subclassed
Used for storing a string in many languages at database (with python's dictionary format)
    pickle module could be used, but wouldn't allow searching on fields
'''
def get_internal_type(self):
return 'TextField'
def to_python(self, value):
if isinstance(value, TransDbValue):
return value
if isinstance(value, dict): # formfield method makes this function be called with value as a dict
python_value = value
else:
try:
python_value = eval(value)
for k,v in python_value.items():
python_value[k] = smart_unicode(v)
except Exception:
python_value = None
if isinstance(python_value, dict):
if python_value.has_key(get_language()) and python_value[get_language()]:
result = TransDbValue(python_value[get_language()])
elif python_value.has_key(settings.LANGUAGE_CODE) and python_value[settings.LANGUAGE_CODE]:
result = TransDbValue(python_value[settings.LANGUAGE_CODE])
else:
val = "bal"
for item in python_value.items():
try:
val = item[1]
except:
pass
if val: break
result = TransDbValue(python_value.items()[0][1])
result.raw_data = python_value
else:
result = TransDbValue(value)
result.raw_data = {settings.LANGUAGE_CODE: value}
return result
def get_db_prep_save(self, value):
if not isinstance(value, TransDbValue):
return value
value = [u"'%s': '''%s'''" % (k, v) for k, v in value.raw_data.items()]
value = u'{%s}' % u','.join(value)
return smart_str(value)
def formfield(self, **kwargs):
defaults = {'form_class': TransFormField}
defaults.update(kwargs)
return super(TransField, self).formfield(**defaults)
def flatten_data(self, follow, obj=None):
'''
for serializing objects
'''
raw_data = self._get_val_from_obj(obj).raw_data.copy()
for k,v in raw_data.items():
raw_data[k] = smart_str(v)
return {self.attname: raw_data}
class TransCharField(TransField):
'''
TransField used with CharField widget
'''
__metaclass__ = models.SubfieldBase
def formfield(self, **kwargs):
kwargs['widget'] = TransCharWidget
return super(TransCharField, self).formfield(**kwargs)
class TransTextField(TransField):
'''
TransField used with CharField widget
'''
__metaclass__ = models.SubfieldBase
def formfield(self, **kwargs):
kwargs['widget'] = TransTextWidget
return super(TransTextField, self).formfield(**kwargs)
| gpl-3.0 | 5,646,233,992,082,309,000 | 34.755556 | 111 | 0.618189 | false |
opennode/nodeconductor-openstack | src/waldur_openstack/openstack_tenant/migrations/0008_backup_schedule.py | 1 | 2253 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import waldur_core.logging.loggers
import django.utils.timezone
import waldur_core.core.fields
import waldur_core.core.validators
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0007_backup_backuprestoration'),
]
operations = [
migrations.CreateModel(
name='BackupSchedule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=500, verbose_name='description', blank=True)),
('name', models.CharField(max_length=150, verbose_name='name', validators=[waldur_core.core.validators.validate_name])),
('uuid', waldur_core.core.fields.UUIDField()),
('error_message', models.TextField(blank=True)),
('schedule', waldur_core.core.fields.CronScheduleField(max_length=15, validators=[waldur_core.core.validators.validate_cron_schedule, waldur_core.core.validators.MinCronValueValidator(1)])),
('next_trigger_at', models.DateTimeField(null=True)),
('timezone', models.CharField(default=django.utils.timezone.get_current_timezone_name, max_length=50)),
('is_active', models.BooleanField(default=False)),
('retention_time', models.PositiveIntegerField(help_text=b'Retention time in days, if 0 - backup will be kept forever')),
('maximal_number_of_backups', models.PositiveSmallIntegerField()),
('instance', models.ForeignKey(related_name='backup_schedules', to='openstack_tenant.Instance')),
],
options={
'abstract': False,
},
bases=(models.Model, waldur_core.logging.loggers.LoggableMixin),
),
migrations.AddField(
model_name='backup',
name='backup_schedule',
field=models.ForeignKey(related_name='backups', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='openstack_tenant.BackupSchedule', null=True),
),
]
| mit | 3,486,332,113,417,544,700 | 49.066667 | 206 | 0.640923 | false |
jocelynmass/nrf51 | sdk/nRF51_SDK_9.0.0_2e23562/examples/dfu/experimental/master_control_panel_patch/init_packet.py | 1 | 6654 | # Copyright (c) 2015, Nordic Semiconductor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Nordic Semiconductor ASA nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from enum import Enum
import struct
INIT_PACKET_USES_CRC16 = 0
INIT_PACKET_USES_HASH = 1
INIT_PACKET_EXT_USES_ECDS = 2
class PacketField(Enum):
PACKET_VERSION = 1
COMPRESSION_TYPE = 2
DEVICE_TYPE = 3
DEVICE_REVISION = 4
APP_VERSION = 5
REQUIRED_SOFTDEVICES_ARRAY = 6
OPT_DATA = 7
NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID = 8
NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH = 9
NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH = 10
NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16 = 11
NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS = 12
class Packet(object):
"""
Class that implements the INIT packet format.
http://developer.nordicsemi.com/nRF51_SDK/doc/7.1.0/s110/html/a00065.html
"""
UNSIGNED_SHORT = "H"
UNSIGNED_INT = "I"
UNSIGNED_CHAR = "B"
CHAR_ARRAY = "s"
def __init__(self, init_packet_fields):
"""
:param init_packet_fields: Dictionary with packet fields
"""
self.init_packet_fields = init_packet_fields
def generate_packet(self):
"""
Generates a binary packet from provided init_packet_fields provided in constructor.
This version includes the extended data
:return str: Returns a string representing the init_packet (in binary)
"""
# Create struct format string based on keys that are
# present in self.init_packet_fields
format_string = self.__generate_struct_format_string()
args = []
for key in sorted(self.init_packet_fields.keys(), key=lambda x: x.value):
# Add length to fields that required that
if key in [PacketField.REQUIRED_SOFTDEVICES_ARRAY,
PacketField.OPT_DATA]:
args.append(len(self.init_packet_fields[key]))
args.extend(self.init_packet_fields[key])
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID]:
args.append(self.init_packet_fields[key]) # Extended packet id format
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH]:
args.append(self.init_packet_fields[key]) # Length of firmware image
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16]:
args.append(self.init_packet_fields[key]) # CRC-16
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH]:
args.append(self.init_packet_fields[key]) # SHA-256 hash of firmware image
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS]:
                args.append(self.init_packet_fields[key])  # ECDS of base init packet using Curve P-256 and SHA-256
else:
args.append(self.init_packet_fields[key])
return struct.pack(format_string, *args)
def __generate_struct_format_string(self):
format_string = "<" # Use little endian format with standard sizes for python,
# see https://docs.python.org/2/library/struct.html
for key in sorted(self.init_packet_fields.keys(), key=lambda x: x.value):
if key in [PacketField.PACKET_VERSION,
PacketField.COMPRESSION_TYPE,
PacketField.DEVICE_TYPE,
PacketField.DEVICE_REVISION,
]:
format_string += Packet.UNSIGNED_SHORT
elif key in [PacketField.APP_VERSION]:
format_string += Packet.UNSIGNED_INT
elif key in [PacketField.REQUIRED_SOFTDEVICES_ARRAY]:
array_elements = self.init_packet_fields[key]
format_string += Packet.UNSIGNED_SHORT # Add length field to format packet
for _ in range(len(array_elements)):
format_string += Packet.UNSIGNED_SHORT
elif key in [PacketField.OPT_DATA]:
format_string += Packet.UNSIGNED_SHORT # Add length field to optional data
format_string += "{0}{1}".format(len(self.init_packet_fields[key]), Packet.CHAR_ARRAY)
elif key in [PacketField.NORDIC_PROPRIETARY_OPT_DATA_EXT_PACKET_ID]:
format_string += Packet.UNSIGNED_INT # Add the extended packet id field
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_LENGTH:
format_string += Packet.UNSIGNED_INT # Add the firmware length field
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_HASH:
format_string += "32{0}".format(Packet.CHAR_ARRAY) # SHA-256 requires 32 bytes
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_FIRMWARE_CRC16:
format_string += Packet.UNSIGNED_SHORT
elif key == PacketField.NORDIC_PROPRIETARY_OPT_DATA_INIT_PACKET_ECDS:
format_string += "64{0}".format(Packet.CHAR_ARRAY) # ECDS based on P-256 using SHA-256 requires 64 bytes
return format_string | gpl-2.0 | 6,692,166,544,894,411,000 | 45.884892 | 121 | 0.651488 | false |
HPI-SWA-Lab/RSqueak | rsqueakvm/plugins/immutability_plugin.py | 1 | 3782 | """
RSqueak/VM plugin which provides support for immutable objects.
Immutable objects can be created as copy of existing objects
or from a list of arguments. The package `ImmutableObjects`, located in
`/repository`, needs to be loaded in the image.
"""
from rsqueakvm.error import PrimitiveFailedError, UnwrappingError
from rsqueakvm.model.variable import W_BytesObject, W_WordsObject
from rsqueakvm.plugins.immutability import patch_w_object
from rsqueakvm.plugins.immutability.bytes import W_Immutable_BytesObject
from rsqueakvm.plugins.immutability.pointers import (
select_immutable_pointers_class)
from rsqueakvm.plugins.immutability.words import W_Immutable_WordsObject
from rsqueakvm.plugins.plugin import Plugin
from rsqueakvm.storage_classes import BYTES, POINTERS, WORDS
class ImmutabilityPlugin(Plugin):
def setup(self):
patch_w_object()
plugin = ImmutabilityPlugin()
@plugin.expose_primitive(unwrap_spec=[object])
def primitiveIsImmutable(interp, s_frame, w_recv):
"""
Tests if `w_recv` is an immutable object.
:param interp: The interpreter proxy.
:param s_frame: The stack frame.
:param w_recv: The receiver object.
:returns: `w_true` if `w_recv` is immutable object, otherwise `w_false`.
"""
if w_recv.is_immutable():
return interp.space.w_true
return interp.space.w_false
@plugin.expose_primitive(unwrap_spec=[object, object])
def primitiveImmutableFrom(interp, s_frame, w_cls, w_obj):
"""
Creates an immutable copy of a given Smalltalk object.
:param interp: The interpreter proxy.
:param s_frame: The stack frame.
    :param w_cls: The immutable object's target class.
:param w_obj: The Smalltalk object to produce an immutable copy from.
:returns: An immutable copy of `w_obj` with class `w_cls`.
:raises: PrimitiveFailedError
"""
space = interp.space
instance_kind = w_cls.as_class_get_shadow(space).get_instance_kind()
if instance_kind == POINTERS:
pointers = w_obj.fetch_all(space)
cls = select_immutable_pointers_class(pointers)
return cls(space, w_cls, pointers)
elif instance_kind == BYTES and isinstance(w_obj, W_BytesObject):
return W_Immutable_BytesObject(space, w_cls, w_obj.bytes)
elif instance_kind == WORDS and isinstance(w_obj, W_WordsObject):
return W_Immutable_WordsObject(space, w_cls, w_obj.words)
raise PrimitiveFailedError
@plugin.expose_primitive(unwrap_spec=None)
def primitiveImmutableFromArgs(interp, s_frame, argcount):
"""
Returns an immutable instance of the receiver (which is a class) with
all fields initialized with the arguments given.
:param interp: The interpreter proxy.
:param s_frame: The stack frame.
:param argcount: The number of arguments.
:returns: An immutable object.
:raises: PrimitiveFailedError
"""
if argcount == 0:
raise PrimitiveFailedError
w_args = s_frame.pop_and_return_n(argcount)[:]
w_cls = s_frame.pop()
space = interp.space
instance_kind = w_cls.as_class_get_shadow(space).get_instance_kind()
if instance_kind == POINTERS:
cls = select_immutable_pointers_class(w_args)
return cls(space, w_cls, w_args)
elif instance_kind == BYTES:
try:
bytes = [chr(interp.space.unwrap_uint(b)) for b in w_args]
except (ValueError, TypeError, UnwrappingError):
raise PrimitiveFailedError
return W_Immutable_BytesObject(space, w_cls, bytes)
elif instance_kind == WORDS:
try:
words = [interp.space.unwrap_uint(b) for b in w_args]
except UnwrappingError:
raise PrimitiveFailedError
return W_Immutable_WordsObject(space, w_cls, words)
raise PrimitiveFailedError
| bsd-3-clause | 4,435,581,936,282,197,000 | 35.019048 | 76 | 0.705447 | false |
trezor/micropython | tests/run-natmodtests.py | 1 | 5757 | #!/usr/bin/env python3
# This file is part of the MicroPython project, http://micropython.org/
# The MIT License (MIT)
# Copyright (c) 2019 Damien P. George
import os
import subprocess
import sys
import argparse
sys.path.append('../tools')
import pyboard
# Paths for host executables
CPYTHON3 = os.getenv('MICROPY_CPYTHON3', 'python3')
MICROPYTHON = os.getenv('MICROPY_MICROPYTHON', '../ports/unix/micropython_coverage')
NATMOD_EXAMPLE_DIR = '../examples/natmod/'
# Supported tests and their corresponding mpy module
TEST_MAPPINGS = {
'btree': 'btree/btree_$(ARCH).mpy',
'framebuf': 'framebuf/framebuf_$(ARCH).mpy',
'uheapq': 'uheapq/uheapq_$(ARCH).mpy',
'urandom': 'urandom/urandom_$(ARCH).mpy',
'ure': 'ure/ure_$(ARCH).mpy',
'uzlib': 'uzlib/uzlib_$(ARCH).mpy',
}
# Code to allow a target MicroPython to import an .mpy from RAM
injected_import_hook_code = """\
import sys, uos, uio
class __File(uio.IOBase):
def __init__(self):
self.off = 0
def ioctl(self, request, arg):
return 0
def readinto(self, buf):
buf[:] = memoryview(__buf)[self.off:self.off + len(buf)]
self.off += len(buf)
return len(buf)
class __FS:
def mount(self, readonly, mkfs):
pass
def chdir(self, path):
pass
def stat(self, path):
if path == '__injected.mpy':
return tuple(0 for _ in range(10))
else:
raise OSError(-2) # ENOENT
def open(self, path, mode):
return __File()
uos.mount(__FS(), '/__remote')
uos.chdir('/__remote')
sys.modules['{}'] = __import__('__injected')
"""
class TargetSubprocess:
def __init__(self, cmd):
self.cmd = cmd
def close(self):
pass
def run_script(self, script):
try:
p = subprocess.run(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, input=script)
return p.stdout, None
except subprocess.CalledProcessError as er:
return b'', er
class TargetPyboard:
def __init__(self, pyb):
self.pyb = pyb
self.pyb.enter_raw_repl()
def close(self):
self.pyb.exit_raw_repl()
self.pyb.close()
def run_script(self, script):
try:
self.pyb.enter_raw_repl()
output = self.pyb.exec_(script)
output = output.replace(b'\r\n', b'\n')
return output, None
except pyboard.PyboardError as er:
return b'', er
def run_tests(target_truth, target, args, stats):
for test_file in args.files:
# Find supported test
for k, v in TEST_MAPPINGS.items():
if test_file.find(k) != -1:
test_module = k
test_mpy = v.replace('$(ARCH)', args.arch)
break
else:
print('---- {} - no matching mpy'.format(test_file))
continue
# Read test script
with open(test_file, 'rb') as f:
test_file_data = f.read()
# Create full test with embedded .mpy
try:
with open(NATMOD_EXAMPLE_DIR + test_mpy, 'rb') as f:
test_script = b'__buf=' + bytes(repr(f.read()), 'ascii') + b'\n'
except OSError:
print('---- {} - mpy file not compiled'.format(test_file))
continue
test_script += bytes(injected_import_hook_code.format(test_module), 'ascii')
test_script += test_file_data
# Run test under MicroPython
result_out, error = target.run_script(test_script)
# Work out result of test
extra = ''
if error is None and result_out == b'SKIP\n':
result = 'SKIP'
elif error is not None:
result = 'FAIL'
extra = ' - ' + str(error)
else:
# Check result against truth
try:
with open(test_file + '.exp', 'rb') as f:
result_exp = f.read()
error = None
except OSError:
result_exp, error = target_truth.run_script(test_file_data)
if error is not None:
result = 'TRUTH FAIL'
elif result_out != result_exp:
result = 'FAIL'
print(result_out)
else:
result = 'pass'
# Accumulate statistics
stats['total'] += 1
if result == 'pass':
stats['pass'] += 1
elif result == 'SKIP':
stats['skip'] += 1
else:
stats['fail'] += 1
# Print result
print('{:4} {}{}'.format(result, test_file, extra))
def main():
cmd_parser = argparse.ArgumentParser(description='Run dynamic-native-module tests under MicroPython')
cmd_parser.add_argument('-p', '--pyboard', action='store_true', help='run tests via pyboard.py')
cmd_parser.add_argument('-d', '--device', default='/dev/ttyACM0', help='the device for pyboard.py')
cmd_parser.add_argument('-a', '--arch', default='x64', help='native architecture of the target')
cmd_parser.add_argument('files', nargs='*', help='input test files')
args = cmd_parser.parse_args()
target_truth = TargetSubprocess([CPYTHON3])
if args.pyboard:
target = TargetPyboard(pyboard.Pyboard(args.device))
else:
target = TargetSubprocess([MICROPYTHON])
stats = {'total': 0, 'pass': 0, 'fail':0, 'skip': 0}
run_tests(target_truth, target, args, stats)
target.close()
target_truth.close()
print('{} tests performed'.format(stats['total']))
print('{} tests passed'.format(stats['pass']))
if stats['fail']:
print('{} tests failed'.format(stats['fail']))
if stats['skip']:
print('{} tests skipped'.format(stats['skip']))
if stats['fail']:
sys.exit(1)
if __name__ == "__main__":
main()
| mit | -5,604,099,957,010,733,000 | 29.62234 | 105 | 0.565746 | false |
Cowa/Django-FileManager | FileManager/settings.py | 1 | 5489 | # Django settings for FileManagerHTML project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'fileManagerDB', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Paris'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-EN'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = '/home/brice/Programmation/Django-FileManager/media'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = 'media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '(!f!+ygp*i+75v9nvqxrgnk@tt36t+v3%ppdlshos95ct4z74f'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'FileManager.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'FileManager.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'manager',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| gpl-2.0 | -9,046,051,876,094,814,000 | 33.740506 | 127 | 0.692476 | false |
Frodox/buildbot | master/buildbot/db/builders.py | 1 | 5881 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import print_function
import sqlalchemy as sa
from twisted.internet import defer
from buildbot.db import base
class BuildersConnectorComponent(base.DBConnectorComponent):
def findBuilderId(self, name, autoCreate=True):
tbl = self.db.model.builders
name_hash = self.hashColumns(name)
return self.findSomethingId(
tbl=tbl,
whereclause=(tbl.c.name_hash == name_hash),
insert_values=dict(
name=name,
name_hash=name_hash,
), autoCreate=autoCreate)
@defer.inlineCallbacks
def updateBuilderInfo(self, builderid, description, tags):
# convert to tag IDs first, as necessary
def toTagid(tag):
if isinstance(tag, type(1)):
return defer.succeed(tag)
ssConnector = self.master.db.tags
return ssConnector.findTagId(tag)
tagsids = [r[1] for r in (yield defer.DeferredList(
[toTagid(tag) for tag in tags],
fireOnOneErrback=True,
consumeErrors=True))]
def thd(conn):
builders_tbl = self.db.model.builders
builders_tags_tbl = self.db.model.builders_tags
transaction = conn.begin()
q = builders_tbl.update(
whereclause=(builders_tbl.c.id == builderid))
conn.execute(q, description=description).close()
# remove previous builders_tags
conn.execute(builders_tags_tbl.delete(
whereclause=((builders_tags_tbl.c.builderid == builderid)))).close()
# add tag ids
if tagsids:
conn.execute(builders_tags_tbl.insert(),
[dict(builderid=builderid, tagid=tagid)
for tagid in tagsids]).close()
transaction.commit()
defer.returnValue((yield self.db.pool.do(thd)))
def getBuilder(self, builderid):
d = self.getBuilders(_builderid=builderid)
@d.addCallback
def first(bldrs):
if bldrs:
return bldrs[0]
return None
return d
def addBuilderMaster(self, builderid=None, masterid=None):
def thd(conn, no_recurse=False):
try:
tbl = self.db.model.builder_masters
q = tbl.insert()
conn.execute(q, builderid=builderid, masterid=masterid)
except (sa.exc.IntegrityError, sa.exc.ProgrammingError):
pass
return self.db.pool.do(thd)
def removeBuilderMaster(self, builderid=None, masterid=None):
def thd(conn, no_recurse=False):
tbl = self.db.model.builder_masters
conn.execute(tbl.delete(
whereclause=((tbl.c.builderid == builderid) &
(tbl.c.masterid == masterid))))
return self.db.pool.do(thd)
def getBuilders(self, masterid=None, _builderid=None):
def thd(conn):
bldr_tbl = self.db.model.builders
bm_tbl = self.db.model.builder_masters
j = bldr_tbl.outerjoin(bm_tbl)
# if we want to filter by masterid, we must join to builder_masters
# again, so we can still get the full set of masters for each
# builder
if masterid is not None:
limiting_bm_tbl = bm_tbl.alias('limiting_bm')
j = j.join(limiting_bm_tbl,
onclause=(bldr_tbl.c.id == limiting_bm_tbl.c.builderid))
q = sa.select(
[bldr_tbl.c.id, bldr_tbl.c.name,
bldr_tbl.c.description, bm_tbl.c.masterid],
from_obj=[j],
order_by=[bldr_tbl.c.id, bm_tbl.c.masterid])
if masterid is not None:
# filter the masterid from the limiting table
q = q.where(limiting_bm_tbl.c.masterid == masterid)
if _builderid is not None:
q = q.where(bldr_tbl.c.id == _builderid)
# now group those by builderid, aggregating by masterid
rv = []
last = None
for row in conn.execute(q).fetchall():
# pylint: disable=unsubscriptable-object
if not last or row['id'] != last['id']:
last = self._thd_row2dict(conn, row)
rv.append(last)
if row['masterid']:
last['masterids'].append(row['masterid'])
return rv
return self.db.pool.do(thd)
def _thd_row2dict(self, conn, row):
# get tags
builders_tags = self.db.model.builders_tags
tags = self.db.model.tags
from_clause = tags
from_clause = from_clause.join(builders_tags)
q = sa.select([tags.c.name],
(builders_tags.c.builderid == row.id)).select_from(from_clause)
tags = [r.name for r in
conn.execute(q).fetchall()]
return dict(id=row.id, name=row.name, masterids=[],
description=row.description,
tags=tags)
| gpl-2.0 | -5,907,836,894,642,851,000 | 37.437908 | 85 | 0.577113 | false |
ssorj/pencil | setup.py | 1 | 1079 | #!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from distutils.core import setup
setup(name="pencil",
version="1",
url="http://www.ssorj.net/projects/pencil.html",
author="Justin Ross",
author_email="[email protected]",
py_modules=["pencil"],
package_dir={"": "python"})
| apache-2.0 | -6,843,731,593,604,329,000 | 36.206897 | 62 | 0.728452 | false |
apanda/modeling | tests/examples/ConvertedAclFwTest.py | 1 | 1751 | import components
def ConvertedAclFwTest ():
"""ACL firewall test"""
ctx = components.Context (['a', 'b', 'c', 'd', 'fw'],\
['ip_a', 'ip_b', 'ip_c', 'ip_d', 'ip_f'])
net = components.Network (ctx)
a = components.EndHost(ctx.a, net, ctx)
b = components.EndHost(ctx.b, net, ctx)
c = components.EndHost(ctx.c, net, ctx)
d = components.EndHost(ctx.d, net, ctx)
fw = components.ConvertedAclFw(ctx.fw, net, ctx)
net.setAddressMappings([(a, ctx.ip_a), \
(b, ctx.ip_b), \
(c, ctx.ip_c), \
(d, ctx.ip_d), \
(fw, ctx.ip_f)])
addresses = [ctx.ip_a, ctx.ip_b, ctx.ip_c, ctx.ip_d, ctx.ip_f]
net.RoutingTable(a, [(x, fw) for x in addresses])
net.RoutingTable(b, [(x, fw) for x in addresses])
net.RoutingTable(c, [(x, fw) for x in addresses])
net.RoutingTable(d, [(x, fw) for x in addresses])
#net.SetGateway(a, fw)
#net.SetGateway(b, fw)
#net.SetGateway(c, fw)
#net.SetGateway(d, fw)
net.RoutingTable(fw, [(ctx.ip_a, a), \
(ctx.ip_b, b), \
(ctx.ip_c, c), \
(ctx.ip_d, d)])
fw.AddAcls([(ctx.ip_a, ctx.ip_b), (ctx.ip_c, ctx.ip_d)])
net.Attach(a, b, c, d, fw)
endhosts = [a, b, c, d]
class AclFwReturn (object):
def __init__ (self, net, ctx, a, b, c, d, fw):
self.net = net
self.ctx = ctx
self.a = a
self.b = b
self.c = c
self.d = d
self.fw = fw
self.check = components.PropertyChecker (ctx, net)
return AclFwReturn(net, ctx, a, b, c, d, fw)
| bsd-3-clause | -7,122,530,099,745,691,000 | 38.795455 | 71 | 0.475157 | false |
bufke/chat-experiment | chatroom/models.py | 1 | 1963 | from django.db import models
from swampdragon.models import SelfPublishModel
from allauth.account.signals import user_signed_up
from .dragon_serializers import MessageSerializer, ProfileSerializer
class Profile(SelfPublishModel, models.Model):
serializer_class = ProfileSerializer
user = models.OneToOneField('auth.User', primary_key=True)
display_name = models.CharField(max_length=100)
is_online = models.BooleanField(default=False)
status = models.CharField(max_length=255, blank=True)
def __str__(self):
return self.display_name
@staticmethod
def create_profile(request, user, **kwargs):
return Profile.objects.create(
user=user,
display_name='{}.{}'.format(
user.first_name, user.last_name).strip('.'),
)
user_signed_up.connect(Profile.create_profile)
class Organization(models.Model):
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
users = models.ManyToManyField(Profile)
def __str__(self):
return self.name
class Room(models.Model):
name = models.CharField(max_length=75)
organization = models.ManyToManyField(Organization, blank=True)
users = models.ManyToManyField(
Profile,
help_text="Users in this room. May include non organization users.")
is_active = models.BooleanField(default=True)
add_by_default = models.BooleanField(
default=True,
help_text="Organization users will automatically join this room.")
def __str__(self):
return self.name
class Message(SelfPublishModel, models.Model):
serializer_class = MessageSerializer
user = models.ForeignKey('auth.User')
text = models.TextField()
posted = models.DateTimeField(auto_now_add=True)
room = models.ForeignKey(Room)
class Meta:
ordering = ['-posted']
def __str__(self):
return '{}: {}'.format(self.user, self.text)
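# --- Illustrative sketch (hypothetical helper, not part of the original app) ---
# Assumes a configured Django project with these models migrated; saving a
# Message is expected to trigger SwampDragon's self-publishing (via
# MessageSerializer), so connected chat clients receive the new message.
def _post_message(user, room, text):
    return Message.objects.create(user=user, room=room, text=text)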
| agpl-3.0 | -5,963,895,665,154,620,000 | 30.15873 | 76 | 0.683647 | false |
ianb/sheets | sheets/env.py | 1 | 9837 | import os
import ast
import traceback
import time
import sys
import types
import builtins
import collections
import astor
import weakref
from .jsonify import jsonify, jsonify_print, jsonify_print_expr
from .datalayer import Analysis, Execution, FileEdit
from .router import send
from . import stdlib
def now():
return int(time.time() * 1000)
class Environment:
extra_globals = {}
active = weakref.WeakSet()
def __init__(self, path):
self.path = path
self.globals = {
"print": jsonify_print,
"print_expr": jsonify_print_expr,
"jsonify": jsonify,
"jsonify_print": jsonify_print,
"listdir": stdlib.listdir,
"__builtins__": __builtins__,
"FILES": stdlib.FilesDict(self.path),
}
for name in stdlib.builtin_names:
self.globals[name] = getattr(stdlib, name)
self._cached_analysis = {}
self.active.add(self)
predefined_names = set(["parsed"])
def init_commands(self):
"""Returns a list of commands that represent the existing state of the
filesystem"""
for path in os.listdir(self.path):
if path.endswith(".json"):
continue
if not os.path.isfile(os.path.join(self.path, path)):
continue
try:
with open(os.path.join(self.path, path), "r") as fp:
content = fp.read()
yield FileEdit(filename=path, content=content, external_edit=True)
except UnicodeDecodeError:
pass
def fixup_globals(self):
for name, value in self.extra_globals.items():
self.globals.setdefault(name, value)
def execute(self, filename, content, subexpressions=False):
print("Executing", filename, subexpressions)
self.fixup_globals()
stdout = Stdout()
compiled = None
try:
parsed = ast.parse(content, filename, mode='exec')
RewriteExprToPrint(subexpressions).walk(parsed)
var_inspect = VariableInspector()
var_inspect.walk(parsed)
print("varsed used:", sorted(var_inspect.used), "set:", sorted(var_inspect.set), "imported:", var_inspect.imports)
compiled = compile(parsed, filename, 'exec')
except:
stdout.write(traceback.format_exc())
def displayhook(value):
stdout.write_repr(value)
orig_displayhook = sys.displayhook
sys.displayhook = displayhook
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sys.stdout = stdout
sys.stderr = stdout
self.globals["parsed"] = parsed
self.globals["ast"] = ast
globals_before = self.globals.copy()
start = time.time()
try:
try:
if compiled:
exec(compiled, self.globals)
except:
traceback.print_exc()
finally:
end = time.time()
            sys.displayhook = orig_displayhook
sys.stdout = orig_stdout
sys.stderr = orig_stderr
local_scope = dict(
(name, value)
for name, value in self.globals.items()
if name not in globals_before or globals_before[name] is not value)
defines = dict(
(key, {
"json": jsonify(local_scope[key]),
"type": str(type(local_scope[key])),
})
for key in local_scope
if not isinstance(local_scope[key], types.ModuleType))
command = Execution(
filename=filename,
content=content,
emitted=stdout.emitted,
defines=defines,
start_time=int(start * 1000),
end_time=int(end * 1000),
exec_time=int((end - start) * 1000),
with_subexpressions=subexpressions,
)
send(command)
def analyze(self, filename, content):
print("Analyzing", filename)
properties = {}
try:
parsed = ast.parse(content, filename, mode='exec')
var_inspect = VariableInspector()
var_inspect.walk(parsed)
        except:
            # Report the parse failure to the client below
            properties["parse_error"] = jsonify(traceback.format_exc())
else:
properties = var_inspect.json
if properties != self._cached_analysis.get(filename):
self._cached_analysis[filename] = properties
send(Analysis(filename=filename, content=content, properties=properties))
class VariableInspector(astor.TreeWalk):
builtin_names = dir(builtins)
def init_variables(self):
self.used = set()
self.set = set()
self.imports = set()
self.in_target = False
@property
def json(self):
used = set(self.used)
for key in self.builtin_names:
used.discard(key)
for key in self.set:
used.discard(key)
for key in Environment.predefined_names:
used.discard(key)
return {
"variables_used": list(used),
"variables_set": list(self.set),
"imports": list(self.imports)
}
def pre_arg(self):
self.set.add(self.cur_node.arg)
def pre_Name(self):
if self.in_target:
# Actually this is a set
self.set.add(self.cur_node.id)
else:
self.used.add(self.cur_node.id)
def pre_For(self):
self.process_assignment(self.cur_node.target)
def pre_Assign(self):
self.process_assignment(self.cur_node.targets)
def pre_withitem(self):
self.process_assignment(self.cur_node.optional_vars)
def pre_ExceptHandler(self):
if self.cur_node.name:
self.set.add(self.cur_node.name)
def pre_alias(self):
# Used in imports
name = self.cur_node.asname or self.cur_node.name
name = name.split(".")[0]
self.set.add(name)
self.imports.add(name)
def pre_FunctionDef(self):
self.set.add(self.cur_node.name)
def pre_ListComp(self):
self.process_assignment(self.cur_node.elt)
def process_assignment(self, item):
if isinstance(item, list):
for x in item:
self.process_assignment(x)
return
old_in_target = self.in_target
self.in_target = True
try:
self.walk(item)
finally:
self.in_target = old_in_target
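# --- Illustrative sketch (hypothetical, not from the original module) ---
# Shows what VariableInspector reports for a small invented snippet.
def _variable_inspector_demo():
    inspector = VariableInspector()
    inspector.walk(ast.parse("import os\nresult = os.path.join(base, name)\n", '<demo>', mode='exec'))
    # Expected: variables_set contains 'result' and 'os' (from the import),
    # variables_used contains 'base' and 'name', imports contains 'os'.
    return inspector.json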
class RewriteExprToPrint(astor.TreeWalk):
expr_node_types = """
UnaryOp
BinOp
BoolOp
Compare
Call
IfExp
Attribute
Subscript
ListComp SetComp GeneratorExp DictComp
""".split()
# Skipped:
# UAdd USub Not Invert
# Add Sub Mult Div FloorDiv Mod Pow LShift RShift BitOr BitXor BitAnd MatMult
# And Or
# Eq NotEq Lt Gt GtE Is IsNot In NotIn
# Index Slice ExtSlice
def __init__(self, subexpressions=False):
self.subexpressions = subexpressions
self.id_counter = 0
astor.TreeWalk.__init__(self)
if self.subexpressions:
for method in self.expr_node_types:
self.pre_handlers[method] = self.save_node_name
self.post_handlers[method] = self.fixup_subexpressions
del self.post_handlers['Module']
def post_Name(self):
if not self.subexpressions:
return
if isinstance(self.cur_node.ctx, ast.Load):
self.replace(self.rewrite_expr(self.cur_node))
def post_Module(self):
node = self.cur_node
node.body = [
self.rewrite_expr(n) if isinstance(n, ast.Expr) else n
for n in node.body]
def save_node_name(self):
self.cur_node.astor_repr = astor.to_source(self.cur_node)
def fixup_subexpressions(self):
new_node = self.rewrite_expr(self.cur_node, self.cur_node.astor_repr)
self.replace(new_node)
def rewrite_expr(self, node, expr_string=None):
if expr_string is None:
expr_string = astor.to_source(node)
node_string = ast.Str(s=expr_string)
self.id_counter += 1
if isinstance(node, ast.Expr):
new_node = ast.Expr(
ast.Call(
func=ast.Name(id='print_expr', ctx=ast.Load()),
args=[node_string, node.value, ast.Num(n=self.id_counter)],
keywords=[],
starargs=None,
)
)
new_node.is_print_expr = True
else:
new_node = ast.Call(
func=ast.Name(id='print_expr', ctx=ast.Load()),
args=[node_string, node, ast.Num(n=self.id_counter)],
keywords=[],
starargs=None,
)
new_node.is_print_expr = True
ast.fix_missing_locations(new_node)
return new_node
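# --- Illustrative sketch (hypothetical, not from the original module) ---
# Demonstrates the rewrite performed above: a bare expression statement is
# wrapped in a print_expr(...) call so its value can be reported to the client.
def _rewrite_demo():
    tree = ast.parse("1 + 2\n", '<demo>', mode='exec')
    RewriteExprToPrint(subexpressions=False).walk(tree)
    return astor.to_source(tree)  # roughly: print_expr('1 + 2', 1 + 2, 1)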
class Stdout:
total_exprs_limit = 100
expr_limit = 10
def __init__(self):
self.emitted = []
self.total_exprs_printed = 0
self.exprs_printed = collections.Counter()
def write(self, content):
self.emitted.append({
"type": "print",
"time": now(),
"parts": [{"type": "str", "str": content}],
})
def writejson(self, json):
assert json.get("type"), "JSON objects must have a type"
json.setdefault("time", now())
self.emitted.append(json)
def write_repr(self, o):
self.emitted.append(jsonify(o))
def flush(self):
pass
def add_global(name, value):
Environment.extra_globals[name] = value
Environment.predefined_names.add(name)
for env in Environment.active:
env.globals.setdefault(name, value)
| mit | 8,401,066,148,762,453,000 | 29.549689 | 126 | 0.564196 | false |
OmkarPathak/Python-Programs | OOP/P11_Property decorators.py | 1 | 1069 | #This shows the usage of property decorators
#Python @property is one of the built-in decorators. The main purpose of any decorator is to change your class methods or attributes in such a way that the users need not make any additional changes in their code.
#Without property decorators
class BankAccount:
def __init__(self,name,balance):
self.name=name
self.balance=balance
self.total= self.name+ " has "+self.balance+ " dollars in the account"
user1=BankAccount("Elon Musk","10000")
user1.name="Tim cook"
print(user1.name)
print(user1.total)
# Output: Tim cook
# Elon Musk has 10000 dollars in the account
#With property decorators
class BankAccount:
def __init__(self,name,balance):
self.name=name
self.balance=balance
@property
def total(self):
return self.name+ " has "+self.balance+ " dollars in the account"
user1=BankAccount("Elon Musk","10000")
user1.name="Tim cook"
print(user1.name)
print(user1.total)
#Output: Tim cook
# Tim cook has 10000 dollars in the account
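#Extra illustration (not part of the original example): a @property can also
#define a setter, so that assigning to the attribute updates the underlying
#fields instead of raising AttributeError.
class BankAccountWithSetter:
    def __init__(self, name, balance):
        self.name = name
        self.balance = balance
    @property
    def total(self):
        return self.name + " has " + self.balance + " dollars in the account"
    @total.setter
    def total(self, value):
        #Expects a string like "<name> has <balance> dollars in the account"
        self.name, _, rest = value.partition(" has ")
        self.balance = rest.replace(" dollars in the account", "")
user2 = BankAccountWithSetter("Elon Musk", "10000")
user2.total = "Tim cook has 500 dollars in the account"
print(user2.name) #Output: Tim cook
print(user2.balance) #Output: 500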
| gpl-3.0 | 6,660,911,220,316,598,000 | 27.131579 | 217 | 0.705332 | false |
cvandeplas/plaso | plaso/parsers/plist_plugins/timemachine_test.py | 1 | 2768 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2014 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the timemachine plist plugin."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import plist as plist_formatter
from plaso.parsers import plist
from plaso.parsers.plist_plugins import timemachine
from plaso.parsers.plist_plugins import test_lib
class TimeMachinePluginTest(test_lib.PlistPluginTestCase):
"""Tests for the timemachine plist plugin."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._plugin = timemachine.TimeMachinePlugin()
self._parser = plist.PlistParser()
def testProcess(self):
"""Tests the Process function."""
test_file = self._GetTestFilePath(['com.apple.TimeMachine.plist'])
plist_name = 'com.apple.timemachine.plist'
event_object_generator = self._ParsePlistFileWithPlugin(
self._parser, self._plugin, test_file, plist_name)
event_objects = self._GetEventObjectsFromQueue(event_object_generator)
    self.assertEqual(len(event_objects), 13)
timestamps = []
for event_object in event_objects:
timestamps.append(event_object.timestamp)
expected_timestamps = frozenset([
1379165051000000, 1380098455000000, 1380810276000000, 1381883538000000,
1382647890000000, 1383351739000000, 1384090020000000, 1385130914000000,
1386265911000000, 1386689852000000, 1387723091000000, 1388840950000000,
1388842718000000])
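    # The expected values are plaso timestamps: POSIX seconds scaled to
    # microseconds (e.g. 1388842718000000 == 1388842718 * 10**6).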
self.assertTrue(set(timestamps) == expected_timestamps)
event_object = event_objects[0]
self.assertEqual(event_object.root, u'/Destinations')
self.assertEqual(event_object.key, u'item/SnapshotDates')
expected_desc = (
u'TimeMachine Backup in BackUpFast '
u'(5B33C22B-A4A1-4024-A2F5-C9979C4AAAAA)')
self.assertEqual(event_object.desc, expected_desc)
expected_string = u'/Destinations/item/SnapshotDates {}'.format(
expected_desc)
expected_short = expected_string[:77] + u'...'
self._TestGetMessageStrings(
event_object, expected_string, expected_short)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 4,823,953,317,189,606,000 | 37.444444 | 79 | 0.731575 | false |
alex-dot/upwdchg | tests/python-tokendata-test.py | 2 | 4266 | #!/usr/bin/env python3
# -*- mode:python; tab-width:4; c-basic-offset:4; intent-tabs-mode:nil; -*-
# ex: filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab autoindent smartindent
#
# Universal Password Changer (UPwdChg)
# Copyright (C) 2014-2018 Cedric Dufour <http://cedric.dufour.name>
# Author: Cedric Dufour <http://cedric.dufour.name>
#
# The Universal Password Changer (UPwdChg) is free software:
# you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, Version 3.
#
# The Universal Password Changer (UPwdChg) is distributed in the hope
# that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License for more details.
#
# SPDX-License-Identifier: GPL-3.0
# License-Filename: LICENSE/GPL-3.0.txt
#
#------------------------------------------------------------------------------
# DEPENDENCIES
#------------------------------------------------------------------------------
# UPwdChg
from UPwdChg import \
TokenReader, \
TokenWriter
# Standard
import unittest as UT
import sys
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class testTokenData_SetData(UT.TestCase):
def setUp(self):
self.oToken = TokenWriter()
def testPasswordNonceRequest(self):
self.oToken.setData_PasswordNonceRequest('test-Benützername')
def testPasswordChange(self):
self.oToken.setData_PasswordChange('test-Benützername', 'test-Paßw0rt_new', 'test-Paßw0rt_old', 'test-Paßw0rt_nonce')
def testPasswordReset(self):
self.oToken.setData_PasswordReset('test-Benützername', 'test-Paßw0rt_new', 'test-Paßw0rt_nonce')
def testPasswordNonce(self):
sNonce = self.oToken.makePasswordNonce([6, 6])
self.assertRegex(sNonce, '^[A-Za-z0-9]{6}-[A-Za-z0-9]{6}$')
lsNonce = self.oToken.splitPasswordNonce('test-Paßw0rt_nonce')
self.assertListEqual(lsNonce, ['test', 'Paßw0rt_nonce'])
self.oToken.setData_PasswordNonce('test-Benützername', 'test-Paßw0rt_nonce', 300)
class testTokenData_ReadToken(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
def testPasswordNonce(self):
self.oToken.config('./resources/frontend-private.pem', './resources/backend-public.pem')
self.assertEqual(self.oToken.readToken('./tmp/password-nonce.token'), 0)
class testTokenData_CheckData(UT.TestCase):
def setUp(self):
self.oToken = TokenReader()
self.oToken.config('./resources/frontend-private.pem', './resources/backend-public.pem')
if(self.oToken.readToken('./tmp/password-nonce.token')):
self.skipTest('Failed to read token')
def testTimestamp(self):
self.assertEqual(self.oToken.checkData_Timestamp(9999999999), 0)
self.assertEqual(self.oToken.checkData_Timestamp(0), 1)
def testExpiration(self):
self.assertIn(self.oToken.checkData_Expiration(), (0, 1))
def testPasswordNonce(self):
self.assertEqual(self.oToken.checkData_PasswordNonce('test-Benützername', 'test-Paßw0rt_nonce'), 1)
self.assertEqual(self.oToken.checkData_PasswordNonce('test-Benützername', 'test-Paßw0rt_wrong'), 2)
with self.assertRaises(RuntimeError):
self.oToken.checkData_PasswordNonce('wrong-Benützername', 'test-Paßw0rt_nonce')
with self.assertRaises(RuntimeError):
self.oToken.checkData_PasswordNonce('test-Benützername', 'wrong-Paßw0rt_nonce')
#------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------
if __name__ == '__main__':
#UT.main()
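    # The suites are added explicitly (SetData, ReadToken, CheckData) rather
    # than relying on UT.main(), and the exit status is derived from
    # wasSuccessful() so the script can be driven from a shell or CI job.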
oTestSuite = UT.TestSuite()
oTestSuite.addTest(UT.makeSuite(testTokenData_SetData))
oTestSuite.addTest(UT.makeSuite(testTokenData_ReadToken))
oTestSuite.addTest(UT.makeSuite(testTokenData_CheckData))
oTestResult = UT.TextTestRunner(verbosity=2).run(oTestSuite)
sys.exit(0 if oTestResult.wasSuccessful() else 1)
| gpl-3.0 | 3,825,925,739,626,083,000 | 37.6 | 125 | 0.63236 | false |
tcmoore3/mbuild | mbuild/tests/test_packing.py | 1 | 6856 | import os
import pytest
import numpy as np
import mbuild as mb
from mbuild.exceptions import MBuildError
from mbuild.tests.base_test import BaseTest
class TestPacking(BaseTest):
def test_fill_box(self, h2o):
filled = mb.fill_box(h2o, n_compounds=50, box=[2, 2, 2, 4, 4, 4])
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
def test_fill_box_density_box(self, h2o):
filled = mb.fill_box(h2o, n_compounds=1000, density=1000)
        assert all(3.1042931 < period < 3.1042932 for period in filled.periodicity)
def test_fill_box_aspect_ratio(self, h2o):
filled = mb.fill_box(h2o, n_compounds=1000,
density=1000, aspect_ratio=[1, 2, 1])
assert filled.periodicity[0]/filled.periodicity[1] == 0.5
assert filled.periodicity[1]/filled.periodicity[2] == 2
def test_fill_box_density_n_compounds(self, h2o):
filled = mb.fill_box(h2o, density=1000,
box=mb.Box([3.1042931, 3.1042931, 3.1042931]))
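        # At 1000 kg/m^3 a cubic box with 3.1042931 nm edges (~29.9 nm^3)
        # holds roughly 1000 water molecules, i.e. 3000 particles.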
assert filled.n_particles == 3000
def test_fill_box_compound_ratio(self, h2o, ethane):
filled = mb.fill_box(compound=[h2o, ethane], density=800,
compound_ratio=[2, 1], box=[2, 2, 2, 4, 4, 4])
n_ethane = len([c for c in filled.children if c.name == 'Ethane'])
n_water = len([c for c in filled.children if c.name == 'H2O'])
assert n_water / n_ethane == 2
def test_fill_region(self, h2o):
filled = mb.fill_region(h2o, n_compounds=50,
region=[3, 2, 2, 4, 4, 3])
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
assert np.min(filled.xyz[:,0]) >= 3
assert np.max(filled.xyz[:,2]) <= 3
def test_fill_region_box(self, h2o):
mybox = mb.Box([4, 4, 4])
filled = mb.fill_region(h2o, n_compounds=50, region=mybox)
assert filled.n_particles == 50 * 3
assert filled.n_bonds == 50 * 2
assert np.min(filled.xyz[:,0]) >= 0
assert np.max(filled.xyz[:,2]) <= 4
def test_fill_region_multiple(self, ethane, h2o):
filled = mb.fill_region(compound=[ethane, h2o], n_compounds=[2, 2],
region=[[2, 2, 2, 4, 4, 4], [4, 2, 2, 6, 4, 4]])
assert filled.n_particles == 2 * 8 + 2 * 3
assert filled.n_bonds == 2 * 7 + 2 * 2
assert np.max(filled.xyz[:16, 0]) < 4
assert np.min(filled.xyz[16:, 0]) > 4
def test_fill_region_multiple_boxes(self, ethane, h2o):
box1 = mb.Box(mins=[2, 2, 2], maxs=[4, 4, 4])
box2 = mb.Box(mins=[4, 2, 2], maxs=[6, 4, 4])
filled = mb.fill_region(compound=[ethane, h2o], n_compounds=[2, 2],
region=[box1, box2])
assert filled.n_particles == 2 * 8 + 2 * 3
assert filled.n_bonds == 2 * 7 + 2 * 2
assert np.max(filled.xyz[:16, 0]) < 4
assert np.min(filled.xyz[16:, 0]) > 4
def test_fill_region_multiple_types(self, ethane, h2o):
box1 = mb.Box(mins=[2, 2, 2], maxs=[4, 4, 4])
box2 = [4, 2, 2, 6, 4, 4]
filled = mb.fill_region(compound=[ethane, h2o], n_compounds=[2, 2],
region=[box1, box2])
assert filled.n_particles == 2 * 8 + 2 * 3
assert filled.n_bonds == 2 * 7 + 2 * 2
assert np.max(filled.xyz[:16, 0]) < 4
assert np.min(filled.xyz[16:, 0]) > 4
def test_fill_box_multiple(self, ethane, h2o):
n_solvent = 100
filled = mb.fill_box([ethane, h2o], [1, 100], box=[4, 4, 4])
assert filled.n_particles == 8 + n_solvent * 3
assert filled.n_bonds == 7 + n_solvent * 2
assert len(filled.children) == 101
def test_solvate(self, ethane, h2o):
n_solvent = 100
solvated = mb.solvate(ethane, h2o, n_solvent=n_solvent, box=[4, 4, 4])
assert solvated.n_particles == 8 + n_solvent * 3
assert solvated.n_bonds == 7 + n_solvent * 2
    def test_fill_box_seed_water(self, h2o):
        filled = mb.fill_box(h2o, n_compounds=50, box=[2, 2, 2])
        filled_same = mb.fill_box(h2o, n_compounds=50, box=[2, 2, 2])
        filled_diff = mb.fill_box(h2o, n_compounds=50, box=[2, 2, 2], seed=2)
        assert np.array_equal(filled.xyz, filled_same.xyz)
        assert not np.array_equal(filled.xyz, filled_diff.xyz)
def test_solvate_multiple(self, methane, ethane, h2o):
init_box = mb.fill_box(methane, 2, box=[4, 4, 4])
solvated = mb.solvate(init_box, [ethane, h2o], [20, 20], box=[4, 4, 4])
assert solvated.n_particles == 2*5 + 20*8 + 20*3
assert len(solvated.children) == 41
def test_fill_box_seed(self, ethane):
filled = mb.fill_box(ethane, n_compounds=20, box=[2, 2, 2])
filled_same = mb.fill_box(ethane, n_compounds=20, box=[2, 2, 2])
filled_diff = mb.fill_box(ethane, n_compounds=20, box=[2, 2, 2], seed=2)
assert np.array_equal(filled.xyz,filled_same.xyz)
assert not np.array_equal(filled.xyz,filled_diff.xyz)
def test_wrong_box(self, h2o):
with pytest.raises(MBuildError):
filled = mb.fill_box(h2o, n_compounds=50, box=[2, 2])
with pytest.raises(MBuildError):
filled = mb.fill_box(h2o, n_compounds=50, box=[2, 2, 2, 2])
def test_bad_args(self, h2o):
with pytest.raises(ValueError):
mb.fill_box(h2o, n_compounds=10)
with pytest.raises(ValueError):
mb.fill_box(h2o, density=1000)
with pytest.raises(ValueError):
mb.fill_box(h2o, box=[2, 2, 2])
with pytest.raises(ValueError):
mb.fill_box(h2o, n_compounds=10, density=1000, box=[2, 2, 2])
with pytest.raises(ValueError):
mb.fill_box(compound=[h2o, h2o], n_compounds=[10], density=1000)
with pytest.raises(ValueError):
mb.solvate(solute=h2o, solvent=[h2o], n_solvent=[10, 10], box=[2, 2, 2])
with pytest.raises(ValueError):
mb.fill_region(h2o, n_compounds=[10, 10], region=[2, 2, 2, 4, 4, 4])
def test_write_temp_file(self, h2o):
cwd = os.getcwd() # Must keep track of the temp dir that pytest creates
filled = mb.fill_box(h2o, n_compounds=10, box=[4, 4, 4], temp_file='temp_file1.pdb')
region = mb.fill_region(h2o, 10, [2, 2, 2, 4, 4, 4], temp_file='temp_file2.pdb')
solvated = mb.solvate(filled, h2o, 10, box=[4, 4, 4], temp_file='temp_file3.pdb')
assert os.path.isfile(os.path.join(cwd, 'temp_file1.pdb'))
assert os.path.isfile(os.path.join(cwd, 'temp_file2.pdb'))
assert os.path.isfile(os.path.join(cwd, 'temp_file3.pdb'))
def test_packmol_error(self, h2o):
with pytest.raises(RuntimeError):
filled = mb.fill_box(h2o, n_compounds=10, box=[0, 0, 0])
def test_packmol_warning(self, h2o):
with pytest.warns(UserWarning):
filled = mb.fill_box(h2o, n_compounds=10, box=[1, 1, 1], overlap=100)
| mit | 7,965,127,712,161,682,000 | 44.403974 | 92 | 0.573512 | false |
sheagcraig/python-jss | jss/queryset.py | 1 | 5924 | #!/usr/bin/env python
# Copyright (C) 2014-2017 Shea G Craig
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""queryset.py
Class that adds some extra functionality to a basic list. Used as the
result of all queries in python-jss.
"""
from __future__ import absolute_import
from collections import defaultdict
try:
import cPickle # Python 2.X
except ImportError:
import _pickle as cPickle # Python 3+
import datetime
import os
from .jssobject import DATE_FMT, Identity
STR_FMT = "{0:>{1}} | {2:>{3}} | {4:>{5}}"
class QuerySet(list):
"""A list style collection of JSSObjects.
Listing operations retrieve minimal or overview information for most
object types. For example, we may want to see all the Computers on
the JSS but that does not mean we want to do a full object GET for
each one.
QuerySets hold instances of a single type of JSSObject, and use the
python list API, while adding some extra helper-methods on top.
"""
def __init__(self, objects):
"""Construct a list of JSSObjects.
Args:
objects (sequence of JSSObjects):
Sequences must be of a single class.
"""
if objects and not len({i.__class__ for i in objects}) == 1:
raise ValueError
super(QuerySet, self).__init__(objects)
self.sort()
self.contained_class = objects[0].__class__ if objects else None
def __str__(self):
"""Make data human readable."""
# Make ID, Name first, no matter what.
sort_keys = ["id", "name"]
if self:
sort_keys.extend([
key for key in self[0]._basic_identity.keys() if
key not in sort_keys])
# Build a dict of max lengths per column for table output.
lengths = defaultdict(int)
for item in self:
for key in sort_keys:
val = item._basic_identity[key] or ""
length = max(len(key), len(val))
if length > lengths[key]:
lengths[key] = length
# Build a format string for row output.
format_strings = []
for key in sort_keys:
length = lengths[key]
format_strings.append("{{data[{}]:>{}}}".format(key, length))
cached = 'cached'
cached_format = '| {{cached:>{}}} |'.format(len(cached))
fmt = "| " + " | ".join(format_strings) + cached_format
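        # fmt now renders one table row per object: each identity value is
        # right-aligned to its column width, followed by the cached flag.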
# Begin building output with header lines.
# Contained class can be None
contained_name = self.contained_class.__name__ if self.contained_class is not None else "Empty"
results = ["{} QuerySet".format(contained_name)]
headers = {key: key for key in lengths}
header_line = fmt.format(data=headers, cached="cached")
bar = len(header_line) * '-'
results.extend([bar, header_line, bar])
str_cached = (
lambda i: str(i.cached) if isinstance(i.cached, bool) else 'True')
table = [
fmt.format(data=item._basic_identity, cached=str_cached(item)) for
item in self]
results.extend(table)
results.append(bar)
return "\n".join(results)
def __repr__(self):
"""Make data human readable."""
return "QuerySet({})".format(super(QuerySet, self).__repr__())
def sort(self, **kwargs):
"""Sort list elements by ID.
"""
super(QuerySet, self).sort(key=lambda k: int(k.id))
def sort_by_name(self):
"""Sort list elements by name."""
super(QuerySet, self).sort(key=lambda k: k.name.upper())
def retrieve_all(self):
"""Tell each contained object to retrieve its data from the JSS
This can take a long time given a large number of objects,
and depending on the size of each object.
Returns:
self (QuerySet) to allow method chaining.
"""
for obj in self:
if not obj.cached:
obj.retrieve()
return self
def save_all(self):
"""Tell each contained object to save its data to the JSS
This can take a long time given a large number of objects,
and depending on the size of each object.
Returns:
self (QuerySet) to allow method chaining.
"""
for obj in self:
obj.save()
return self
def invalidate(self):
"""Clear the cache datetime for all contents.
This causes objects to retrieve their data again when accessed.
"""
for i in self: i.cached = False
def names(self):
"""Return a generator of contents names"""
return (item.name for item in self)
def ids(self):
"""Return a generator of contents ids"""
return (item.id for item in self)
@classmethod
def from_response(cls, obj_class, response, jss=None, **kwargs):
"""Build a QuerySet from a listing Response."""
response_objects = (
i for i in response if i is not None and i.tag != "size")
dicts = (
{child.tag: child.text for child in item} for item in
response_objects)
identities = (Identity(d) for d in dicts)
objects = [obj_class(jss, data=i, **kwargs) for i in identities]
return cls(objects)
| gpl-3.0 | -5,587,489,524,983,974,000 | 31.549451 | 103 | 0.603646 | false |
openstack/python-troveclient | troveclient/client.py | 1 | 19150 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 Rackspace Hosting
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
OpenStack Client interface. Handles the REST calls and responses.
"""
import logging
from keystoneauth1 import adapter
from oslo_utils import importutils
import requests
from urllib import parse as urlparse
from troveclient.apiclient import client
from troveclient import exceptions
from troveclient import service_catalog
try:
import eventlet as sleep_lib
except ImportError:
import time as sleep_lib
try:
import json
except ImportError:
import simplejson as json
# Python 2.5 compat fix
if not hasattr(urlparse, 'parse_qsl'):
import cgi
urlparse.parse_qsl = cgi.parse_qsl
osprofiler_web = importutils.try_import("osprofiler.web")
class TroveClientMixin(object):
def get_database_api_version_from_endpoint(self):
magic_tuple = urlparse.urlsplit(self.management_url)
scheme, netloc, path, query, frag = magic_tuple
v = path.split("/")[1]
valid_versions = ['v1.0']
if v not in valid_versions:
msg = "Invalid client version '%s'. must be one of: %s" % (
(v, ', '.join(valid_versions)))
raise exceptions.UnsupportedVersion(msg)
return v[1:]
class HTTPClient(TroveClientMixin):
USER_AGENT = 'python-troveclient'
def __init__(self, user, password, projectid, auth_url, insecure=False,
timeout=None, tenant_id=None, proxy_tenant_id=None,
proxy_token=None, region_name=None,
endpoint_type='publicURL', service_type=None,
service_name=None, database_service_name=None, retries=None,
http_log_debug=False, cacert=None, bypass_url=None,
auth_system='keystone', auth_plugin=None):
if auth_system and auth_system != 'keystone' and not auth_plugin:
raise exceptions.AuthSystemNotFound(auth_system)
if not auth_url and auth_system and auth_system != 'keystone':
auth_url = auth_plugin.get_auth_url()
if not auth_url:
raise exceptions.EndpointNotFound()
self.user = user
self.password = password
self.projectid = projectid
self.tenant_id = tenant_id
self.auth_url = auth_url.rstrip('/') if auth_url else auth_url
self.version = 'v1'
self.region_name = region_name
self.endpoint_type = endpoint_type
self.service_type = service_type
self.service_name = service_name
self.database_service_name = database_service_name
self.retries = int(retries or 0)
self.http_log_debug = http_log_debug
self.management_url = None
self.auth_token = None
self.proxy_token = proxy_token
self.proxy_tenant_id = proxy_tenant_id
self.timeout = timeout
self.bypass_url = bypass_url
self.auth_system = auth_system
self.auth_plugin = auth_plugin
if insecure:
self.verify_cert = False
else:
if cacert:
self.verify_cert = cacert
else:
self.verify_cert = True
self.auth_system = auth_system
self.auth_plugin = auth_plugin
self.LOG = logging.getLogger(__name__)
if self.http_log_debug and not self.LOG.handlers:
ch = logging.StreamHandler()
self.LOG.setLevel(logging.DEBUG)
self.LOG.addHandler(ch)
if hasattr(requests, 'logging'):
requests.logging.getLogger(requests.__name__).addHandler(ch)
def http_log_req(self, args, kwargs):
if not self.http_log_debug:
return
string_parts = ['curl -i']
for element in args:
if element in ('GET', 'POST', 'DELETE', 'PUT'):
string_parts.append(' -X %s' % element)
else:
string_parts.append(' %s' % element)
for element in kwargs['headers']:
header = ' -H "%s: %s"' % (element, kwargs['headers'][element])
string_parts.append(header)
if 'data' in kwargs:
string_parts.append(" -d '%s'" % (kwargs['data']))
self.LOG.debug("\nREQ: %s\n", "".join(string_parts))
def http_log_resp(self, resp):
if not self.http_log_debug:
return
self.LOG.debug(
"RESP: [%s] %s\nRESP BODY: %s\n",
resp.status_code,
resp.headers,
resp.text)
def request(self, url, method, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
kwargs['headers']['User-Agent'] = self.USER_AGENT
kwargs['headers']['Accept'] = 'application/json'
if osprofiler_web:
kwargs['headers'].update(osprofiler_web.get_trace_id_headers())
if 'body' in kwargs:
kwargs['headers']['Content-Type'] = 'application/json'
kwargs['data'] = json.dumps(kwargs['body'])
del kwargs['body']
if self.timeout:
kwargs.setdefault('timeout', self.timeout)
self.http_log_req((url, method,), kwargs)
resp = requests.request(
method,
url,
verify=self.verify_cert,
**kwargs)
self.http_log_resp(resp)
if resp.text:
try:
body = json.loads(resp.text)
except ValueError:
                body = None
else:
body = None
if resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _cs_request(self, url, method, **kwargs):
auth_attempts = 0
attempts = 0
backoff = 1
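        # Failed attempts are retried with exponential backoff: 1s, 2s, 4s, ...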
while True:
attempts += 1
if not self.management_url or not self.auth_token:
self.authenticate()
kwargs.setdefault('headers', {})['X-Auth-Token'] = self.auth_token
if self.projectid:
kwargs['headers']['X-Auth-Project-Id'] = self.projectid
try:
resp, body = self.request(self.management_url + url, method,
**kwargs)
return resp, body
except exceptions.BadRequest:
if attempts > self.retries:
raise
except exceptions.Unauthorized:
if auth_attempts > 0:
raise
self.LOG.debug("Unauthorized, reauthenticating.")
self.management_url = self.auth_token = None
# First reauth. Discount this attempt.
attempts -= 1
auth_attempts += 1
continue
except exceptions.ClientException as e:
if attempts > self.retries:
raise
if 500 <= e.code <= 599:
pass
else:
raise
except requests.exceptions.ConnectionError as e:
# Catch a connection refused from requests.request
self.LOG.debug("Connection refused: %s", e)
msg = 'Unable to establish connection: %s' % e
raise exceptions.ConnectionRefused(msg)
self.LOG.debug(
"Failed attempt(%s of %s), retrying in %s seconds",
attempts, self.retries, backoff)
sleep_lib.sleep(backoff)
backoff *= 2
def get(self, url, **kwargs):
return self._cs_request(url, 'GET', **kwargs)
def patch(self, url, **kwargs):
return self._cs_request(url, 'PATCH', **kwargs)
def post(self, url, **kwargs):
return self._cs_request(url, 'POST', **kwargs)
def put(self, url, **kwargs):
return self._cs_request(url, 'PUT', **kwargs)
def delete(self, url, **kwargs):
return self._cs_request(url, 'DELETE', **kwargs)
def _extract_service_catalog(self, url, resp, body, extract_token=True):
"""See what the auth service told us and process the response.
We may get redirected to another site, fail or actually get
back a service catalog with a token and our endpoints.
"""
        if resp.status_code == 200:  # content must always be present
try:
self.auth_url = url
self.service_catalog = \
service_catalog.ServiceCatalog(body)
if extract_token:
self.auth_token = self.service_catalog.get_token()
management_url = self.service_catalog.url_for(
attr='region',
filter_value=self.region_name,
endpoint_type=self.endpoint_type,
service_type=self.service_type,
service_name=self.service_name,
database_service_name=self.database_service_name)
self.management_url = management_url.rstrip('/')
return None
except exceptions.AmbiguousEndpoints:
print("Found more than one valid endpoint. Use a more "
"restrictive filter")
raise
except KeyError:
raise exceptions.AuthorizationFailure()
except exceptions.EndpointNotFound:
print("Could not find any suitable endpoint. Correct region?")
raise
elif resp.status_code == 305:
return resp['location']
else:
raise exceptions.from_response(resp, body, url)
def _fetch_endpoints_from_auth(self, url):
"""We have a token, but don't know the final endpoint for
the region. We have to go back to the auth service and
ask again. This request requires an admin-level token
to work. The proxy token supplied could be from a low-level enduser.
We can't get this from the keystone service endpoint, we have to use
the admin endpoint.
This will overwrite our admin token with the user token.
"""
# GET ...:5001/v2.0/tokens/#####/endpoints
url = '/'.join([url, 'tokens', '%s?belongsTo=%s'
% (self.proxy_token, self.proxy_tenant_id)])
self.LOG.debug("Using Endpoint URL: %s", url)
resp, body = self.request(url, "GET",
headers={'X-Auth-Token': self.auth_token})
return self._extract_service_catalog(url, resp, body,
extract_token=False)
def authenticate(self):
magic_tuple = urlparse.urlsplit(self.auth_url)
scheme, netloc, path, query, frag = magic_tuple
port = magic_tuple.port
if port is None:
port = 80
path_parts = path.split('/')
for part in path_parts:
if len(part) > 0 and part[0] == 'v':
self.version = part
break
# TODO(sandy): Assume admin endpoint is 35357 for now.
# Ideally this is going to have to be provided by the service catalog.
new_netloc = netloc.replace(':%d' % port, ':%d' % (35357,))
admin_url = urlparse.urlunsplit((scheme, new_netloc,
path, query, frag))
auth_url = self.auth_url
if self.version == "v2.0":
while auth_url:
if not self.auth_system or self.auth_system == 'keystone':
auth_url = self._v2_auth(auth_url)
else:
auth_url = self._plugin_auth(auth_url)
# Are we acting on behalf of another user via an
# existing token? If so, our actual endpoints may
# be different than that of the admin token.
if self.proxy_token:
self._fetch_endpoints_from_auth(admin_url)
# Since keystone no longer returns the user token
# with the endpoints any more, we need to replace
# our service account token with the user token.
self.auth_token = self.proxy_token
else:
try:
while auth_url:
auth_url = self._v1_auth(auth_url)
            # In some configurations Trove redirects to the v2.0 keystone
            # endpoint, and the new location only contains the hostname and
            # port rather than the real endpoint.
except exceptions.AuthorizationFailure:
if auth_url.find('v2.0') < 0:
auth_url = auth_url + '/v2.0'
self._v2_auth(auth_url)
# Allows for setting an endpoint not defined in the catalog
if self.bypass_url is not None and self.bypass_url != '':
self.management_url = self.bypass_url
def _plugin_auth(self, auth_url):
return self.auth_plugin.authenticate(self, auth_url)
def _v1_auth(self, url):
if self.proxy_token:
raise exceptions.NoTokenLookupException()
headers = {'X-Auth-User': self.user,
'X-Auth-Key': self.password}
if self.projectid:
headers['X-Auth-Project-Id'] = self.projectid
resp, body = self.request(url, 'GET', headers=headers)
if resp.status_code in (200, 204): # in some cases we get No Content
try:
mgmt_header = 'x-server-management-url'
self.management_url = resp.headers[mgmt_header].rstrip('/')
self.auth_token = resp.headers['x-auth-token']
self.auth_url = url
except (KeyError, TypeError):
raise exceptions.AuthorizationFailure()
elif resp.status_code == 305:
return resp.headers['location']
else:
raise exceptions.from_response(resp, body, url)
def _v2_auth(self, url):
"""Authenticate against a v2.0 auth service."""
body = {"auth": {
"passwordCredentials": {"username": self.user,
"password": self.password}}}
if self.projectid:
body['auth']['tenantName'] = self.projectid
elif self.tenant_id:
body['auth']['tenantId'] = self.tenant_id
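        # The resulting payload looks like:
        #   {"auth": {"passwordCredentials": {"username": ..., "password": ...},
        #             "tenantName": ...}}
        # with "tenantId" used instead when only the tenant id is known.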
self._authenticate(url, body)
def _authenticate(self, url, body):
"""Authenticate and extract the service catalog."""
token_url = url + "/tokens"
# Make sure we follow redirects when trying to reach Keystone
resp, body = self.request(
token_url,
"POST",
body=body,
allow_redirects=True)
return self._extract_service_catalog(url, resp, body)
class SessionClient(adapter.LegacyJsonAdapter, TroveClientMixin):
def __init__(self, session, auth, **kwargs):
self.database_service_name = kwargs.pop('database_service_name', None)
super(SessionClient, self).__init__(session=session,
auth=auth,
**kwargs)
# FIXME(jamielennox): this is going to cause an authentication request
# on client init. This is different to how the other clients work.
endpoint = self.get_endpoint()
if not endpoint:
raise exceptions.EndpointNotFound()
self.management_url = endpoint.rstrip('/')
def request(self, url, method, **kwargs):
raise_exc = kwargs.pop('raise_exc', True)
resp, body = super(SessionClient, self).request(url,
method,
raise_exc=False,
**kwargs)
if raise_exc and resp.status_code >= 400:
raise exceptions.from_response(resp, body, url)
return resp, body
def _construct_http_client(username=None, password=None, project_id=None,
auth_url=None, insecure=False, timeout=None,
proxy_tenant_id=None, proxy_token=None,
region_name=None, endpoint_type='publicURL',
service_type='database',
service_name=None, database_service_name=None,
retries=None,
http_log_debug=False,
auth_system='keystone', auth_plugin=None,
cacert=None, bypass_url=None, tenant_id=None,
session=None,
**kwargs):
if session:
try:
kwargs.setdefault('interface', endpoint_type)
except KeyError:
pass
return SessionClient(session=session,
service_type=service_type,
service_name=service_name,
region_name=region_name,
database_service_name=database_service_name,
connect_retries=retries,
**kwargs)
else:
return HTTPClient(username,
password,
projectid=project_id,
auth_url=auth_url,
insecure=insecure,
timeout=timeout,
tenant_id=tenant_id,
proxy_token=proxy_token,
proxy_tenant_id=proxy_tenant_id,
region_name=region_name,
endpoint_type=endpoint_type,
service_type=service_type,
service_name=service_name,
database_service_name=database_service_name,
retries=retries,
http_log_debug=http_log_debug,
cacert=cacert,
bypass_url=bypass_url,
auth_system=auth_system,
auth_plugin=auth_plugin,
)
def get_version_map():
return {
'1.0': 'troveclient.v1.client.Client',
}
def Client(version, *args, **kwargs):
version_map = get_version_map()
client_class = client.BaseClient.get_class('database',
version, version_map)
return client_class(*args, **kwargs)
| apache-2.0 | 2,275,167,824,754,101,500 | 36.920792 | 78 | 0.542924 | false |
jhdulaney/dnf | tests/test_history_undo.py | 1 | 11440 | # -*- coding: utf-8 -*-
# Copyright (C) 2013-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
"""Tests of the history undo command."""
from __future__ import absolute_import
from __future__ import unicode_literals
from hawkey import split_nevra, SwdbReason
from dnf.exceptions import PackagesNotAvailableError, PackagesNotInstalledError
from dnf.history import NEVRAOperations
from dnf.package import Package
from dnf.transaction import ERASE, DOWNGRADE, INSTALL, REINSTALL, UPGRADE
from dnf.transaction import TransactionItem
import tests.support
class BaseTest(tests.support.DnfBaseTestCase):
"""Unit tests of dnf.Base."""
REPOS = ['main', 'updates']
def _create_item_matcher(self, op_type, installed=None, erased=None,
obsoleted=[], reason=SwdbReason.UNKNOWN):
"""Create a new instance of dnf.transaction.TransactionItem matcher."""
attrs = {
'op_type': op_type,
'installed': self._create_package_matcher(installed) if installed else installed,
'erased': self._create_package_matcher(erased) if erased else erased,
'obsoleted': [self._create_package_matcher(nevra) for nevra in obsoleted],
'reason': reason
}
return tests.support.ObjectMatcher(TransactionItem, attrs)
def _create_package_matcher(self, nevra_str):
"""Create a new instance of dnf.package.Package matcher."""
nevra = split_nevra(nevra_str)
attrs = {'name': nevra.name,
'epoch': nevra.epoch,
'version': nevra.version,
'release': nevra.release,
'arch': nevra.arch}
return tests.support.ObjectMatcher(Package, attrs)
def test_history_undo_operations_downgrade(self):
"""Test history_undo_operations with a downgrade."""
operations = NEVRAOperations()
operations.add(
'Downgrade',
'pepper-20-0.x86_64',
'pepper-20-1.x86_64',
('lotus-3-16.x86_64',)
)
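        # NEVRAOperations.add() records the operation name, the NEVRA that was
        # installed, the NEVRA it replaced and, optionally, the NEVRAs it
        # obsoleted: here pepper-20-1 was downgraded to pepper-20-0, which
        # obsoleted lotus-3-16.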
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
UPGRADE, installed='pepper-20-1.x86_64', erased='pepper-20-0.x86_64'))
self.assertEqual(next(transaction_it), self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64', reason=SwdbReason.USER))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_downgrade_notavailable(self):
"""Test history_undo_operations with an unavailable downgrade."""
operations = NEVRAOperations()
operations.add('Downgrade', 'pepper-20-0.x86_64', 'pepper-20-2.x86_64')
with self.base, self.assertRaises(PackagesNotAvailableError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'pepper-20-2.x86_64')
def test_history_undo_operations_downgrade_notinstalled(self):
"""Test history_undo_operations with a not installed downgrade."""
operations = NEVRAOperations()
operations.add('Downgrade', 'lotus-3-0.x86_64', 'lotus-3-16.x86_64')
with self.base, self.assertRaises(PackagesNotInstalledError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'lotus-3-0.x86_64')
def test_history_undo_operations_erase(self):
"""Test history_undo_operations with an erase."""
operations = NEVRAOperations()
operations.add('Erase', 'lotus-3-16.x86_64')
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64', reason=SwdbReason.USER))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_erase_twoavailable(self):
"""Test history_undo_operations with an erase available in two repos."""
operations = NEVRAOperations()
operations.add('Erase', 'lotus-3-16.x86_64')
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64', reason=SwdbReason.USER))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_erase_notavailable(self):
"""Test history_undo_operations with an unavailable erase."""
operations = NEVRAOperations()
operations.add('Erase', 'hole-1-1.x86_64')
with self.base, self.assertRaises(PackagesNotAvailableError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'hole-1-1.x86_64')
def test_history_undo_operations_install(self):
"""Test history_undo_operations with an install."""
operations = NEVRAOperations()
operations.add('Install', 'pepper-20-0.x86_64', obsoleted_nevras=('lotus-3-16.x86_64',))
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
ERASE, erased='pepper-20-0.x86_64'))
self.assertEqual(next(transaction_it), self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64', reason=SwdbReason.USER))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_install_notinstalled(self):
"""Test history_undo_operations with a not installed install."""
operations = NEVRAOperations()
operations.add('Install', 'mrkite-2-0.x86_64')
with self.base, self.assertRaises(PackagesNotInstalledError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'mrkite-2-0.x86_64')
def test_history_undo_operations_reinstall(self):
"""Test history_undo_operations with a reinstall."""
operations = NEVRAOperations()
operations.add(
'Reinstall',
'pepper-20-0.x86_64',
'pepper-20-0.x86_64',
('hole-1-1.x86_64',)
)
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
REINSTALL, installed='pepper-20-0.x86_64', erased='pepper-20-0.x86_64',
obsoleted=('hole-1-1.x86_64',)))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_reinstall_notavailable(self):
"""Test history_undo_operations with an unvailable reinstall."""
operations = NEVRAOperations()
operations.add('Reinstall', 'mrkite-2-0.x86_64', 'mrkite-2-0.x86_64')
with self.base, self.assertRaises(PackagesNotInstalledError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'mrkite-2-0.x86_64')
def test_history_undo_operations_reinstall_notinstalled(self):
"""Test history_undo_operations with a not installed reinstall."""
operations = NEVRAOperations()
operations.add('Reinstall', 'hole-1-1.x86_64', 'hole-1-1.x86_64')
with self.base, self.assertRaises(PackagesNotAvailableError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'hole-1-1.x86_64')
def test_history_undo_operations_reinstall_notinstalled_obsoleted(self):
"""Test history_undo_operations with a not installed obsoleted of a reinstall."""
operations = NEVRAOperations()
operations.add(
'Reinstall',
'pepper-20-0.x86_64',
'pepper-20-0.x86_64',
('lotus-3-16.x86_64',)
)
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
REINSTALL, installed='pepper-20-0.x86_64', erased='pepper-20-0.x86_64',
obsoleted=()))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_update(self):
"""Test history_undo_operations with an update."""
operations = NEVRAOperations()
operations.add('Update', 'tour-5-0.noarch', 'tour-4.6-1.noarch', ('lotus-3-16.x86_64',))
with self.base:
self.base._history_undo_operations(operations, 0)
transaction_it = iter(self.base.transaction)
self.assertEqual(next(transaction_it), self._create_item_matcher(
DOWNGRADE, installed='tour-4.6-1.noarch', erased='tour-5-0.noarch'))
self.assertEqual(next(transaction_it), self._create_item_matcher(
INSTALL, installed='lotus-3-16.x86_64', reason=SwdbReason.USER))
self.assertRaises(StopIteration, next, transaction_it)
def test_history_undo_operations_update_notavailable(self):
"""Test history_undo_operations with an unavailable update."""
operations = NEVRAOperations()
operations.add('Update', 'tour-5-0.noarch', 'tour-4.6-2.noarch')
with self.base, self.assertRaises(PackagesNotAvailableError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'tour-4.6-2.noarch')
def test_history_undo_operations_update_notinstalled(self):
"""Test history_undo_operations with a not installed update."""
operations = NEVRAOperations()
operations.add('Update', 'lotus-4-0.x86_64', 'lotus-3-16.x86_64')
with self.base, self.assertRaises(PackagesNotInstalledError) as context:
self.base._history_undo_operations(operations, 0)
self.assertEqual(context.exception.pkg_spec, 'lotus-4-0.x86_64')
| gpl-2.0 | -4,553,311,528,763,439,000 | 43.862745 | 96 | 0.657517 | false |
cybergreen-net/etl2 | tests/etl2/etlharness.py | 1 | 2500 | import ETL
import os
import tempfile
import gzip
from io import BytesIO, StringIO
class EtlHarness:
def __init__(self, feed, out_prefix):
root_dir = tempfile.mkdtemp()
self.feed_name = feed
self.out_prefix = out_prefix
self.source_root = os.path.join(root_dir, "raw")
self.source_dir = os.path.join(self.source_root, self.out_prefix)
self.dest_root = os.path.join(root_dir, "clean")
self.agg_root = os.path.join(root_dir, "agg")
self.dest_dir = os.path.join(self.dest_root, self.out_prefix)
os.makedirs(self.source_dir)
os.makedirs(self.dest_dir)
print(self.source_dir, self.dest_dir)
# doesn't effect shell env
os.environ["CYBERGREEN_SOURCE_ROOT"] = self.source_root
os.environ["CYBERGREEN_DEST_ROOT"] = self.dest_root
os.environ["CYBERGREEN_AGG_ROOT"] = self.agg_root
os.environ["CYBERGREEN_MAXMIND_DB_PATH"]= "tests/fixtures/maxminddb/"
os.environ["CYBERGREEN_PYASN_DB_PATH"]= "tests/fixtures/pyasndb/"
os.environ["CYBERGREEN_AGGREGATION_PATH"] = ""
os.environ["CYBERGREEN_BUILD_ENV"]="test"
os.environ["DD_API_KEY"] = ""
os.environ["RDS_PASSWORD"] = ""
os.environ["REDSHIFT_PASSWORD"] = ""
os.environ["AWS_ACCESS_KEY_ID"] = ""
os.environ["AWS_SECRET_ACCESS_KEY"] = ""
os.environ["CYBERGREEN_CERTBR_HOST"] = ""
os.environ["CYBERGREEN_CERTBR_PATH"] = ""
os.environ["CYBERGREEN_OPENX_HOST"] = ""
os.environ["CYBERGREEN_OPENX_PATH"] = ""
def _write_source_file(self, file_name, data):
file_path = os.path.join(self.source_dir, file_name)
with gzip.open(file_path, "w") as f:
f.write(data.encode('ascii'))
def _read_dest_file(self, file_name):
file_path = os.path.join(self.dest_dir, file_name)
with open(file_path, "r") as f:
return f.readlines()
def _get_etl_output(self, data):
#self._write_source_file("parsed.20000101.out.gz", data)
in_handle = BytesIO(data.encode())
out_handle = BytesIO()
etl = ETL.etl_process(event_date="2000-W01", feed=self.feed_name, config_path="configs/config.json",
in_handle=in_handle, out_handle=out_handle)
# Rewind the file
out_handle.seek(0)
byte_lines = out_handle.readlines()
str_lines = [line.decode("utf-8") for line in byte_lines]
return str_lines, etl
| gpl-3.0 | -13,470,691,697,173,366 | 38.68254 | 108 | 0.6052 | false |
LuisAlejandro/condiment | condiment/common/fabric/docker.py | 1 | 13369 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Luis Alejandro Martínez Faneyth
#
# This file is part of Condiment.
#
# Condiment is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Condiment is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains directives to manage Docker containers.
This module define funtions to accomplish the following tasks:
- Creating a Debian (stable) minimal base (Docker) image.
- Creating a Condiment environment (Docker) image.
- Execute commands on a Docker image to create commands.
- Destroy all images and/or containers.
- Other management commands (reset, updat, login, etc).
.. versionadded:: 0.2
"""
import sys
import time
import json
import paramiko
from contextlib import nested
from fabric.api import env, local, hide, run, shell_env, cd
from condiment.common.logger import get_logger
log = get_logger()
def docker_generate_debian_base_image():
"""
Generate a Debian base (Docker) image.
This function generates a minimal Debian (stable) chroot using debootstrap,
then configures apt, cleans and truncates the filesystem, and finally
imports it to docker.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Generating a fresh Debian image for Docker ...')
local(('sudo bash %(debian_base_image_script)s '
'luisalejandro/debian-%(arch)s '
'wheezy %(arch)s') % env, capture=False)
docker_stop_container()
def docker_generate_condiment_base_image():
"""
Generate a Condiment environment (Docker) image.
    This function runs the Condiment base image script inside a container
    created from the Debian base image and commits it as the Condiment
    base image.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Creating a new Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(mounts)s %(dvars)s %(debian_base_image)s '
'bash %(condiment_base_image_script)s"') % env, capture=False)
log.info('Creating the runtime container ...')
local(('sudo bash -c '
'"%(docker)s commit %(condiment_runtime_container)s '
'%(condiment_base_image)s"') % env, capture=True)
docker_stop_container()
def docker_kill_all_containers():
"""
Destroy all containers listed with ``docker ps -aq``.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Listing available containers ...')
containers = local(('sudo bash -c "%(docker)s ps -aq"') % env,
capture=True).split('\n')
for container in containers:
if container:
log.info('Checking if container "%s" exists ...' % container)
inspect = json.loads(local(('sudo bash -c '
'"%s inspect %s"') % (env.docker,
container),
capture=True))
if inspect:
log.info('Destroying container "%s" ...' % container)
local(('sudo bash -c '
'"%s stop --time 1 %s"') % (env.docker, container),
capture=True)
local(('sudo bash -c '
'"%s rm -fv %s"') % (env.docker, container),
capture=True)
def docker_kill_condiment_images():
"""
Destroy all Docker images made for Condiment.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Listing available images ...')
images = [env.condiment_base_image, env.condiment_runtime_image,
env.debian_base_image]
for image in images:
if image:
log.info('Checking if image "%s" exists ...' % image)
inspect = json.loads(local(('sudo bash -c '
'"%s inspect %s"') % (env.docker,
image),
capture=True))
if inspect:
log.info('Destroying image "%s" ...' % image)
local(('sudo bash -c '
'"%s rmi -f %s"') % (env.docker, image),
capture=True)
def docker_kill_all_images():
"""
Destroy all Docker images.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Listing available images ...')
images = local(('sudo bash -c "%(docker)s images -aq"') % env,
capture=True).split('\n')
for image in images:
if image:
log.info('Checking if image "%s" exists ...' % image)
inspect = json.loads(local(('sudo bash -c '
'"%s inspect %s"') % (env.docker,
image),
capture=True))
if inspect:
log.info('Destroying image "%s" ...' % image)
local(('sudo bash -c '
'"%s rmi -f %s"') % (env.docker, image),
capture=True)
def docker_pull_debian_base_image():
"""
Pull the Debian base image from the Docker index.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Downloading the Debian base image ...')
local(('sudo bash -c '
'"%(docker)s pull %(debian_base_image)s"') % env, capture=False)
docker_stop_container()
def docker_pull_condiment_base_image():
"""
Pull the Condiment environment image from the Docker index.
.. versionadded:: 0.2
"""
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Downloading the Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s pull %(condiment_base_image)s"') % env, capture=False)
log.info('Creating the runtime container ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(condiment_base_image)s true"') % env, capture=False)
docker_stop_container()
def docker_check_image():
"""
Check if the runtime image exists, build environment if not.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Checking if we have a runtime image ...')
state = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_image)s"') % env,
capture=True))
if not state:
from condiment.common.fabric.django import django_syncdb
docker_pull_debian_base_image()
docker_pull_condiment_base_image()
django_syncdb()
def docker_check_container():
"""
Check if the runtime container is up, start if not.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Checking if the runtime container is up ...')
state = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_container)s"') % env,
capture=True))
if state:
if not state[0]['State']['Running']:
docker_stop_container()
docker_start_container()
else:
docker_start_container()
docker_check_ssh_to_container()
def docker_check_ssh_to_container():
"""
Test if SSH is up inside the runtime container.
.. versionadded:: 0.2
"""
log.info('Testing communication with container ...')
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
tries = 0
while True:
tries += 1
try:
time.sleep(2)
ssh.connect(hostname=env.host_string, port=env.port,
username=env.user, password=env.password)
except Exception, e:
log.info('SSH is not ready yet: %s' % e)
else:
break
finally:
ssh.close()
if tries == 10:
log.error('Failed to connect to the container.')
sys.exit(1)
log.info('Communication with the container succeded!')
def docker_start_container():
"""
Start the runtime container.
.. versionadded:: 0.2
"""
docker_check_image()
with hide('warnings', 'stderr', 'running'):
log.info('Starting the runtime container ...')
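        # Port 22222 is forwarded to the container's SSH port (used by
        # docker_check_ssh_to_container) and 8000 presumably exposes the
        # Django development server running inside the environment.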
local(('sudo bash -c '
'"%(docker)s run -d '
'-p 127.0.0.1:22222:22 '
'-p 127.0.0.1:8000:8000 '
'--name %(condiment_runtime_container)s '
'%(mounts)s %(dvars)s %(condiment_runtime_image)s '
'bash %(condiment_start_container_script)s"') % env, capture=True)
def docker_stop_container():
"""
Stop & commit the runtime container. Removes intermediate container.
.. versionadded:: 0.2
"""
with hide('warnings', 'stderr', 'running'):
log.info('Checking if the runtime container is up ...')
runtime_id = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_image)s"') % env,
capture=True))
inspect = json.loads(local(('sudo bash -c '
'"%(docker)s inspect '
'%(condiment_runtime_container)s"') % env,
capture=True))
if inspect:
log.info('Stopping the runtime container ...')
local(('sudo bash -c '
'"%(docker)s stop --time 1 '
'%(condiment_runtime_container)s"') % env,
capture=True)
local(('sudo bash -c '
'"%(docker)s commit %(condiment_runtime_container)s '
'%(condiment_runtime_image)s"') % env, capture=True)
local(('sudo bash -c '
'"%(docker)s rm -fv %(condiment_runtime_container)s"') % env,
capture=True)
if runtime_id:
# This way all the dictionary keys are lower case
lower_runtime_id = dict([(k.lower(), v) for k, v in runtime_id[0].items()])
local(('sudo bash -c '
'"%s rmi -f %s"') % (env.docker, lower_runtime_id['id']),
capture=True)
def docker_login_container():
"""
Login into the runtime container.
.. versionadded:: 0.2
"""
docker_check_container()
with nested(hide('warnings', 'stderr', 'running'),
shell_env(**env.fvars), cd(env.basedir)):
log.info('Opening a shell inside the runtime container ...')
log.info('(When you are done, press CTRL+D to get out).')
run('bash')
def docker_update_container():
"""
Update the runtime container with latest changes to dependencies.
This function executes the script that generates the Condiment environment
image inside the runtime container so that it picks up the changes
made to the environment dependencies.
.. versionadded:: 0.2
"""
docker_check_image()
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Updating the Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(mounts)s %(dvars)s %(condiment_runtime_image)s '
'bash %(condiment_base_image_script)s"') % env, capture=False)
docker_stop_container()
def docker_reset_container():
"""
Restore the Condiment environment image to its original state.
.. versionadded:: 0.2
"""
from condiment.common.fabric.django import django_syncdb
docker_check_image()
docker_stop_container()
with hide('warnings', 'stderr', 'running'):
log.info('Restoring the Condiment base image ...')
local(('sudo bash -c '
'"%(docker)s run -it --name %(condiment_runtime_container)s '
'%(condiment_base_image)s true"') % env, capture=False)
docker_stop_container()
django_syncdb()
| gpl-3.0 | 6,817,259,882,447,641,000 | 28.973094 | 87 | 0.537777 | false |
apache/phoenix | bin/traceserver.py | 2 | 7277 | #!/usr/bin/env python
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
#
# Script to handle launching the trace server process.
#
# usage: traceserver.py [start|stop]
#
from __future__ import print_function
from phoenix_utils import tryDecode
import datetime
import getpass
import os
import os.path
import signal
import subprocess
import sys
import tempfile
try:
import daemon
daemon_supported = True
except ImportError:
# daemon script not supported on some platforms (windows?)
daemon_supported = False
import phoenix_utils
phoenix_utils.setPath()
command = None
args = sys.argv
if len(args) > 1:
if tryDecode(args[1]) == 'start':
command = 'start'
elif tryDecode(args[1]) == 'stop':
command = 'stop'
if command:
args = args[2:]
if os.name == 'nt':
args = subprocess.list2cmdline(args[1:])
else:
import pipes # pipes module isn't available on Windows
args = " ".join([pipes.quote(tryDecode(v)) for v in args[1:]])
# HBase configuration folder path (where hbase-site.xml reside) for
# HBase/Phoenix client side property override
hbase_config_path = phoenix_utils.hbase_conf_dir
# default paths ## TODO: add windows support
java_home = os.getenv('JAVA_HOME')
hbase_pid_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
phoenix_log_dir = os.path.join(tempfile.gettempdir(), 'phoenix')
phoenix_file_basename = 'phoenix-%s-traceserver' % getpass.getuser()
phoenix_log_file = '%s.log' % phoenix_file_basename
phoenix_out_file = '%s.out' % phoenix_file_basename
phoenix_pid_file = '%s.pid' % phoenix_file_basename
opts = os.getenv('PHOENIX_TRACESERVER_OPTS', '')
# load hbase-env.??? to extract JAVA_HOME, HBASE_PID_DIR, HBASE_LOG_DIR
hbase_env_path = None
hbase_env_cmd = None
if os.name == 'posix':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.sh')
hbase_env_cmd = ['bash', '-c', 'source %s && env' % hbase_env_path]
elif os.name == 'nt':
hbase_env_path = os.path.join(hbase_config_path, 'hbase-env.cmd')
hbase_env_cmd = ['cmd.exe', '/c', 'call %s & set' % hbase_env_path]
if not hbase_env_path or not hbase_env_cmd:
sys.stderr.write("hbase-env file unknown on platform {}{}".format(os.name, os.linesep))
sys.exit(-1)
hbase_env = {}
if os.path.isfile(hbase_env_path):
p = subprocess.Popen(hbase_env_cmd, stdout = subprocess.PIPE)
for x in p.stdout:
(k, _, v) = tryDecode(x).partition('=')
hbase_env[k.strip()] = v.strip()
if 'JAVA_HOME' in hbase_env:
java_home = hbase_env['JAVA_HOME']
if 'HBASE_PID_DIR' in hbase_env:
hbase_pid_dir = hbase_env['HBASE_PID_DIR']
if 'HBASE_LOG_DIR' in hbase_env:
phoenix_log_dir = hbase_env['HBASE_LOG_DIR']
if 'PHOENIX_TRACESERVER_OPTS' in hbase_env:
opts = hbase_env['PHOENIX_TRACESERVER_OPTS']
log_file_path = os.path.join(phoenix_log_dir, phoenix_log_file)
out_file_path = os.path.join(phoenix_log_dir, phoenix_out_file)
pid_file_path = os.path.join(hbase_pid_dir, phoenix_pid_file)
if java_home:
java = os.path.join(java_home, 'bin', 'java')
else:
java = 'java'
# " -Xdebug -Xrunjdwp:transport=dt_socket,address=5005,server=y,suspend=n " + \
# " -XX:+UnlockCommercialFeatures -XX:+FlightRecorder -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true" + \
java_cmd = '%(java)s ' + \
'-cp ' + hbase_config_path + os.pathsep + phoenix_utils.hadoop_conf + os.pathsep + \
phoenix_utils.phoenix_traceserver_jar + os.pathsep + phoenix_utils.slf4j_backend_jar + os.pathsep + \
phoenix_utils.phoenix_client_embedded_jar + os.pathsep + phoenix_utils.phoenix_queryserver_jar + \
" -Dproc_phoenixtraceserver" + \
" -Dlog4j.configuration=file:" + os.path.join(phoenix_utils.current_dir, "log4j.properties") + \
" -Dpsql.root.logger=%(root_logger)s" + \
" -Dpsql.log.dir=%(log_dir)s" + \
" -Dpsql.log.file=%(log_file)s" + \
" " + opts + \
" org.apache.phoenix.tracingwebapp.http.Main " + args
if command == 'start':
if not daemon_supported:
sys.stderr.write("daemon mode not supported on this platform{}".format(os.linesep))
sys.exit(-1)
# run in the background
d = os.path.dirname(out_file_path)
if not os.path.exists(d):
os.makedirs(d)
with open(out_file_path, 'a+') as out:
context = daemon.DaemonContext(
pidfile = daemon.PidFile(pid_file_path, 'Trace Server already running, PID file found: %s' % pid_file_path),
stdout = out,
stderr = out,
)
print('starting Trace Server, logging to %s' % log_file_path)
with context:
# this block is the main() for the forked daemon process
child = None
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,DRFA', 'log_dir': phoenix_log_dir, 'log_file': phoenix_log_file}
# notify the child when we're killed
def handler(signum, frame):
if child:
child.send_signal(signum)
sys.exit(0)
signal.signal(signal.SIGTERM, handler)
print('%s launching %s' % (datetime.datetime.now(), cmd))
child = subprocess.Popen(cmd.split())
sys.exit(child.wait())
elif command == 'stop':
if not daemon_supported:
sys.stderr.write("daemon mode not supported on this platform{}".format(os.linesep))
sys.exit(-1)
if not os.path.exists(pid_file_path):
sys.stderr.write("no Trace Server to stop because PID file not found, {}{}"
.format(pid_file_path, os.linesep))
sys.exit(0)
if not os.path.isfile(pid_file_path):
sys.stderr.write("PID path exists but is not a file! {}{}"
.format(pid_file_path, os.linesep))
sys.exit(1)
pid = None
with open(pid_file_path, 'r') as p:
pid = int(p.read())
if not pid:
sys.exit("cannot read PID file, %s" % pid_file_path)
print("stopping Trace Server pid %s" % pid)
with open(out_file_path, 'a+') as out:
out.write("%s terminating Trace Server%s" % (datetime.datetime.now(), os.linesep))
os.kill(pid, signal.SIGTERM)
else:
# run in the foreground using defaults from log4j.properties
cmd = java_cmd % {'java': java, 'root_logger': 'INFO,console', 'log_dir': '.', 'log_file': 'psql.log'}
splitcmd = cmd.split()
os.execvp(splitcmd[0], splitcmd)
| apache-2.0 | 1,508,858,877,130,581,200 | 36.317949 | 129 | 0.639962 | false |
lino-framework/xl | lino_xl/lib/lists/fixtures/demo.py | 1 | 1215 | # -*- coding: UTF-8 -*-
# Copyright 2014-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from lino.api import dd, rt, _
from lino.utils.mldbc import babeld
def objects():
ListType = rt.models.lists.ListType
List = rt.models.lists.List
mailing = babeld(ListType, _("Mailing list"))
yield mailing
discuss = babeld(ListType, _("Discussion group"))
yield discuss
flags = ListType(**dd.str2kw('designation', _("Flags")))
yield flags
yield List(list_type=mailing, **dd.str2kw('designation', _("Announcements")))
yield List(list_type=mailing, **dd.str2kw('designation', _("Weekly newsletter")))
yield List(list_type=discuss, **dd.str2kw('designation', _("General discussion")))
yield List(list_type=discuss, **dd.str2kw('designation', _("Beginners forum")))
yield List(list_type=discuss, **dd.str2kw('designation', _("Developers forum")))
yield List(list_type=flags,
**dd.str2kw('designation', _("PyCon 2014")))
yield List(list_type=flags,
**dd.str2kw('designation', _("Free Software Day 2014")))
yield List(list_type=flags, **dd.str2kw('designation', _("Schools")))
| bsd-2-clause | 5,112,860,678,266,054,000 | 34.735294 | 86 | 0.655967 | false |
EduPepperPDTesting/pepper2013-testing | lms/djangoapps/reportlab/platypus/__init__.py | 1 | 1215 | #Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/__init__.py
__version__='3.3.0'
__doc__='''"Page Layout and Typography Using Scripts" - higher-level framework for flowing documents'''
from reportlab.platypus.flowables import Flowable, Image, Macro, PageBreak, Preformatted, Spacer, XBox, \
CondPageBreak, KeepTogether, TraceInfo, FailOnWrap, FailOnDraw, PTOContainer, \
KeepInFrame, ParagraphAndImage, ImageAndFlowables, ListFlowable, ListItem, FrameBG, \
PageBreakIfNotEmpty
from reportlab.platypus.paragraph import Paragraph, cleanBlockQuotedText, ParaLines
from reportlab.platypus.paraparser import ParaFrag
from reportlab.platypus.tables import Table, TableStyle, CellStyle, LongTable
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate import BaseDocTemplate, NextPageTemplate, PageTemplate, ActionFlowable, \
SimpleDocTemplate, FrameBreak, PageBegin, Indenter, NotAtTopPageBreak
from reportlab.platypus.xpreformatted import XPreformatted
| agpl-3.0 | 1,924,620,570,826,698,800 | 69.470588 | 109 | 0.750617 | false |
lambertdw/Hantek6022API | PyHT6022Tests/LibUsbScopeTest.py | 1 | 5185 | __author__ = 'Robert Cope'
from unittest import TestCase
from PyHT6022.LibUsbScope import Oscilloscope
from PyHT6022.HantekFirmware import stock_firmware, mod_firmware_01
# TODO: Add more unit tests, add unit tests for changing number of active channels.
class BasicTests(TestCase):
def test_find_device(self):
print "Testing finding device and flashing stock firmware."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.close_handle()
def test_flash_firmware(self):
print "Testing flashing multiple firmwares."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware(stock_firmware, supports_single_channel=False)
assert scope.flash_firmware(mod_firmware_01)
assert scope.flash_firmware(stock_firmware, supports_single_channel=False)
assert scope.close_handle()
def test_get_cal_values(self):
print "Testing getting calibration values."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
cal_values = scope.get_calibration_values()
assert cal_values
assert scope.close_handle()
def test_read_data(self):
print "Testing reading data from the oscilloscope."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
ch1_data, _ = scope.read_data(data_size=0x400)
print ch1_data
assert ch1_data
assert scope.close_handle()
def test_read_many_sizes(self):
print "Testing reading many different data sizes"
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
data_size = 0x400
for _ in xrange(11):
print "DATA SIZE", data_size
ch1_data, ch2_data = scope.read_data(data_size=data_size, raw=True)
print len(ch1_data)
print len(ch2_data)
            assert ch1_data
            assert ch2_data
data_size <<= 1
assert scope.close_handle()
def test_set_sample_rate(self):
print "Testing setting the sample rate."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
for rate_index in scope.SAMPLE_RATES.keys():
scope.set_sample_rate(rate_index)
assert scope.close_handle()
def test_set_channel_voltage_range(self):
print "Testing setting the voltage range."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
for vrange in scope.VOLTAGE_RANGES.keys():
assert scope.set_ch1_voltage_range(vrange)
assert scope.set_ch1_voltage_range(vrange)
assert scope.close_handle()
def test_data_scaling(self):
print "Testing setting various scale facotrs and reading."
scale_factor = 0x01
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.set_ch1_voltage_range(scale_factor)
assert scope.set_sample_rate(27)
ch1_data, _ = scope.read_data(0x100000)
ch1_data = scope.scale_read_data(ch1_data, scale_factor)
print "Max:", max(ch1_data), "(V), Min:", min(ch1_data), "(V)"
assert ch1_data
assert scope.close_handle()
def test_set_num_channels(self):
print "Testing setting the number of channels with modified firmware."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware(mod_firmware_01)
assert scope.set_num_channels(1)
assert scope.set_num_channels(2)
assert scope.set_num_channels(1)
assert scope.close_handle()
def test_set_one_channel_and_read(self):
print "Testing setting one channel and reading it."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware(mod_firmware_01)
assert scope.set_ch1_voltage_range(0xA)
assert scope.set_sample_rate(0x10)
assert scope.set_num_channels(1)
ch1_data, ch2_data = scope.read_data(0x4000)
assert ch1_data
assert not ch2_data
assert scope.close_handle()
def test_read_firmware(self):
print "Testing read_firmware method on scope."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.read_firmware()
assert scope.close_handle()
def test_clear_fifo(self):
print "Testing explicitly clearing the FIFO."
scope = Oscilloscope()
assert scope.setup()
assert scope.open_handle()
assert scope.flash_firmware()
assert scope.clear_fifo()
assert scope.close_handle() | gpl-2.0 | -1,547,142,649,452,479,000 | 34.765517 | 83 | 0.630473 | false |
alenz33/elca | elca/res.py | 1 | 4431 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Mon Nov 30 13:28:01 2015
# by: The Resource Compiler for PyQt (Qt v4.8.6)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x03\x3d\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xff\x61\
\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\x74\x77\x61\x72\x65\
\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\x65\x52\x65\x61\x64\
\x79\x71\xc9\x65\x3c\x00\x00\x02\xdf\x49\x44\x41\x54\x78\xda\xa4\
\x53\x4b\x4f\x13\x51\x14\x3e\x33\x9d\x69\xa7\x43\xa9\xb4\xb4\x05\
\x0a\xb1\x91\x97\x90\x80\x0b\x6b\x80\xe0\x6b\xe3\x23\x91\xa8\x09\
\x1a\x17\xba\x34\x71\xeb\x1f\x30\xc4\x90\xb0\x32\xba\x70\xe7\xca\
\x85\xd8\x05\x26\xc4\x84\x68\x4c\x74\xa1\x69\x60\x63\xc0\xc4\x44\
\x6a\xa4\x3c\x42\x69\x69\xc5\xd2\x61\xa6\x33\x9d\xf7\x78\x6e\xcb\
\xcb\xb8\x70\xe1\x49\xce\xcc\x9d\xf3\x9d\xf3\xdd\x7b\xbe\x33\x97\
\x72\x1c\x07\xfe\xc7\x18\xf2\x18\x78\xba\x0c\x0c\xcb\x02\xc3\x30\
\x40\xbb\x5c\x51\x8a\xa2\x46\x31\x3c\x8c\xde\xbd\x9b\xf7\x03\x7d\
\x0e\x37\x9b\xb6\x2d\x2b\x67\x9a\x26\x98\x86\x71\x40\x70\xc8\xae\
\x32\x2e\x67\xbc\xb3\x8d\x0f\xb5\x46\xb8\xa0\x9f\x67\x78\x0a\x83\
\xa2\x62\xf6\x6e\x6c\xa9\x67\xd3\x19\xf9\xae\x6e\xc1\x18\x86\x66\
\xfe\x38\x41\xd5\x1c\xe7\xaa\xd7\x0d\x13\x23\x67\x22\xfd\x65\xc9\
\x02\x4b\x77\x40\xd4\x8d\x3d\x94\x3f\x1a\xe4\xf8\xbe\x63\x75\x6d\
\x33\xc9\xfc\x84\x64\x54\xdb\x9e\xd9\x27\x70\x6c\x3b\x4a\x83\x35\
\x7e\x79\x30\xda\xbf\xb5\xa9\x40\x59\x35\xd5\x74\xae\x9c\xcd\x97\
\x54\x81\xe0\xcd\x01\xae\xa1\x33\xea\x6b\xad\xc8\x0c\x37\x32\xdc\
\xd4\xff\xea\xfd\xfa\xb8\x61\xdb\xf3\xd8\x52\x8e\x26\x09\xa6\xae\
\x8f\xc6\x9a\xb8\x50\x21\x27\x41\xb1\x24\xab\x1f\x17\xb2\xdf\xb2\
\x05\x71\x0c\x89\x4f\x11\x27\x6b\x12\xdb\x46\x6c\x33\x23\x42\x47\
\xab\x37\x44\x6a\x2c\xd4\xa1\x4a\xa0\x6b\xda\xe9\x20\xef\x0a\x96\
\x77\x54\x48\xad\x15\xb3\x86\xae\x3f\x41\xf6\x04\xc1\x14\x49\x02\
\xfc\x4e\x90\x58\x6a\x6d\x3b\x2b\xed\x54\x20\xe4\x67\x83\xa4\x06\
\xbd\x46\x60\xa8\x6a\x17\xd8\x0e\x5f\xc1\x9e\xb3\x79\x41\x40\xf6\
\x84\xae\xaa\xf0\x33\x93\x01\x45\x14\x41\x2a\x95\xc8\x29\x13\x1b\
\x9b\x25\x41\x33\x2d\x70\x51\x14\x4f\x6a\xd0\x6b\x1a\x20\x93\x2c\
\x6a\xa6\xa8\xd9\x4e\x75\x8d\xa3\x02\x22\x13\x8e\x13\xb5\x75\x9e\
\xe1\x32\x29\x8b\xe2\xa4\x0b\xc7\x5c\x41\x40\xd6\x4c\x92\x77\x30\
\x05\xdc\xed\x73\x7e\x47\xf3\xd3\x6e\xb7\xa7\xde\xcf\xe9\xc5\x42\
\xe9\x0a\x1e\xf9\x2d\xeb\xf1\x10\x38\xbe\xeb\x43\xe1\x88\xbf\x41\
\xb0\x28\xd0\x04\x55\xc1\x9a\x25\x02\xd6\x5a\xd0\xb4\x4f\xeb\xd9\
\x62\xaa\x44\xb1\x2b\xf5\xb1\x68\x05\x8b\xcf\xe1\xce\x27\x55\x45\
\x21\x18\x5c\xb8\x31\x1c\xc7\xf7\x50\x53\x5f\x67\x07\xe6\xc0\x6a\
\xe6\xd7\x36\x7e\xcf\x1a\xfb\x1a\xe8\x7a\xb2\xb0\xba\xf1\x55\x74\
\x73\xe9\xa2\xf7\xc8\x4a\xf3\xf9\x01\x2f\x17\x0e\x5c\xb2\x4c\x33\
\x4e\x8e\xba\xaa\xd0\x10\xbf\x75\x31\x9e\xae\xd0\x40\xfb\x78\x58\
\x5e\x5c\x4b\x63\xcd\xb4\xb9\x37\x05\xec\x59\xd0\x15\x65\x2a\xf3\
\xe6\xc3\x17\x81\xf1\x2c\x17\xbc\x81\xb4\x33\x38\x28\x05\x6f\x5e\
\xaf\x47\xf1\x40\x62\xdc\x30\x2f\xd2\xd0\x12\xe6\x61\xe1\xc5\xeb\
\x79\x5d\x96\x67\xb1\x85\x5c\xf9\xdd\xc3\x9a\x06\xd2\xcb\xdb\x40\
\x47\x7a\x56\xe8\xe3\xd7\x2a\xfa\x54\xa5\x97\x6a\x6f\xf7\x51\xb1\
\x98\xdb\x69\x6e\x71\x6c\x24\x60\x39\x16\x02\x2e\x07\xf2\x36\x0a\
\x71\xff\x4e\x3c\xf9\xe8\x39\x29\xfb\x6e\x16\x16\x27\x29\x72\x1b\
\x89\xda\x68\x75\xe8\x61\xca\x1b\x68\x84\x70\x4f\x1d\xf8\xa2\x2c\
\xa5\x8b\x16\x44\x4e\xdc\x83\xc6\xae\xee\xbf\xae\x61\x71\x29\x65\
\xcf\x3d\x7e\x70\x98\xc0\x4d\xfe\x79\x74\x2f\x99\xe0\x3f\x6e\x31\
\x99\xb2\x49\xe4\xfb\x2d\xc0\x00\x96\x11\xa6\x70\xd3\x38\xf1\xe0\
\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x04\
\x00\x06\xc2\x91\
\x00\x65\
\x00\x6c\x00\x63\x00\x61\
\x00\x14\
\x02\x1a\xad\x87\
\x00\x61\
\x00\x72\x00\x72\x00\x6f\x00\x77\x00\x2d\x00\x63\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x2d\x00\x32\x00\x32\x00\x35\x00\x2e\
\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| gpl-2.0 | 8,771,687,666,702,133,000 | 46.645161 | 129 | 0.72241 | false |
fro391/Investing | Sentiments/RSS_URL.py | 1 | 1126 | from bs4 import BeautifulSoup
import gethtml
import re
import urlparse
#gets titles
def getURLs (rss):
Titles = []
soup = BeautifulSoup(gethtml.getHtmlText(rss))
for item in soup.findAll('item'):
        #BeautifulSoup mangles the <link> tag when parsing items (only </link> survives), so links are extracted via regex in getURLs2
for i in item.findAll('title'):
try:
Titles.append(i.contents[0])
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print message
return Titles
#gets links
def getURLs2 (rss):
htmltext = gethtml.getHtmlText(rss)
regex = '<link>(.+?)</link>'
pattern = re.compile(regex)
links = re.findall(pattern,htmltext)
    #returns valid links (keep only URLs that parse with an http/https scheme)
    goodlinks = [link for link in links if urlparse.urlparse(link).scheme in ('http', 'https')]
return goodlinks
#gets dates
def getURLs3 (rss):
htmltext = gethtml.getHtmlText(rss)
regex = '<pubDate>(.+?)</pubDate>'
pattern = re.compile(regex)
date = re.findall(pattern,htmltext)
return date
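# Minimal usage sketch (the feed URL below is illustrative, not part of this module):
# if __name__ == '__main__':
#     feed = 'http://example.com/news/rss.xml'
#     for title, link, date in zip(getURLs(feed), getURLs2(feed), getURLs3(feed)):
#         print title, '|', link, '|', date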
| gpl-2.0 | 7,792,483,381,561,359,000 | 31.171429 | 80 | 0.627886 | false |
bert/geda-gaf | xorn/src/python/geda/xmlread.py | 1 | 30020 | # Copyright (C) 2013-2017 Roland Lutz
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
## \namespace xorn.geda.xmlread
## Reading gEDA schematic/symbol files in XML format.
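#
# Illustrative usage sketch (the logger object, loader callbacks and file name
# below are assumptions, not part of this module):
#
#   class Log(object):
#       lineno = 0
#       def error(self, message): print '%d: %s' % (self.lineno + 1, message)
#       def warn(self, message): print '%d: warning: %s' % (self.lineno + 1, message)
#
#   with open('example.xsch', 'rb') as f:
#       rev = read_file(f, 'example.xsch', Log(),
#                       lambda name, load: None,   # load_symbol
#                       lambda name, load: None)   # load_pixmap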
import cStringIO, xml.parsers.expat
from gettext import gettext as _
import xorn.base64
import xorn.fixednum
import xorn.hybridnum
import xorn.proxy
import xorn.storage
import xorn.geda.ref
from xorn.geda.xmlformat import *
NSSEP = '!'
class VoidHandler:
def start_element(self, name, attributes):
return self
def end_element(self, name):
pass
def character_data(self, data):
pass
class NullHandler:
def __init__(self, log):
self.log = log
def start_element(self, name, attributes):
self.log.error(_("unexpected element \"%s\"") % name)
return VoidHandler()
def end_element(self, name):
pass
def character_data(self, data):
s = data.strip()
if s:
self.log.error(_("unexpected character data \"%s\"") % s)
class OverbarHandler(NullHandler):
def __init__(self, log, text):
self.log = log
self.text = text
def start_element(self, name, attributes):
if name != 'br':
return NullHandler.start_element(self, name, attributes)
self.text.append('\n')
return NullHandler(self.log)
def character_data(self, data):
self.text.append(data.replace('\\', '\\\\'))
def end_element(self, name):
self.text.append('\\_')
class TextHandler(NullHandler):
def __init__(self, log, rev, attached_to, data, attribute_name):
self.log = log
self.rev = rev
self.attached_to = attached_to
self.data = data
self.text = []
if attribute_name is not None:
self.text.append('%s=' % attribute_name)
def start_element(self, name, attributes):
if name == 'br':
self.text.append('\n')
return NullHandler(self.log)
if name == 'overbar':
self.text.append('\\_')
return OverbarHandler(self.log, self.text)
return NullHandler.start_element(self, name, attributes)
def character_data(self, data):
self.text.append(data.replace('\\', '\\\\'))
def end_element(self, name):
self.data.text = ''.join(self.text).encode('utf-8')
ob = self.rev.add_object(self.data)
if self.attached_to is not None:
self.rev.relocate_object(ob, self.attached_to, None)
class PathHandler(NullHandler):
def __init__(self, log, rev, data):
self.log = log
self.rev = rev
self.data = data
self.fragments = []
def start_element(self, name, attributes):
if name != 'br':
return NullHandler.start_element(self, name, attributes)
self.fragments.append('\n')
return NullHandler(self.log)
def character_data(self, data):
try:
self.fragments.append(data.encode())
except UnicodeEncodeError:
self.log.error(_("non-ASCII character in path data"))
def end_element(self, name):
self.data.pathdata = ''.join(self.fragments)
self.rev.add_object(self.data)
def parse_angle(x):
angle = int(x)
if angle != 0 and angle != 90 and angle != 180 and angle != 270:
raise ValueError
return angle
class ContentHandler(NullHandler):
def __init__(self, c, rev, attached_to):
self.log = c.log
self.c = c
self.rev = rev
self.attached_to = attached_to
def start_element(self, name, attributes):
if name == 'text' or name == 'attribute':
is_attribute = name == 'attribute'
data = xorn.storage.Text(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
color = self.c.parse_attribute(
attributes, 'color',
5 if is_attribute else 9,
ENUM_COLOR.index, 'color'),
text_size = self.c.parse_attribute(
attributes, 'size', None,
int, 'text size'),
visibility = self.c.parse_attribute(
attributes, 'visible', None if is_attribute else 1,
ENUM_BOOLEAN.index, 'text visibility'),
show_name_value = self.c.parse_attribute(
attributes, 'show', None if is_attribute else 0,
ENUM_SHOW_NAME_VALUE.index, 'show name/value value'),
angle = self.c.parse_attribute(
attributes, 'angle', 0,
parse_angle, 'angle'),
alignment = self.c.parse_attribute(
attributes, 'alignment', 0,
ENUM_ALIGNMENT.index, 'alignment'))
if is_attribute:
try:
name = attributes.pop('name')
except KeyError:
self.c.log.error(_("attribute name not specified"))
name = None
else:
name = None
return TextHandler(
self.c.log, self.rev, self.attached_to, data, name)
if self.attached_to:
self.c.log.error(_("non-text element can't be attached"))
return VoidHandler()
if name == 'arc':
self.rev.add_object(
xorn.storage.Arc(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
radius = self.c.parse_attribute(
attributes, 'radius', None,
self.c.parse, 'radius'),
startangle = self.c.parse_attribute(
attributes, 'startangle', None,
int, 'start angle'),
sweepangle = self.c.parse_attribute(
attributes, 'sweepangle', None,
int, 'sweep angle'),
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes)))
return NullHandler(self.c.log)
if name == 'box':
self.rev.add_object(
xorn.storage.Box(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
width = self.c.parse_attribute(
attributes, 'width', None,
self.c.parse, 'width'),
height = self.c.parse_attribute(
attributes, 'height', None,
self.c.parse, 'height'),
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes),
fill = self.c.parse_fill(attributes)))
return NullHandler(self.c.log)
if name == 'circle':
self.rev.add_object(
xorn.storage.Circle(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
radius = self.c.parse_attribute(
attributes, 'radius', None,
self.c.parse, 'radius'),
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes),
fill = self.c.parse_fill(attributes)))
return NullHandler(self.c.log)
if name == 'component':
ob = self.rev.add_object(
xorn.storage.Component(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
selectable = self.c.parse_attribute(
attributes, 'selectable', True,
ENUM_BOOLEAN.index, 'selectability'),
angle = self.c.parse_attribute(
attributes, 'angle', 0,
parse_angle, 'angle'),
mirror = self.c.parse_attribute(
attributes, 'mirror', False,
ENUM_BOOLEAN.index, 'mirror flag')))
try:
symbol_id = attributes.pop('symbol')
except KeyError:
self.c.log.error(_("symbol not specified"))
else:
if not symbol_id:
self.c.log.error(_("symbol id can't be empty"))
else:
self.c.symbol_refs.append(
(self.rev, ob, symbol_id, self.c.log.lineno))
return ContentHandler(self.c, self.rev, ob)
if name == 'line':
x0 = self.c.parse_attribute(attributes, 'x0', None,
self.c.parse, 'first X coordinate')
y0 = self.c.parse_attribute(attributes, 'y0', None,
self.c.parse, 'first Y coordinate')
x1 = self.c.parse_attribute(attributes, 'x1', None,
self.c.parse, 'second X coordinate')
y1 = self.c.parse_attribute(attributes, 'y1', None,
self.c.parse, 'second Y coordinate')
self.rev.add_object(
xorn.storage.Line(
x = x0, y = y0, width = x1 - x0, height = y1 - y0,
color = self.c.parse_attribute(
attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes)))
return NullHandler(self.c.log)
if name == 'net' or name == 'pin':
is_pin = name == 'pin'
is_bus = self.c.parse_attribute(attributes, 'type', False,
ENUM_NETTYPE.index, 'net/pin type')
if is_pin:
default_color = 1
is_inverted = self.c.parse_attribute(
attributes, 'inverted', False,
ENUM_BOOLEAN.index, 'invertedness')
else:
if is_bus:
default_color = 10
else:
default_color = 4
is_inverted = False
x0 = self.c.parse_attribute(attributes, 'x0', None,
self.c.parse, 'first X coordinate')
y0 = self.c.parse_attribute(attributes, 'y0', None,
self.c.parse, 'first Y coordinate')
x1 = self.c.parse_attribute(attributes, 'x1', None,
self.c.parse, 'second X coordinate')
y1 = self.c.parse_attribute(attributes, 'y1', None,
self.c.parse, 'second Y coordinate')
ob = self.rev.add_object(
xorn.storage.Net(
x = x0, y = y0, width = x1 - x0, height = y1 - y0,
color = self.c.parse_attribute(
attributes, 'color', default_color,
ENUM_COLOR.index, 'color'),
is_bus = is_bus,
is_pin = is_pin,
is_inverted = is_inverted))
return ContentHandler(self.c, self.rev, ob)
if name == 'path':
return PathHandler(self.c.log, self.rev, xorn.storage.Path(
color = self.c.parse_attribute(attributes, 'color', 3,
ENUM_COLOR.index, 'color'),
line = self.c.parse_line(attributes),
fill = self.c.parse_fill(attributes)))
if name == 'picture':
ob = self.rev.add_object(
xorn.storage.Picture(
x = self.c.parse_attribute(
attributes, 'x', None,
self.c.parse, 'X coordinate'),
y = self.c.parse_attribute(
attributes, 'y', None,
self.c.parse, 'Y coordinate'),
width = self.c.parse_attribute(
attributes, 'width', None,
self.c.parse, 'width'),
height = self.c.parse_attribute(
attributes, 'height', None,
self.c.parse, 'height'),
angle = self.c.parse_attribute(
attributes, 'angle', 0,
parse_angle, 'angle'),
mirror = self.c.parse_attribute(
attributes, 'mirrored', False,
ENUM_BOOLEAN.index, 'mirror flag'),
pixmap = None))
try:
pixmap_id = attributes.pop('pixmap')
except KeyError:
self.c.log.error(_("pixmap not specified"))
else:
if not pixmap_id:
self.c.log.error(_("pixmap id can't be empty"))
else:
self.c.pixmap_refs.append(
(self.rev, ob, pixmap_id, self.c.log.lineno))
return NullHandler(self.c.log)
self.c.log.error(_("unexpected element \"%s\"") % name)
return VoidHandler()
class PixmapHandler(NullHandler):
def __init__(self, log, pixmap, just_verify):
self.log = log
self.pixmap = pixmap
self.just_verify = just_verify
self.f = cStringIO.StringIO()
def character_data(self, data):
self.f.write(data)
def end_element(self, name):
self.f.seek(0)
try:
data = xorn.base64.decode(self.f)
except xorn.base64.DecodingError:
self.log.error(_("base64 decoding error"))
return
if not self.just_verify:
self.pixmap.data = data
elif data != self.pixmap.data:
self.log.warn(_("contents of pixmap file \"%s\" don't match "
"embedded data") % self.pixmap.filename)
class LoadContext:
def __init__(self, log, load_symbol, load_pixmap):
self.log = log
self.ids = set()
self.symbols = {}
self.pixmaps = {}
self.symbol_refs = []
self.pixmap_refs = []
self.load_symbol = load_symbol
self.load_pixmap = load_pixmap
self.use_hybridnum = False
def parse(self, x):
if self.use_hybridnum:
return xorn.hybridnum.parse(x, 2)
else:
return float(xorn.fixednum.parse(x, 2))
def parse_attribute(self, d, key, default, processor, msg_fragment):
try:
x = d.pop(key)
except KeyError:
if default is not None:
return default
self.log.error(_("%s not specified") % msg_fragment)
else:
try:
return processor(x)
except (KeyError, ValueError):
self.log.error(_("invalid %s \"%s\"") % (msg_fragment, x))
# guess a well-formed return value from processor function
return 0. if processor == self.parse else 0
def parse_line(self, attributes):
line = xorn.storage.LineAttr()
line.width = self.parse_attribute(
attributes, 'linewidth', 0, self.parse, 'line width')
line.cap_style = self.parse_attribute(
attributes, 'capstyle', 0, ENUM_CAPSTYLE.index, 'cap style')
line.dash_style = self.parse_attribute(
attributes, 'dashstyle', 0, ENUM_DASHSTYLE.index, 'dash style')
if line.dash_style != 0 and line.dash_style != 1:
line.dash_length = self.parse_attribute(
attributes, 'dashlength', None, self.parse, 'dash length')
else:
line.dash_length = -1
if line.dash_style != 0:
line.dash_space = self.parse_attribute(
attributes, 'dashspace', None, self.parse, 'dash space')
else:
line.dash_space = -1
return line
def parse_fill(self, attributes):
fill = xorn.storage.FillAttr()
fill.type = self.parse_attribute(
attributes, 'filltype', 0, ENUM_FILLTYPE.index, 'fill type')
if fill.type == 2 or fill.type == 3:
fill.width = self.parse_attribute(
attributes, 'fillwidth', None, self.parse, 'fill width')
fill.angle0 = self.parse_attribute(
attributes, 'angle0', None, int, 'first fill angle')
fill.pitch0 = self.parse_attribute(
attributes, 'pitch0', None, self.parse, 'first fill pitch')
else:
fill.width = -1
fill.angle0 = -1
fill.pitch0 = -1
if fill.type == 2:
fill.angle1 = self.parse_attribute(
attributes, 'angle1', None, int, 'second fill angle')
fill.pitch1 = self.parse_attribute(
attributes, 'pitch1', None, self.parse, 'second fill pitch')
else:
fill.angle1 = -1
fill.pitch1 = -1
return fill
class RootElementHandler(NullHandler):
def __init__(self, c):
self.log = c.log
self.c = c
self.rev = xorn.storage.Revision()
self.had_content = False
def start_element(self, name, attributes):
if name == 'content':
if self.had_content:
self.c.log.error(_("duplicate content tag"))
return VoidHandler()
self.had_content = True
return ContentHandler(self.c, self.rev, None)
if name == 'symbol':
try:
mode = attributes.pop('mode')
except KeyError:
self.c.log.error(_("symbol mode not specified"))
return VoidHandler()
if mode == 'omitted':
read_symbol = False
is_embedded = False
elif mode == 'referenced':
read_symbol = True
is_embedded = False
elif mode == 'embedded':
read_symbol = True
is_embedded = True
else:
self.c.log.error(_("invalid symbol mode \"%s\"") % mode)
return VoidHandler()
try:
name = attributes.pop('name')
except KeyError:
if not is_embedded:
self.c.log.error(_("symbol name not specified"))
return VoidHandler()
name = None
if is_embedded:
symbol = xorn.geda.ref.Symbol(name, None, True)
else:
symbol = self.c.load_symbol(name, read_symbol)
if symbol is None:
symbol = xorn.geda.ref.Symbol(name, None, False)
is_embedded = True
assert not symbol.embedded
try:
symbol_id = attributes.pop('id')
except KeyError:
self.c.log.error(_("symbol id not specified"))
return VoidHandler()
if not symbol_id:
self.c.log.error(_("symbol id can't be empty"))
return VoidHandler()
if symbol_id in self.c.ids:
self.c.log.error(_("duplicate id \"%s\"") % symbol_id)
return VoidHandler()
self.c.ids.add(symbol_id)
self.c.symbols[symbol_id] = symbol
if not read_symbol:
return NullHandler(self.c.log)
reh = RootElementHandler(self.c)
if is_embedded:
symbol.prim_objs = reh.rev
return reh
if name == 'pixmap':
try:
mode = attributes.pop('mode')
except KeyError:
self.c.log.error(_("pixmap mode not specified"))
return VoidHandler()
if mode == 'omitted':
read_pixmap = False
is_embedded = False
elif mode == 'referenced':
read_pixmap = True
is_embedded = False
elif mode == 'embedded':
read_pixmap = True
is_embedded = True
else:
self.c.log.error(_("invalid pixmap mode \"%s\"") % mode)
return VoidHandler()
try:
name = attributes.pop('name')
except KeyError:
if not is_embedded:
self.c.log.error(_("pixmap name not specified"))
return VoidHandler()
name = None
if is_embedded:
pixmap = xorn.geda.ref.Pixmap(name, None, True)
else:
pixmap = self.c.load_pixmap(name, read_pixmap)
if pixmap is None:
pixmap = xorn.geda.ref.Pixmap(name, None, False)
is_embedded = True
assert not pixmap.embedded
try:
pixmap_id = attributes.pop('id')
except KeyError:
self.c.log.error(_("pixmap id not specified"))
return VoidHandler()
if not pixmap_id:
self.c.log.error(_("pixmap id can't be empty"))
return VoidHandler()
if pixmap_id in self.c.ids:
self.c.log.error(_("duplicate id \"%s\"") % pixmap_id)
return VoidHandler()
self.c.ids.add(pixmap_id)
self.c.pixmaps[pixmap_id] = pixmap
if read_pixmap:
return PixmapHandler(self.c.log, pixmap, not is_embedded)
else:
return NullHandler(self.c.log)
self.c.log.error(_("unexpected element \"%s\"") % name)
return VoidHandler()
def end_element(self, name):
if not self.had_content:
self.c.log.error(_("content missing"))
def read_file(f, name, log, load_symbol, load_pixmap):
context = LoadContext(log, load_symbol, load_pixmap)
reh = RootElementHandler(context)
def start_root_element(name, attributes):
if name != 'symbol' and name != 'schematic':
log.error(_("invalid root element \"%s\"") % name)
return VoidHandler()
for feature in attributes.pop('file-format-features', '').split(' '):
if not feature:
continue
if feature == 'experimental':
pass
elif feature == 'hybridnum':
if context.use_hybridnum:
log.error(_("duplicate file format feature"))
context.use_hybridnum = True
else:
log.error(_("unsupported file format feature \"%s\"")
% feature)
return reh
read_xml_file(f, log, NAMESPACE, start_root_element)
for rev, ob, symbol_id, lineno in context.symbol_refs:
if symbol_id not in context.symbols:
log.lineno = lineno
log.error(_("undefined symbol \"%s\"") % symbol_id)
continue
data = rev.get_object_data(ob)
data.symbol = context.symbols[symbol_id]
rev.set_object_data(ob, data)
for rev, ob, pixmap_id, lineno in context.pixmap_refs:
if pixmap_id not in context.pixmaps:
log.lineno = lineno
log.error(_("undefined pixmap \"%s\"") % pixmap_id)
continue
data = rev.get_object_data(ob)
data.pixmap = context.pixmaps[pixmap_id]
rev.set_object_data(ob, data)
return xorn.proxy.RevisionProxy(reh.rev)
def read_xml_file(f, log, namespace, start_root_element):
stack = []
def strip_namespace(name, ignore_errors):
try:
pos = name.index(NSSEP)
except ValueError:
if not ignore_errors:
log.error(_("element name \"%s\" without namespace") % name)
return None
if name[:pos] != namespace and not ignore_errors:
log.error(_("invalid namespace \"%s\"") % name[:pos])
return None
return name[pos + 1:]
def StartElementHandler(name, attributes):
log.lineno = p.CurrentLineNumber - 1
name = strip_namespace(name, False)
if name is None:
new_handler = VoidHandler()
elif stack:
new_handler = stack[-1].start_element(name, attributes)
else:
new_handler = start_root_element(name, attributes)
stack.append(new_handler)
if attributes and not isinstance(new_handler, VoidHandler):
log.error(_("unexpected attribute(s) %s") % _(", ").join(
_("\"%s\"") % attr for attr in sorted(attributes)))
def EndElementHandler(name):
log.lineno = p.CurrentLineNumber - 1
name = strip_namespace(name, True)
stack.pop().end_element(name)
def CharacterDataHandler(data):
log.lineno = p.CurrentLineNumber - 1
stack[-1].character_data(data)
def StartDoctypeDeclHandler(doctype_name, system_id, public_id,
has_internal_subset):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML document type declaration"))
def ElementDeclHandler(name, model):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML element type declaration"))
def AttlistDeclHandler(elname, attname, type, default, required):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML element type attribute declaration"))
def ProcessingInstructionHandler(target, data):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML processing instruction"))
def UnparsedEntityDeclHandler(entity_name, base, system_id, public_id,
notationName):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML unparsed entity declaration"))
def EntityDeclHandler(entity_name, is_parameter_entity, value, base,
system_id, public_id, notation_name):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML entity declaration"))
def NotationDeclHandler(notation_name, base, system_id, public_id):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML notation declaration"))
def StartCdataSectionHandler():
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected XML CDATA section"))
def DefaultHandler(data):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected characters in XML document"))
def NotStandaloneHandler():
log.lineno = p.CurrentLineNumber - 1
log.error(_("XML document hasn't been declared as standalone"))
def ExternalEntityRefHandler(context, base, systemId, publicId):
log.lineno = p.CurrentLineNumber - 1
log.error(_("unexpected reference to external XML entity"))
p = xml.parsers.expat.ParserCreate(namespace_separator = '!')
p.XmlDeclHandler = None
p.StartDoctypeDeclHandler = StartDoctypeDeclHandler
p.EndDoctypeDeclHandler = None
p.ElementDeclHandler = ElementDeclHandler
p.AttlistDeclHandler = AttlistDeclHandler
p.StartElementHandler = StartElementHandler
p.EndElementHandler = EndElementHandler
p.ProcessingInstructionHandler = ProcessingInstructionHandler
p.CharacterDataHandler = CharacterDataHandler
p.UnparsedEntityDeclHandler = UnparsedEntityDeclHandler
p.EntityDeclHandler = EntityDeclHandler
p.NotationDeclHandler = NotationDeclHandler
p.StartNamespaceDeclHandler = None
p.EndNamespaceDeclHandler = None
p.CommentHandler = None
p.StartCdataSectionHandler = StartCdataSectionHandler
p.EndCdataSectionHandler = None
p.DefaultHandler = DefaultHandler
p.DefaultHandlerExpand = None
p.NotStandaloneHandler = NotStandaloneHandler
p.ExternalEntityRefHandler = ExternalEntityRefHandler
try:
p.ParseFile(f)
except xml.parsers.expat.ExpatError as e:
log.lineno = e.lineno - 1
log.error(_("%s") % e)
| gpl-2.0 | -6,952,313,439,646,739,000 | 37.78553 | 79 | 0.521286 | false |
JoaoCarabetta/bradata | bradata/tse/pipeline.py | 1 | 6105 | import bradata.utils
import bradata.connection
import os
import io
from zipfile import ZipFile
import pandas as pd
import glob
import yaml
import shutil
import luigi
import luigi.contrib.postgres
def _find_header(data_type, year, path):
with open(path, 'r') as f:
data = yaml.load(f)
a = data[data_type]['columns']
final = min(list(a.keys()))
for k in a.keys():
if int(year) >= k:
final = k
return str(a[final])
class Get_Headers(luigi.Task):
def output(self):
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'config', 'headers.csv'))
def run(self):
conn = bradata.connection.Connection()
result = conn.perform_request('https://raw.githubusercontent.com/labFGV/bradata/master/bradata/tse/headersTSE.csv')
if result['status'] == 'ok':
result = result['content']
else:
print('File was not dowloaded')
with self.output().open('w') as o_file:
o_file.write(result)
class Get_Header_Relation(luigi.Task):
def output(self):
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'config', 'header_relation.yaml'))
def run(self):
conn = bradata.connection.Connection()
result = conn.perform_request(
'https://raw.githubusercontent.com/labFGV/bradata/master/bradata/tse/header_relation.yaml')
if result['status'] == 'ok':
result = result['content']
else:
raise Warning ('Header Relation was not dowloaded')
with self.output().open('w') as o_file:
o_file.write(result)
class Download_Unzip(luigi.Task):
"""
Download and unzip
"""
year = luigi.Parameter()
data_type = luigi.Parameter()
def output(self):
"""
:return: the target output for this task.
:rtype: object (:py:class:`luigi.target.Target`)
"""
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'temp', '{}_{}'.format(self.data_type, self.year)))
def requires(self):
"""
        * :py:class:`~.Get_Header_Relation`
:return: list of object (:py:class:`luigi.task.Task`)
"""
return Get_Header_Relation()
def run(self):
conn = bradata.connection.Connection()
with self.input().open('r') as input_file:
base_url = self.select_url(self.data_type)
url = base_url + bradata.utils._treat_inputs(self.year) + '.zip'
result = conn.perform_request(url, binary=True)
if result['status'] == 'ok':
result = result['content']
else:
raise Exception ('File was not dowloaded')
zipfile = ZipFile(io.BytesIO(result))
zipfile.extractall(self.output().path)
def select_url(self, data_type):
with open(self.input().path, 'r') as f:
data = yaml.load(f)
return data[data_type]['url']
class Aggregat(luigi.Task):
"""
    Gather all per-state CSV files and aggregate them into a single file with a header
"""
year = luigi.Parameter()
data_type = luigi.Parameter()
def requires(self):
"""
"""
return {'download': Download_Unzip(data_type=self.data_type, year=self.year),
'headers': Get_Headers(),
'header_relation': Get_Header_Relation()}
def output(self):
"""
"""
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', '{}_{}.csv'.format(self.data_type, self.year)))
def run(self):
headers = pd.read_csv(self.input()['headers'].path)
files = glob.glob(self.input()['download'].path + "/*.txt".format(self.year))
header = _find_header(self.data_type, self.year, self.input()['header_relation'].path)
df_list = []
for filename in sorted(files):
df_list.append(
pd.read_csv(filename, sep=';', names=headers[header].dropna().tolist(), encoding='latin1'))
full_df = pd.concat(df_list)
full_df.to_csv(self.output().path, index=False, encoding='utf-8')
print('Completed! Access your file at',
os.path.join(bradata.__download_dir__, 'tse', '{}_{}.csv'.format(self.data_type, self.year)))
class ToSQl(luigi.Task):
data_type = luigi.Parameter()
year = luigi.Parameter()
def requires(self):
return Aggregat(data_type=self.data_type, year=self.year)
def run(self):
with open('bradata/tse/config_server.yaml', 'r') as f:
server = yaml.load(f)
host = server['host']
database = server['database']
user = server['user']
password = server['password']
schema = 'tse'
table = '{}_{}'.format(self.data_type, self.year)
from sqlalchemy import create_engine
url = 'postgresql://{}:{}@{}/{}'
url = url.format(user, password, host, database)
engine = create_engine(url)
headers = pd.read_csv(self.input().path)
        print('Inserting data into the DB. It can take a while...')
        headers.to_sql(table, engine, schema=schema, if_exists='replace')
        print('The data is in your DB! Check schema {}, table {}'.format(schema, table))
with self.output().open('w') as f:
f.write('')
def output(self):
return luigi.LocalTarget(os.path.join(bradata.__download_dir__, 'tse', 'temp',
'{}_{}'.format(self.data_type, self.year), 'dumb.txt'))
class Fetch(luigi.WrapperTask):
data_types = luigi.Parameter()
years = luigi.Parameter()
def requires(self):
data_types = self.string_to_list(self.data_types)
years = self.string_to_list(self.years)
yield [ToSQl(data_type=t, year=y) for t in data_types for y in years]
def string_to_list(self, string):
string = string.replace("'",'').replace('[', '').replace(']','').replace(' ', '')
return [s for s in string.split(',')]
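# Illustrative command line (module path and data-type value are assumptions;
# the valid data types come from header_relation.yaml):
#   python -m luigi --module bradata.tse.pipeline Fetch \
#       --data-types "['candidatos']" --years "['2014']" --local-scheduler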
if __name__ == "__main__":
luigi.run() | mit | -2,733,309,150,001,114,000 | 27.666667 | 130 | 0.578051 | false |
vathpela/anaconda | pyanaconda/ui/gui/helpers.py | 1 | 13727 | # Abstract base classes for GUI classes
#
# Copyright (C) 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# This file contains abstract base classes that are specific to GUI
# functionality. See also pyanaconda.ui.helpers.
from pyanaconda.anaconda_loggers import get_module_logger
log = get_module_logger(__name__)
from abc import ABCMeta, abstractmethod
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from pyanaconda.flags import flags
from pyanaconda.ui.helpers import InputCheck, InputCheckHandler
from pyanaconda.ui.gui.utils import timed_action
from pyanaconda.core.i18n import _
from pyanaconda.errors import NonInteractiveError
from pyanaconda.core import constants
def autoinstall_stopped(reason):
""" Reaction on stop of automatic kickstart installation
Log why the installation stopped and raise the NonInteractiveError in
    non-interactive mode.
:param reason: Why the automatic kickstart installation stopped.
"""
log.info("kickstart installation stopped for info: %s", reason)
if not flags.ksprompt:
raise NonInteractiveError("Non interactive installation failed: %s" % reason)
class GUIInputCheck(InputCheck):
""" Add timer awareness to an InputCheck.
Add a delay before running the validation function so that the
function is not run for every keystroke. Run any pending actions
before returning a status.
"""
def __init__(self, parent, input_obj, run_check, data=None):
super().__init__(parent, input_obj, run_check, data)
# Add the timer here instead of decorating a method so that a new
# TimedAction is created for every instance
self.update_check_status = timed_action(busy_cursor=False)(self.update_check_status)
@property
def check_status(self):
if self.update_check_status.timer_active:
# The timer is hooked up to update_check_status, which takes no arguments.
# Since the timed_action wrapper was made around the bound method of a
# GUIInputCheck instance and not the function of a GUIInputCheck class,
# self is already applied and update_check_status is just a regular TimedAction
# object, not a curried function around the object.
self.update_check_status.run_now()
return super().check_status
# Inherit abstract methods from InputCheckHandler
# pylint: disable=abstract-method
class GUIInputCheckHandler(InputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for Gtk input screens.
This class assumes that all input objects are of type GtkEditable and
attaches InputCheck.update_check_status to the changed signal.
"""
def _update_check_status(self, editable, inputcheck):
inputcheck.update_check_status()
def get_input(self, input_obj):
return input_obj.get_text()
def add_check(self, input_obj, run_check, data=None):
# Use a GUIInputCheck to run the validation in a GLib timer
checkRef = GUIInputCheck(self, input_obj, run_check, data)
# Start a new timer on each keystroke
input_obj.connect_after("changed", self._update_check_status, checkRef)
# Add the InputCheck to the parent class's list of checks
self._check_list.append(checkRef)
return checkRef
def can_go_back_focus_if_not(self):
"""Check whether the input validation checks allow the spoke to be exited.
Return True if yes, otherwise focus the problematic input field and return False.
"""
failed_check = next(self.failed_checks, None)
if failed_check:
failed_check.input_obj.grab_focus()
return False
else:
return True
class GUIDialogInputCheckHandler(GUIInputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for Gtk dialogs.
If an OK button is provided in the constructor, this class will
handle setting the sensitivity of the button to match the input
check result. A method on_ok_clicked is provided to determine whether
the dialog can be exited, similar to on_back_clicked for spokes.
    It's not possible (or at least not easy) to prevent a GtkDialog from
returning a response, so the caller of gtk_dialog_run needs to check
whether the input is valid and decide based on that whether to destroy
the dialog or call gtk_dialog_run again.
"""
def __init__(self, ok_button=None):
super().__init__()
self._ok_button = ok_button
def _update_check_status(self, editable, inputcheck):
# If an OK button was provided, set it to sensitive on any change in
# input. This way if a user changes invalid input to valid, they can
# immediately leave the dialog. This also means that there will be a
        # period in which the user is not prevented from leaving with empty input,
# and this condition needs to be checked.
if self._ok_button:
self._ok_button.set_sensitive(True)
return super()._update_check_status(editable, inputcheck)
def set_status(self, inputcheck):
if inputcheck.check_status in (InputCheck.CHECK_OK, InputCheck.CHECK_SILENT):
inputcheck.input_obj.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, None)
inputcheck.input_obj.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY, "")
else:
inputcheck.input_obj.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY,
"dialog-error")
inputcheck.input_obj.set_icon_tooltip_text(Gtk.EntryIconPosition.SECONDARY,
inputcheck.check_status)
# Update the ok button sensitivity based on the check status.
# If the result is CHECK_OK, set_sensitive(True) still needs to be
# called, even though the changed handler above also makes the button
# sensitive. A direct call to update_check_status may have bypassed the
# changed signal.
if self._ok_button:
self._ok_button.set_sensitive(inputcheck.check_status == InputCheck.CHECK_OK)
def on_ok_clicked(self):
"""Return whether the input validation checks allow the dialog to be exited.
Unlike GUISpokeInputCheckHandler.on_back_clicked, it is not expected that
subclasses will implement this method.
"""
failed_check = next(self.failed_checks, None)
if failed_check:
failed_check.input_obj.grab_focus()
return False
else:
return True
class GUISpokeInputCheckHandler(GUIInputCheckHandler, metaclass=ABCMeta):
"""Provide InputCheckHandler functionality for graphical spokes.
This class implements set_status to set a message in the warning area of
    the spoke window and provides a try_to_go_back method that on_back_clicked
    implementations can use to prevent the user from exiting a spoke with bad input.
"""
def __init__(self):
super().__init__()
self._checker = None
self._prev_status = None
self._password_kickstarted = False
# return to hub logic
self._can_go_back = False
self._needs_waiver = False
self._waive_clicks = 0
# important UI object instances
self._password_entry = None
self._password_confirmation_entry = None
self._password_bar = None
self._password_label = None
@property
def checker(self):
return self._checker
# Implemented by NormalSpoke
@abstractmethod
def clear_info(self):
pass
# Implemented by GUIObject
@abstractmethod
def set_warning(self, msg):
pass
# Implemented by NormalSpoke
@abstractmethod
def show_warning_message(self, message):
pass
def set_status(self, inputcheck):
"""Update the warning with the input validation error from the first
error message.
"""
failed_check = next(self.failed_checks_with_message, None)
if not failed_check:
self.clear_info()
self._prev_status = None
elif failed_check.check_status != self._prev_status:
self._prev_status = failed_check.check_status
self.clear_info()
self.set_warning(failed_check.check_status)
def remove_placeholder_texts(self):
"""Remove password and confirmation placeholder texts."""
self.password_entry.set_placeholder_text("")
self.password_confirmation_entry.set_placeholder_text("")
@property
def password_bar(self):
"""Password strength bar."""
return self._password_bar
@property
def password_label(self):
"""Short password status label."""
return self._password_label
def set_password_score(self, score):
self.password_bar.set_value(score)
def set_password_status(self, status_message):
self.password_label.set_text(status_message)
@property
def password_entry(self):
"""The password entry widget."""
return self._password_entry
@property
def password(self):
"""Input to be checked.
Content of the input field, etc.
:returns: input to be checked
:rtype: str
"""
return self.password_entry.get_text()
@property
def password_confirmation_entry(self):
"""The password confirmation entry widget."""
return self._password_confirmation_entry
@property
def password_confirmation(self):
"""Content of the input confirmation field.
Note that not all spokes might have a password confirmation field.
:returns: content of the password confirmation field
:rtype: str
"""
return self.password_confirmation_entry.get_text()
@property
def password_kickstarted(self):
"""Reports if the input was initialized from kickstart.
:returns: if the input was initialized from kickstart
:rtype: bool
"""
return self._password_kickstarted
@password_kickstarted.setter
def password_kickstarted(self, value):
self._password_kickstarted = value
@property
def can_go_back(self):
return self._can_go_back
@can_go_back.setter
def can_go_back(self, value):
self._can_go_back = value
@property
def needs_waiver(self):
return self._needs_waiver
@needs_waiver.setter
def needs_waiver(self, value):
self._needs_waiver = value
@property
def waive_clicks(self):
"""Number of waive clicks the user has done to override an input check.
:returns: number of waive clicks
:rtype: int
"""
return self._waive_clicks
@waive_clicks.setter
def waive_clicks(self, clicks):
"""Set number of waive clicks.
:param int clicks: number of waive clicks
"""
self._waive_clicks = clicks
def on_password_changed(self, editable, data=None):
"""Tell checker that the content of the password field changed."""
self.checker.password.content = self.password
def on_password_confirmation_changed(self, editable, data=None):
"""Tell checker that the content of the password confirmation field changed."""
self.checker.password_confirmation.content = self.password_confirmation
def try_to_go_back(self):
"""Check whether the input validation checks allow the spoke to be exited.
Unlike NormalSpoke.on_back_clicked, this function returns a boolean value.
Classes implementing this class should run GUISpokeInputCheckHandler.try_to_go_back,
and if it succeeded, run NormalSpoke.on_back_clicked.
"""
# check if we can go back
if self.can_go_back:
if self.needs_waiver:
# We can proceed but need waiver.
                # - this means we can start accumulating the waive clicks
self.waive_clicks += 1
# we need to have enough waive clicks to go back
if self.waive_clicks == 1:
self.show_warning_message(_(constants.PASSWORD_FINAL_CONFIRM))
elif self.waive_clicks >= 2:
# clear the waive clicks & any messages
self.waive_clicks = 0
self.clear_info()
return True
# we can go back unconditionally
else:
# clear the waive clicks & any messages
self.waive_clicks = 0
self.clear_info()
return True
# we can't get back
return False
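# Illustrative spoke-side override (the spoke base class and signature are
# assumptions, not part of this module):
#
#   def on_back_clicked(self, button):
#       if GUISpokeInputCheckHandler.try_to_go_back(self):
#           NormalSpoke.on_back_clicked(self, button)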
| gpl-2.0 | 1,719,857,370,931,019,000 | 35.703209 | 95 | 0.660523 | false |
testmana2/test | Preferences/ConfigurationPages/HelpFlashCookieManagerPage.py | 1 | 2201 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing the Flash Cookies Manager configuration page.
"""
from __future__ import unicode_literals
from E5Gui.E5PathPicker import E5PathPickerModes
from .ConfigurationPageBase import ConfigurationPageBase
from .Ui_HelpFlashCookieManagerPage import Ui_HelpFlashCookieManagerPage
import Preferences
class HelpFlashCookieManagerPage(ConfigurationPageBase,
Ui_HelpFlashCookieManagerPage):
"""
Class implementing the Flash Cookies Manager configuration page.
"""
def __init__(self):
"""
Constructor
"""
super(HelpFlashCookieManagerPage, self).__init__()
self.setupUi(self)
self.setObjectName("HelpFlashCookieManagerPage")
self.flashDataPathPicker.setMode(E5PathPickerModes.DirectoryMode)
# set initial values
self.flashDataPathPicker.setText(
Preferences.getHelp("FlashCookiesDataPath"))
self.autoModeGroup.setChecked(
Preferences.getHelp("FlashCookieAutoRefresh"))
self.notificationGroup.setChecked(
Preferences.getHelp("FlashCookieNotify"))
self.deleteGroup.setChecked(
Preferences.getHelp("FlashCookiesDeleteOnStartExit"))
def save(self):
"""
Public slot to save the Flash Cookies Manager configuration.
"""
Preferences.setHelp("FlashCookiesDataPath",
self.flashDataPathPicker.text())
Preferences.setHelp("FlashCookieAutoRefresh",
self.autoModeGroup.isChecked())
Preferences.setHelp("FlashCookieNotify",
self.notificationGroup.isChecked())
Preferences.setHelp("FlashCookiesDeleteOnStartExit",
self.deleteGroup.isChecked())
def create(dlg):
"""
Module function to create the configuration page.
@param dlg reference to the configuration dialog
@return reference to the instantiated page (ConfigurationPageBase)
"""
page = HelpFlashCookieManagerPage()
return page
| gpl-3.0 | 7,723,948,479,234,742,000 | 31.850746 | 73 | 0.6597 | false |
OlexandrI/pyside | paste/util/looper.py | 1 | 4017 | """
Helper for looping over sequences, particular in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
    ...     print(loop.number, item)
... if not loop.last:
    ...         print('---')
1 a
---
2 b
---
3 c
"""
import collections
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
            self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos+1]
except IndexError:
return None
    next = property(__next__)
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos-1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq)-1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
        return self._compare_group(self.item, self.next, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, str)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif isinstance(getter, collections.Callable):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
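# Added example (not part of the original module): using first_group() with a key
# getter.  The 'section' key and the data below are illustrative assumptions.
def _group_example():
    rows = [
        {'section': 'a', 'value': 1},
        {'section': 'a', 'value': 2},
        {'section': 'b', 'value': 3},
    ]
    out = []
    for loop, row in looper(rows):
        if loop.first_group('section'):
            # True for the first row and whenever the 'section' value changes
            out.append('-- %s --' % row['section'])
        out.append(row['value'])
    return out  # ['-- a --', 1, 2, '-- b --', 3]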
| lgpl-3.0 | -7,224,671,793,880,922,000 | 25.254902 | 74 | 0.550411 | false |
rialto-px/openprocurement.tender.twostage | openprocurement/tender/twostage/views/bid_document.py | 1 | 13476 | # -*- coding: utf-8 -*-
from openprocurement.api.models import get_now
from openprocurement.api.utils import (
get_file,
save_tender,
upload_file,
apply_patch,
update_file_content_type,
opresource,
json_view,
context_unpack,
)
from openprocurement.api.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data,
)
from openprocurement.tender.twostage.utils import (
bid_financial_documents_resource,
)
from openprocurement.tender.openua.views.bid_document import TenderUaBidDocumentResource
@opresource(name='Tender Two Stage Bid Documents',
collection_path='/tenders/{tender_id}/bids/{bid_id}/documents',
path='/tenders/{tender_id}/bids/{bid_id}/documents/{document_id}',
procurementMethodType='aboveThresholdTS',
description="Tender Two Stage bidder documents")
class TenderTSBidDocumentResource(TenderUaBidDocumentResource):
container = "documents"
view_forbidden_states = ['active.tendering']
view_forbidden_bid_states = ['invalid', 'deleted']
def _doc_access_restricted(self, doc):
is_bid_owner = self.request.authenticated_role == 'bid_owner'
is_tender_owner = self.request.authenticated_role == 'tender_owner'
return doc.confidentiality != 'public' and not (is_bid_owner or is_tender_owner)
@json_view(permission='view_tender')
def collection_get(self):
"""Tender Bid Documents List"""
if self.request.validated['tender_status'] in self.view_forbidden_states and self.request.authenticated_role != 'bid_owner':
self.request.errors.add('body', 'data', 'Can\'t view bid documents in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
if self.context.status in self.view_forbidden_bid_states and self.request.authenticated_role != 'bid_owner':
self.request.errors.add('body', 'data', 'Can\'t view bid documents in current ({}) bid status'.format(self.context.status))
self.request.errors.status = 403
return
if self.request.params.get('all', ''):
collection_data = [i.serialize("restricted_view") if self._doc_access_restricted(i) else i.serialize("view")
for i in getattr(self.context, self.container)]
else:
collection_data = sorted(dict([(i.id, i.serialize("restricted_view") if self._doc_access_restricted(i) else i.serialize("view"))
for i in getattr(self.context, self.container)]).values(), key=lambda i: i['dateModified'])
return {'data': collection_data}
@json_view(validators=(validate_file_upload,), permission='edit_bid')
def collection_post(self):
"""Tender Bid Document Upload
"""
if self.request.validated['tender_status'] not in ['active.tendering', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t add document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
if self.request.validated['tender_status'] == 'active.tendering' and (tender.tenderPeriod.startDate and get_now() < tender.tenderPeriod.startDate or get_now() > tender.tenderPeriod.endDate):
self.request.errors.add('body', 'data', 'Document can be added only during the tendering period: from ({}) to ({}).'.format(tender.tenderPeriod.startDate and tender.tenderPeriod.startDate.isoformat(), tender.tenderPeriod.endDate.isoformat()))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] in ['active.qualification', 'active.awarded'] and \
not [i for i in self.request.validated['tender'].awards if i.status in ['pending', 'active'] and i.bid_id == self.request.validated['bid_id']]:
self.request.errors.add('body', 'data', 'Can\'t add document because award of bid is not in pending or active state')
self.request.errors.status = 403
return
if self.context.status in ['invalid', 'unsuccessful', 'deleted']:
self.request.errors.add('body', 'data', 'Can\'t add document to \'{}\' bid'.format(self.context.status))
self.request.errors.status = 403
return
document = upload_file(self.request)
getattr(self.context, self.container).append(document)
if self.request.validated['tender_status'] == 'active.tendering':
self.request.validated['tender'].modified = False
if save_tender(self.request):
self.LOGGER.info('Created tender bid document {}'.format(document.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_bid_document_create'}, {'document_id': document.id}))
self.request.response.status = 201
document_route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})
return {'data': document.serialize("view")}
@json_view(permission='view_tender')
def get(self):
"""Tender Bid Document Read"""
is_bid_owner = self.request.authenticated_role == 'bid_owner'
if self.request.validated['tender_status'] in self.view_forbidden_states and not is_bid_owner:
self.request.errors.add('body', 'data', 'Can\'t view bid document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
if self.request.validated['bid'].status in self.view_forbidden_bid_states and self.request.authenticated_role != 'bid_owner':
self.request.errors.add('body', 'data', 'Can\'t view bid documents in current ({}) bid status'.format(self.request.validated['bid'].status))
self.request.errors.status = 403
return
document = self.request.validated['document']
if self.request.params.get('download'):
if self._doc_access_restricted(document):
self.request.errors.add('body', 'data', 'Document download forbidden.')
self.request.errors.status = 403
return
else:
return get_file(self.request)
document_data = document.serialize('restricted_view' if self._doc_access_restricted(document) else 'view')
document_data['previousVersions'] = [i.serialize('restricted_view') if self._doc_access_restricted(i) else i.serialize('view')
for i in self.request.validated['documents'] if i.url != document.url]
return {'data': document_data}
@json_view(content_type="application/json", validators=(validate_patch_document_data,), permission='edit_bid')
def patch(self):
"""Tender Bid Document Update"""
if self.request.validated['tender_status'] not in ['active.tendering', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
if self.request.validated['tender_status'] == 'active.tendering' and (tender.tenderPeriod.startDate and get_now() < tender.tenderPeriod.startDate or get_now() > tender.tenderPeriod.endDate):
self.request.errors.add('body', 'data', 'Document can be updated only during the tendering period: from ({}) to ({}).'.format(tender.tenderPeriod.startDate and tender.tenderPeriod.startDate.isoformat(), tender.tenderPeriod.endDate.isoformat()))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] in ['active.qualification', 'active.awarded'] and \
not [i for i in self.request.validated['tender'].awards if i.status in ['pending', 'active'] and i.bid_id == self.request.validated['bid_id']]:
self.request.errors.add('body', 'data', 'Can\'t update document because award of bid is not in pending or active state')
self.request.errors.status = 403
return
if self.request.validated['tender_status'] != 'active.tendering' and 'confidentiality' in self.request.validated['data']:
if self.context.confidentiality != self.request.validated['data']['confidentiality']:
self.request.errors.add('body', 'data', 'Can\'t update document confidentiality in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
bid = getattr(self.context, "__parent__")
if bid and bid.status in ['invalid', 'unsuccessful', 'deleted']:
self.request.errors.add('body', 'data', 'Can\'t update document data for \'{}\' bid'.format(bid.status))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] == 'active.tendering':
self.request.validated['tender'].modified = False
if apply_patch(self.request, src=self.request.context.serialize()):
update_file_content_type(self.request)
self.LOGGER.info('Updated tender bid document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_bid_document_patch'}))
return {'data': self.request.context.serialize("view")}
@json_view(validators=(validate_file_update,), permission='edit_bid')
def put(self):
"""Tender Bid Document Update"""
if self.request.validated['tender_status'] not in ['active.tendering', 'active.qualification', 'active.awarded']:
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
if self.request.validated['tender_status'] == 'active.tendering' and (tender.tenderPeriod.startDate and get_now() < tender.tenderPeriod.startDate or get_now() > tender.tenderPeriod.endDate):
self.request.errors.add('body', 'data', 'Document can be updated only during the tendering period: from ({}) to ({}).'.format(tender.tenderPeriod.startDate and tender.tenderPeriod.startDate.isoformat(), tender.tenderPeriod.endDate.isoformat()))
self.request.errors.status = 403
return
if self.request.validated['tender_status'] in ['active.qualification', 'active.awarded'] and \
not [i for i in self.request.validated['tender'].awards if i.status in ['pending', 'active'] and i.bid_id == self.request.validated['bid_id']]:
self.request.errors.add('body', 'data', 'Can\'t update document because award of bid is not in pending or active state')
self.request.errors.status = 403
return
if self.request.validated['tender_status'] != 'active.tendering' and 'confidentiality' in self.request.validated.get('data', {}):
if self.context.confidentiality != self.request.validated['data']['confidentiality']:
self.request.errors.add('body', 'data', 'Can\'t update document confidentiality in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
bid = getattr(self.context, "__parent__")
if bid and bid.status in ['invalid', 'unsuccessful', 'deleted']:
self.request.errors.add('body', 'data', 'Can\'t update document in \'{}\' bid'.format(bid.status))
self.request.errors.status = 403
return
document = upload_file(self.request)
getattr(self.request.validated['bid'], self.container).append(document)
if self.request.validated['tender_status'] == 'active.tendering':
self.request.validated['tender'].modified = False
if save_tender(self.request):
self.LOGGER.info('Updated tender bid document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_bid_document_put'}))
return {'data': document.serialize("view")}
@bid_financial_documents_resource(
name='Tender Two Stage Bid Financial Documents',
collection_path='/tenders/{tender_id}/bids/{bid_id}/financial_documents',
path='/tenders/{tender_id}/bids/{bid_id}/financial_documents/{document_id}',
procurementMethodType='aboveThresholdTS',
description="Tender Two Stage bidder financial documents")
class TenderTSBidFinancialDocumentResource(TenderTSBidDocumentResource):
""" Tender Two Stage Bid Financial Documents """
container = "financialDocuments"
view_forbidden_states = ['active.tendering', 'active.pre-qualification',
'active.pre-qualification.stand-still', 'active.auction']
view_forbidden_bid_states = ['invalid', 'deleted', 'invalid.pre-qualification', 'unsuccessful']
| apache-2.0 | 8,687,690,805,947,830,000 | 64.736585 | 256 | 0.65279 | false |
lamogui/ogre_blender_importer | OgreMeshFileFormat.py | 1 | 2168 | from enum import IntEnum;
class OgreMeshChunkID(IntEnum):
"""
Definition of the OGRE .mesh file format
.mesh files are binary files (for read efficiency at runtime) and are arranged into chunks
of data, very like 3D Studio's format.
A chunk always consists of:
unsigned short CHUNK_ID : one of the following chunk ids identifying the chunk
unsigned long LENGTH : length of the chunk in bytes, including this header
void* DATA : the data, which may contain other sub-chunks (various data types)
A .mesh file can contain both the definition of the Mesh itself, and optionally the definitions
of the materials is uses (although these can be omitted, if so the Mesh assumes that at runtime the
Materials referred to by name in the Mesh are loaded/created from another source)
A .mesh file only contains a single mesh, which can itself have multiple submeshes.
"""
M_HEADER = 0x1000;
M_MESH = 0x3000;
M_SUBMESH = 0x4000;
M_SUBMESH_OPERATION = 0x4010;
M_SUBMESH_BONE_ASSIGNMENT = 0x4100;
M_SUBMESH_TEXTURE_ALIAS = 0x4200;
M_GEOMETRY = 0x5000;
M_GEOMETRY_VERTEX_DECLARATION = 0x5100;
M_GEOMETRY_VERTEX_ELEMENT = 0x5110;
M_GEOMETRY_VERTEX_BUFFER = 0x5200;
M_GEOMETRY_VERTEX_BUFFER_DATA = 0x5210;
M_MESH_SKELETON_LINK = 0x6000;
M_MESH_BONE_ASSIGNMENT = 0x7000;
M_MESH_LOD_LEVEL = 0x8000;
M_MESH_LOD_USAGE = 0x8100;
M_MESH_LOD_MANUAL = 0x8110;
M_MESH_LOD_GENERATED = 0x8120;
M_MESH_BOUNDS = 0x9000;
M_SUBMESH_NAME_TABLE = 0xA000;
M_SUBMESH_NAME_TABLE_ELEMENT = 0xA100;
M_EDGE_LISTS = 0xB000;
M_EDGE_LIST_LOD = 0xB100;
M_EDGE_GROUP = 0xB110;
M_POSES = 0xC000;
M_POSE = 0xC100;
M_POSE_VERTEX = 0xC111;
M_ANIMATIONS = 0xD000;
M_ANIMATION = 0xD100;
M_ANIMATION_BASEINFO = 0xD105;
M_ANIMATION_TRACK = 0xD110;
M_ANIMATION_MORPH_KEYFRAME = 0xD111;
M_ANIMATION_POSE_KEYFRAME = 0xD112;
M_ANIMATION_POSE_REF = 0xD113;
M_TABLE_EXTREMES = 0xE000;
M_GEOMETRY_NORMALS = 0x5100;
M_GEOMETRY_COLOURS = 0x5200;
M_GEOMETRY_TEXCOORDS = 0x5300;
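# Added illustration (not part of the original file): the docstring above describes a
# chunk as an unsigned short CHUNK_ID followed by an unsigned long LENGTH.  A minimal
# sketch of reading one chunk header from a binary .mesh stream; the little-endian
# '<HI' layout (2 + 4 bytes) is an assumption of this example, not taken from the
# real importer code.
def _read_chunk_header_example(stream):
    """Return (chunk_id, chunk_length_in_bytes), or None at end of stream."""
    import struct
    header = stream.read(6)
    if len(header) < 6:
        return None
    chunk_id, length = struct.unpack('<HI', header)
    return chunk_id, length
# Usage sketch: with open('model.mesh', 'rb') as f: print(_read_chunk_header_example(f))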
| mit | 5,566,725,582,043,546,000 | 37.035088 | 106 | 0.674815 | false |
gbd-consult/CartoCSS-Export | CartoCSSExport/ce/error.py | 1 | 1066 | """Error codes."""
#: No layers in this project (loading error?)
EMPTY_PROJECT = 'EMPTY_PROJECT'
#: No converter for this Qgis class.
CLASS_NOT_IMPLEMENTED = 'CLASS_NOT_IMPLEMENTED'
#: No converter for this Qgis property
PROP_NOT_IMPLEMENTED = 'PROP_NOT_IMPLEMENTED'
#: No converter for this Qgis data provider
DATA_PROVIDER_NOT_IMPLEMENTED = 'DATA_PROVIDER_NOT_IMPLEMENTED'
#: No converter for this Qgis measurement unit
UNIT_NOT_IMPLEMENTED = 'UNIT_NOT_IMPLEMENTED'
#: No converter for this Qgis value
VALUE_NOT_IMPLEMENTED = 'VALUE_NOT_IMPLEMENTED'
#: Expression is not supported in CartoCSS
EXPRESSION_NOT_SUPPORTED = 'EXPRESSION_NOT_SUPPORTED'
#: Empty expression
EMPTY_EXPRESSION = 'EMPTY_EXPRESSION'
#: Invalid number
INVALID_NUMBER = 'INVALID_NUMBER'
#: Invalid color specification
INVALID_COLOR = 'INVALID_COLOR'
#: Invalid field specification, only identifier fields are supported
INVALID_FIELD = 'INVALID_FIELD'
#: Unknown CSS property
INVALID_CSS_PROP = 'INVALID_CSS_PROP'
#: expression too complex
COMPLEX_EXPRESSION = 'COMPLEX_EXPRESSION'
| gpl-2.0 | 356,750,921,144,582,340 | 25.65 | 68 | 0.763602 | false |
wolfbeacon/wolfbeacon-core-api | api/migrations/0008_auto_20171230_0919.py | 1 | 2732 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-30 09:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0007_mentor_organizer_volunteer'),
]
operations = [
migrations.AlterField(
model_name='event',
name='location',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='event',
name='tagline',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='hackathon',
name='location',
field=models.CharField(max_length=250),
),
migrations.AlterField(
model_name='hackathon',
name='name',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='hackathon',
name='shipping_address',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='hackathon',
name='university_name',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='user',
name='about_me',
field=models.CharField(max_length=1000, null=True),
),
migrations.AlterField(
model_name='user',
name='city',
field=models.CharField(max_length=75),
),
migrations.AlterField(
model_name='user',
name='country',
field=models.CharField(max_length=75),
),
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='user',
name='last_name',
field=models.CharField(max_length=50),
),
migrations.AlterField(
model_name='user',
name='major_of_study',
field=models.CharField(max_length=100),
),
migrations.AlterField(
model_name='user',
name='school_last_attended',
field=models.CharField(max_length=100, null=True),
),
migrations.AlterField(
model_name='user',
name='special_accommodations',
field=models.CharField(max_length=250, null=True),
),
migrations.AlterField(
model_name='user',
name='street_address',
field=models.CharField(max_length=100, null=True),
),
]
| gpl-3.0 | -4,158,574,478,643,937,000 | 29.355556 | 63 | 0.528917 | false |
mic4ael/indico | indico/modules/events/abstracts/lists.py | 1 | 12697 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from collections import OrderedDict
from operator import attrgetter
from flask import flash, request, session
from sqlalchemy.orm import joinedload, subqueryload
from indico.core.db import db
from indico.modules.events.abstracts.models.abstracts import Abstract, AbstractState
from indico.modules.events.abstracts.models.fields import AbstractFieldValue
from indico.modules.events.abstracts.models.reviews import AbstractReview
from indico.modules.events.contributions.models.fields import ContributionField
from indico.modules.events.tracks.models.tracks import Track
from indico.modules.events.util import ListGeneratorBase
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
class AbstractListGeneratorBase(ListGeneratorBase):
"""Listing and filtering actions in an abstract list."""
show_contribution_fields = True
def __init__(self, event):
super(AbstractListGeneratorBase, self).__init__(event)
self.default_list_config = {
'items': (),
'filters': {'fields': {}, 'items': {}, 'extra': {}}
}
track_empty = {None: _('No track')}
type_empty = {None: _('No type')}
track_choices = OrderedDict((unicode(t.id), t.title) for t in sorted(self.event.tracks,
key=attrgetter('title')))
type_choices = OrderedDict((unicode(t.id), t.name) for t in sorted(self.event.contribution_types,
key=attrgetter('name')))
self.static_items = OrderedDict([
('state', {'title': _('State'), 'filter_choices': {state.value: state.title for state in AbstractState}}),
('submitter', {'title': _('Submitter')}),
('authors', {'title': _('Primary authors')}),
('accepted_track', {'title': _('Accepted track'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('submitted_for_tracks', {'title': _('Submitted for tracks'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('reviewed_for_tracks', {'title': _('Reviewed for tracks'),
'filter_choices': OrderedDict(track_empty.items() + track_choices.items())}),
('accepted_contrib_type', {'title': _('Accepted type'),
'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
('submitted_contrib_type', {'title': _('Submitted type'),
'filter_choices': OrderedDict(type_empty.items() + type_choices.items())}),
('score', {'title': _('Score')}),
('submitted_dt', {'title': _('Submission date')}),
('modified_dt', {'title': _('Modification date')})
])
self.extra_filters = {}
self.list_config = self._get_config()
def _get_static_columns(self, ids):
"""
Retrieve information needed for the header of the static columns.
:return: a list of {'id': ..., 'caption': ...} dicts
"""
return [{'id': id_, 'caption': self.static_items[id_]['title']} for id_ in self.static_items if id_ in ids]
def get_all_contribution_fields(self):
"""Return the list of contribution fields for the event"""
return self.event.contribution_fields if self.show_contribution_fields else []
def _get_sorted_contribution_fields(self, item_ids):
"""Return the contribution fields ordered by their position in the abstract form."""
if not item_ids or not self.show_contribution_fields:
return []
return (ContributionField.query
.with_parent(self.event)
.filter(ContributionField.id.in_(item_ids))
.order_by(ContributionField.position)
.all())
def _get_filters_from_request(self):
filters = super(AbstractListGeneratorBase, self)._get_filters_from_request()
for field in self.event.contribution_fields:
if field.field_type == 'single_choice':
options = request.form.getlist('field_{}'.format(field.id))
if options:
filters['fields'][unicode(field.id)] = options
return filters
def _build_query(self):
return (Abstract.query
.with_parent(self.event)
.options(joinedload('submitter'),
joinedload('accepted_track'),
joinedload('accepted_contrib_type'),
joinedload('submitted_contrib_type'),
joinedload('contribution').load_only('id', 'event_id'),
subqueryload('field_values'),
subqueryload('submitted_for_tracks'),
subqueryload('reviewed_for_tracks'),
subqueryload('person_links'),
subqueryload('reviews').joinedload('ratings'))
.order_by(Abstract.friendly_id))
def _filter_list_entries(self, query, filters):
criteria = []
field_filters = filters.get('fields')
item_filters = filters.get('items')
extra_filters = filters.get('extra')
if not (field_filters or item_filters or extra_filters):
return query
if field_filters:
for contribution_type_id, field_values in field_filters.iteritems():
criteria.append(Abstract.field_values.any(db.and_(
AbstractFieldValue.contribution_field_id == contribution_type_id,
AbstractFieldValue.data.op('#>>')('{}').in_(field_values)
)))
if item_filters:
static_filters = {
'accepted_track': Abstract.accepted_track_id,
'accepted_contrib_type': Abstract.accepted_contrib_type_id,
'submitted_contrib_type': Abstract.submitted_contrib_type_id,
'submitted_for_tracks': Abstract.submitted_for_tracks,
'reviewed_for_tracks': Abstract.reviewed_for_tracks
}
for key, column in static_filters.iteritems():
ids = set(item_filters.get(key, ()))
if not ids:
continue
column_criteria = []
if '_for_tracks' in key:
if None in ids:
column_criteria.append(~column.any())
ids.discard(None)
if ids:
column_criteria.append(column.any(Track.id.in_(ids)))
else:
if None in ids:
column_criteria.append(column.is_(None))
ids.discard(None)
if ids:
column_criteria.append(column.in_(ids))
criteria.append(db.or_(*column_criteria))
if 'state' in item_filters:
states = [AbstractState(int(state)) for state in item_filters['state']]
criteria.append(Abstract.state.in_(states))
if extra_filters:
if extra_filters.get('multiple_tracks'):
submitted_for_count = (db.select([db.func.count()])
.as_scalar()
.where(Abstract.submitted_for_tracks.prop.primaryjoin))
criteria.append(submitted_for_count > 1)
if extra_filters.get('comments'):
criteria.append(Abstract.submission_comment != '')
return query.filter(db.and_(*criteria))
def get_list_kwargs(self):
list_config = self._get_config()
abstracts_query = self._build_query()
total_entries = abstracts_query.count()
abstracts = self._filter_list_entries(abstracts_query, list_config['filters']).all()
dynamic_item_ids, static_item_ids = self._split_item_ids(list_config['items'], 'dynamic')
static_columns = self._get_static_columns(static_item_ids)
dynamic_columns = self._get_sorted_contribution_fields(dynamic_item_ids)
return {
'abstracts': abstracts,
'total_abstracts': total_entries,
'static_columns': static_columns,
'dynamic_columns': dynamic_columns,
'filtering_enabled': total_entries != len(abstracts)
}
def get_list_export_config(self):
list_config = self._get_config()
static_item_ids, dynamic_item_ids = self._split_item_ids(list_config['items'], 'static')
return {
'static_item_ids': static_item_ids,
'dynamic_items': self._get_sorted_contribution_fields(dynamic_item_ids)
}
def render_list(self, abstract=None):
list_kwargs = self.get_list_kwargs()
tpl = get_template_module('events/abstracts/management/_abstract_list.html')
filtering_enabled = list_kwargs.pop('filtering_enabled')
tpl_lists = get_template_module('events/management/_lists.html')
filter_statistics = tpl_lists.render_displayed_entries_fragment(len(list_kwargs['abstracts']),
list_kwargs['total_abstracts'])
return {
'html': tpl.render_abstract_list(**list_kwargs),
'filtering_enabled': filtering_enabled,
'filter_statistics': filter_statistics,
'hide_abstract': abstract not in list_kwargs['abstracts'] if abstract else None
}
def flash_info_message(self, abstract):
flash(_("The abstract '{}' is not displayed in the list due to the enabled filters")
.format(abstract.title), 'info')
class AbstractListGeneratorManagement(AbstractListGeneratorBase):
"""Listing and filtering actions in the abstract list in the management view"""
list_link_type = 'abstract_management'
endpoint = '.manage_abstract_list'
def __init__(self, event):
super(AbstractListGeneratorManagement, self).__init__(event)
self.default_list_config['items'] = ('submitted_contrib_type', 'accepted_contrib_type', 'state')
if event.tracks:
self.default_list_config['items'] += ('submitted_for_tracks', 'reviewed_for_tracks', 'accepted_track')
self.extra_filters = OrderedDict([
('multiple_tracks', {'title': _('Proposed for multiple tracks'), 'type': 'bool'}),
('comments', {'title': _('Must have comments'), 'type': 'bool'})
])
class AbstractListGeneratorDisplay(AbstractListGeneratorBase):
"""Listing and filtering actions in the abstract list in the display view"""
list_link_type = 'abstract_display'
endpoint = '.display_reviewable_track_abstracts'
show_contribution_fields = False
def __init__(self, event, track):
super(AbstractListGeneratorDisplay, self).__init__(event)
self.track = track
self.default_list_config['items'] = ('accepted_contrib_type', 'state')
items = {'submitted_contrib_type', 'submitter', 'accepted_contrib_type', 'state'}
if self.track.can_convene(session.user):
items.add('score')
self.static_items = OrderedDict((key, value)
for key, value in self.static_items.iteritems()
if key in items)
def _build_query(self):
return (super(AbstractListGeneratorDisplay, self)._build_query()
.filter(Abstract.state != AbstractState.invited,
Abstract.reviewed_for_tracks.contains(self.track)))
def get_user_reviewed_abstracts_for_track(self, user, track):
return (Abstract.query
.join(Abstract.reviews)
.filter(AbstractReview.user == user,
Abstract.state != AbstractState.invited,
Abstract.reviewed_for_tracks.contains(track),
~Abstract.is_deleted)
.all())
def get_list_kwargs(self):
kwargs = super(AbstractListGeneratorDisplay, self).get_list_kwargs()
kwargs['reviewed_abstracts'] = self.get_user_reviewed_abstracts_for_track(session.user, self.track)
return kwargs
| mit | -2,068,415,674,711,308,500 | 47.277567 | 118 | 0.579349 | false |
symbolicdata/code | src/sdeval/classes/templates/comp/GB_Z_lp/Maple/template.py | 1 | 1407 | """
This is the template for the computation problem of computing a Groebner basis of an ideal
generated by a finite set of polynomials with integer coefficients (commutative). It creates
code for the computer algebra system Maple.
.. moduleauthor:: Albert Heinle <[email protected]>
"""
#--------------------------------------------------
#---------------The template-----------------------
#--------------------------------------------------
def generateCode(vars, basis):
"""
The main function generating the Maple code for the computation of
the Groebner basis given the input variables.
:param vars: A list of variables used in the IntPS-System
:type vars: list
    :param basis: The polynomials forming a basis of the IntPS-System. This input is not checked for
                  polynomials that use variables outside the given list of variables.
:type basis: list
"""
result = "\
with(Groebner):\n\
Ideal := {%s}:\n\
ordering := plex(%s):\n\
B := Basis(Ideal, ordering):\n\
printf(\"=====Solution Begin=====\");\n\
printf(\"%%a\\n\",B);\n\
printf(\"=====Solution End=====\");\n\
quit;\
" % (",".join(basis),
",".join(vars))
return result
#--------------------------------------------------
#----------------Help Functions--------------------
#--------------------------------------------------
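#--------------------------------------------------
#----------------Usage example---------------------
#--------------------------------------------------
# Added example (not part of the original template): the returned string is Maple
# code computing a Groebner basis w.r.t. the pure lexicographic order on the given
# variables.  The two-variable ideal below is a hypothetical illustration.
if __name__ == "__main__":
    example_vars = ["x", "y"]
    example_basis = ["x^2 + y^2 - 1", "x - y"]
    print(generateCode(example_vars, example_basis))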
| gpl-3.0 | 8,111,292,419,854,983,000 | 35.076923 | 116 | 0.518834 | false |
schinmayee/metric-learning | losses.py | 1 | 1953 | import torch
import torch.nn as nn
import torch.nn.functional as F
def SimpleHingeLoss(dista, distb, distc, target, margin, hard_triplet=False):
if hard_triplet:
dist_neg = torch.cat([distb, distc], dim=1)
dist_neg = torch.min(dist_neg, dim=1)[0]
else:
dist_neg = distb
return nn.MarginRankingLoss(margin = margin)(dista, dist_neg, target)
def SimpleSquareHingeLoss(dista, distb, distc, target, margin, hard_triplet=False):
if hard_triplet:
dist_neg = torch.cat([distb, distc], dim=1)
dist_neg = torch.min(dist_neg, dim=1)[0]
else:
dist_neg = distb
return nn.MarginRankingLoss(margin = margin)(torch.pow(dista, 2), torch.pow(dist_neg, 2), target)
def RatioLoss(dista, distb, distc, target, margin, hard_triplet=False):
if hard_triplet:
dist_neg = torch.cat([distb, distc], dim=1)
dist_neg = torch.min(dist_neg, dim=1)[0]
else:
dist_neg = distb
ep = torch.exp(dista)
en = torch.exp(dist_neg)
t1 = ep/(ep+en)
t2 = en/(ep+en)
loss = torch.mean(torch.pow(t1, 2) + 1 - torch.pow(t2, 2))
return loss
def EmbHingeLoss(emba, embb, embc, margin, target):
triplet_loss = nn.functional.triplet_margin_loss(
emba, embb, embc, margin=margin)
return triplet_loss
def EmbSquareHingeLoss(emba, embb, embc, margin, target):
dist_pos = F.pairwise_distance(emba, embb, 2)
dist_neg = F.pairwise_distance(emba, embc, 2)
triplet_loss = nn.MarginRankingLoss(margin = margin)(torch.pow(dist_pos, 2), torch.pow(dist_neg, 2), target)
return triplet_loss
def EmbSoftHingeLoss(emba, embb, embc, margin, target):
dist_pos = F.pairwise_distance(emba, embb, 2)
dist_neg1 = F.pairwise_distance(emba, embc, 2)
dist_neg2 = F.pairwise_distance(embb, embc, 2)
dist_neg_s = (torch.exp(margin - dist_neg1) + torch.exp(margin - dist_neg2))
loss = torch.mean(torch.log(dist_neg_s) + dist_pos)
return loss
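# Added usage sketch (not part of the original module): the Emb* losses take raw
# (anchor, positive, negative) embeddings, while the Simple*/Ratio losses expect
# precomputed pairwise distances plus a MarginRankingLoss-style target tensor.
# The shapes and margin below are illustrative assumptions.
if __name__ == '__main__':
    anchor = torch.randn(8, 128)
    positive = torch.randn(8, 128)
    negative = torch.randn(8, 128)
    loss = EmbHingeLoss(anchor, positive, negative, margin=0.2, target=None)
    print(loss.item())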
| mit | -4,633,895,436,246,591,000 | 37.294118 | 112 | 0.65745 | false |
AntonSax/plantcv | utils/util-avg_background_img.py | 1 | 1578 | #!/usr/bin/env python
import argparse
import numpy as np
import sys, os
from os import listdir
import plantcv as pcv
import datetime
### Parse command-line arguments
def options():
parser = argparse.ArgumentParser(description="Get images from an SQLite database and some input information")
parser.add_argument("-d", "--directory", help="path to directory of images to average.")
parser.add_argument("-o", "--outdir", help="Output directory.", required=False)
args = parser.parse_args()
return args
### Functions
def average_all_img(directory,outdir):
allfiles=os.listdir(directory)
path=str(directory)
allpaths=[]
for files in allfiles:
p=path+str(files)
allpaths.append(p)
img, path, filename = pcv.readimage(allpaths[0])
n=len(allpaths)
if len(np.shape(img))==3:
ix,iy,iz=np.shape(img)
arr=np.zeros((ix,iy,iz),np.float)
else:
ix,iy=np.shape(img)
        arr=np.zeros((ix,iy),np.float)
# Build up average pixel intensities, casting each image as an array of floats
for i,paths in enumerate(allpaths):
img,path,filename=pcv.readimage(allpaths[i])
imarr=np.array(img,dtype=np.float)
arr=arr+imarr/n
#Round values in array and cast as 8-bit integer
arr=np.array(np.round(arr),dtype=np.uint8)
pcv.print_image(arr, (str(outdir)+"average_"+str(allfiles[0])))
### Main pipeline
def main():
# Get options
args = options()
average_all_img(args.directory, args.outdir)
if __name__ == '__main__':
main() | mit | 1,587,402,601,414,127,600 | 22.924242 | 111 | 0.652725 | false |
digibodies/auth_core | tests/api_tests/test_access_token.py | 1 | 7722 | import mock
import datetime
from tests import TestCaseBase
from auth_core.internal.entities import AuthUserEntity, AuthUserMethodEntity
from auth_core.models import AuthUser
from auth_core.appengine_tools import get_resource_id_from_key
from auth_core.api import access_tokens as access_tokens_api
from auth_core.errors import AuthenticationError
class CreateAccessTokenTests(TestCaseBase):
def test_simple(self):
# Test to ensure that we can generate a JWT and it contains 2 separators
access_token = access_tokens_api.create_access_token({})
self.assertEqual(access_token.count('.'), 2)
@mock.patch('auth_core.api.access_tokens._make_expiration_date')
@mock.patch('auth_core.api.access_tokens.datetime')
@mock.patch('auth_core.api.access_tokens.jwt')
def test_mocked(self, m_jwt, m_datetime, m_expiration):
expected_iat = mock.Mock(name='mock_utcnow')
expected_exp = mock.Mock(name='mock_later')
m_datetime.datetime.utcnow.return_value = expected_iat
m_expiration.return_value = expected_exp
result = access_tokens_api.create_access_token({'foo': 'bar'})
self.assertEqual(result, m_jwt.encode.return_value)
claims = {'iss': 'jwt_issuer',
'iat': expected_iat,
'data': {'foo': 'bar'},
'aud': 'jwt_aud',
'exp': expected_exp
}
m_jwt.encode.assert_called_once_with(*[claims, 'jwt_secret'], **{'algorithm': 'HS256'})
m_expiration.assert_called_once_with(expected_iat, 60*60)
@mock.patch('auth_core.api.access_tokens._make_expiration_date')
@mock.patch('auth_core.api.access_tokens.datetime')
class ReadAccessTokenTests(TestCaseBase):
def test_simple(self, m_datetime, m_expiration):
# Setup Tests
mock_iat = datetime.datetime.utcnow()
mock_exp = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
m_datetime.datetime.utcnow.return_value = mock_iat
m_expiration.return_value = mock_exp
access_token = access_tokens_api.create_access_token({'foo': 'bar'})
# Run Code to Test
result = access_tokens_api.read_access_token(access_token)
# Check Results
self.assertEquals(result, {u'foo': u'bar'})
def test_expired(self, m_datetime, m_expiration):
# Create a token created 60 seconds ago and expired 30 seconds ago
mock_iat = datetime.datetime.utcnow() - datetime.timedelta(seconds=60)
mock_exp = datetime.datetime.utcnow() - datetime.timedelta(seconds=30)
m_datetime.datetime.utcnow.return_value = mock_iat
m_expiration.return_value = mock_exp
access_token = access_tokens_api.create_access_token({'foo': 'bar'})
# Run Code to Test
self.assertRaises(AuthenticationError, access_tokens_api.read_access_token, access_token)
def test_bad_secret(self, m_datetime, m_expiration):
mock_iat = datetime.datetime.utcnow()
mock_exp = datetime.datetime.utcnow() + datetime.timedelta(seconds=30)
m_datetime.datetime.utcnow.return_value = mock_iat
m_expiration.return_value = mock_exp
access_token = access_tokens_api.create_access_token({'foo': 'bar'})
with mock.patch('auth_core.api.access_tokens.auth_settings') as m_settings:
m_settings.JWT_SECRET = 'different_secret'
self.assertRaises(AuthenticationError,
access_tokens_api.read_access_token,
access_token)
class MakeTokenUserDataDictTests(TestCaseBase):
def test_base(self):
# Setup Test
user_key = AuthUserEntity(username="testUser1").put()
user_id = get_resource_id_from_key(user_key)
login_key = AuthUserMethodEntity(key=AuthUserMethodEntity.generate_key(user_key, 'basic', user_id),
auth_type='basic',
auth_key=user_id,
auth_data='hashed_password:salt',
user_key=user_key).put()
# Run Code to Test
result = access_tokens_api.make_token_user_data_dict(user_key.get(),
login_key.get(),
version=1)
# Check Results
self.assertDictEqual(result, {'login_type': u'basic',
'version': 1,
'login_key': user_id,
'id': user_id})
class GetUserAndLoginFromAccessTokenTests(TestCaseBase):
def test_base(self):
# Setup Test
user_key = AuthUserEntity(username="testUser1").put()
user_id = get_resource_id_from_key(user_key)
login_key = AuthUserMethodEntity(key=AuthUserMethodEntity.generate_key(user_key, 'basic', user_id),
auth_type='basic',
auth_key=user_id,
auth_data='hashed_password:salt',
user_key=user_key).put()
user_data = {'id': user_id, 'login_key': user_id, 'login_type': 'basic', 'version': 1}
access_token = access_tokens_api.create_access_token(user_data)
# Run Code To Test
result = access_tokens_api.get_user_and_login_from_access_token(access_token)
# Check results
self.assertEquals(len(result), 2)
self.assertTrue(isinstance(result[0], AuthUser))
self.assertEquals(result[0].id, get_resource_id_from_key(user_key))
self.assertTrue(isinstance(result[1], AuthUserMethodEntity))
self.assertEquals(result[1].key, login_key)
def test_empty_dict(self):
access_token = access_tokens_api.create_access_token({})
self.assertRaises(AuthenticationError, access_tokens_api.get_user_and_login_from_access_token, access_token)
def test_invalid_user_id(self):
user_key = AuthUserEntity(username="testUser1").put()
user_id = get_resource_id_from_key(user_key)
login_key = AuthUserMethodEntity(key=AuthUserMethodEntity.generate_key(user_key, 'basic', user_id),
auth_type='basic',
auth_key=user_id,
auth_data='hashed_password:salt',
user_key=user_key).put()
user_data = {'id': 'invalid', 'login_key': user_id, 'login_type': 'basic', 'version': 1}
access_token = access_tokens_api.create_access_token(user_data)
self.assertRaises(AuthenticationError, access_tokens_api.get_user_and_login_from_access_token, access_token)
def test_invalid_login(self):
user_key = AuthUserEntity(username="testUser1").put()
user_id = get_resource_id_from_key(user_key)
login_key = AuthUserMethodEntity(key=AuthUserMethodEntity.generate_key(user_key, 'basic', user_id),
auth_type='basic',
auth_key=user_id,
auth_data='hashed_password:salt',
user_key=user_key).put()
user_data = {'id': user_id, 'login_key': user_id, 'login_type': 'invalid_type', 'version': 1}
access_token = access_tokens_api.create_access_token(user_data)
        self.assertRaises(AuthenticationError, access_tokens_api.get_user_and_login_from_access_token, access_token)
class MakeExpirationDateTests(TestCaseBase):
def test_base(self):
dt = datetime.datetime(1982, 9, 2, 0, 0, 0)
result = access_tokens_api._make_expiration_date(dt, 30)
delta = result - dt
self.assertEquals(30, delta.seconds)
| mit | -8,616,054,429,391,284 | 43.37931 | 116 | 0.604766 | false |
tatyankaZSGX/addressbook | fixture/application.py | 1 | 1228 | __author__ = 'ZSGX'
from selenium import webdriver
from fixture.Session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
def __init__(self, browser, homeurl):
if browser == "firefox":
self.wd = webdriver.Firefox()
elif browser == "chrome":
self.wd = webdriver.Chrome()
elif browser == "ie":
self.wd = webdriver.Ie()
else:
raise ValueError("Browser %s is not recognized" % browser)
self.session = SessionHelper(self)
self.group = GroupHelper(self)
self.contact = ContactHelper(self)
self.homeurl = homeurl
def is_valid(self):
try:
self.wd.current_url
return True
except:
return False
def open_home_page(self):
wd = self.wd
wd.get(self.homeurl)
def destroy(self):
self.wd.quit()
def edit_field(self, field_name, text):
wd = self.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text) | apache-2.0 | -6,081,401,815,482,364,000 | 27.581395 | 70 | 0.588762 | false |
jic-dtool/dtool-create | dtool_create/dataset.py | 1 | 18296 | """Commands for creating datasets."""
import sys
import os
import getpass
import datetime
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import click
import dtoolcore
import dtoolcore.storagebroker
import dtoolcore.utils
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from ruamel.yaml.parser import ParserError
from ruamel.yaml.constructor import DuplicateKeyError
from ruamel.yaml.scalarfloat import ScalarFloat
from ruamel.yaml.scanner import ScannerError
from dtool_cli.cli import (
base_dataset_uri_argument,
proto_dataset_uri_argument,
dataset_uri_argument,
CONFIG_PATH,
)
from dtool_create.utils import valid_handle
_HERE = os.path.dirname(__file__)
_TEMPLATE_DIR = os.path.join(_HERE, "templates")
README_TEMPLATE_FPATH = os.path.join(_TEMPLATE_DIR, "README.yml")
def _get_readme_template(fpath=None):
if fpath is None:
fpath = dtoolcore.utils.get_config_value(
"DTOOL_README_TEMPLATE_FPATH",
CONFIG_PATH
)
if fpath is None:
fpath = README_TEMPLATE_FPATH
with open(fpath) as fh:
readme_template = fh.read()
user_email = dtoolcore.utils.get_config_value(
"DTOOL_USER_EMAIL",
CONFIG_PATH,
"[email protected]"
)
user_full_name = dtoolcore.utils.get_config_value(
"DTOOL_USER_FULL_NAME",
CONFIG_PATH,
"Your Name"
)
readme_template = readme_template.format(
username=getpass.getuser(),
DTOOL_USER_FULL_NAME=user_full_name,
DTOOL_USER_EMAIL=user_email,
date=datetime.date.today(),
)
return readme_template
def _prompt_for_values(d):
"""Update the descriptive metadata interactively.
    Uses values entered by the user. Note that the function keeps recursing
    whenever a value is another ``CommentedMap`` or a ``list``. The
    function works because dictionaries and lists passed into a function
    are edited in place.
"""
for key, value in d.items():
if isinstance(value, CommentedMap):
_prompt_for_values(value)
elif isinstance(value, list):
for item in value:
_prompt_for_values(item)
elif isinstance(value, datetime.date):
def parse_date(value):
try:
date = datetime.datetime.strptime(value, "%Y-%m-%d")
except ValueError as e:
raise click.BadParameter(
"Could not parse date, {}".format(e), param=value)
return date
new_value = click.prompt(key, default=value, value_proc=parse_date)
if isinstance(new_value, datetime.date):
d[key] = new_value
else:
d[key] = new_value.date()
else:
typ = type(value)
if isinstance(value, ScalarFloat): # Deal with ruamel.yaml floats.
typ = float
new_value = click.prompt(key, type=typ, default=value)
d[key] = new_value
return d
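# Illustration (added; not part of the original module): because CommentedMap and list
# values are edited in place, the caller's object holds the answers afterwards, e.g.
#
#     yaml = YAML()
#     metadata = yaml.load("project: my project\nowners:\n  - name: Your Name\n")
#     _prompt_for_values(metadata)  # prompts for "project", then for each owner "name"
#     # `metadata` now contains whatever was typed at the prompts.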
def _validate_name(name):
if not dtoolcore.utils.name_is_valid(name):
click.secho("Invalid dataset name '{}'".format(name), fg="red")
click.secho(
"Name must be 80 characters or less",
)
click.secho(
"Dataset names may only contain the characters: {}".format(
" ".join(dtoolcore.utils.NAME_VALID_CHARS_LIST)
),
)
click.secho("Example: field-trial-scores-T8.3")
sys.exit(6)
@click.command()
@click.option("--quiet", "-q", is_flag=True, help="Only return new URI")
@click.argument("name")
@click.argument("base_uri", default="")
@click.option("--symlink-path", "-s", type=click.Path(exists=True))
def create(quiet, name, base_uri, symlink_path):
"""Create a proto dataset."""
_validate_name(name)
admin_metadata = dtoolcore.generate_admin_metadata(name)
parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)
if parsed_base_uri.scheme == "symlink":
if symlink_path is None:
raise click.UsageError("Need to specify symlink path using the -s/--symlink-path option") # NOQA
if symlink_path:
base_uri = dtoolcore.utils.sanitise_uri(
"symlink:" + parsed_base_uri.path
)
parsed_base_uri = dtoolcore.utils.generous_parse_uri(base_uri)
# Create the dataset.
proto_dataset = dtoolcore.generate_proto_dataset(
admin_metadata=admin_metadata,
base_uri=dtoolcore.utils.urlunparse(parsed_base_uri),
config_path=CONFIG_PATH)
# If we are creating a symlink dataset we need to set the symlink_path
# attribute on the storage broker.
if symlink_path:
symlink_abspath = os.path.abspath(symlink_path)
proto_dataset._storage_broker.symlink_path = symlink_abspath
try:
proto_dataset.create()
except dtoolcore.storagebroker.StorageBrokerOSError as err:
raise click.UsageError(str(err))
proto_dataset.put_readme("")
if quiet:
click.secho(proto_dataset.uri)
else:
# Give the user some feedback and hints on what to do next.
click.secho("Created proto dataset ", nl=False, fg="green")
click.secho(proto_dataset.uri)
click.secho("Next steps: ")
step = 1
if parsed_base_uri.scheme != "symlink":
click.secho("{}. Add raw data, eg:".format(step))
click.secho(
" dtool add item my_file.txt {}".format(proto_dataset.uri),
fg="cyan")
if parsed_base_uri.scheme == "file":
# Find the abspath of the data directory for user feedback.
data_path = proto_dataset._storage_broker._data_abspath
click.secho(" Or use your system commands, e.g: ")
click.secho(
" mv my_data_directory {}/".format(data_path),
fg="cyan"
)
step = step + 1
click.secho("{}. Add descriptive metadata, e.g: ".format(step))
click.secho(
" dtool readme interactive {}".format(proto_dataset.uri),
fg="cyan")
step = step + 1
click.secho(
"{}. Convert the proto dataset into a dataset: ".format(step)
)
click.secho(" dtool freeze {}".format(proto_dataset.uri), fg="cyan")
@click.command()
@base_dataset_uri_argument
@click.argument("new_name", default="")
def name(dataset_uri, new_name):
"""
Report / update the name of the dataset.
It is only possible to update the name of a proto dataset,
i.e. a dataset that has not yet been frozen.
"""
if new_name != "":
_validate_name(new_name)
try:
dataset = dtoolcore.ProtoDataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
except dtoolcore.DtoolCoreTypeError:
dataset = dtoolcore.DataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
dataset.update_name(new_name)
admin_metadata = dtoolcore._admin_metadata_from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
click.secho(admin_metadata["name"])
@click.group()
def readme():
"""Edit / show readme content.
The readme content is descriptive metadata describing the dataset.
"""
@readme.command()
@proto_dataset_uri_argument
def interactive(proto_dataset_uri):
"""Interactive prompting to populate the readme."""
proto_dataset = dtoolcore.ProtoDataSet.from_uri(
uri=proto_dataset_uri,
config_path=CONFIG_PATH)
# Create an CommentedMap representation of the yaml readme template.
readme_template = _get_readme_template()
yaml = YAML()
yaml.explicit_start = True
yaml.indent(mapping=2, sequence=4, offset=2)
descriptive_metadata = yaml.load(readme_template)
descriptive_metadata = _prompt_for_values(descriptive_metadata)
# Write out the descriptive metadata to the readme file.
stream = StringIO()
yaml.dump(descriptive_metadata, stream)
proto_dataset.put_readme(stream.getvalue())
click.secho("Updated readme ", fg="green")
click.secho("To edit the readme using your default editor:")
click.secho(
"dtool readme edit {}".format(proto_dataset_uri),
fg="cyan")
def _validate_readme(readme_content):
"""Return (YAML string, error message)."""
yaml = YAML()
# Ensure that the content is valid YAML.
try:
readme_formatted = yaml.load(readme_content)
return readme_formatted, None
except (ParserError, DuplicateKeyError, ScannerError) as message:
readme_formatted = None
return None, str(message)
def _validate_and_put_readme(dataset, readme_content):
# Create YAML object to standardise the output formatting.
yaml = YAML()
yaml.explicit_start = True
yaml.indent(mapping=2, sequence=4, offset=2)
# Validate the YAML.
readme_formatted, message = _validate_readme(readme_content)
if message is not None:
click.secho("Error: Invalid YAML", fg="red")
click.secho(str(message))
click.secho("Did not update readme ", fg="red")
sys.exit(5)
# Write out formatted YAML.
stream = StringIO()
yaml.dump(readme_formatted, stream)
dataset.put_readme(stream.getvalue())
@readme.command()
@base_dataset_uri_argument
def edit(dataset_uri):
"""Default editor updating of readme content.
"""
try:
dataset = dtoolcore.ProtoDataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
except dtoolcore.DtoolCoreTypeError:
dataset = dtoolcore.DataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
readme_content = dataset.get_readme_content()
try:
# Python2 compatibility.
readme_content = unicode(readme_content, "utf-8")
except NameError:
pass
edited_content = click.edit(readme_content, extension=".yml")
if edited_content is not None:
_validate_and_put_readme(dataset, edited_content)
click.secho("Updated readme ", nl=False, fg="green")
else:
click.secho("Did not update readme ", nl=False, fg="red")
click.secho(dataset_uri)
@readme.command()
@base_dataset_uri_argument
def show(dataset_uri):
"""Show the descriptive metadata in the readme."""
try:
dataset = dtoolcore.ProtoDataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
except dtoolcore.DtoolCoreTypeError:
dataset = dtoolcore.DataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
readme_content = dataset.get_readme_content()
click.secho(readme_content)
@readme.command()
@base_dataset_uri_argument
def validate(dataset_uri):
"""Validate that the README is valid YAML.
"""
try:
dataset = dtoolcore.ProtoDataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
except dtoolcore.DtoolCoreTypeError:
dataset = dtoolcore.DataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
readme_content = dataset.get_readme_content()
try:
# Python2 compatibility.
readme_content = unicode(readme_content, "utf-8")
except NameError:
pass
_, message = _validate_readme(readme_content)
if message is not None:
click.secho("Invalid YAML", fg="red")
click.secho(str(message))
else:
click.secho("All good! :)", fg="green")
@readme.command()
@base_dataset_uri_argument
@click.argument('input', type=click.File('r'))
def write(dataset_uri, input):
"""Use YAML from a file or stdin to populate the readme.
To stream content from stdin use "-", e.g.
echo "desc: my data" | dtool readme write <DS_URI> -
"""
try:
dataset = dtoolcore.ProtoDataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
except dtoolcore.DtoolCoreTypeError:
dataset = dtoolcore.DataSet.from_uri(
uri=dataset_uri,
config_path=CONFIG_PATH
)
_validate_and_put_readme(dataset, input.read())
@click.group()
def add():
"""Add items and item metadata to a proto dataset."""
@add.command()
@click.argument("input_file", type=click.Path(exists=True))
@proto_dataset_uri_argument
@click.argument("relpath_in_dataset", default="")
def item(proto_dataset_uri, input_file, relpath_in_dataset):
"""Add a file to the proto dataset."""
proto_dataset = dtoolcore.ProtoDataSet.from_uri(
proto_dataset_uri,
config_path=CONFIG_PATH)
if relpath_in_dataset == "":
relpath_in_dataset = os.path.basename(input_file)
proto_dataset.put_item(input_file, relpath_in_dataset)
@add.command()
@proto_dataset_uri_argument
@click.argument("relpath_in_dataset")
@click.argument("key")
@click.argument("value")
def metadata(proto_dataset_uri, relpath_in_dataset, key, value):
"""Add metadata to a file in the proto dataset."""
proto_dataset = dtoolcore.ProtoDataSet.from_uri(
uri=proto_dataset_uri,
config_path=CONFIG_PATH)
proto_dataset.add_item_metadata(
handle=relpath_in_dataset,
key=key,
value=value)
@click.command()
@proto_dataset_uri_argument
def freeze(proto_dataset_uri):
"""Convert a proto dataset into a dataset.
This step is carried out after all files have been added to the dataset.
Freezing a dataset finalizes it with a stamp marking it as frozen.
"""
proto_dataset = dtoolcore.ProtoDataSet.from_uri(
uri=proto_dataset_uri,
config_path=CONFIG_PATH
)
num_items = len(list(proto_dataset._identifiers()))
max_files_limit = int(dtoolcore.utils.get_config_value(
"DTOOL_MAX_FILES_LIMIT",
CONFIG_PATH,
10000
))
assert isinstance(max_files_limit, int)
if num_items > max_files_limit:
click.secho(
"Too many items ({} > {}) in proto dataset".format(
num_items,
max_files_limit
),
fg="red"
)
click.secho("1. Consider splitting the dataset into smaller datasets")
click.secho("2. Consider packaging small files using tar")
click.secho("3. Increase the limit using the DTOOL_MAX_FILES_LIMIT")
click.secho(" environment variable")
sys.exit(2)
handles = [h for h in proto_dataset._storage_broker.iter_item_handles()]
for h in handles:
if not valid_handle(h):
click.secho(
"Invalid item name: {}".format(h),
fg="red"
)
click.secho("1. Consider renaming the item")
click.secho("2. Consider removing the item")
sys.exit(3)
with click.progressbar(length=len(list(proto_dataset._identifiers())),
label="Generating manifest") as progressbar:
try:
proto_dataset.freeze(progressbar=progressbar)
except dtoolcore.storagebroker.DiskStorageBrokerValidationWarning as e:
click.secho("")
click.secho(str(e), fg="red", nl=False)
sys.exit(4)
click.secho("Dataset frozen ", nl=False, fg="green")
click.secho(proto_dataset_uri)
def _copy(resume, quiet, dataset_uri, dest_base_uri):
src_dataset = dtoolcore.DataSet.from_uri(dataset_uri)
dest_uri = dtoolcore._generate_uri(
admin_metadata=src_dataset._admin_metadata,
base_uri=dest_base_uri
)
if not resume:
# Check if the destination URI is already a dataset
# and exit gracefully if true.
if dtoolcore._is_dataset(dest_uri, config_path=CONFIG_PATH):
raise click.UsageError(
"Dataset already exists: {}".format(dest_uri))
# If the destination URI is a "file" dataset one needs to check if
# the path already exists and exit gracefully if true.
parsed_dataset_uri = dtoolcore.utils.generous_parse_uri(dest_uri)
if parsed_dataset_uri.scheme == "file":
if os.path.exists(parsed_dataset_uri.path):
raise click.UsageError(
"Path already exists: {}".format(parsed_dataset_uri.path))
# Define the copy function to use.
copy_func = dtoolcore.copy
if resume:
copy_func = dtoolcore.copy_resume
# Finally do the copy
if quiet:
dest_uri = copy_func(
src_uri=dataset_uri,
dest_base_uri=dest_base_uri,
config_path=CONFIG_PATH
)
click.secho(dest_uri)
else:
num_items = len(list(src_dataset.identifiers))
with click.progressbar(length=num_items*2,
label="Copying dataset") as progressbar:
dest_uri = copy_func(
src_uri=dataset_uri,
dest_base_uri=dest_base_uri,
config_path=CONFIG_PATH,
progressbar=progressbar
)
click.secho("Dataset copied to:\n{}".format(dest_uri))
@click.command()
@click.option("--resume", is_flag=True, help="Resume an interrupted copy")
@click.option("--quiet", "-q", is_flag=True, help="Only return new URI")
@dataset_uri_argument
@click.argument("dest_base_uri")
def copy(resume, quiet, dataset_uri, dest_base_uri):
"""DEPRECATED: Copy a dataset to a different location."""
click.secho(
"The ``dtool copy`` command is deprecated",
fg="red",
err=True
)
click.secho(
"Use ``dtool cp`` instead",
fg="red",
err=True
)
_copy(resume, quiet, dataset_uri, dest_base_uri)
@click.command()
@click.option("--resume", is_flag=True, help="Resume an interrupted copy")
@click.option("--quiet", "-q", is_flag=True, help="Only return new URI")
@dataset_uri_argument
@click.argument("dest_base_uri")
def cp(resume, quiet, dataset_uri, dest_base_uri):
"""Copy a dataset to a different location."""
_copy(resume, quiet, dataset_uri, dest_base_uri)
| mit | 8,100,220,894,942,859,000 | 30.328767 | 109 | 0.618878 | false |
LCAV/pyroomacoustics | pyroomacoustics/tests/tests_libroom/test_wall_side_reflect.py | 1 | 5375 | # Tests of the wall side-detection and reflection methods
# Copyright (C) 2019 Robin Scheibler, Cyril Cadoux
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
from __future__ import division
import unittest
import numpy as np
import pyroomacoustics as pra
eps = 1e-6
corners = {
'3d' : np.array([
[-1, 1, 1],
[1, -1, 1],
[1, 1, -1],
]),
'2d' : np.array([
[ -1, 1 ],
[ -2, 2 ],
]),
}
points = {
'3d_up' : {
'd' : '3d',
'p' : [-1,-1,-1],
'expect' : -1,
'reflect' : [5/3, 5/3, 5/3],
},
'3d_down' : {
'd' : '3d',
'p' : [1,1,1],
'expect' : 1,
'reflect' : [-1/3, -1/3, -1/3],
},
'3d_on' : {
'd' : '3d',
'p' : [1/3, 1/3, 1/3],
'expect' : 0,
'reflect' : [1/3, 1/3, 1/3],
},
'2d_down' : {
'd' : '2d',
'p' : [-2, 1],
'expect' : -1,
'reflect' : [2, -1],
},
'2d_up' : {
'd' : '2d',
'p' : [2, -1],
'expect' : 1,
'reflect' : [-2, 1],
},
'2d_on' : {
'd' : '2d',
'p' : [0.5, 1],
'expect' : 0,
'reflect' : [0.5, 1],
},
}
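# Each test case maps a probe point 'p' to the expected wall.side() result
# ('expect': +1/-1 for either side of the wall, 0 when the point lies on it)
# and to 'reflect', the mirror image of 'p' across the wall plane.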
def run_side(label):
p = points[label]['p']
r_exp = points[label]['expect']
d = points[label]['d']
wall = pra.wall_factory(corners[d], [0.1], [0.1])
r = wall.side(p)
print('{}: returned={} expected={}'.format(label, r, r_exp))
return r == r_exp
def run_reflect(label):
p = points[label]['p']
p_refl = np.array(points[label]['reflect'])
r_exp = points[label]['expect']
d = points[label]['d']
wall = pra.wall_factory(corners[d], [0.1], [0.1])
x = np.zeros(wall.dim, dtype=np.float32)
wall.reflect(p, x)
err = np.linalg.norm(x - p_refl) < eps
print('{}: error={}'.format(label, err))
return err
class TestUtilityRoutines(unittest.TestCase):
def test_side_3d_up(self):
ret = run_side('3d_up')
self.assertTrue(ret)
def test_side_3d_down(self):
ret = run_side('3d_down')
self.assertTrue(ret)
def test_side_3d_on(self):
ret = run_side('3d_on')
self.assertTrue(ret)
def test_side_2d_up(self):
ret = run_side('2d_up')
self.assertTrue(ret)
def test_side_2d_down(self):
ret = run_side('2d_down')
self.assertTrue(ret)
def test_side_2d_on(self):
ret = run_side('2d_on')
self.assertTrue(ret)
def test_reflect_3d_up(self):
ret = run_reflect('3d_up')
self.assertTrue(ret)
def test_reflect_3d_down(self):
ret = run_reflect('3d_down')
self.assertTrue(ret)
def test_reflect_3d_on(self):
ret = run_reflect('3d_on')
self.assertTrue(ret)
def test_reflect_2d_up(self):
ret = run_reflect('2d_up')
self.assertTrue(ret)
def test_reflect_2d_down(self):
ret = run_reflect('2d_down')
self.assertTrue(ret)
def test_reflect_2d_on(self):
ret = run_reflect('2d_on')
self.assertTrue(ret)
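    # The checks below assume wall.normal_reflect(start, hit, length) returns
    # the point reached by travelling 'length' from 'hit' along the incoming
    # direction (start -> hit) reflected about the wall normal.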
def test_reflected_end2D(self):
eps = 0.001
start = [1,3]
hit = [5,3]
#normal = [-1, -1]
corners = np.array([
[ 6, 4 ],
[ 4, 6 ],
])
wall = pra.wall_factory(corners, [0.1], [0.1])
length = 4
res = wall.normal_reflect(start, hit, length)
self.assertTrue(np.allclose(res, [5.,-1.], atol=eps))
def test_reflected_end3D(self):
eps = 0.001
start = [1,1,1]
hit = [-1, 1, 3]
#normal = [1, 0, 0]
corners = np.array([
[ -1, -1, -1, -1, ],
[ 0, 2, 2, 0, ],
[ 2, 2, 4, 4, ],
])
wall = pra.wall_factory(corners, [0.1], [0.1])
length = 2*np.sqrt(2)
res = wall.normal_reflect(start, hit, length)
self.assertTrue(np.allclose(res, [1,1,5], atol=eps))
if __name__ == '__main__':
unittest.main()
| mit | -4,107,866,286,093,832,700 | 25.608911 | 80 | 0.513302 | false |
akrherz/iem | htdocs/plotting/auto/scripts100/p151.py | 1 | 13587 | """Period deltas"""
import datetime
from collections import OrderedDict
from geopandas import read_postgis
import numpy as np
from pyiem.plot import MapPlot, centered_bins, get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
PDICT = {
"state": "State Level Maps (select state)",
"cornbelt": "Corn Belt",
"midwest": "Midwest Map",
}
PDICT2 = {
"both": "Show both contour and values",
"values": "Show just the values",
"contour": "Show just the contour",
}
PDICT3 = OrderedDict(
[
("total_precip", "Annual Precipitation"),
("gdd", "Growing Degree Days (base=50/86)"),
("sdd", "Stress Degree Days (High > 86)"),
("avg_temp", "Average Temperature"),
("avg_high", "Average High Temperature"),
("avg_low", "Average Low Temperature"),
("days_high_above", "Days with High Temp At or Above [Threshold]"),
("days_high_below", "Days with High Temp Below [Threshold]"),
("days_low_above", "Days with Low Temp At or Above [Threshold]"),
("days_low_below", "Days with Low Temp Below [Threshold]"),
]
)
PDICT4 = {
"english": "English",
"metric": "Metric",
}
UNITS = {
"total_precip": "inch",
"gdd": "F",
"sdd": "F",
"avg_temp": "F",
"avg_high": "F",
"avg_low": "F",
"days_high_above": "days",
"days_high_below": "days",
"days_low_above": "days",
"days_low_below": "days",
}
MUNITS = {
"total_precip": "mm",
"gdd": "C",
"sdd": "C",
"avg_temp": "C",
"avg_high": "C",
"avg_low": "C",
"days_high_above": "days",
"days_high_below": "days",
"days_low_above": "days",
"days_low_below": "days",
}
PRECISION = {"total_precip": 2}
MDICT = OrderedDict(
[
("all", "Annual"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("gs", "1 May to 30 Sep"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
OPT1 = {"diff": "Plot Difference", "p1": "Just Plot Period One Values"}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This map produces an analysis yearly averages. You
can either plot the difference between two period of years or simply the
years between the first period. This app is meant to address the question
about changes in climate or just to produce a simple plot of yearly
averages over some period of years."""
desc["arguments"] = [
dict(
type="select",
name="month",
default="all",
options=MDICT,
label="Show Monthly or Annual Averages",
),
dict(
type="select",
name="sector",
default="state",
options=PDICT,
label="Select Map Region",
),
dict(
type="state",
name="state",
default="IA",
label="Select State to Plot (when appropriate)",
),
dict(
type="select",
name="opt",
options=PDICT2,
default="both",
label="Map Plot/Contour View Option",
),
dict(
type="select",
name="var",
options=PDICT3,
default="total_precip",
label="Which Variable to Plot",
),
dict(
type="select",
name="r",
options=PDICT4,
default="english",
label="Which Unit System to Use (GDD/SDD always english)",
),
dict(
type="float",
name="threshold",
default=-99,
label="Enter threshold (where appropriate)",
),
dict(
type="select",
options=OPT1,
default="diff",
name="opt1",
label="Period plotting option",
),
dict(
type="year",
name="p1syear",
default=1951,
label="Start Year (inclusive) of Period One:",
),
dict(
type="year",
name="p1eyear",
default=1980,
label="End Year (inclusive) of Period One:",
),
dict(
type="year",
name="p2syear",
default=1981,
label="Start Year (inclusive) of Period Two:",
),
dict(
type="year",
name="p2eyear",
default=2010,
label="End Year (inclusive) of Period Two:",
),
dict(
type="cmap", name="cmap", default="seismic_r", label="Color Ramp:"
),
]
return desc
def get_data(ctx):
"""Get the data, please."""
pgconn = get_dbconn("coop")
state = ctx["state"]
sector = ctx["sector"]
threshold = ctx["threshold"]
month = ctx["month"]
p1syear = ctx["p1syear"]
p1eyear = ctx["p1eyear"]
p1years = p1eyear - p1syear + 1
p2syear = ctx["p2syear"]
p2eyear = ctx["p2eyear"]
p2years = p2eyear - p2syear + 1
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
elif month == "gs":
months = [5, 6, 7, 8, 9]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # wrap the single month in a list so it can be passed as a tuple to the IN clause below
months = [ts.month]
table = "alldata"
if sector == "state":
# optimization
table = f"alldata_{state}"
hcol = "high"
lcol = "low"
pcol = "precip"
if ctx["r"] == "metric":
hcol = "f2c(high)"
lcol = "f2c(low)"
pcol = "precip * 25.4"
df = read_postgis(
f"""
WITH period1 as (
SELECT station, year, sum({pcol}) as total_precip,
avg(({hcol}+{lcol}) / 2.) as avg_temp, avg({hcol}) as avg_high,
avg({lcol}) as avg_low,
sum(gddxx(50, 86, high, low)) as sum_gdd,
sum(case when high > 86 then high - 86 else 0 end) as sum_sdd,
sum(case when {hcol} >= %s then 1 else 0 end) as days_high_above,
sum(case when {hcol} < %s then 1 else 0 end) as days_high_below,
sum(case when {lcol} >= %s then 1 else 0 end) as days_low_above,
sum(case when {lcol} < %s then 1 else 0 end) as days_low_below
from {table} WHERE year >= %s and year <= %s
and month in %s GROUP by station, year),
period2 as (
SELECT station, year, sum({pcol}) as total_precip,
avg(({hcol}+{lcol}) / 2.) as avg_temp, avg({hcol}) as avg_high,
avg({lcol}) as avg_low,
sum(gddxx(50, 86, high, low)) as sum_gdd,
sum(case when high > 86 then high - 86 else 0 end) as sum_sdd,
sum(case when {hcol} >= %s then 1 else 0 end) as days_high_above,
sum(case when {hcol} < %s then 1 else 0 end) as days_high_below,
sum(case when {lcol} >= %s then 1 else 0 end) as days_low_above,
sum(case when {lcol} < %s then 1 else 0 end) as days_low_below
from {table} WHERE year >= %s and year <= %s
and month in %s GROUP by station, year),
p1agg as (
SELECT station, avg(total_precip) as precip,
avg(avg_temp) as avg_temp, avg(avg_high) as avg_high,
avg(avg_low) as avg_low, avg(sum_sdd) as sdd,
avg(sum_gdd) as gdd,
avg(days_high_above) as avg_days_high_above,
avg(days_high_below) as avg_days_high_below,
avg(days_low_above) as avg_days_low_above,
avg(days_low_below) as avg_days_low_below,
count(*) as count
from period1 GROUP by station),
p2agg as (
SELECT station, avg(total_precip) as precip,
avg(avg_temp) as avg_temp, avg(avg_high) as avg_high,
avg(avg_low) as avg_low, avg(sum_sdd) as sdd,
avg(sum_gdd) as gdd,
avg(days_high_above) as avg_days_high_above,
avg(days_high_below) as avg_days_high_below,
avg(days_low_above) as avg_days_low_above,
avg(days_low_below) as avg_days_low_below,
count(*) as count
from period2 GROUP by station),
agg as (
SELECT p2.station,
p2.precip as p2_total_precip,
p1.precip as p1_total_precip,
p2.gdd as p2_gdd, p1.gdd as p1_gdd,
p2.sdd as p2_sdd, p1.sdd as p1_sdd,
p2.avg_temp as p2_avg_temp, p1.avg_temp as p1_avg_temp,
p1.avg_high as p1_avg_high, p2.avg_high as p2_avg_high,
p1.avg_low as p1_avg_low, p2.avg_low as p2_avg_low,
p1.avg_days_high_above as p1_days_high_above,
p2.avg_days_high_above as p2_days_high_above,
p1.avg_days_high_below as p1_days_high_below,
p2.avg_days_high_below as p2_days_high_below,
p1.avg_days_low_above as p1_days_low_above,
p2.avg_days_low_above as p2_days_low_above,
p1.avg_days_low_below as p1_days_low_below,
p2.avg_days_low_below as p2_days_low_below
from p1agg p1 JOIN p2agg p2 on
(p1.station = p2.station)
WHERE p1.count >= %s and p2.count >= %s)
SELECT ST_X(geom) as lon, ST_Y(geom) as lat, t.geom,
d.* from agg d JOIN stations t ON (d.station = t.id)
WHERE t.network ~* 'CLIMATE'
and substr(station, 3, 1) != 'C' and substr(station, 3, 4) != '0000'
""",
pgconn,
params=[
threshold,
threshold,
threshold,
threshold,
p1syear,
p1eyear,
tuple(months),
threshold,
threshold,
threshold,
threshold,
p2syear,
p2eyear,
tuple(months),
p1years,
p2years,
],
index_col="station",
geom_col="geom",
)
if df.empty:
raise NoDataFound("No Data Found.")
df["total_precip"] = df["p2_total_precip"] - df["p1_total_precip"]
df["avg_temp"] = df["p2_avg_temp"] - df["p1_avg_temp"]
df["avg_high"] = df["p2_avg_high"] - df["p1_avg_high"]
df["avg_low"] = df["p2_avg_low"] - df["p1_avg_low"]
df["gdd"] = df["p2_gdd"] - df["p1_gdd"]
df["sdd"] = df["p2_sdd"] - df["p1_sdd"]
df["days_high_above"] = df["p2_days_high_above"] - df["p1_days_high_above"]
df["days_high_below"] = df["p2_days_high_below"] - df["p1_days_high_below"]
df["days_low_above"] = df["p2_days_low_above"] - df["p1_days_low_above"]
df["days_low_below"] = df["p2_days_low_below"] - df["p1_days_low_below"]
return df
def geojson(fdict):
"""Handle GeoJSON output."""
ctx = get_autoplot_context(fdict, get_description())
return (get_data(ctx).drop(["lat", "lon"], axis=1)), ctx["var"]
def plotter(fdict):
""" Go """
ctx = get_autoplot_context(fdict, get_description())
df = get_data(ctx)
state = ctx["state"]
varname = ctx["var"]
sector = ctx["sector"]
threshold = ctx["threshold"]
opt = ctx["opt"]
month = ctx["month"]
p1syear = ctx["p1syear"]
p1eyear = ctx["p1eyear"]
p2syear = ctx["p2syear"]
p2eyear = ctx["p2eyear"]
opt1 = ctx["opt1"]
column = varname
title = "%s %s" % (MDICT[month], PDICT3[varname])
title = title.replace("[Threshold]", "%.1f" % (threshold,))
if opt1 == "p1":
column = "p1_%s" % (varname,)
title = "%.0f-%.0f %s" % (p1syear, p1eyear, title)
else:
title = ("%.0f-%.0f minus %.0f-%.0f %s Difference (%s)") % (
p2syear,
p2eyear,
p1syear,
p1eyear,
title,
UNITS[varname] if ctx["r"] == "english" else MUNITS[varname],
)
# Reindex so that most extreme values are first
df = df.reindex(df[column].abs().sort_values(ascending=False).index)
# drop 5% most extreme events, too much?
df2 = df.iloc[int(len(df.index) * 0.05) :]
mp = MapPlot(
sector=sector,
state=state,
axisbg="white",
title=title,
subtitle=("based on IEM Archives"),
titlefontsize=12,
)
if opt1 == "diff":
# Create 9 levels centered on zero
abval = df2[column].abs().max()
levels = centered_bins(abval)
else:
levels = [
round(v, PRECISION.get(varname, 1))
for v in np.percentile(df2[column].values, range(0, 101, 10))
]
if opt in ["both", "contour"]:
mp.contourf(
df2["lon"].values,
df2["lat"].values,
df2[column].values,
levels,
cmap=get_cmap(ctx["cmap"]),
units=UNITS[varname] if ctx["r"] == "english" else MUNITS[varname],
)
if sector == "state":
mp.drawcounties()
if opt in ["both", "values"]:
mp.plot_values(
df2["lon"].values,
df2["lat"].values,
df2[column].values,
fmt="%%.%if" % (PRECISION.get(varname, 1),),
labelbuffer=5,
)
return mp.fig, df.drop("geom", axis=1).round(2)
if __name__ == "__main__":
plotter(dict(over="annual"))
| mit | -7,669,432,662,049,230,000 | 31.120567 | 79 | 0.524693 | false |
zprpa-ca/last.fm | 03.artist-correlation.py | 1 | 8514 | #!/usr/bin/env python
''' Calculate the correlation between the artists. Intermediate datasets are
saved in the HDF5 file and the final dataset is saved in the database as
well. The artist correlation matrix is saved only for the single
selected artist, used in the final step for the similarity comparison.
#--------------------------------------------------------------------------#
Copyright (C) 2014, Zlatko Prpa <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------------------------------#
'''
#-- standard libs
import os, sys, sqlite3, time, locale, itertools as it
#-- add-on libs
import numpy, h5py
#-- custom libs
import utils
#==============================================================================#
#--------------------------------- SETUP --------------------------------------#
#==============================================================================#
log = utils.ZpLog( 'logs/' + os.path.basename(__file__) + '.log')
elog = utils.ZpErrLog('logs/' + os.path.basename(__file__) + '.ERROR-traceback.log')
log.write(''.ljust(150,'*'), skip_line=1, add_line=1)
#-- setup number formatting
locale.setlocale( locale.LC_ALL, "" )
fmt = locale.format
#-- open the HDF5 file for the storage of the intermediate datasets
h5f = h5py.File('data/artist-correlation-datasets.h5','w')
vlen_dtype = h5py.special_dtype(vlen=str)
#==============================================================================#
#------------------------- Load and process data ------------------------------#
#==============================================================================#
#--------------------------------------#
#-- load data and apply basic filter #
#--------------------------------------#
''' Load the records from the artist/tag table.
    There is no reason to apply any filter to this basic dataset, as opposed
to the tag correlation procedure. We do not need to generalize any
specific artist, as we had to do with tag data.
Otherwise, the whole processing logic is very much the same.
'''
log.write('Load data.')
dbs = sqlite3.connect('data/lastfm.sql3', detect_types=sqlite3.PARSE_DECLTYPES)
cur = dbs.cursor()
cur.execute("SELECT t.artist_name, t.tag, t.count FROM top_artist_tags t")
recs = numpy.array([r for r in cur],dtype=[('art','O'),('tag','O'),('count','i4')])
cur.close()
dbs.close()
log.write('Loaded %s records.'%fmt('%12d',recs.shape[0],True).strip())
#--------------------------------------#
#-- prepare data for correlation calc #
#--------------------------------------#
log.write('Prepare data for the correlation calc.')
#-- Get unique list of artists and tags.
unique_art = numpy.unique( recs['art'] )
unique_tags = numpy.unique( recs['tag'] )
''' Create 2d array to hold the vector for each artist. The vector size is 2x
    the length of the list of unique tags. The first part will have the
    value 0/1, depending on whether the given artist is associated with the given tag.
The second part will have the tag ranking (count) value, at the same
position for the given tag.
Assuming the following tuples in the basic dataset [recs]:
(art1,tag1,90), (art1,tag2,80), (art1,tag3,60),
(art2,tag1,80), (art2,tag3,90),
(art3,tag2,90), (art3,tag3,80),
(art4,tag1,50), (art4,tag2,70), (art4,tag3,70)
The "unique_art" list is: [art1,art2,art3,art4]
The "unique_tags" list is: [tag1,tag2,tag3]
offset = 3
Single artist vector is [0,0,0,0,0,0], with logical mask as
[tag1,tag2,tag3,rank1,rank2,rank3].
Based on the above described data, the complete matrix "tags_mx"
will have 4 vectors with following values:
[[1,1,1,90,80,60],
[1,0,1,80, 0,90],
[0,1,1, 0,90,80],
[1,1,1,50,70,70]]
The sample data (tags for 1000 artists) is very small and this executes
    fast; otherwise this loop would be a strong candidate for parallel
execution.
'''
offset = unique_tags.shape[0]
art_mx = numpy.zeros((unique_art.shape[0],offset*2),'i4')
for i in xrange(unique_art.shape[0]):
#-- find indicies for all records in the basic dataset for given artist
idx = numpy.where( recs['art']==unique_art[i] )[0]
#-- get all tags and counts for the given artist
tags = recs['tag'].take(idx)
counts = recs['count'].take(idx)
#-- find the index positions in the tag unique list, for all tag artists
idx = unique_tags.searchsorted(tags)
#-- fill in the first part of the artist vector with 1, for each tag found
numpy.put( art_mx[i], idx, 1 )
#-- fill in the tag count (rank) in the second part of the artist vector
numpy.put( art_mx[i], idx+offset, counts )
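#-- With the toy data from the docstring above, row 0 (art1) would now hold
#-- [1,1,1,90,80,60]: the tag presence flags first, then the tag counts.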
ds = h5f.create_dataset('unique_art', unique_art.shape, dtype=vlen_dtype)
ds[...] = unique_art
ds = h5f.create_dataset('unique_tags', unique_tags.shape, dtype=vlen_dtype)
ds[...] = unique_tags
ds = h5f.create_dataset('art_mx', art_mx.shape, dtype=art_mx.dtype)
ds[...] = art_mx
h5f.flush()
log.write('Saved following datasets:')
log.write('unique_art: shape->%s\tdtype->%s'%(unique_art.shape, unique_art.dtype))
log.write('unique_tags: shape->%s\tdtype->%s'%(unique_tags.shape,unique_tags.dtype))
log.write('art_mx: shape->%s\tdtype->%s'%(art_mx.shape, art_mx.dtype), add_line=1)
#--------------------------------------#
#-- calculate artist correlation #
#--------------------------------------#
log.write('Calculate artist correlation.')
''' Calculate correlation for each distinct pair of artist vectors.
Again, in case of high data volume, this could be executed in parallel
    using a pool of worker processes.
For the present dataset, the approx size of the artist correlation matrix
is around 500K recs.
'''
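#-- numpy.corrcoef(a, b) returns the 2x2 correlation matrix
#-- [[corr(a,a), corr(a,b)], [corr(b,a), corr(b,b)]], so the [0,1] element
#-- taken below is the Pearson correlation between the two artist vectors.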
#-- first iterator to get the matrix size
itr = ((i,j) for i in xrange(unique_art.shape[0]) for j in xrange(i+1,unique_art.shape[0]))
size = sum(1 for _ in itr)
corr = numpy.empty( size, dtype=[('art1','O'),('art2','O'),('c','f8')] )
#-- full iterator
itr = it.izip( ((i,j) for i in xrange(unique_art.shape[0]) for j in xrange(i+1,unique_art.shape[0])),
(k for k in xrange(size)) )
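#-- izip pairs each distinct (i,j) artist combination with a running index k,
#-- which addresses the destination slot in the pre-allocated 'corr' array.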
t = time.time()
for (x,y),z in itr:
c = numpy.corrcoef( art_mx[x], art_mx[y] )[0,1]
corr[z] = (unique_art[x], unique_art[y], c)
    #-- update progress every 10K recs
if z%10000==0:
log.write_timing1( z, size, t, time.time(), out_type='TTY')
''' Because the full dataset is somewhat big, save only the sample used later
in the "similar artist" comparison.
Comment out if you want to re-run and get all records.
'''
log.write('Full artist correlation matrix: [corr] shape->%s\tdtype->%s'%(corr.shape,corr.dtype))
sample_artist = 'Cyndi Lauper'
i = numpy.where( (corr['art1']==sample_artist)|(corr['art2']==sample_artist) )[0]
corr = corr.take(i)
log.write('Sample artist correlation matrix: [corr] shape->%s\tdtype->%s'%(corr.shape,corr.dtype))
ds = h5f.create_dataset('corr', corr.shape, dtype=[('art1',vlen_dtype),('art2',vlen_dtype),('c','f8')])
ds[...] = corr
h5f.close()
log.write('Saved sample artist correlation matrix: [corr] shape->%s\tdtype->%s'%(corr.shape,corr.dtype),add_line=1)
#-- save the records in the database as well
dbs = sqlite3.connect('data/lastfm.sql3', detect_types=sqlite3.PARSE_DECLTYPES)
cur = dbs.cursor()
cur.execute("DELETE FROM artist_correlation")
cur.executemany("INSERT INTO artist_correlation VALUES (?,?,?)",(r for r in corr))
log.write('Loaded %s records in the database.'%fmt('%6d',cur.rowcount,True))
dbs.commit()
cur.close()
dbs.close()
log.write(''.ljust(150,'*'), add_line=1)
log.close()
#==============================================================================#
#------------------------------------------------------------------------------#
#==============================================================================#
| gpl-3.0 | -9,014,966,826,731,524,000 | 42.218274 | 115 | 0.592436 | false |
ROCmSoftwarePlatform/rocFFT | library/src/device/kernel-generator.py | 1 | 39884 | #!/usr/bin/env python3
"""rocFFT kernel generator.
Currently this acts as a shim between CMake and the C++ kernel generator.
It accepts two sub-commands:
1. list - lists the files that will be generated
2. generate - passes arguments down to the old generator
Note that 'small' kernels don't decompose their lengths.
"""
import argparse
import collections
import functools
import itertools
import os
import subprocess
import sys
from pathlib import Path
from types import SimpleNamespace as NS
from functools import reduce
from operator import mul
from copy import deepcopy
from generator import (ArgumentList, BaseNode, Call, CommentBlock, ExternC, Function, Include,
LineBreak, Map, Pragma, StatementList, Variable, name_args, format_and_write)
import stockham
supported_large = [50, 64, 81, 100, 128, 200, 256, 336]
old_gen_supported_large = [50, 64, 81, 100, 128, 200, 256]
#
# CMake helpers
#
def scjoin(xs):
"""Join 'xs' with semi-colons."""
return ';'.join(str(x) for x in xs)
def scprint(xs):
"""Print 'xs', joined by semi-colons, on a single line. CMake friendly."""
print(scjoin(xs), end='', flush=True)
def cjoin(xs):
"""Join 'xs' with commas."""
return ','.join(str(x) for x in xs)
#
# Helpers
#
def product(*args):
"""Cartesian product of input iteratables, as a list."""
return list(itertools.product(*args))
def merge(*ds):
"""Merge dictionaries; last one wins."""
r = collections.OrderedDict()
for d in ds:
r.update(d)
return r
def pmerge(d, fs):
"""Merge d with dicts of {(length, precision, scheme, transpose): f}."""
r = collections.OrderedDict()
r.update(d)
for f in fs:
r[f.meta.length, f.meta.precision, f.meta.scheme, f.meta.transpose] = f
return r
def flatten(lst):
"""Flatten a list of lists to a list."""
return sum(lst, [])
# this function should eventually go away
def pick(all, new_kernels, subtract_from_all=True):
"""From all old kernels, pick out those supported by new kernel, and remove from old list."""
old = collections.OrderedDict(all)
new = []
for nk in new_kernels:
assert hasattr(nk, 'length')
for target_length in all:
if nk.length == target_length:
new.append(nk) # pick out, put to new
if subtract_from_all:
del old[target_length] # remove from old
break
# old-list to old-gen, new-list to new-gen
return old, new
def merge_length(kernel_list, ks):
"""Merge kernel lists without duplicated meta.length; ignore later ones."""
merged_list = list(kernel_list)
lengths = [ item.length for item in kernel_list ]
for k in ks:
if k.length not in lengths:
merged_list.append(k)
return merged_list
#
# Supported kernel sizes
#
# this function should eventually go away
def supported_small_sizes(precision, pow2=True, pow3=True, pow5=True, commonRadix=True):
"""Return list of 1D small kernels."""
upper_bound = {
'sp': 4096,
'dp': 4096, # of course this isn't 2048... not sure why (double len 1594323 will fail)
}
powers = {
5: [5**k for k in range(6 if pow5 else 1)],
3: [3**k for k in range(8 if pow3 else 1)],
2: [2**k for k in range(13 if pow2 else 1)],
}
lengths = [p2 * p3 * p5 for p2, p3, p5 in product(powers[2], powers[3], powers[5])]
# common radix 7, 11, and 13
if commonRadix:
lengths += [7, 14, 21, 28, 42, 49, 56, 84, 112, 168, 224, 336, 343]
lengths += [11, 22, 44, 88, 121, 176]
lengths += [13, 17, 26, 52, 104, 169, 208, 272, 528, 1040]
def filter_bound(length):
return length <= upper_bound[precision]
filtered = sorted([x for x in set(lengths) if filter_bound(x)])
return product(filtered, ['CS_KERNEL_STOCKHAM'])
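# Each returned entry pairs a supported length with the small Stockham scheme,
# e.g. (8, 'CS_KERNEL_STOCKHAM'); this list feeds the old-generator prototypes.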
def supported_large_sizes(precision):
"""Return list of 1D large block kernels."""
return product(supported_large, ['CS_KERNEL_STOCKHAM_BLOCK_CC',
'CS_KERNEL_STOCKHAM_BLOCK_RC'])
# this function should eventually go away
def supported_2d_sizes(precision):
"""Return list of 2D kernels."""
# for now, mimic order of old generator so diffing is easier
powers = {
5: [5**k for k in range(3, 1, -1)],
3: [3**k for k in range(5, 1, -1)],
2: [2**k for k in range(8, 1, -1)],
}
lengths = []
for b1, b2 in [(2, 2), (3, 3), (5, 5), (2, 3), (3, 2), (3, 5), (5, 3), (2, 5), (5, 2)]:
lengths.extend(product(powers[b1], powers[b2]))
max_lds_size_bytes = 64 * 1024
bytes_per_element = {'sp': 8, 'dp': 16}[precision]
def filter_lds(length):
return length[0] * length[1] * bytes_per_element * 1.5 <= max_lds_size_bytes
# explicit list of fused 2D kernels that the old generator doesn't
# like; usually because the thread counts are wonky.
avoid = {
'sp': [(16, 243), (16, 256), (27, 125), (27, 128), (64, 64), (64, 81)],
'dp': [(16, 243), (16, 256), (25, 125), (27, 125), (32, 125), (25, 128), (27, 128), (32, 128), (64, 64), (64, 81)]
}[precision]
def filter_threads(length):
rlength = (length[1], length[0])
return length not in avoid and rlength not in avoid
filtered = [x for x in lengths if filter_lds(x) and filter_threads(x)]
return product(filtered, ['CS_KERNEL_2D_SINGLE'])
# this function should eventually go away
def get_dependent_1D_sizes(list_2D):
dep_1D = set()
for problem in list_2D:
dep_1D.update( [problem[0][0], problem[0][1]] )
return product(dep_1D, ['CS_KERNEL_STOCKHAM'])
#
# Prototype generators
#
@name_args(['function'])
class FFTKernel(BaseNode):
def __str__(self):
f = 'FFTKernel('
if self.function.meta.runtime_compile:
f += 'nullptr'
else:
f += str(self.function.address())
use_3steps_large_twd = getattr(self.function.meta, 'use_3steps_large_twd', None)
if use_3steps_large_twd is not None:
f += ', ' + str(use_3steps_large_twd[self.function.meta.precision])
else:
f += ', false'
factors = getattr(self.function.meta, 'factors', None)
if factors is not None:
f += ', {' + cjoin(factors) + '}'
transforms_per_block = getattr(self.function.meta, 'transforms_per_block', None)
if transforms_per_block is not None:
f += ', ' + str(transforms_per_block)
threads_per_block = getattr(self.function.meta, 'threads_per_block', None)
if threads_per_block is not None:
f += ', ' + str(threads_per_block)
f += ')'
return f
def generate_cpu_function_pool(functions):
"""Generate function to populate the kernel function pool."""
function_map = Map('function_map')
precisions = { 'sp': 'rocfft_precision_single',
'dp': 'rocfft_precision_double' }
populate = StatementList()
for f in functions:
length, precision, scheme, transpose = f.meta.length, f.meta.precision, f.meta.scheme, f.meta.transpose
if isinstance(length, (int, str)):
length = [length, 0]
key = Call(name='std::make_tuple',
arguments=ArgumentList('std::array<size_t, 2>({' + cjoin(length) + '})',
precisions[precision],
scheme,
transpose or 'NONE')).inline()
populate += function_map.assert_emplace(key, FFTKernel(f))
return StatementList(
Include('<iostream>'),
Include('"../include/function_pool.h"'),
StatementList(*[f.prototype() for f in functions]),
Function(name='function_pool::function_pool',
value=False,
arguments=ArgumentList(),
body=populate))
# this function should eventually go away
def generate_small_1d_prototypes(precision, transforms):
"""Generate prototypes for 1D small kernels that will be generated by the old generator."""
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
functions = []
def add(name, scheme, transpose=None):
functions.append(Function(name=name,
arguments=ArgumentList(data, back),
meta=NS(
length=length,
precision=precision,
scheme=scheme,
transpose=transpose,
runtime_compile=False)))
for length, scheme in transforms.items():
add(f'rocfft_internal_dfn_{precision}_ci_ci_stoc_{length}', scheme)
return functions
# this function should eventually go away
def generate_large_1d_prototypes(precision, transforms):
"""Generate prototypes for 1D large block kernels that will be generated from the old generator."""
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
functions = []
def add(name, scheme, transpose=None):
use3Steps = {'sp': 'true', 'dp': 'true'}
if length == 81:
use3Steps['dp'] = 'false'
elif length == 200:
use3Steps['sp'] = use3Steps['dp'] = 'false'
functions.append(Function(name=name,
arguments=ArgumentList(data, back),
meta=NS(
length=length,
precision=precision,
scheme=scheme,
use_3steps_large_twd=use3Steps,
transpose=transpose,
runtime_compile=False)))
for length, scheme in transforms.items():
if 0:
add(f'rocfft_internal_dfn_{precision}_ci_ci_sbcc_{length}', 'CS_KERNEL_STOCKHAM_BLOCK_CC')
elif scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
# for old-sbcc compatibility: always include the sbcc function (but will be overwritten if new gen has it)
add(f'rocfft_internal_dfn_{precision}_ci_ci_sbcc_{length}', 'CS_KERNEL_STOCKHAM_BLOCK_CC')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc_{length}', 'CS_KERNEL_STOCKHAM_BLOCK_RC')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_xy_z_tile_aligned_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'TILE_ALIGNED')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_z_xy_tile_aligned_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_Z_XY', 'TILE_ALIGNED')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_erc_trans_z_xy_tile_aligned_{length}', 'CS_KERNEL_STOCKHAM_R_TO_CMPLX_TRANSPOSE_Z_XY', 'TILE_ALIGNED')
if length in [128, 256]:
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_xy_z_diagonal_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'DIAGONAL')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_trans_z_xy_diagonal_{length}', 'CS_KERNEL_STOCKHAM_TRANSPOSE_Z_XY', 'DIAGONAL')
add(f'rocfft_internal_dfn_{precision}_op_ci_ci_sbrc3d_fft_erc_trans_z_xy_diagonal_{length}', 'CS_KERNEL_STOCKHAM_R_TO_CMPLX_TRANSPOSE_Z_XY', 'DIAGONAL')
return functions
# this function should eventually go away
def generate_2d_prototypes(precision, transforms):
"""Generate prototypes for 2D kernels that will be generated by the old generator."""
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
functions = []
def add(name, scheme, transpose=None):
functions.append(Function(name=name,
arguments=ArgumentList(data, back),
meta=NS(
length=length,
precision=precision,
scheme=scheme,
transpose=transpose,
runtime_compile=False)))
for length, scheme in transforms.items():
add(f'rocfft_internal_dfn_{precision}_ci_ci_2D_{length[0]}_{length[1]}', 'CS_KERNEL_2D_SINGLE', 'NONE')
return functions
# this function should eventually go away
def list_old_generated_kernels(patterns=None,
precisions=None,
num_small_kernel_groups=150):
"""Return a list (for CMake) of files created by the (old) generator."""
if patterns is None:
patterns = ['all']
if precisions is None:
precisions = ['all']
#
# all this 'generated_kernels' should go away when the old generator goes away
#
generated_kernels = {
'kernels_launch_basic': [
'function_pool.cpp',
],
'kernels_launch_small_sp':
[f'kernel_launch_single_{i}.cpp' for i in range(num_small_kernel_groups)]
+ [f'kernel_launch_single_{i}.cpp.h' for i in range(num_small_kernel_groups)],
'kernels_launch_small_dp':
[f'kernel_launch_double_{i}.cpp' for i in range(num_small_kernel_groups)]
+ [f'kernel_launch_double_{i}.cpp.h' for i in range(num_small_kernel_groups)],
'kernels_launch_large_sp': [
'kernel_launch_single_large.cpp',
],
'kernels_launch_large_dp': [
'kernel_launch_double_large.cpp',
],
'kernels_launch_2D_sp': [
'kernel_launch_single_2D_pow2.cpp',
'kernel_launch_single_2D_pow3.cpp',
'kernel_launch_single_2D_pow5.cpp',
'kernel_launch_single_2D_mix_pow2_3.cpp',
'kernel_launch_single_2D_mix_pow3_2.cpp',
'kernel_launch_single_2D_mix_pow3_5.cpp',
'kernel_launch_single_2D_mix_pow5_3.cpp',
'kernel_launch_single_2D_mix_pow2_5.cpp',
'kernel_launch_single_2D_mix_pow5_2.cpp',
],
'kernels_launch_2D_dp': [
'kernel_launch_double_2D_pow2.cpp',
'kernel_launch_double_2D_pow3.cpp',
'kernel_launch_double_2D_pow5.cpp',
'kernel_launch_double_2D_mix_pow2_3.cpp',
'kernel_launch_double_2D_mix_pow3_2.cpp',
'kernel_launch_double_2D_mix_pow3_5.cpp',
'kernel_launch_double_2D_mix_pow5_3.cpp',
'kernel_launch_double_2D_mix_pow2_5.cpp',
'kernel_launch_double_2D_mix_pow5_2.cpp',
],
}
generated_kernels['kernels_launch_small_all'] = generated_kernels['kernels_launch_small_sp'] + generated_kernels['kernels_launch_small_dp']
generated_kernels['kernels_launch_large_all'] = generated_kernels['kernels_launch_large_sp'] + generated_kernels['kernels_launch_large_dp']
generated_kernels['kernels_launch_2D_all'] = generated_kernels['kernels_launch_2D_sp'] + generated_kernels['kernels_launch_2D_dp']
generated_kernels['kernels_launch_all_sp'] = generated_kernels['kernels_launch_small_sp'] + generated_kernels['kernels_launch_large_sp'] + generated_kernels['kernels_launch_2D_sp']
generated_kernels['kernels_launch_all_dp'] = generated_kernels['kernels_launch_small_dp'] + generated_kernels['kernels_launch_large_dp'] + generated_kernels['kernels_launch_2D_dp']
generated_kernels['kernels_launch_all_all'] = generated_kernels['kernels_launch_all_sp'] + generated_kernels['kernels_launch_all_dp']
gen = generated_kernels['kernels_launch_basic']
for patt in patterns:
for prec in precisions:
gen += generated_kernels[f'kernels_launch_{patt}_{prec}']
return list(set(gen))
def list_generated_kernels(kernels):
"""Return list of kernel filenames."""
return [kernel_file_name(x) for x in kernels if not x.runtime_compile]
#
# Main!
#
@name_args(['name', 'ip_fwd', 'ip_inv', 'op_fwd', 'op_inv', 'precision'])
class POWX_SMALL_GENERATOR(BaseNode):
def __str__(self):
return f'POWX_SMALL_GENERATOR({cjoin(self.args)});'
def function(self, meta, precision):
data = Variable('data_p', 'const void *')
back = Variable('back_p', 'void *')
meta = NS(precision=precision, **meta.__dict__)
return Function(name=self.name,
arguments=ArgumentList(data, back),
meta=meta)
@name_args(['name', 'ip_fwd', 'ip_inv', 'op_fwd', 'op_inv', 'precision'])
class POWX_LARGE_SBCC_GENERATOR(POWX_SMALL_GENERATOR):
def __str__(self):
return f'POWX_LARGE_SBCC_GENERATOR({cjoin(self.args)});'
@name_args(['name', 'op_fwd', 'op_inv', 'precision', 'sbrc_type', 'transpose_type'])
class POWX_LARGE_SBRC_GENERATOR(POWX_SMALL_GENERATOR):
def __str__(self):
return f'POWX_LARGE_SBRC_GENERATOR({cjoin(self.args)});'
def kernel_file_name(ns):
"""Given kernel info namespace, return reasonable file name."""
assert hasattr(ns, 'length')
length = ns.length
if isinstance(length, (tuple, list)):
length = 'x'.join(str(x) for x in length)
postfix = ''
if ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CC':
postfix = '_sbcc'
elif ns.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
postfix = '_sbrc'
return f'rocfft_len{length}{postfix}.cpp'
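# For example, a length-64 CS_KERNEL_STOCKHAM kernel maps to 'rocfft_len64.cpp',
# its SBCC variant to 'rocfft_len64_sbcc.cpp', and a fused 2D [32, 32] kernel
# to 'rocfft_len32x32.cpp'.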
def list_new_kernels():
"""Return list of kernels to generate with the new generator."""
    # remaining lengths less than 1024: 121 192 224 250 320 336 375
# 384 405 432 450 480 500 512 576 600 625 640 675 750 768 800 810
# 864 900 972 1000
# dictionary of (flavour, threads_per_block) -> list of kernels to generate
# note the length property is necessary for the latter pick and merge_length
small_kernels = {
('uwide', 256): [
# NS(length=2, factors=[2]),
# NS(length=3, factors=[3]),
# NS(length=5, factors=[5]),
# NS(length=6, factors=[6]),
# NS(length=7, factors=[7]),
# NS(length=8, factors=[8]),
NS(length=9, factors=[3,3], runtime_compile=True),
# NS(length=10, factors=[10]),
NS(length=12, factors=[6,2]),
NS(length=14, factors=[7,2]),
NS(length=15, factors=[5,3]),
NS(length=17, factors=[17]),
# NS(length=18, factors=[6,3]),
NS(length=20, factors=[10,2]),
NS(length=21, factors=[7,3]),
NS(length=24, factors=[8,3]),
NS(length=25, factors=[5,5]),
# NS(length=27, factors=[3,3,3]),
NS(length=28, factors=[7,4]),
NS(length=30, factors=[10,3]),
NS(length=36, factors=[6,6]),
NS(length=42, factors=[7,6]),
NS(length=45, factors=[5,3,3]),
# NS(length=49, factors=[7,7]),
NS(length=50, factors=[10,5]),
NS(length=54, factors=[6,3,3]),
NS(length=56, factors=[8,7]),
# NS(length=64, factors=[16,4]),
# NS(length=72, factors=[8,3,3]),
NS(length=75, factors=[5,5,3]),
NS(length=80, factors=[16,5]),
# NS(length=81, factors=[3,3,3,3]),
# NS(length=96, factors=[16,6]),
# NS(length=100, factors=[10,10]),
NS(length=108, factors=[6,6,3]),
NS(length=112, factors=[16,7]),
NS(length=125, factors=[5,5,5]),
# NS(length=128, factors=[16,8]),
# NS(length=135, factors=[5,3,3,3]),
# NS(length=150, factors=[10,5,3]),
NS(length=160, factors=[16,10]),
# NS(length=162, factors=[6,3,3,3]),
NS(length=168, factors=[8,7,3]),
NS(length=180, factors=[10,6,3]),
# NS(length=216, factors=[8,3,3,3]),
NS(length=225, factors=[5,5,3,3]),
NS(length=240, factors=[16,5,3]),
# NS(length=243, factors=[3,3,3,3,3]),
# NS(length=256, factors=[16,16]),
# NS(length=270, factors=[10,3,3,3]),
# NS(length=288, factors=[16,6,3]),
NS(length=324, factors=[6,6,3,3]),
NS(length=343, factors=[7,7,7]),
NS(length=360, factors=[10,6,6]),
NS(length=400, factors=[16,5,5]),
# NS(length=486, factors=[6,3,3,3,3]),
# NS(length=540, factors=[10,6,3,3]),
NS(length=648, factors=[8,3,3,3,3]),
NS(length=720, factors=[16,5,3,3]),
# NS(length=729, factors=[3,3,3,3,3,3]),
NS(length=960, factors=[16,10,6]),
NS(length=1040, factors=[13,16,5]),
],
('uwide', 128): [
NS(length=272, factors=[16,17]),
],
('wide', 64): [
# NS(length=11, factors=[11]),
NS(length=22, factors=[2,11]),
NS(length=44, factors=[4,11]),
NS(length=60, factors=[6,10]),
NS(length=84, factors=[2,6,7]),
NS(length=90, factors=[3,3,10]),
NS(length=120, factors=[2,6,10]),
# NS(length=200, factors=[2,10,10]),
NS(length=300, factors=[3,10,10]),
NS(length=528, factors=[4,4,3,11]),
],
('uwide', 64): [
NS(length=32, factors=[16,2]),
NS(length=40, factors=[10,4]),
NS(length=48, factors=[3,4,4]),
NS(length=88, factors=[11,8]),
NS(length=176, factors=[16,11]),
NS(length=336, factors=[7,8,6]),
],
# ('tall', X): [
# NS(length=4),
# NS(length=13),
# NS(length=16),
# NS(length=26),
# NS(length=52),
# NS(length=104),
# NS(length=169),
# NS(length=192),
# NS(length=208),
# NS(length=320),
# NS(length=512),
# NS(length=625),
# NS(length=864),
# NS(length=1000),
# ]
}
expanded = []
for params, kernels in small_kernels.items():
flavour, threads_per_block = params
expanded.extend(NS(**kernel.__dict__,
flavour=flavour,
threads_per_block=threads_per_block,
scheme='CS_KERNEL_STOCKHAM') for kernel in kernels)
return expanded
def list_new_2d_kernels():
"""Return list of fused 2D kernels to generate with new generator."""
# can probably merge this with above when old gen is gone
fused_kernels = {
(('tall', 'tall'), 128): [
NS(length=[32, 32], factors=[[8,4],[8,4]], threads_per_transform=4),
],
}
expanded = []
for params, kernels in fused_kernels.items():
flavours, threads_per_block = params
expanded.extend(NS(**kernel.__dict__,
flavour=flavours,
threads_per_block=threads_per_block,
scheme='CS_KERNEL_2D_SINGLE') for kernel in kernels)
return expanded
def list_new_large_kernels():
"""Return list of large kernels to generate with the new generator."""
kernels = [
NS(length=50, factors=[10, 5], use_3steps_large_twd={'sp': 'true', 'dp': 'true'}, threads_per_block=256),
NS(length=64, factors=[8, 8], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=81, factors=[3, 3, 3, 3], use_3steps_large_twd={'sp': 'true', 'dp': 'true'}),
# NS(length=100, factors=[5, 5, 4], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=128, factors=[8, 4, 4], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=200, factors=[8, 5, 5], use_3steps_large_twd={'sp': 'false', 'dp': 'false'}),
NS(length=256, factors=[4, 4, 4, 4], use_3steps_large_twd={'sp': 'true', 'dp': 'false'}),
NS(length=336, factors=[6, 7, 8], use_3steps_large_twd={'sp': 'false', 'dp': 'false'})
]
# for SBCC kernel, increase desired threads_per_block so that columns per
# thread block is also increased. currently targeting for 16 columns
block_width = 16
for k in kernels:
k.scheme = 'CS_KERNEL_STOCKHAM_BLOCK_CC'
if not hasattr(k, 'threads_per_block'):
k.threads_per_block = block_width * reduce(mul, k.factors, 1) // min(k.factors)
if not hasattr(k, 'length'):
k.length = functools.reduce(lambda a, b: a * b, k.factors)
# kernels += [
# NS(length=64, factors=[4, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128),
# NS(length=128, factors=[8, 4, 4], scheme='CS_KERNEL_STOCKHAM_BLOCK_RC', threads_per_block=128),
# ]
return kernels
def default_runtime_compile(kernels):
'''Returns a copy of input kernel list with a default value for runtime_compile.'''
return [k if hasattr(k, 'runtime_compile') else NS(**k.__dict__, runtime_compile=False) for k in kernels]
def generate_kernel(kernel, precisions):
"""Generate a single kernel file for 'kernel'.
The kernel file contains all kernel variations corresponding to
the kernel meta data in 'kernel'.
A list of CPU functions is returned.
"""
fname = Path(__file__).resolve()
typename_dict = {
'sp': 'float2',
'dp': 'double2',
}
src = StatementList(
CommentBlock(
'Stockham kernels generated by:',
'',
' ' + ' '.join(sys.argv),
'',
'Generator is: ' + str(fname),
''
'Kernel is: ' + str(kernel)),
LineBreak(),
Include('<hip/hip_runtime.h>'),
Include('"kernel_launch.h"'),
Include('"kernels/common.h"'),
Include('"kernels/butterfly_constant.h"'),
Include('"rocfft_butterfly_template.h"'),
Include('"real2complex.h"'),
LineBreak())
kdevice, kglobal = stockham.stockham(**kernel.__dict__)
# forward runtime compile flag into kglobal.meta so we can know
# whether to put a prototype into the function pool
kglobal.meta = NS(**kglobal.meta.__dict__, runtime_compile=kernel.runtime_compile)
length = kglobal.meta.length
forward, inverse = kglobal.name, kglobal.name.replace('forward', 'inverse')
if not kernel.runtime_compile:
src += stockham.make_variants(kdevice, kglobal)
cpu_functions = []
for p in precisions:
if kglobal.meta.scheme == 'CS_KERNEL_STOCKHAM':
prototype = POWX_SMALL_GENERATOR(f'rocfft_internal_dfn_{p}_ci_ci_stoc_{length}',
'ip_' + forward, 'ip_' + inverse,
'op_' + forward, 'op_' + inverse, typename_dict[p])
src += prototype
cpu_functions.append(prototype.function(kglobal.meta, p))
elif kglobal.meta.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_CC':
prototype = POWX_LARGE_SBCC_GENERATOR(f'rocfft_internal_dfn_{p}_ci_ci_sbcc_{length}',
'ip_' + forward, 'ip_' + inverse,
'op_' + forward, 'op_' + inverse, typename_dict[p])
src += prototype
cpu_functions.append(prototype.function(kglobal.meta, p))
elif kglobal.meta.scheme == 'CS_KERNEL_2D_SINGLE':
prototype = POWX_SMALL_GENERATOR(f'rocfft_internal_dfn_{p}_ci_ci_2D_{length[0]}_{length[1]}',
'ip_' + forward, 'ip_' + inverse,
'op_' + forward, 'op_' + inverse, typename_dict[p])
src += prototype
cpu_functions.append(prototype.function(kglobal.meta, p))
elif kglobal.meta.scheme == 'CS_KERNEL_STOCKHAM_BLOCK_RC':
# SBRC_2D
sbrc_type, transpose_type, meta = 'SBRC_2D', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
cpu_functions.append(prototype.function(meta, p))
# SBRC_3D_FFT_TRANS_XY_Z
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_TRANS_XY_Z', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_trans_xy_z_tile_aligned_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'TILE_ALIGNED'
cpu_functions.append(prototype.function(meta, p))
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_TRANS_XY_Z', 'DIAGONAL', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_trans_xy_z_diagonal_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_TRANSPOSE_XY_Z', 'DIAGONAL'
cpu_functions.append(prototype.function(meta, p))
# SBRC_3D_FFT_TRANS_Z_XY
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_TRANS_Z_XY', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_trans_z_xy_tile_aligned_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_TRANSPOSE_Z_XY', 'TILE_ALIGNED'
cpu_functions.append(prototype.function(meta, p))
# SBRC_3D_FFT_TRANS_Z_XY
sbrc_type, transpose_type, meta = 'SBRC_3D_FFT_ERC_TRANS_Z_XY', 'TILE_ALIGNED', deepcopy(kglobal.meta)
prototype = POWX_LARGE_SBRC_GENERATOR(f'rocfft_internal_dfn_{p}_op_ci_ci_sbrc3d_fft_erc_trans_z_xy_tile_aligned_{length}',
'op_' + forward, 'op_' + inverse, typename_dict[p],
sbrc_type, transpose_type)
src += prototype
meta.scheme, meta.transpose = 'CS_KERNEL_STOCKHAM_R_TO_CMPLX_TRANSPOSE_Z_XY', 'TILE_ALIGNED'
cpu_functions.append(prototype.function(meta, p))
else:
raise NotImplementedError(f'Unable to generate host functions for scheme {kglobal.meta.scheme}.')
if not kernel.runtime_compile:
format_and_write(kernel_file_name(kernel), src)
return cpu_functions
def generate_new_kernels(kernels, precisions):
"""Generate and write kernels from the kernel list.
Entries in the kernel list are simple namespaces. These are
passed as keyword arguments to the Stockham generator.
A list of CPU functions is returned.
"""
return flatten([generate_kernel(k, precisions) for k in kernels])
def cli():
"""Command line interface..."""
parser = argparse.ArgumentParser(prog='kernel-generator')
subparsers = parser.add_subparsers(dest='command')
    parser.add_argument('--groups', type=int, help='Number of small kernel groups.', default=150)
parser.add_argument('--pattern', type=str, help='Kernel pattern to generate.', default='all')
parser.add_argument('--precision', type=str, help='Precision to generate.', default='all')
parser.add_argument('--manual-small', type=str, help='Small kernel sizes to generate.')
parser.add_argument('--manual-large', type=str, help='Large kernel sizes to generate.')
list_parser = subparsers.add_parser('list', help='List kernel files that will be generated.')
generate_parser = subparsers.add_parser('generate', help='Generate kernels.')
generate_parser.add_argument('generator', type=str, help='Kernel generator executable.')
args = parser.parse_args()
#
# which kernels to build? set the flags for generate before modifying patterns
#
patterns = args.pattern.split(',')
large = 'all' in patterns or 'large' in patterns
small = 'all' in patterns or 'small' in patterns
dim2 = 'all' in patterns or '2D' in patterns
pow2 = small or 'pow2' in patterns
pow3 = small or 'pow3' in patterns
pow5 = small or 'pow5' in patterns
pow7 = small or 'pow7' in patterns
if patterns == ['none']:
patterns = []
if args.manual_small:
patterns += ['small']
if args.manual_large:
patterns += ['large']
# TODO- if dim2, pattern += small as well
replacements = {
'pow2': 'small',
'pow3': 'small',
'pow5': 'small',
'pow7': 'small',
}
patterns = [replacements.get(key, key) for key in patterns if key != 'none']
if 'all' in patterns:
patterns += ['small']
patterns += ['large']
patterns += ['2D']
patterns = set(patterns)
#
    # which precisions to build?
#
precisions = args.precision.split(',')
replacements = {
'single': 'sp',
'double': 'dp',
}
precisions = [replacements.get(key, key) for key in precisions if key != 'none']
if 'all' in precisions:
precisions = ['sp', 'dp']
precisions = set(precisions)
#
# list all the exact sizes of kernels to build
#
manual_small = None
if args.manual_small:
manual_small = product(map(int, args.manual_small.split(',')),
['CS_KERNEL_STOCKHAM'])
manual_large = None
if args.manual_large:
manual_large = product(map(int, args.manual_large.split(',')),
['CS_KERNEL_STOCKHAM_BLOCK_CC', 'CS_KERNEL_STOCKHAM_BLOCK_RC'])
# all kernels to be generated from arguments
expand_sizes = {
'small': { 'sp': [], 'dp': [] },
'large': { 'sp': [], 'dp': [] },
}
if small or pow2 or pow3 or pow5 or pow7:
for p in precisions:
expand_sizes['small'][p] = merge(expand_sizes['small'][p], supported_small_sizes(p, pow2, pow3, pow5, pow7))
if manual_small:
for p in precisions:
expand_sizes['small'][p] = merge(expand_sizes['small'][p], manual_small)
if large:
for p in precisions:
expand_sizes['large'][p] = merge(expand_sizes['large'][p], supported_large_sizes(p))
if manual_large:
for p in precisions:
expand_sizes['large'][p] = merge(expand_sizes['large'][p], manual_large)
# TODO- let dim2 ("CS_KERNEL_2D_SINGLE"-typed) use new-gen 1D kernels, and get the dependent kernels.
# For now, 2D_SINGLE kernels still use old-gen small kernels
#
# which kernels by new-gen and which by old-gen? categorize input kernels
#
supported_new_small_kernels = list_new_kernels()
supported_new_large_kernels = list_new_large_kernels() # currently 'large' really is sbcc kernels only
new_small_kernels = new_large_kernels = []
# Don't subtract_from_all for large, since so far sbrc and transpose still rely on old-gen.
for p in precisions:
expand_sizes['small'][p], new_smalls = pick(expand_sizes['small'][p], supported_new_small_kernels)
expand_sizes['large'][p], new_larges = pick(expand_sizes['large'][p], supported_new_large_kernels, subtract_from_all=False)
# remove unsupported length in old_gen
for length in list(expand_sizes['large'][p]):
if length not in old_gen_supported_large:
del expand_sizes['large'][p][length]
new_small_kernels = merge_length(new_small_kernels, new_smalls)
new_large_kernels = merge_length(new_large_kernels, new_larges)
new_kernels = new_small_kernels + new_large_kernels + list_new_2d_kernels()
# set runtime_compile on new kernels that haven't already set a
# value
new_kernels = default_runtime_compile(new_kernels)
    # update the patterns after removing new kernels from the old generator, so we don't reference .cpp files that will never be generated
if 'small' in patterns and len(expand_sizes['small']['sp']) == 0 and len(expand_sizes['small']['dp']) == 0:
patterns.remove('small')
if 'large' in patterns and len(expand_sizes['large']['sp']) == 0 and len(expand_sizes['large']['dp']) == 0:
patterns.remove('large')
#
# return the necessary include files to cmake
#
if args.command == 'list':
scprint(set(list_old_generated_kernels(patterns=patterns,
precisions=precisions,
num_small_kernel_groups=args.groups)
+ list_generated_kernels(new_kernels)))
return
if args.command == 'generate':
# collection of Functions to generate prototypes for
psmall, plarge, p2d = {}, {}, {}
# already excludes small and large-1D from new-generators
for p in precisions:
psmall = pmerge(psmall, generate_small_1d_prototypes(p, expand_sizes['small'][p]))
plarge = pmerge(plarge, generate_large_1d_prototypes(p, expand_sizes['large'][p]))
if dim2:
for p in precisions:
transform_2D = merge([], supported_2d_sizes(p))
p2d = pmerge(p2d, generate_2d_prototypes(p, transform_2D))
# hijack a few new kernels...
pnew = pmerge({}, generate_new_kernels(new_kernels, precisions))
cpu_functions = list(merge(psmall, plarge, p2d, pnew).values())
format_and_write('function_pool.cpp', generate_cpu_function_pool(cpu_functions))
old_small_lengths = {f.meta.length for f in psmall.values()}
old_large_lengths = {f.meta.length for f in plarge.values()} # sbcc=new-gen, sbrc/transpose=old-gen
new_large_lengths = {k.length for k in new_large_kernels} # sbcc by new-gen
if old_small_lengths:
subprocess.run([args.generator, '-g', str(args.groups), '-p', args.precision, '-t', 'none', '--manual-small', cjoin(sorted(old_small_lengths))], check=True)
if old_large_lengths:
if new_large_lengths:
subprocess.run([args.generator, '-g', str(args.groups), '-p', args.precision, '-t', 'none', '--manual-large', cjoin(sorted(old_large_lengths)), '--no-sbcc', cjoin(sorted(new_large_lengths))], check=True)
else:
subprocess.run([args.generator, '-g', str(args.groups), '-p', args.precision, '-t', 'none', '--manual-large', cjoin(sorted(old_large_lengths))], check=True)
if dim2:
# XXX: currently new2d does both precisions...
new2d = {tuple(x.length) for x in list_new_2d_kernels()}
if 'sp' in precisions:
old2d = {f.meta.length for f in p2d.values() if f.meta.precision == 'sp'}
subprocess.run([args.generator, '-g', str(args.groups), '-p', 'single', '-t', '2D', '--manual-2d', cjoin('x'.join(map(str, lengths)) for lengths in old2d - new2d)], check=True)
if 'dp' in precisions:
old2d = {f.meta.length for f in p2d.values() if f.meta.precision == 'dp'}
subprocess.run([args.generator, '-g', str(args.groups), '-p', 'double', '-t', '2D', '--manual-2d', cjoin('x'.join(map(str, lengths)) for lengths in old2d - new2d)], check=True)
if __name__ == '__main__':
cli()
| mit | -6,375,603,604,371,238,000 | 40.159959 | 219 | 0.570605 | false |
jkyeung/XlsxWriter | xlsxwriter/test/worksheet/test_worksheet05.py | 1 | 2213 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2016, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
"""
Test assembling a complete Worksheet file.
"""
def test_assemble_xml_file(self):
"""Test writing a worksheet with strings in cells."""
self.maxDiff = None
fh = StringIO()
worksheet = Worksheet()
worksheet._set_filehandle(fh)
worksheet.str_table = SharedStringTable()
worksheet.select()
# Write some strings.
worksheet.write_string(0, 0, 'Foo')
worksheet.write_string(2, 0, 'Bar')
worksheet.write_string(2, 3, 'Baz')
worksheet._assemble_xml_file()
exp = _xml_to_list("""
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
<dimension ref="A1:D3"/>
<sheetViews>
<sheetView tabSelected="1" workbookViewId="0"/>
</sheetViews>
<sheetFormatPr defaultRowHeight="15"/>
<sheetData>
<row r="1" spans="1:4">
<c r="A1" t="s">
<v>0</v>
</c>
</row>
<row r="3" spans="1:4">
<c r="A3" t="s">
<v>1</v>
</c>
<c r="D3" t="s">
<v>2</v>
</c>
</row>
</sheetData>
<pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
</worksheet>
""")
got = _xml_to_list(fh.getvalue())
self.assertEqual(got, exp)
| bsd-2-clause | -873,628,103,883,006,600 | 32.029851 | 171 | 0.474469 | false |
erggo/Harpy | harpia/bpGUI/sum.py | 1 | 5658 | # -*- coding: utf-8 -*-
# [HARPIA PROJECT]
#
#
# S2i - Intelligent Industrial Systems
# DAS - Automation and Systems Department
# UFSC - Federal University of Santa Catarina
# Copyright: 2006 - 2007 Luis Carlos Dill Junges ([email protected]), Clovis Peruchi Scotti ([email protected]),
# Guilherme Augusto Rutzen ([email protected]), Mathias Erdtmann ([email protected]) and S2i (www.s2i.das.ufsc.br)
# 2007 - 2009 Clovis Peruchi Scotti ([email protected]), S2i (www.s2i.das.ufsc.br)
#
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# For further information, check the COPYING file distributed with this software.
#
#----------------------------------------------------------------------
from harpia.GladeWindow import GladeWindow
from harpia.amara import binderytools as bt
import gtk
from harpia.s2icommonproperties import S2iCommonProperties
#i18n
import os
import gettext
APP='harpia'
DIR=os.environ['HARPIA_DATA_DIR']+'po'
_ = gettext.gettext
gettext.bindtextdomain(APP, DIR)
gettext.textdomain(APP)
#----------------------------------------------------------------------
class Properties( GladeWindow, S2iCommonProperties ):
#----------------------------------------------------------------------
def __init__( self, PropertiesXML, S2iBlockProperties):
self.m_sDataDir = os.environ['HARPIA_DATA_DIR']
filename = self.m_sDataDir+'glade/sum.glade'
self.m_oPropertiesXML = PropertiesXML
self.m_oS2iBlockProperties = S2iBlockProperties
widget_list = [
'Properties',
'SUMBackgroundColor',
'SUMBorderColor',
'SUMHelpView'
]
handlers = [
'on_sum_cancel_clicked',
'on_sum_confirm_clicked',
'on_SUMBackColorButton_clicked',
'on_SUMBorderColorButton_clicked'
]
top_window = 'Properties'
GladeWindow.__init__(self, filename, top_window, widget_list, handlers)
self.widgets['Properties'].set_icon_from_file(self.m_sDataDir+"images/harpia_ave.png")
#load properties values
#there is no properties
#load border color
self.m_oBorderColor = self.m_oS2iBlockProperties.GetBorderColor()
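        # gtk.gdk.Color uses 16-bit colour channels, so the stored 8-bit RGB values are scaled by 257 (255 * 257 = 65535)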
t_nBorderRed = self.m_oBorderColor[0] * 257
t_nBorderGreen = self.m_oBorderColor[1] * 257
t_nBorderBlue = self.m_oBorderColor[2] * 257
t_oBorderColor = gtk.gdk.Color(red=t_nBorderRed,green=t_nBorderGreen,blue=t_nBorderBlue)
self.widgets['SUMBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oBorderColor)
#load block color
self.m_oBackColor = self.m_oS2iBlockProperties.GetBackColor()
t_nBackRed = self.m_oBackColor[0] * 257
t_nBackGreen = self.m_oBackColor[1] * 257
t_nBackBlue = self.m_oBackColor[2] * 257
t_oBackColor = gtk.gdk.Color(red=t_nBackRed,green=t_nBackGreen,blue=t_nBackBlue)
self.widgets['SUMBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oBackColor)
#load help text
t_oS2iHelp = bt.bind_file(self.m_sDataDir+"help/sum"+ _("_en.help"))
t_oTextBuffer = gtk.TextBuffer()
t_oTextBuffer.set_text( unicode( str( t_oS2iHelp.help.content) ) )
self.widgets['SUMHelpView'].set_buffer( t_oTextBuffer )
#----------------------------------------------------------------------
def __del__(self):
pass
#----------------------------------------------------------------------
def on_sum_cancel_clicked( self, *args ):
self.widgets['Properties'].destroy()
#----------------------------------------------------------------------
def on_sum_confirm_clicked( self, *args ):
self.m_oS2iBlockProperties.SetBorderColor( self.m_oBorderColor )
self.m_oS2iBlockProperties.SetBackColor( self.m_oBackColor )
self.widgets['Properties'].destroy()
#----------------------------------------------------------------------
def on_SUMBackColorButton_clicked(self,*args):
t_oColor = self.RunColorSelection()
        if t_oColor is not None:
self.widgets['SUMBackgroundColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
self.m_oBackColor[0] = t_oColor.red / 257
self.m_oBackColor[1] = t_oColor.green / 257
self.m_oBackColor[2] = t_oColor.blue / 257
#----------------------------------------------------------------------
def on_SUMBorderColorButton_clicked(self,*args):
t_oColor = self.RunColorSelection()
        if t_oColor is not None:
self.widgets['SUMBorderColor'].modify_bg(gtk.STATE_NORMAL,t_oColor)
self.m_oBorderColor[0] = t_oColor.red / 257
self.m_oBorderColor[1] = t_oColor.green / 257
self.m_oBorderColor[2] = t_oColor.blue / 257
#----------------------------------------------------------------------
#SumProperties = Properties()
#SumProperties.show( center=0 )
| gpl-3.0 | -8,235,328,954,891,495,000 | 32.678571 | 139 | 0.567515 | false |
wdbm/abstraction | fix_database.py | 1 | 7300 | #!/usr/bin/env python
"""
################################################################################
# #
# fix_database #
# #
################################################################################
# #
# LICENCE INFORMATION #
# #
# This program inspects an existing database of conversational exchanges, #
# changes data stored in the database to appropriate types and then saves the #
# changed data to a new database. The original database is not modified. #
# #
# copyright (C) 2016 William Breaden Madden #
# #
# This software is released under the terms of the GNU General Public License #
# version 3 (GPLv3). #
# #
# This program is free software: you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# For a copy of the GNU General Public License, see #
# <http://www.gnu.org/licenses/>. #
# #
################################################################################
Usage:
program [options]
Options:
-h, --help display help message
--version display version and exit
-v, --verbose verbose logging
-s, --silent silent
-u, --username=USERNAME username
--inputdatabase=FILE database [default: database.db]
--outputdatabase=FILE database [default: database_1.db]
--table=NAME table [default: exchanges]
--tablemetadata=NAME metadata table [default: metadata]
"""
name = "fix_database"
version = "2016-06-17T1559Z"
logo = None
import ast
import datetime
import docopt
import inspect
import logging
import os
import subprocess
import sys
import time
import abstraction
import dataset
import propyte
import pyprel
import shijian
import technicolor
def main(options):
global program
program = propyte.Program(
options = options,
name = name,
version = version,
logo = logo
)
global log
from propyte import log
filename_database = options["--inputdatabase"]
filename_database_out = options["--outputdatabase"]
name_table = options["--table"]
name_table_metadata = options["--tablemetadata"]
log.info("\naccess database {filename}".format(
filename = filename_database
))
database = dataset.connect(
"sqlite:///{filename_database}".format(
filename_database = filename_database
)
)
log.info("access table \"{name_table}\"".format(
name_table = name_table
))
table = database[name_table]
log.info("number of rows in table \"{name_table}\": {number_of_rows}".format(
name_table = name_table,
number_of_rows = str(len(table))
))
# Fix database with data version 2015-01-06T172242Z.
# Build a list of unique exchanges.
exchanges = []
for entry in table:
utterance = entry["utterance"]
response = entry["response"]
utterance_time_UNIX = entry["utteranceTimeUNIX"]
response_time_UNIX = entry["responseTimeUNIX"]
utterance_reference = entry["utteranceReference"]
response_reference = entry["responseReference"]
exchange_reference = entry["exchangeReference"]
if type(utterance_reference) is tuple:
log.debug("\nchange utterance reference")
log.debug("from:\n{utterance_reference}".format(
utterance_reference = utterance_reference
))
utterance_reference = utterance_reference[0]
log.debug("to:\n{utterance_reference}".format(
utterance_reference = utterance_reference
))
if type(response_reference) is tuple:
log.debug("\nchange response reference")
log.debug("from:\n{response_reference}".format(
response_reference = response_reference
))
response_reference = response_reference[0]
log.debug("to:\n{response_reference}".format(
response_reference = response_reference
))
if exchange_reference[0] == "(":
log.debug("\nchange exchange reference")
log.debug("from:\n{exchange_reference}".format(
exchange_reference = exchange_reference
))
exchange_reference = ast.literal_eval(exchange_reference)
exchange_reference = unicode(str(exchange_reference[0]), "utf-8")
log.debug("to:\n{exchange_reference}".format(
exchange_reference = exchange_reference
))
# Create a new exchange object using the fixed entries and append it to
# the list of modified exchanges.
exchange = abstraction.Exchange(
utterance = utterance,
response = response,
utterance_time_UNIX = utterance_time_UNIX,
response_time_UNIX = response_time_UNIX,
utterance_reference = utterance_reference,
response_reference = response_reference,
exchange_reference = exchange_reference
)
exchange.printout()
exchanges.append(exchange)
# Save the exchanges to the new database.
log.info("save exchanges to database")
abstraction.save_exchanges_to_database(
exchanges = exchanges,
filename = filename_database_out
)
# Save metadata to the new database.
abstraction.save_database_metadata(filename = filename_database_out)
program.terminate()
if __name__ == "__main__":
options = docopt.docopt(__doc__)
if options["--version"]:
print(version)
exit()
main(options)
| gpl-3.0 | 9,110,137,506,986,018,000 | 39.782123 | 81 | 0.499178 | false |
Azure/azure-sdk-for-python | sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql_flexibleservers/operations/_check_virtual_network_subnet_usage_operations.py | 1 | 5283 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CheckVirtualNetworkSubnetUsageOperations(object):
"""CheckVirtualNetworkSubnetUsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.mysql_flexibleservers.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def execute(
self,
location_name, # type: str
parameters, # type: "_models.VirtualNetworkSubnetUsageParameter"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetworkSubnetUsageResult"
"""Get virtual network subnet usage for a given vNet resource id.
:param location_name: The name of the location.
:type location_name: str
:param parameters: The required parameters for creating or updating a server.
:type parameters: ~azure.mgmt.rdbms.mysql_flexibleservers.models.VirtualNetworkSubnetUsageParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkSubnetUsageResult, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.mysql_flexibleservers.models.VirtualNetworkSubnetUsageResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkSubnetUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-07-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.execute.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'locationName': self._serialize.url("location_name", location_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkSubnetUsageParameter')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkSubnetUsageResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
execute.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DBForMySql/locations/{locationName}/checkVirtualNetworkSubnetUsage'} # type: ignore
| mit | -119,604,675,933,735,000 | 47.916667 | 168 | 0.680863 | false |
nlm/netgen | netgen/templateutils.py | 1 | 3361 | from colors import color
from functools import partial
from jinja2.exceptions import TemplateRuntimeError
import math
from math import log, ceil
from six.moves import xrange
class TemplateUtils(object):
    def __init__(self, ipversion):
        if ipversion not in (4, 6):
            raise ValueError('ipversion must be 4 or 6')
        self._ipversion = ipversion  # needed by the ipversion property below
        self.function_ip46 = partial(self.ipver, ipversion)
        self.function_minpref = partial(self.minpref, ipversion)
@property
def ipversion(self):
'''
ipversion() -> int
Returns the ip version for which this class is instantiated
'''
return self._ipversion
@staticmethod
def ipver(ipversion, valuev4, valuev6):
if ipversion == 4:
return valuev4
elif ipversion == 6:
return valuev6
else:
raise ValueError('invalid value for ipversion: {0}'
.format(ipversion))
@staticmethod
def filter_dotreverse(value, sep=None):
'''
        filter_dotreverse('1.2.3.4.5') -> '5.4.3.2.1'
Reverses a dotted string
'''
if sep is None:
sep = '.'
return sep.join(reversed(str(value).split(sep)))
@staticmethod
def filter_colored(text, fg, bg=None, style=None):
try:
return color(text, fg=fg, bg=bg, style=style)
except Exception as exc:
raise TemplateRuntimeError(exc)
@staticmethod
def minpref(ipversion, host_count):
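        # Longest prefix (smallest network) that still holds host_count addresses:
        # 32 (IPv4) or 128 (IPv6) minus ceil(log2(host_count)).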
if ipversion == 4:
return 32 - int(ceil(log(host_count, 2)))
elif ipversion == 6:
return 128 - int(ceil(log(host_count, 2)))
else:
raise ValueError('invalid value for ipversion: {0}'
.format(ipversion))
@staticmethod
def function_orange(*args, **kwargs):
offset = int(kwargs.get('offset', 0))
return [i + offset for i in range(*args)]
@staticmethod
def function_xorange(*args, **kwargs):
offset = int(kwargs.get('offset', 0))
return (i + offset for i in xrange(*args))
@classmethod
def function_range1(cls, *args, **kwargs):
return cls.function_orange(*args, offset=1, **kwargs)
@classmethod
def function_xrange1(cls, *args, **kwargs):
return cls.function_xorange(*args, offset=1, **kwargs)
@staticmethod
def function_raise(message):
raise TemplateRuntimeError(message)
@staticmethod
def function_assert(expr, message):
if not expr:
raise TemplateRuntimeError(message)
# Setup the environment
def add_custom_filters(self, env):
for name in ('colored', 'dotreverse'):
env.filters[name] = getattr(self, 'filter_{0}'.format(name))
def add_custom_functions(self, env):
for name in ('assert', 'ip46', 'minpref', 'raise'):
env.globals[name] = getattr(self, 'function_{0}'.format(name))
env.globals['range'] = self.function_xorange
env.globals['range1'] = self.function_xrange1
math.int = int
math.float = float
math.round = round
math.min = min
math.max = max
env.globals['math'] = math
def setup_environment(self, env):
self.add_custom_functions(env)
self.add_custom_filters(env)
return env
| gpl-2.0 | 542,898,760,346,410,400 | 29.554545 | 74 | 0.591491 | false |
leonro/magpy-git | magpy/acquisition/palmacqprotocol.py | 1 | 11090 | import sys, time, os, socket
import struct, binascii, re, csv
from datetime import datetime, timedelta
from twisted.protocols.basic import LineReceiver
from twisted.internet import reactor
from twisted.python import usage, log
from twisted.internet.serialport import SerialPort
from twisted.web.server import Site
from twisted.web.static import File
try: # version > 0.8.0
from autobahn.wamp1.protocol import exportRpc
except:
from autobahn.wamp import exportRpc
iddict = {'f': '10', 'x': '11', 'y': '12', 'z': '13', 'df': '14', 't': '30', 'rh': '33', 'p': '35', 'w': '38'}
"""
0: clientname -- str (atlas)
1: timestamp (PC) -- str (2013-01-23 12:10:32.712475)
2: date (PC) -- str (2013-01-23)
3: outtime (PC) -- str (12:10:32.712475)
4: timestamp (sensor) -- str (2013-01-23 12:10:32.712475)
5: GPS coordinates -- str (??.??N ??.??E)
9: Sensor Description -- str (to be found in the adict)
10: f -- float (48633.04) [nT]
11: x -- float (20401.3) [nT]
12: y -- float (-30.0) [nT]
13: z -- float (43229.7) [nT]
14: df -- float (0.06) [nT]
30: T (ambient) -- float (7.2) [C]
31: T (sensor) -- float (10.0) [C]
32: T (electronics) -- float (12.5) [C]
33: rh (relative humidity) -- float (99.0) [%]
34: T (dewpoint) -- float (6.0) [C]
38: W (weight) -- float (24.0042) [g]
40: Error code (POS1) -- float (80) [-]
60: VDD (support voltage) -- float (5.02) [V]
61: VAD (measured voltage) -- float (2.03) [V]
62: VIS (measured voltage) -- float (0.00043) [V]
"""
def timeToArray(timestring):
# Converts time string of format 2013-12-12 23:12:23.122324
    # to an array similar to a datetime object
try:
splittedfull = timestring.split(' ')
splittedday = splittedfull[0].split('-')
splittedsec = splittedfull[1].split('.')
splittedtime = splittedsec[0].split(':')
datearray = splittedday + splittedtime
datearray.append(splittedsec[1])
datearray = map(int,datearray)
return datearray
except:
log.msg('Error while extracting time array')
return []
def dataToFile(outputdir, sensorid, filedate, bindata, header):
# File Operations
try:
hostname = socket.gethostname()
path = os.path.join(outputdir,hostname,sensorid)
# outputdir defined in main options class
if not os.path.exists(path):
os.makedirs(path)
savefile = os.path.join(path, sensorid+'_'+filedate+".bin")
if not os.path.isfile(savefile):
with open(savefile, "wb") as myfile:
myfile.write(header + "\n")
myfile.write(bindata + "\n")
else:
with open(savefile, "a") as myfile:
myfile.write(bindata + "\n")
except:
log.err("PalmAcq - Protocol: Error while saving file")
## PalmAcq protocol
## --------------------
class PalmAcqProtocol(LineReceiver):
"""
Protocol to read Arduino data (usually from ttyACM0)
Tested so far only for Arduino Uno on a Linux machine
The protocol works only if the serial output follows the MagPy convention:
Up to 99 Sensors are supported identified by unique sensor names and ID's.
ARDUINO OUTPUT:
- serial output on ttyACM0 needs to follow the MagPy definition:
Three data sequences are supported:
1.) The meta information
The meta information line contains all information for a specific sensor.
If more than one sensor is connected, then several meta information
lines should be sent (e.g. M1:..., M2:..., M99:...)
Meta lines should be resent once in a while (e.g. every 10-100 data points)
Example:
M1: SensorName: MySensor, SensorID: 12345, SensorRevision: 0001
2.) The header line
The header line contains information on the provided data for each sensor.
The typical format includes the MagPy key, the actual Variable and the unit.
Key and Variable are separeted by an underscore, unit is provided in brackets.
Like the Meta information the header should be sent out once in a while
Example:
H1: f_F [nT], t1_Temp [deg C], var1_Quality [None], var2_Pressure [mbar]
3.) The data line:
The data line containes all data from a specific sensor
Example:
D1: 46543.7898, 6.9, 10, 978.000
- recording starts after meta and header information have been received
MARTAS requirements:
- add the following line to the sensor.txt
ARDUINO ACM0 9600
- on the MARTAS machine an additional information file will be created
containing the sensor information for connected ARDUINO boards:
arduinolist.csv:
"HMC5883_12345_0001","['x', 'y', 'z']"
This file is used by the MARCOS machine to identify connected sensors and their keys
"""
delimiter = "\r"
## need a reference to our WS-MCU gateway factory to dispatch PubSub events
##
def __init__(self, wsMcuFactory, sensor, outputdir):
self.wsMcuFactory = wsMcuFactory
self.sensorid = sensor
self.hostname = socket.gethostname()
self.outputdir = outputdir
self.sensor = ''
self.sensordict = {}
self.ConversionConstant = 40/4/float(int("0x800000",16))
eventstring = "evt0,evt1,evt3,evt11,evt12,evt13,evt32,evt60,evt99"
self.eventlist = eventstring.split(',')
def connectionMade(self):
log.msg('%s connected.' % self.sensorid)
def extractPalmAcqData(self, line):
"""
Method to convert hexadecimals to doubles
Returns a data array
"""
        # INTERPRETING INCOMING DATA AND CONVERTING HEXADECIMALS TO DOUBLES
if line.startswith('*'):
try:
data = []
chunks = []
line = line.strip('*')
chunks.append(line[:6])
chunks.append(line[6:12])
chunks.append(line[12:18])
trigger = line[18]
ar = line.split(':')
if len(ar) == 2:
extended = ar[1]
chunks.append(extended[:4])
chunks.append(extended[4:8])
chunks.append(extended[8:12])
chunks.append(extended[12:16])
chunks.append(extended[16:20])
for idx, chunk in enumerate(chunks):
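                    # 6-character chunks are 24-bit two's-complement samples: XOR with 0x800000
                    # followed by subtracting 0x800000 sign-extends them to signed integers
                    # before scaling; 4-character chunks are treated analogously as 16-bit values.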
if len(chunk) == 6:
val = hex(int('0x'+chunk,16) ^ int('0x800000',16))
val = hex(int(val,16) - int('0x800000',16))
# Conversion constanst should be obtained from palmacq-init
val = float(int(val,16)) * self.ConversionConstant
elif len(chunk) == 4:
val = hex(int('0x'+chunk,16) ^ int('0x8000',16))
val = hex(int(val,16) - int('0x8000',16))
if idx == 3:
val = float(int(val,16)) * 0.000575 + 1.0
elif idx == 4:
val = float(int(val,16)) / 128.0
elif idx > 4:
val = float(int(val,16)) / 8000.0
data.append(val)
# SOME TEST OUTPUT
#if len(data)> 4:
# print datetime.utcnow(), data
#print data, trigger
return data, trigger
except:
#print "PALMACQ: an error occurred while interpreting the hexadecimal code"
return [], 'N'
else:
return [], 'N'
def processPalmAcqData(self, data):
"""Convert raw ADC counts into SI units as per datasheets"""
printdata = False
currenttime = datetime.utcnow()
outdate = datetime.strftime(currenttime, "%Y-%m-%d")
filename = outdate
outtime = datetime.strftime(currenttime, "%H:%M:%S")
# IMPORTANT : GET TIMESTAMP FROM DATA !!!!!!
timestamp = datetime.strftime(currenttime, "%Y-%m-%d %H:%M:%S.%f")
datearray = timeToArray(timestamp)
packcode = '6hL'
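        # '6hL': six short ints (year, month, day, hour, minute, second) plus an unsigned long for microseconds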
# Would probably be good to preserve the hexadecimal format
# Seems to be extremely effective regarding accuracy and storage
x = data[0]
y = data[1]
z = data[2]
v = 0.0
t = 0.0
p = 0.0
q = 0.0
r = 0.0
if len(data) > 4:
v = data[3]
t = data[4]
p = data[5]
q = data[6]
r = data[7]
datearray.append(x)
datearray.append(y)
datearray.append(z)
datearray.append(int(float(v)*10000))
datearray.append(int(float(t)*10000))
datearray.append(p)
datearray.append(q)
datearray.append(r)
packcode = packcode + 'fffllfff'
multiplier = [1,1,1,10000,10000,1,1,1]
try:
data_bin = struct.pack(packcode,*datearray)
except:
log.msg('Error while packing binary data')
pass
header = "# MagPyBin %s %s %s %s %s %s %d" % (self.sensorid, "[x,y,z,v,t,p,q,r]", "[x,y,z,v,t,p,q,r]", "[V,V,V,V,C,V,V,V]", str(multiplier).replace(" ",""), packcode, struct.calcsize(packcode))
if printdata:
#print header
print timestamp
# File Operations
try:
dataToFile(self.outputdir, self.sensorid, filename, data_bin, header)
except:
log.msg('Saving failed')
pass
evt0 = {'id': 0, 'value': self.hostname}
evt1 = {'id': 1, 'value': timestamp}
evt3 = {'id': 3, 'value': outtime}
evt11 = {'id': 11, 'value': x}
evt12 = {'id': 12, 'value': y}
evt13 = {'id': 13, 'value': z}
evt32 = {'id': 32, 'value': t}
evt60 = {'id': 60, 'value': v}
evt99 = {'id': 99, 'value': 'eol'}
return evt0,evt1,evt3,evt11,evt12,evt13,evt32,evt60,evt99
def lineReceived(self, line):
data=[]
if line:
data, trigger = self.extractPalmAcqData(line)
if len(data) > 1:
evt0,evt1,evt3,evt11,evt12,evt13,evt32,evt60,evt99 = self.processPalmAcqData(data)
dispatch_url = "http://example.com/"+self.hostname+"/pal#"+self.sensorid+"-value"
# eventlist defined in init
for event in self.eventlist:
self.wsMcuFactory.dispatch(dispatch_url, eval(event))
| gpl-3.0 | 7,457,627,093,789,535,000 | 38.049296 | 201 | 0.533724 | false |
deplicate/deplicate | duplicate/utils/fs/osx.py | 1 | 1187 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from ..init import compilecards
from .common import fsdecode
from .posix import has_hidden_attribute as _has_hidden_attribute
from .posix import has_archive_attribute, is_archived
WILDCARDS = (
'*.DS_Store', '.AppleDouble', '.LSOverride', 'Icon', '._*',
'.DocumentRevisions-V100', '.fseventsd', '.Spotlight-V100',
'.TemporaryItems', '.Trashes', '.VolumeIcon.icns',
'.com.apple.timemachine.donotpresent', '.AppleDB', '.AppleDesktop',
'Network Trash Folder', 'Temporary Items', '.apdisk')
_wildcards_match = compilecards(WILDCARDS).match
def has_hidden_attribute(filename):
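    # Prefer the macOS Foundation framework (via PyObjC) to query the hidden flag;
    # fall back to the generic POSIX implementation when it is unavailable.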
try:
import Foundation
ufilename = fsdecode(filename)
url = Foundation.NSURL.fileURLWithPath_(ufilename)
res = url.getResourceValue_forKey_error_(
None, Foundation.NSURLIsHiddenKey, None
)
flag = res[1]
except ImportError:
flag = _has_hidden_attribute(filename)
return flag
def is_hidden(filename):
return filename.startswith('.') or has_hidden_attribute(filename)
def is_system(filename):
return bool(_wildcards_match(filename))
| mit | 7,296,047,325,512,074,000 | 26.604651 | 71 | 0.676495 | false |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/linear_model/tests/test_randomized_l1.py | 1 | 4733 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.datasets import load_diabetes, load_iris
from sklearn.feature_selection import f_regression, f_classif
from sklearn.linear_model.base import _preprocess_data
from sklearn.linear_model.randomized_l1 import (lasso_stability_path,
RandomizedLasso,
RandomizedLogisticRegression)
from sklearn.preprocessing import StandardScaler
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
diabetes = load_diabetes()
X = diabetes.data
y = diabetes.target
X = StandardScaler().fit_transform(X)
X = X[:, [2, 3, 6, 7, 8]]
# test that the feature score of the best features
F, _ = f_regression(X, y)
def test_lasso_stability_path():
# Check lasso stability path
# Load diabetes data and add noisy features
scaling = 0.3
coef_grid, scores_path = lasso_stability_path(X, y, scaling=scaling,
random_state=42,
n_resampling=30)
assert_array_equal(np.argsort(F)[-3:],
np.argsort(np.sum(scores_path, axis=1))[-3:])
def test_randomized_lasso():
# Check randomized lasso
scaling = 0.3
selection_threshold = 0.5
# or with 1 alpha
clf = RandomizedLasso(verbose=False, alpha=1, random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
# or with many alphas
clf = RandomizedLasso(verbose=False, alpha=[1, 0.8], random_state=42,
scaling=scaling,
selection_threshold=selection_threshold)
feature_scores = clf.fit(X, y).scores_
assert_equal(clf.all_scores_.shape, (X.shape[1], 2))
assert_array_equal(np.argsort(F)[-3:], np.argsort(feature_scores)[-3:])
X_r = clf.transform(X)
X_full = clf.inverse_transform(X_r)
assert_equal(X_r.shape[1], np.sum(feature_scores > selection_threshold))
assert_equal(X_full.shape, X.shape)
clf = RandomizedLasso(verbose=False, alpha='aic', random_state=42,
scaling=scaling)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(feature_scores, X.shape[1] * [1.])
clf = RandomizedLasso(verbose=False, scaling=-0.1)
assert_raises(ValueError, clf.fit, X, y)
clf = RandomizedLasso(verbose=False, scaling=1.1)
assert_raises(ValueError, clf.fit, X, y)
def test_randomized_logistic():
# Check randomized sparse logistic regression
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
X_orig = X.copy()
feature_scores = clf.fit(X, y).scores_
assert_array_equal(X, X_orig) # fit does not modify X
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
clf = RandomizedLogisticRegression(verbose=False, C=[1., 0.5],
random_state=42, scaling=scaling,
n_resampling=50, tol=1e-3)
feature_scores = clf.fit(X, y).scores_
assert_array_equal(np.argsort(F), np.argsort(feature_scores))
def test_randomized_logistic_sparse():
# Check randomized sparse logistic regression on sparse data
iris = load_iris()
X = iris.data[:, [0, 2]]
y = iris.target
X = X[y != 2]
y = y[y != 2]
# center here because sparse matrices are usually not centered
# labels should not be centered
X, _, _, _, _ = _preprocess_data(X, y, True, True)
X_sp = sparse.csr_matrix(X)
F, _ = f_classif(X, y)
scaling = 0.3
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores = clf.fit(X, y).scores_
clf = RandomizedLogisticRegression(verbose=False, C=1., random_state=42,
scaling=scaling, n_resampling=50,
tol=1e-3)
feature_scores_sp = clf.fit(X_sp, y).scores_
assert_array_equal(feature_scores, feature_scores_sp)
| mit | 1,920,787,194,317,546,000 | 36.267717 | 77 | 0.594971 | false |
abilng/Mtech-proj-scripts | Others/testDTW.py | 1 | 5762 | import numpy as np
from collections import Counter
from numpy import array, zeros, argmin, inf
from numpy.linalg import norm
import sys,argparse
DATA_PATH='/others/abilng/Database/MSR2-abil/test/data_out/'
GroundTruthFile="/others/abilng/Database/MSR2-abil/Videos/groundtruth.txt";
PrintProgress=True
def dtw(x, y, dist=lambda x, y: norm(x - y, ord=1)):
""" Computes the DTW of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure (default L1 norm)
Returns the minimum distance, the accumulated cost matrix and the wrap path.
"""
x = array(x)
if len(x.shape) == 1:
x = x.reshape(-1, 1)
y = array(y)
if len(y.shape) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
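  # Accumulated cost matrix with a leading row/column of inf (D[0, 0] stays 0),
  # so the recurrence below never needs explicit boundary checks.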
D = zeros((r + 1, c + 1))
D[0, 1:] = inf
D[1:, 0] = inf
for i in range(r):
for j in range(c):
D[i+1, j+1] = dist(x[i], y[j])
for i in range(r):
for j in range(c):
D[i+1, j+1] += min(D[i, j], D[i, j+1], D[i+1, j])
D = D[1:, 1:]
dist = D[-1, -1] / sum(D.shape)
return dist, D
def getMSR2GroundTruth(GroundTruthFile):
labels = {}
with open(GroundTruthFile) as f:
data = f.read();
for line in data.splitlines():
if line[0]=='#':
#comment
continue;
seg={};
words=line.split()
#video_name, left, width, top, height, start, time duration, action(1-clapping-2-waving-3-boxing)
seg['action']=int(words[7])
seg['start']=int(words[5])
seg['length']=int(words[6])
video=(words[0].strip('".avi'));
try:
labels[video]
except KeyError:
labels[video]=list();
finally:
labels[video].append(seg);
return labels;
def getRes(groundTruth, qFile, classes=[], nFiles=54):
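  # Compare every labelled segment in each file against the per-class query snippets using
  # DTW distance; the nearest query gives the predicted class, from which Tp/Fp counts and
  # target/non-target score lists are built.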
targetScore={}
nonTargetScore={}
Tp={}
Fp={}
q={}
for cls in classes:
targetScore[cls]=list()
nonTargetScore[cls]=list()
Tp[cls]=Fp[cls]=0
q[cls]=None
##############################
#READ Q File
f = open(DATA_PATH+'/'+str(qFile)+'.avi.txt','r');
f.readline();
dat=np.loadtxt(f);
f.close()
for label in groundTruth[str(qFile)]:
if label['action'] not in classes:
continue
start=label['start']
end=label['start']+label['length']
q[label['action']]=dat[start:end]
############
##For each File
for name in xrange(1,nFiles+1):
filename=str(name)
#if filename==str(qFile):
# continue
#init var
#read data
f = open(DATA_PATH+'/'+filename+'.avi.txt','r');
f.readline();
dat=np.loadtxt(f);
f.close()
#print filename,Query
if PrintProgress:
sys.stderr.write('[Query '+str(qFile)+' ]Testing on File:'+filename+'\r')
#for each label
for label in groundTruth[filename]:
orgLabel=label['action']
if orgLabel not in classes:
continue
start=label['start']
end=label['start']+label['length']
distance ={}
for cls in classes:
#dtw scores
if q[cls] is None:
continue
distance[cls], _ = dtw(dat[start:end], q[cls])
if cls==orgLabel:
targetScore[orgLabel].append(distance[cls])
else:
nonTargetScore[orgLabel].append(distance[cls])
preLabel=min(distance, key=distance.get);
if preLabel==orgLabel:
Tp[preLabel]+=1
else:
Fp[preLabel]+=1
if PrintProgress:
sys.stderr.write('[Query '+str(qFile)+' ]Testing on File: [DONE]\n')
return targetScore,nonTargetScore,Tp,Fp
def precision(Tp, Fp, Total):
  retrieved = Counter(Tp) + Counter(Fp)
  prec = dict()
  for (key, val) in retrieved.iteritems():
    prec[key] = float(Tp[key]) / retrieved[key]
  prec['Avg'] = float(sum(i for i in Tp.itervalues())) / sum(i for i in retrieved.itervalues())
  return prec
def recall(Tp, Fp, Total):
  rec = dict()
  for (key, val) in Total.iteritems():
    rec[key] = float(Tp[key]) / Total[key]
  rec['Avg'] = float(sum(i for i in Tp.itervalues())) / sum(i for i in Total.itervalues())
  return rec
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='GMM Testing')
parser.add_argument('-v','--verbose', action='store_true')
parser.add_argument('targetFile')
parser.add_argument('nonTargetFile')
args = parser.parse_args()
PrintProgress = args.verbose
targetFile = args.targetFile
nonTargetFile = args.nonTargetFile
groundTruth = getMSR2GroundTruth(GroundTruthFile);
q=[2,11,44,50,32,8,45,33,20,25]
frameLen=15
nClass =3
nFiles=54
classes = range(1,nClass+1)
AvgTp = Counter({1:0,2:0,3:0})
AvgFp = Counter({1:0,2:0,3:0})
targetFptr=file(targetFile,'w');
nonTargetFptr=file(nonTargetFile,'w');
print "|| Query |",
for cls in classes:
print "Tp(%02d) | Fp(%02d) |"%(cls,cls),
print "Tp(Avg) | Fp(Avg) ||"
print "||=======",
for cls in classes:
print "======== ========",
print "===================||"
for qFile in q:
(targetScore,nonTargetScore,Tp,Fp)=getRes(groundTruth,qFile,classes,nFiles)
AvgTp +=Counter(Tp)
AvgFp +=Counter(Fp)
print "|| %2d |"%(qFile),
for cls in classes:
print " %02d | %02d |"%(Tp[cls],Fp[cls]),
print "%.04f | %.04f ||"%(
sum(i for i in Tp.itervalues())/float(len(classes)),
sum(i for i in Fp.itervalues())/float(len(classes)))
for scores in targetScore.itervalues():
for score in scores:
targetFptr.write("%.5f"%score+"\n")
for scores in nonTargetScore.itervalues():
for score in scores:
nonTargetFptr.write("%.5f"%score+"\n")
targetFptr.close()
nonTargetFptr.close()
n=float(len(q))
for (key,val) in AvgTp.iteritems():
AvgTp[key] = AvgTp[key]/n
for (key,val) in AvgFp.iteritems():
AvgFp[key] = AvgFp[key]/n
print "|| Avg |",
for cls in classes:
print " %02d | %02d |"%(AvgTp[cls],AvgFp[cls]),
print "%.04f | %.04f ||"%(
sum(i for i in AvgTp.itervalues())/float(nClass),
sum(i for i in AvgFp.itervalues())/float(nClass))
| apache-2.0 | 1,400,461,167,598,897,200 | 22.140562 | 100 | 0.618709 | false |
napalm-automation/napalm-yang | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs_/prefix_sid/sid/__init__.py | 1 | 12694 | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/subTLVs/subTLVs/prefix-sid/sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"subTLVs",
"subTLVs",
"prefix-sid",
"sid",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
YANG Description: State parameters for Prefix-SID.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for Prefix-SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class sid(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/extended-ipv4-reachability/prefixes/prefix/subTLVs/subTLVs/prefix-sid/sid. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Prefix Segment-ID list. IGP-Prefix Segment is an IGP segment attached
to an IGP prefix. An IGP-Prefix Segment is global (unless explicitly
advertised otherwise) within the SR/IGP domain.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "sid"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"extended-ipv4-reachability",
"prefixes",
"prefix",
"subTLVs",
"subTLVs",
"prefix-sid",
"sid",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
YANG Description: State parameters for Prefix-SID.
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/extended_ipv4_reachability/prefixes/prefix/subTLVs/subTLVs/prefix_sid/sid/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for Prefix-SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
| apache-2.0 | 1,686,331,334,871,978,000 | 37.12012 | 375 | 0.576178 | false |
jakebas/homepage | routes.py | 1 | 1408 | from flask import Flask, render_template, request
from forms import ContactForm
from flask.ext.mail import Message, Mail
mail = Mail()
app = Flask(__name__)
app.secret_key = #removed from public version
app.config['MAIL_SERVER'] = "smtp.gmail.com"
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = #removed from public version
app.config['MAIL_PASSWORD'] = #removed from public version
mail.init_app(app)
@app.route('/')
def home():
return render_template('home.html')
@app.route('/about')
def about():
return render_template('about.html')
@app.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm()
if request.method == 'POST':
if not form.validate():
return render_template('contact.html', form=form)
else:
msg = Message(form.subject.data, sender="[email protected]", recipients=['[email protected]'])
msg.body = """
From: %s <%s>
%s
""" % (form.name.data, form.email.data, form.message.data)
mail.send(msg)
return render_template('contact.html', success=True)
elif request.method == 'GET':
return render_template('contact.html', form=form)
@app.route('/teaching')
def teaching():
return render_template('teaching.html')
@app.route('/compsci')
def compsci():
return render_template('compsci.html')
if __name__ == '__main__':
app.run(debug=True)
| mit | -5,081,545,841,456,852,000 | 24.6 | 104 | 0.667614 | false |
patpatpatpatpat/stormpath-django | django_stormpath/admin.py | 1 | 1326 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import StormpathUser
from .forms import StormpathUserCreationForm, StormpathUserChangeForm
class StormpathUserAdmin(UserAdmin):
# Set the add/modify forms
add_form = StormpathUserCreationForm
form = StormpathUserChangeForm
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference specific fields on auth.User.
list_display = ('username', 'email', 'is_staff', 'given_name', 'surname')
list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
search_fields = ('email', 'given_name', 'surname')
ordering = ('email',)
filter_horizontal = ('groups', 'user_permissions',)
fieldsets = (
(None, {'fields': ('username', 'email', 'password')}),
('Personal info', {'fields': ('given_name', 'surname')}),
('Permissions', {'fields': ('is_active', 'is_staff', 'is_superuser', 'groups',)}),
('Important dates', {'fields': ('last_login',)}),
)
add_fieldsets = (
(None, {'classes': ('wide',),
'fields': ('given_name', 'surname', 'email', 'password1', 'password2')}),
)
# Register the new CustomUserAdmin
admin.site.register(StormpathUser, StormpathUserAdmin)
| apache-2.0 | 1,799,797,049,859,641,000 | 40.4375 | 90 | 0.6546 | false |
Comp-UFSCar/neural-networks-2 | tasks/assignment-1/p3.py | 1 | 1933 | """Problem 3.
Author: Lucas David -- <[email protected]>
"""
import multiprocessing
from mpl_toolkits.mplot3d import Axes3D
from scipy.io import loadmat
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network.multilayer_perceptron import MLPRegressor
from sklearn.pipeline import Pipeline
from algorithms import ExtremeMachine
Axes3D
N_JOBS = multiprocessing.cpu_count()
def train(clf, params, X, y):
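    # Grid-search clf over params using all CPU cores, report the best cross-validation
    # score, then re-score the refit best estimator on the same data it was tuned on.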
grid = GridSearchCV(clf, params, n_jobs=N_JOBS)
grid.fit(X, y)
print('grid parameters: %s' % params)
print('best parameters: %s' % grid.best_params_)
print('best estimator\'s score in testing fold: %.2f' % grid.best_score_)
evaluate(grid.best_estimator_, X, y)
return grid
def evaluate(machine, X, y):
print('score: %.2f' % machine.score(X, y))
def a(X, y):
machine = MLPRegressor()
params = {
'hidden_layer_sizes': [(100,), (200,), (1024,)],
'learning_rate_init': [.001],
'max_iter': [100, 200, 300],
}
return train(machine, params, X, y)
def b(X, y):
machine = Pipeline([('ex', ExtremeMachine()), ('rg', MLPRegressor())])
params = {
'ex__n_features': [32, 64, 128, 256, 512, 1024],
'ex__random_state': [0],
'rg__hidden_layer_sizes': [(100,), (200,), (1024,), (2048,)],
'rg__random_state': [1],
'rg__max_iter': [200],
}
return train(machine, params, X, y)
def c(X, y, X_test, y_test):
evaluate(a(X, y).best_estimator_, X_test, y_test)
evaluate(b(X, y).best_estimator_, X_test, y_test)
def main():
print(__doc__)
data = loadmat('./data/dados2.mat')
X = data['ponto'][:, :2]
y = data['ponto'][:, 2].flatten()
data = loadmat('./data/dados3.mat')
X_test = data['ponto'][:, :2]
y_test = data['ponto'][:, 2].flatten()
print('shapes: ', X.shape, y.shape)
c(X, y, X_test, y_test)
if __name__ == '__main__':
main()
| mit | -4,467,317,084,805,600,000 | 22.011905 | 77 | 0.592344 | false |
polyaxon/polyaxon | core/tests/test_polyflow/test_termination/test_termination.py | 1 | 1645 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon.polyflow.termination import V1Termination
from tests.utils import BaseTestCase, assert_equal_dict
@pytest.mark.termination_mark
class TestV1Terminations(BaseTestCase):
def test_termination_config(self):
config_dict = {}
config = V1Termination.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
config_dict["maxRetries"] = "{{ fs }}"
config = V1Termination.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add max_retries
config_dict["maxRetries"] = 4
config = V1Termination.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add timeout
config_dict["timeout"] = 4
config = V1Termination.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
# Add ttl
config_dict["ttl"] = 40
config = V1Termination.from_dict(config_dict)
assert_equal_dict(config_dict, config.to_dict())
| apache-2.0 | 6,786,107,716,321,763,000 | 34 | 74 | 0.691793 | false |
jturner314/map_ssh_attempts | map_ssh_attempts/geoip.py | 1 | 4272 | # Copyright (C) 2014 Jim Turner
# This file is part of map_ssh_attempts.
# map_ssh_attempts is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 2 of the License, or (at your option) any
# later version.
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from pygeoip import GeoIPError
import gzip
import collections
import os
import os.path
import pygeoip
import urllib.request
Coordinate = collections.namedtuple('Coordinate', ('longitude', 'latitude'))
class GeoIPMultiversion(object):
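    # Legacy GeoLite City databases, one per IP version: local filenames and download URLs.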
versions = [4, 6]
db_names = {4: 'GeoLiteCity.dat',
6: 'GeoLiteCityv6.dat'}
db_sources = {4: 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz',
6: 'http://geolite.maxmind.com/download/geoip/database/GeoLiteCityv6-beta/GeoLiteCityv6.dat.gz'}
def __init__(self, cache_dir='~/.cache/map_ssh_attempts'):
"""Create an object to lookup GeoIP information regardless of IP version.
:param cache_dir: directory in which to place the GeoIP databases
"""
self.cache_dir = os.path.expanduser(cache_dir)
self.dbs = {}
def update_cache(self):
"""Update GeoIP database cache."""
if not os.path.isdir(self.cache_dir):
if os.path.lexists(self.cache_dir):
raise NotADirectoryError('Download location exists but is not a directory.')
else:
os.makedirs(self.cache_dir)
for version in GeoIPMultiversion.versions:
name = GeoIPMultiversion.db_names[version]
url = GeoIPMultiversion.db_sources[version]
with open(os.path.join(self.cache_dir, name), 'wb') as f:
print("Updating {}... ".format(name), end='')
db_gz = urllib.request.urlopen(url).read()
db = gzip.decompress(db_gz)
f.write(db)
print("100%")
def check_cache(self):
"""Check if GeoIP database files exist in cache."""
for version in GeoIPMultiversion.versions:
name = GeoIPMultiversion.db_names[version]
if not os.path.isfile(os.path.join(self.cache_dir, name)):
return False
        return True
def load_dbs(self):
"""Load GeoIP objects from database files."""
if not self.check_cache():
self.update_cache()
self.dbs = {}
for version in GeoIPMultiversion.versions:
name = GeoIPMultiversion.db_names[version]
print("Loading {}... ".format(name), end='')
self.dbs[version] = pygeoip.GeoIP(os.path.join(self.cache_dir, name))
print("100%")
def check_loaded(self):
"""Check if GeoIP databases have been loaded."""
for version in GeoIPMultiversion.versions:
            if version not in self.dbs:
                return False
        return True
def coord_by_addr(self, addr):
"""Given an IPv4Address or IPv6Address, return a location Coordinate.
:param addr: IPv4Address or IPv6Address object with address of host
:return: Coordinate object
"""
if not self.check_loaded():
self.load_dbs()
record = self.dbs[addr.version].record_by_addr(str(addr))
if record:
return Coordinate(record['longitude'], record['latitude'])
else:
raise GeoIPError("Unable to determine coordinates.")
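    # Delegate any other *_by_addr lookup (e.g. record_by_addr) to the database matching the address's IP version.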
def __getattr__(self, name):
if name.endswith('_by_addr'):
def f(addr):
if not self.check_loaded():
self.load_dbs()
return getattr(self.dbs[addr.version], name)(str(addr))
return f
else:
raise AttributeError("'GeoIPMultiversion' has no attribute '{}'".format(name))
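# Example usage (illustrative; assumes the GeoLite .dat files can still be downloaded):
#     from ipaddress import ip_address
#     geoip = GeoIPMultiversion()
#     print(geoip.coord_by_addr(ip_address('8.8.8.8')))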
| gpl-2.0 | 1,047,337,268,620,959,500 | 37.142857 | 114 | 0.61868 | false |
sbustreamspot/sbustreamspot-cdm | test_kafka_vm.py | 1 | 2510 | #!/usr/bin/env python
import argparse
from constants import *
import json
import pdb
from pykafka import KafkaClient
from pykafka.exceptions import OffsetOutOfRangeError, RequestTimedOut
from pykafka.partitioners import HashingPartitioner
import sys
from tc.schema.serialization import Utils
from tc.schema.serialization.kafka import KafkaAvroGenericSerializer, KafkaAvroGenericDeserializer
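# Round-trip test: publish serialized CDM records to a Kafka topic, then consume them back and compare.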
parser = argparse.ArgumentParser()
parser.add_argument('--kafka-group', help='Kafka consumer group', required=True)
parser.add_argument('--only-produce', help='Only produce messages',
required=False, action='store_true')
args = vars(parser.parse_args())
kafka_client = KafkaClient(KAFKA_URL)
kafka_topic = kafka_client.topics[args['kafka_group']]
producer = kafka_topic.get_producer(
partitioner=HashingPartitioner(),
sync=True, linger_ms=1, ack_timeout_ms=30000, max_retries=0)
schema = Utils.load_schema(SCHEMA_FILE)
input_file = open('avro/infoleak_small_units.CDM13.avro', 'rb')
serializer = KafkaAvroGenericSerializer(schema)
deserializer = KafkaAvroGenericDeserializer(schema, input_file=input_file)
records = deserializer.deserialize_from_file()
i = 0
produced = []
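# Produce: serialize each record with the Avro schema and publish it under a fixed key (single partition).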
for edge in records:
#kafka_key = str(i).encode() # this is hashed to select a partition
kafka_key = '0'
produced.append(edge)
message = serializer.serialize(args['kafka_group'], edge)
producer.produce(message, kafka_key)
i += 1
print 'Pushed', i, 'messages'
producer.stop()
input_file.close()
if args['only_produce']:
sys.exit(0)
consumer = kafka_topic.get_balanced_consumer(
consumer_group=args['kafka_group'], auto_commit_enable=True,
auto_commit_interval_ms=1000, reset_offset_on_start=False,
consumer_timeout_ms=100, fetch_wait_max_ms=0, managed=True)
j = 0
consumed = []
while True:
if j >= i:
break
try:
for kafka_message in consumer:
if kafka_message.value is not None:
message = deserializer.deserialize(args['kafka_group'],
kafka_message.value)
consumed.append(message)
j += 1
    except RequestTimedOut:
        print 'Kafka consumer request timed out'
    except OffsetOutOfRangeError:
        print 'Kafka consumer offset out of range'
print 'Consumed', j, 'messages'
consumer.stop()
for i in range(len(produced)):
assert consumed[i] == produced[i]
| apache-2.0 | 5,254,498,871,606,089,000 | 30.375 | 98 | 0.686853 | false |
aclowes/yawn | yawn/task/serializers.py | 1 | 2485 | from rest_framework import serializers
from yawn.task.models import Task, Execution
from yawn.worker.serializers import MessageSerializer, WorkerSerializer
from yawn.workflow.models import Workflow
class SimpleWorkflowSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='name.name', read_only=True)
class Meta:
model = Workflow
fields = ('id', 'name', 'version')
class TaskSerializer(serializers.ModelSerializer):
name = serializers.CharField(source='template.name', read_only=True)
workflow = SimpleWorkflowSerializer(source='template.workflow', read_only=True)
class Meta:
model = Task
exclude = ('run', 'template')
class ExecutionDetailSerializer(serializers.ModelSerializer):
worker = WorkerSerializer(read_only=True)
class Meta:
model = Execution
exclude = ('task',)
class ExecutionListSerializer(serializers.ModelSerializer):
worker = WorkerSerializer(read_only=True)
task = TaskSerializer(read_only=True)
minutes_running = serializers.SerializerMethodField()
class Meta:
model = Execution
fields = ('id', 'task', 'worker', 'status', 'start_timestamp', 'minutes_running')
def get_minutes_running(self, obj):
if obj.stop_timestamp:
runtime = (obj.stop_timestamp - obj.start_timestamp).total_seconds()
return '{:.0f}m {:.2f}s'.format(runtime // 60, runtime % 60)
class TaskDetailSerializer(TaskSerializer):
executions = serializers.SerializerMethodField()
messages = MessageSerializer(many=True, source='message_set', read_only=True)
max_retries = serializers.IntegerField(source='template.max_retries')
timeout = serializers.IntegerField(source='template.timeout')
command = serializers.CharField(source='template.command')
# actions
terminate = serializers.IntegerField(write_only=True)
enqueue = serializers.BooleanField(write_only=True)
def get_executions(self, instance):
executions = instance.execution_set.order_by('id')
return ExecutionDetailSerializer(executions, many=True).data
def update(self, instance, validated_data):
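        # 'terminate' (an execution id) and 'enqueue' are write-only action fields, not model attributes.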
if validated_data.get('terminate'):
instance.execution_set.filter(
id=validated_data['terminate'], status=Execution.RUNNING
).update(status=Execution.KILLED)
if validated_data.get('enqueue'):
instance.enqueue()
return instance
| mit | -2,446,936,753,634,268,000 | 34 | 89 | 0.700604 | false |
ifduyue/sentry | tests/sentry/integrations/vsts/testutils.py | 1 | 33812 | from __future__ import absolute_import
import responses
from six.moves.urllib.parse import urlparse, urlencode, parse_qs
from sentry.integrations.vsts import VstsIntegrationProvider
from sentry.testutils import IntegrationTestCase
class VstsIntegrationTestCase(IntegrationTestCase):
provider = VstsIntegrationProvider
def setUp(self):
super(VstsIntegrationTestCase, self).setUp()
self.access_token = '9d646e20-7a62-4bcc-abc0-cb2d4d075e36'
self.refresh_token = '32004633-a3c0-4616-9aa0-a40632adac77'
self.vsts_account_id = 'c8a585ae-b61f-4ba6-833c-9e8d5d1674d8'
self.vsts_account_name = 'MyVSTSAccount'
self.vsts_account_uri = 'https://MyVSTSAccount.vssps.visualstudio.com:443/'
self.vsts_user_id = 'd6245f20-2af8-44f4-9451-8107cb2767db'
self.vsts_user_name = 'Foo Bar'
self.vsts_user_email = '[email protected]'
self.repo_id = '47166099-3e16-4868-9137-22ac6b05b06e'
self.repo_name = 'cool-service'
self.project_a = {
'id': 'eb6e4656-77fc-42a1-9181-4c6d8e9da5d1',
'name': 'ProjectA',
}
self.project_b = {
'id': '6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c',
'name': 'ProjectB',
}
responses.start()
self._stub_vsts()
def tearDown(self):
responses.stop()
def _stub_vsts(self):
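        # Register canned responses for the OAuth token exchange and the VSTS REST endpoints used during install.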
responses.reset()
responses.add(
responses.POST,
'https://app.vssps.visualstudio.com/oauth2/token',
json={
'access_token': self.access_token,
'token_type': 'grant',
'expires_in': 300, # seconds (5 min)
'refresh_token': self.refresh_token,
},
)
responses.add(
responses.GET,
'https://app.vssps.visualstudio.com/_apis/accounts',
json=[{
'AccountId': self.vsts_account_id,
'AccountUri': self.vsts_account_uri,
'AccountName': self.vsts_account_name,
'Properties': {},
}],
)
responses.add(
responses.GET,
'https://app.vssps.visualstudio.com/_apis/profile/profiles/me?api-version=1.0',
json={
'id': self.vsts_user_id,
'displayName': self.vsts_user_name,
'emailAddress': self.vsts_user_email,
},
)
responses.add(
responses.GET,
'https://app.vssps.visualstudio.com/_apis/connectionData/',
json={
'authenticatedUser': {
'subjectDescriptor': self.vsts_account_id,
},
},
)
responses.add(
responses.GET,
'https://{}.visualstudio.com/DefaultCollection/_apis/projects'.format(
self.vsts_account_name.lower(),
),
json={
'value': [
self.project_a,
self.project_b,
],
},
)
responses.add(
responses.POST,
'https://{}.visualstudio.com/_apis/hooks/subscriptions'.format(
self.vsts_account_name.lower(),
),
json=CREATE_SUBSCRIPTION,
)
responses.add(
responses.GET,
'https://{}.visualstudio.com/_apis/git/repositories'.format(
self.vsts_account_name.lower(),
),
json={
'value': [{
'id': self.repo_id,
'name': self.repo_name,
'project': {
'name': self.project_a['name'],
},
}],
},
)
responses.add(
responses.GET,
'https://{}.visualstudio.com/{}/_apis/wit/workitemtypes/{}/states'.format(
self.vsts_account_name.lower(),
self.project_a['name'],
'Bug',
),
json={
'value': [{'name': 'resolve_status'},
{'name': 'resolve_when'},
{'name': 'regression_status'},
{'name': 'sync_comments'},
{'name': 'sync_forward_assignment'},
{'name': 'sync_reverse_assignment'}],
}
)
def make_init_request(self, path=None, body=None):
return self.client.get(
path or self.init_path,
body or {},
)
def make_oauth_redirect_request(self, state):
return self.client.get('{}?{}'.format(
self.setup_path,
urlencode({
'code': 'oauth-code',
'state': state,
}),
))
def assert_vsts_oauth_redirect(self, redirect):
assert redirect.scheme == 'https'
assert redirect.netloc == 'app.vssps.visualstudio.com'
assert redirect.path == '/oauth2/authorize'
def assert_account_selection(self, response, account_id=None):
account_id = account_id or self.vsts_account_id
assert response.status_code == 200
assert '<option value="{}"'.format(account_id) in response.content
def assert_installation(self):
# Initial request to the installation URL for VSTS
resp = self.make_init_request()
redirect = urlparse(resp['Location'])
assert resp.status_code == 302
self.assert_vsts_oauth_redirect(redirect)
query = parse_qs(redirect.query)
# OAuth redirect back to Sentry (identity_pipeline_view)
resp = self.make_oauth_redirect_request(query['state'][0])
self.assert_account_selection(resp)
# User choosing which VSTS Account to use (AccountConfigView)
# Final step.
return self.client.post(
self.setup_path,
{
'account': self.vsts_account_id,
'provider': 'vsts',
},
)
COMPARE_COMMITS_EXAMPLE = b"""
{
"count": 1,
"value": [
{
"commitId": "6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"author": {
"name": "max bittker",
"email": "[email protected]",
"date": "2018-04-24T00:03:18Z"
},
"committer": {
"name": "max bittker",
"email": "[email protected]",
"date": "2018-04-24T00:03:18Z"
},
"comment": "Updated README.md",
"changeCounts": {"Add": 0, "Edit": 1, "Delete": 0},
"url":
"https://mbittker.visualstudio.com/_apis/git/repositories/b1e25999-c080-4ea1-8c61-597c4ec41f06/commits/6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"remoteUrl":
"https://mbittker.visualstudio.com/_git/MyFirstProject/commit/6c36052c58bde5e57040ebe6bdb9f6a52c906fff"
}
]
}
"""
FILE_CHANGES_EXAMPLE = b"""
{
"changeCounts": {"Edit": 1},
"changes": [
{
"item": {
"objectId": "b48e843656a0a12926a0bcedefe8ef3710fe2867",
"originalObjectId": "270b590a4edf3f19aa7acc7b57379729e34fc681",
"gitObjectType": "blob",
"commitId": "6c36052c58bde5e57040ebe6bdb9f6a52c906fff",
"path": "/README.md",
"url":
"https://mbittker.visualstudio.com/DefaultCollection/_apis/git/repositories/b1e25999-c080-4ea1-8c61-597c4ec41f06/items/README.md?versionType=Commit&version=6c36052c58bde5e57040ebe6bdb9f6a52c906fff"
},
"changeType": "edit"
}
]
}
"""
WORK_ITEM_RESPONSE = """{
"id": 309,
"rev": 1,
"fields": {
"System.AreaPath": "Fabrikam-Fiber-Git",
"System.TeamProject": "Fabrikam-Fiber-Git",
"System.IterationPath": "Fabrikam-Fiber-Git",
"System.WorkItemType": "Product Backlog Item",
"System.State": "New",
"System.Reason": "New backlog item",
"System.CreatedDate": "2015-01-07T18:13:01.807Z",
"System.CreatedBy": "Jamal Hartnett <[email protected]>",
"System.ChangedDate": "2015-01-07T18:13:01.807Z",
"System.ChangedBy": "Jamal Hartnett <[email protected]>",
"System.Title": "Hello",
"Microsoft.VSTS.Scheduling.Effort": 8,
"WEF_6CB513B6E70E43499D9FC94E5BBFB784_Kanban.Column": "New",
"System.Description": "Fix this."
},
"_links": {
"self": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309"
},
"workItemUpdates": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309/updates"
},
"workItemRevisions": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309/revisions"
},
"workItemHistory": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309/history"
},
"html": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/web/wi.aspx?pcguid=d81542e4-cdfa-4333-b082-1ae2d6c3ad16&id=309"
},
"workItemType": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c/_apis/wit/workItemTypes/Product%20Backlog%20Item"
},
"fields": {
"href": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/fields"
}
},
"url": "https://fabrikam-fiber-inc.visualstudio.com/DefaultCollection/_apis/wit/workItems/309"
}"""
GET_USERS_RESPONSE = b"""{
"count": 4,
"value": [
{
"subjectKind": "user",
"cuid": "ec09a4d8-d914-4f28-9e39-23d52b683f90",
"domain": "Build",
"principalName": "51ac8d19-6694-459f-a65e-bec30e9e2e33",
"mailAddress": "",
"origin": "vsts",
"originId": "ec09a4d8-d914-4f28-9e39-23d52b683f90",
"displayName": "Project Collection Build Service (Ftottentest2)",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz",
"descriptor": "TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlNlcnZpY2VJZGVudGl0eTtmMzViOTAxNS1jZGU4LTQ4MzQtYTFkNS0wOWU4ZjM1OWNiODU6QnVpbGQ6NTFhYzhkMTktNjY5NC00NTlmLWE2NWUtYmVjMzBlOWUyZTMz"
},
{
"subjectKind": "user",
"metaType": "member",
"cuid": "00ca946b-2fe9-4f2a-ae2f-40d5c48001bc",
"domain": "LOCAL AUTHORITY",
"principalName": "TeamFoundationService (TEAM FOUNDATION)",
"mailAddress": "",
"origin": "vsts",
"originId": "00ca946b-2fe9-4f2a-ae2f-40d5c48001bc",
"displayName": "TeamFoundationService (TEAM FOUNDATION)",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw",
"descriptor": "TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5Ozc3ODlmMDlkLWUwNTMtNGYyZS1iZGVlLTBjOGY4NDc2YTRiYw"
},
{
"subjectKind": "user",
"metaType": "member",
"cuid": "ddd94918-1fc8-459b-994a-cca86c4fbe95",
"domain": "TEAM FOUNDATION",
"principalName": "Anonymous",
"mailAddress": "",
"origin": "vsts",
"originId": "ddd94918-1fc8-459b-994a-cca86c4fbe95",
"displayName": "Anonymous",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA",
"descriptor": "TWljcm9zb2Z0LlRlYW1Gb3VuZGF0aW9uLlVuYXV0aGVudGljYXRlZElkZW50aXR5O1MtMS0wLTA"
},
{
"subjectKind": "user",
"metaType": "member",
"cuid": "65903f92-53dc-61b3-bb0e-e69cfa1cb719",
"domain": "45aa3d2d-7442-473d-b4d3-3c670da9dd96",
"principalName": "[email protected]",
"mailAddress": "[email protected]",
"origin": "aad",
"originId": "4be8f294-000d-4431-8506-57420b88e204",
"displayName": "Francis Totten",
"_links": {
"self": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz"
},
"memberships": {
"href": "https://fabrikam.vssps.visualstudio.com/_apis/graph/memberships/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz"
}
},
"url": "https://fabrikam.vssps.visualstudio.com/_apis/graph/users/TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz",
"descriptor": "TWljcm9zb2Z0LklkZW50aXR5TW9kZWwuQ2xhaW1zLkNsYWltc0lkZW50aXR5OzQ1YWEzZDJkLTc0NDItNDczZC1iNGQzLTNjNjcwZGE5ZGQ5NlxmdG90dGVuQHZzY3NpLnVz"
}
]
}
"""
CREATE_SUBSCRIPTION = {
'id': 'fd672255-8b6b-4769-9260-beea83d752ce',
'url': 'https://fabrikam.visualstudio.com/_apis/hooks/subscriptions/fd672255-8b6b-4769-9260-beea83d752ce',
'publisherId': 'tfs',
'eventType': 'workitem.update',
'resourceVersion': '1.0-preview.1',
'eventDescription': 'WorkItem Updated',
'consumerId': 'webHooks',
'consumerActionId': 'httpRequest',
'actionDescription': 'To host myservice',
'createdBy': {
'id': '00ca946b-2fe9-4f2a-ae2f-40d5c48001bc'
},
'createdDate': '2014-10-27T15:37:24.873Z',
'modifiedBy': {
'id': '00ca946b-2fe9-4f2a-ae2f-40d5c48001bc'
},
'modifiedDate': '2014-10-27T15:37:26.23Z',
'publisherInputs': {
'buildStatus': 'Failed',
'definitionName': 'MyWebSite CI',
'hostId': 'd81542e4-cdfa-4333-b082-1ae2d6c3ad16',
'projectId': '6ce954b1-ce1f-45d1-b94d-e6bf2464ba2c',
'tfsSubscriptionId': '3e8b33e7-426d-4c92-9bf9-58e163dd7dd5'
},
'consumerInputs': {
'url': 'https://myservice/newreceiver'
}
}
WORK_ITEM_UPDATED = {
u'resourceContainers': {
u'project': {u'id': u'c0bf429a-c03c-4a99-9336-d45be74db5a6', u'baseUrl': u'https://laurynsentry.visualstudio.com/'},
u'account': {u'id': u'90e9a854-eb98-4c56-ae1a-035a0f331dd6', u'baseUrl': u'https://laurynsentry.visualstudio.com/'},
u'collection': {u'id': u'80ded3e8-3cd3-43b1-9f96-52032624aa3a', u'baseUrl': u'https://laurynsentry.visualstudio.com/'}
},
u'resource': {
u'revisedBy': {
u'displayName': u'lauryn', u'name': u'lauryn <[email protected]>', u'url': u'https://app.vssps.visualstudio.com/A90e9a854-eb98-4c56-ae1a-035a0f331dd6/_apis/Identities/21354f98-ab06-67d9-b974-5a54d992082e', u'imageUrl': u'https://laurynsentry.visualstudio.com/_api/_common/identityImage?id=21354f98-ab06-67d9-b974-5a54d992082e', u'descriptor': u'msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl', u'_links': {u'avatar': {u'href': u'https://laurynsentry.visualstudio.com/_apis/GraphProfile/MemberAvatars/msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl'}},
u'uniqueName': u'[email protected]', u'id': u'21354f98-ab06-67d9-b974-5a54d992082e'
},
u'revisedDate': u'9999-01-01T00:00:00Z',
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/updates/2',
u'fields': {
u'System.AuthorizedDate': {u'newValue': u'2018-07-05T20:52:14.777Z', u'oldValue': u'2018-07-05T20:51:58.927Z'},
u'System.AssignedTo': {u'newValue': u'lauryn <[email protected]>', u'oldValue': u'lauryn2 <[email protected]>'},
u'System.Watermark': {u'newValue': 78, u'oldValue': 77},
u'System.Rev': {u'newValue': 2, u'oldValue': 1},
u'System.RevisedDate': {u'newValue': u'9999-01-01T00:00:00Z', u'oldValue': u'2018-07-05T20:52:14.777Z'},
u'System.ChangedDate': {u'newValue': u'2018-07-05T20:52:14.777Z', u'oldValue': u'2018-07-05T20:51:58.927Z'}
},
u'workItemId': 31,
u'rev': 2,
u'_links': {
u'self': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/updates/2'},
u'workItemUpdates': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/updates'},
u'html': {u'href': u'https://laurynsentry.visualstudio.com/web/wi.aspx?pcguid=80ded3e8-3cd3-43b1-9f96-52032624aa3a&id=31'},
u'parent': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31'}
},
u'id': 2,
u'revision': {
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/revisions/2',
u'fields': {
u'System.AreaPath': u'MyFirstProject',
u'System.WorkItemType': u'Bug',
u'System.Reason': u'New',
u'System.Title': u"NameError: global name 'BitbucketRepositoryProvider' is not defined",
u'Microsoft.VSTS.Common.Priority': 2,
u'System.CreatedBy': u'lauryn <[email protected]>',
u'System.AssignedTo': u'lauryn <[email protected]>',
u'System.CreatedDate': u'2018-07-05T20:51:58.927Z',
u'System.TeamProject': u'MyFirstProject',
u'Microsoft.VSTS.Common.Severity': u'3 - Medium',
u'Microsoft.VSTS.Common.ValueArea': u'Business',
u'System.State': u'New',
u'System.Description': u'<p><a href="https://lauryn.ngrok.io/sentry/internal/issues/55/">https://lauryn.ngrok.io/sentry/internal/issues/55/</a></p>\n<pre><code>NameError: global name \'BitbucketRepositoryProvider\' is not defined\n(1 additional frame(s) were not displayed)\n...\n File "sentry/runner/__init__.py", line 125, in configure\n configure(ctx, py, yaml, skip_service_validation)\n File "sentry/runner/settings.py", line 152, in configure\n skip_service_validation=skip_service_validation\n File "sentry/runner/initializer.py", line 315, in initialize_app\n register_plugins(settings)\n File "sentry/runner/initializer.py", line 60, in register_plugins\n integration.setup()\n File "sentry/integrations/bitbucket/integration.py", line 78, in setup\n BitbucketRepositoryProvider,\n\nNameError: global name \'BitbucketRepositoryProvider\' is not defined\n</code></pre>\n',
u'System.ChangedBy': u'lauryn <[email protected]>',
u'System.ChangedDate': u'2018-07-05T20:52:14.777Z',
u'Microsoft.VSTS.Common.StateChangeDate': u'2018-07-05T20:51:58.927Z',
u'System.IterationPath': u'MyFirstProject'},
u'rev': 2,
u'id': 31,
u'_links': {u'self': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/revisions/2'}, u'workItemRevisions': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31/revisions'}, u'parent': {u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/31'}}
}
},
u'eventType': u'workitem.updated',
u'detailedMessage': None,
u'createdDate': u'2018-07-05T20:52:16.3051288Z',
u'id': u'18f51331-2640-4bce-9ebd-c59c855956a2',
u'resourceVersion': u'1.0',
u'notificationId': 1,
u'subscriptionId': u'7bf628eb-b3a7-4fb2-ab4d-8b60f2e8cb9b',
u'publisherId': u'tfs',
u'message': None
}
WORK_ITEM_UNASSIGNED = {
u'resourceContainers': {
u'project': {
u'id': u'c0bf429a-c03c-4a99-9336-d45be74db5a6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'account': {
u'id': u'90e9a854-eb98-4c56-ae1a-035a0f331dd6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'collection': {
u'id': u'80ded3e8-3cd3-43b1-9f96-52032624aa3a',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
}
},
u'resource': {
u'revisedBy': {
u'displayName': u'lauryn',
u'name': u'lauryn <[email protected]>',
u'url': u'https://app.vssps.visualstudio.com/A90e9a854-eb98-4c56-ae1a-035a0f331dd6/_apis/Identities/21354f98-ab06-67d9-b974-5a54d992082e',
u'imageUrl': u'https://laurynsentry.visualstudio.com/_api/_common/identityImage?id=21354f98-ab06-67d9-b974-5a54d992082e',
u'descriptor': u'msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl',
u'_links': {
u'avatar': {
u'href': u'https://laurynsentry.visualstudio.com/_apis/GraphProfile/MemberAvatars/msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl'
}
},
u'uniqueName': u'[email protected]',
u'id': u'21354f98-ab06-67d9-b974-5a54d992082e'
},
u'revisedDate': u'9999-01-01T00:00:00 Z',
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3',
u'fields': {
u'System.AuthorizedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
},
u'System.AssignedTo': {
u'oldValue': u'lauryn <[email protected]>'
},
u'System.Watermark': {
u'newValue': 83,
u'oldValue': 82
},
u'System.Rev': {
u'newValue': 3,
u'oldValue': 2
},
u'System.RevisedDate': {
u'newValue': u'9999-01-01T00:00:00 Z',
u'oldValue': u'2018-07-05T23:23:09.493 Z'
},
u'System.ChangedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
}
},
u'workItemId': 33,
u'rev': 3,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3'
},
u'workItemUpdates': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates'
},
u'html': {
u'href': u'https://laurynsentry.visualstudio.com/web/wi.aspx?pcguid=80ded3e8-3cd3-43b1-9f96-52032624aa3a&id=33'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
},
u'id': 3,
u'revision': {
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3',
u'fields': {
u'System.AreaPath': u'MyFirstProject',
u'System.WorkItemType': u'Bug',
u'System.Reason': u'New',
u'System.Title': u'NotImplementedError:Visual Studio Team Services requires an organization_id',
u'Microsoft.VSTS.Common.Priority': 2,
u'System.CreatedBy': u'lauryn <[email protected]>',
u'Microsoft.VSTS.Common.StateChangeDate': u'2018-07-05T23:21:25.847 Z',
u'System.CreatedDate': u'2018-07-05T23:21:25.847 Z',
u'System.TeamProject': u'MyFirstProject',
u'Microsoft.VSTS.Common.ValueArea': u'Business',
u'System.State': u'New',
u'System.Description': u'<p><a href="https: //lauryn.ngrok.io/sentry/internal/issues/196/">https: //lauryn.ngrok.io/sentry/internal/issues/196/</a></p>\n<pre><code>NotImplementedError:Visual Studio Team Services requires an organization_id\n(57 additional frame(s) were not displayed)\n...\n File "sentry/tasks/base.py"',
u'System.ChangedBy': u'lauryn <[email protected]>',
u'System.ChangedDate': u'2018-07-05T23:23:09.493 Z',
u'Microsoft.VSTS.Common.Severity': u'3 - Medium',
u'System.IterationPath': u'MyFirstProject'
},
u'rev': 3,
u'id': 33,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3'
},
u'workItemRevisions': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
}
}
},
u'eventType': u'workitem.updated',
u'detailedMessage': None,
u'createdDate': u'2018-07-05T23:23:11.1935112 Z',
u'id': u'cc349c85-6595-4939-9b69-f89480be6a26',
u'resourceVersion': u'1.0',
u'notificationId': 2,
u'subscriptionId': u'7405a600-6a25-48e6-81b6-1dde044783ad',
u'publisherId': u'tfs',
u'message': None
}
WORK_ITEM_UPDATED_STATUS = {
u'resourceContainers': {
u'project': {
u'id': u'c0bf429a-c03c-4a99-9336-d45be74db5a6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'account': {
u'id': u'90e9a854-eb98-4c56-ae1a-035a0f331dd6',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
},
u'collection': {
u'id': u'80ded3e8-3cd3-43b1-9f96-52032624aa3a',
u'baseUrl': u'https://laurynsentry.visualstudio.com/'
}
},
u'resource': {
u'revisedBy': {
u'displayName': u'lauryn',
u'name': u'lauryn <[email protected]>',
u'url': u'https://app.vssps.visualstudio.com/A90e9a854-eb98-4c56-ae1a-035a0f331dd6/_apis/Identities/21354f98-ab06-67d9-b974-5a54d992082e',
u'imageUrl': u'https://laurynsentry.visualstudio.com/_api/_common/identityImage?id=21354f98-ab06-67d9-b974-5a54d992082e',
u'descriptor': u'msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl',
u'_links': {
u'avatar': {
u'href': u'https://laurynsentry.visualstudio.com/_apis/GraphProfile/MemberAvatars/msa.MjEzNTRmOTgtYWIwNi03N2Q5LWI5NzQtNWE1NGQ5OTIwODJl'
}
},
u'uniqueName': u'[email protected]',
u'id': u'21354f98-ab06-67d9-b974-5a54d992082e'
},
u'revisedDate': u'9999-01-01T00:00:00 Z',
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3',
u'fields': {
u'System.AuthorizedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
},
u'System.State': {
u'oldValue': u'New',
u'newValue': u'Resolved'
},
u'System.Watermark': {
u'newValue': 83,
u'oldValue': 82
},
u'System.Rev': {
u'newValue': 3,
u'oldValue': 2
},
u'System.RevisedDate': {
u'newValue': u'9999-01-01T00:00:00 Z',
u'oldValue': u'2018-07-05T23:23:09.493 Z'
},
u'System.ChangedDate': {
u'newValue': u'2018-07-05T23:23:09.493 Z',
u'oldValue': u'2018-07-05T23:21:38.243 Z'
}
},
u'workItemId': 33,
u'rev': 3,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates/3'
},
u'workItemUpdates': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/updates'
},
u'html': {
u'href': u'https://laurynsentry.visualstudio.com/web/wi.aspx?pcguid=80ded3e8-3cd3-43b1-9f96-52032624aa3a&id=33'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
},
u'id': 3,
u'revision': {
u'url': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3',
u'fields': {
u'System.AreaPath': u'MyFirstProject',
u'System.WorkItemType': u'Bug',
u'System.Reason': u'New',
u'System.Title': u'NotImplementedError:Visual Studio Team Services requires an organization_id',
u'Microsoft.VSTS.Common.Priority': 2,
u'System.CreatedBy': u'lauryn <[email protected]>',
u'Microsoft.VSTS.Common.StateChangeDate': u'2018-07-05T23:21:25.847 Z',
u'System.CreatedDate': u'2018-07-05T23:21:25.847 Z',
u'System.TeamProject': u'MyFirstProject',
u'Microsoft.VSTS.Common.ValueArea': u'Business',
u'System.State': u'New',
u'System.Description': u'<p><a href="https: //lauryn.ngrok.io/sentry/internal/issues/196/">https: //lauryn.ngrok.io/sentry/internal/issues/196/</a></p>\n<pre><code>NotImplementedError:Visual Studio Team Services requires an organization_id\n(57 additional frame(s) were not displayed)\n...\n File "sentry/tasks/base.py"',
u'System.ChangedBy': u'lauryn <[email protected]>',
u'System.ChangedDate': u'2018-07-05T23:23:09.493 Z',
u'Microsoft.VSTS.Common.Severity': u'3 - Medium',
u'System.IterationPath': u'MyFirstProject'
},
u'rev': 3,
u'id': 33,
u'_links': {
u'self': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions/3'
},
u'workItemRevisions': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33/revisions'
},
u'parent': {
u'href': u'https://laurynsentry.visualstudio.com/c0bf429a-c03c-4a99-9336-d45be74db5a6/_apis/wit/workItems/33'
}
}
}
},
u'eventType': u'workitem.updated',
u'detailedMessage': None,
u'createdDate': u'2018-07-05T23:23:11.1935112 Z',
u'id': u'cc349c85-6595-4939-9b69-f89480be6a26',
u'resourceVersion': u'1.0',
u'notificationId': 2,
u'subscriptionId': u'7405a600-6a25-48e6-81b6-1dde044783ad',
u'publisherId': u'tfs',
u'message': None
}
WORK_ITEM_STATES = {
'count': 5,
'value': [
{
'name': 'New',
'color': 'b2b2b2',
'category': 'Proposed'
},
{
'name': 'Active',
'color': '007acc',
'category': 'InProgress'
},
{
'name': 'CustomState',
'color': '5688E0',
'category': 'InProgress'
},
{
'name': 'Resolved',
'color': 'ff9d00',
'category': 'Resolved'
},
{
'name': 'Closed',
'color': '339933',
'category': 'Completed'
}
]
}
GET_PROJECTS_RESPONSE = """{
"count": 1,
"value": [{
"id": "ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
"name": "Fabrikam-Fiber-Git",
"url": "https://jess-dev.visualstudio.com/_apis/projects/ac7c05bb-7f8e-4880-85a6-e08f37fd4a10",
"state": "wellFormed",
"revision": 16,
"visibility": "private"
}]
}"""
| bsd-3-clause | -7,866,642,662,394,803,000 | 43.784106 | 966 | 0.593073 | false |
mganeva/mantid | Framework/PythonInterface/plugins/algorithms/Abins.py | 1 | 40996 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
try:
import pathos.multiprocessing as mp
PATHOS_FOUND = True
except ImportError:
PATHOS_FOUND = False
import numpy as np
import six
import os
from mantid.api import AlgorithmFactory, FileAction, FileProperty, PythonAlgorithm, Progress, WorkspaceProperty, mtd
from mantid.api import WorkspaceFactory, AnalysisDataService
# noinspection PyProtectedMember
from mantid.api._api import WorkspaceGroup
from mantid.simpleapi import CloneWorkspace, GroupWorkspaces, SaveAscii, Load, Scale
from mantid.kernel import logger, StringListValidator, Direction, StringArrayProperty, Atom
import AbinsModules
# noinspection PyPep8Naming,PyMethodMayBeStatic
class Abins(PythonAlgorithm):
_ab_initio_program = None
_vibrational_or_phonon_data_file = None
_experimental_file = None
_temperature = None
_bin_width = None
_scale = None
_sample_form = None
_instrument_name = None
_atoms = None
_sum_contributions = None
_scale_by_cross_section = None
_calc_partial = None
_out_ws_name = None
_num_quantum_order_events = None
_extracted_ab_initio_data = None
def category(self):
return "Simulation"
# ----------------------------------------------------------------------------------------
def summary(self):
return "Calculates inelastic neutron scattering."
# ----------------------------------------------------------------------------------------
def PyInit(self):
# Declare all properties
self.declareProperty(name="AbInitioProgram",
direction=Direction.Input,
defaultValue="CASTEP",
validator=StringListValidator(["CASTEP", "CRYSTAL", "DMOL3", "GAUSSIAN"]),
doc="An ab initio program which was used for vibrational or phonon calculation.")
self.declareProperty(FileProperty("VibrationalOrPhononFile", "",
action=FileAction.Load,
direction=Direction.Input,
extensions=["phonon", "out", "outmol", "log", "LOG"]),
doc="File with the data from a vibrational or phonon calculation.")
self.declareProperty(FileProperty("ExperimentalFile", "",
action=FileAction.OptionalLoad,
direction=Direction.Input,
extensions=["raw", "dat"]),
doc="File with the experimental inelastic spectrum to compare.")
self.declareProperty(name="TemperatureInKelvin",
direction=Direction.Input,
defaultValue=10.0,
doc="Temperature in K for which dynamical structure factor S should be calculated.")
self.declareProperty(name="BinWidthInWavenumber", defaultValue=1.0, doc="Width of bins used during rebining.")
self.declareProperty(name="Scale", defaultValue=1.0,
doc='Scale the intensity by the given factor. Default is no scaling.')
self.declareProperty(name="SampleForm",
direction=Direction.Input,
defaultValue="Powder",
validator=StringListValidator(AbinsModules.AbinsConstants.ALL_SAMPLE_FORMS),
# doc="Form of the sample: SingleCrystal or Powder.")
doc="Form of the sample: Powder.")
self.declareProperty(name="Instrument",
direction=Direction.Input,
defaultValue="TOSCA",
# validator=StringListValidator(AbinsModules.AbinsConstants.ALL_INSTRUMENTS)
validator=StringListValidator(["TOSCA"]),
doc="Name of an instrument for which analysis should be performed.")
self.declareProperty(StringArrayProperty("Atoms", Direction.Input),
doc="List of atoms to use to calculate partial S."
"If left blank, workspaces with S for all types of atoms will be calculated.")
self.declareProperty(name="SumContributions", defaultValue=False,
doc="Sum the partial dynamical structure factors into a single workspace.")
self.declareProperty(name="ScaleByCrossSection", defaultValue='Incoherent',
validator=StringListValidator(['Total', 'Incoherent', 'Coherent']),
doc="Scale the partial dynamical structure factors by the scattering cross section.")
self.declareProperty(name="QuantumOrderEventsNumber", defaultValue='1',
validator=StringListValidator(['1', '2', '3', '4']),
doc="Number of quantum order effects included in the calculation "
"(1 -> FUNDAMENTALS, 2-> first overtone + FUNDAMENTALS + "
"2nd order combinations, 3-> FUNDAMENTALS + first overtone + second overtone + 2nd "
"order combinations + 3rd order combinations etc...)")
self.declareProperty(WorkspaceProperty("OutputWorkspace", '', Direction.Output),
doc="Name to give the output workspace.")
def validateInputs(self):
"""
Performs input validation. Use to ensure the user has defined a consistent set of parameters.
"""
input_file_validators = {"CASTEP": self._validate_castep_input_file,
"CRYSTAL": self._validate_crystal_input_file,
"DMOL3": self._validate_dmol3_input_file,
"GAUSSIAN": self._validate_gaussian_input_file}
issues = dict()
temperature = self.getProperty("TemperatureInKelvin").value
if temperature < 0:
issues["TemperatureInKelvin"] = "Temperature must be positive."
scale = self.getProperty("Scale").value
if scale < 0:
issues["Scale"] = "Scale must be positive."
ab_initio_program = self.getProperty("AbInitioProgram").value
vibrational_or_phonon_data_filename = self.getProperty("VibrationalOrPhononFile").value
output = input_file_validators[ab_initio_program](filename_full_path=vibrational_or_phonon_data_filename)
bin_width = self.getProperty("BinWidthInWavenumber").value
if not (isinstance(bin_width, float) and 1.0 <= bin_width <= 10.0):
issues["BinWidthInWavenumber"] = ["Invalid bin width. Valid range is [1.0, 10.0] cm^-1"]
if output["Invalid"]:
issues["VibrationalOrPhononFile"] = output["Comment"]
workspace_name = self.getPropertyValue("OutputWorkspace")
# list of special keywords which cannot be used in the name of workspace
forbidden_keywords = ["total"]
if workspace_name in mtd:
issues["OutputWorkspace"] = "Workspace with name " + workspace_name + " already in use; please give " \
"a different name for workspace."
elif workspace_name == "":
issues["OutputWorkspace"] = "Please specify name of workspace."
for word in forbidden_keywords:
if word in workspace_name:
issues["OutputWorkspace"] = "Keyword: " + word + " cannot be used in the name of workspace."
break
self._check_advanced_parameter()
return issues
def PyExec(self):
# 0) Create reporter to report progress
steps = 9
begin = 0
end = 1.0
prog_reporter = Progress(self, begin, end, steps)
# 1) get input parameters from a user
self._get_properties()
prog_reporter.report("Input data from the user has been collected.")
# 2) read ab initio data
ab_initio_loaders = {"CASTEP": AbinsModules.LoadCASTEP, "CRYSTAL": AbinsModules.LoadCRYSTAL,
"DMOL3": AbinsModules.LoadDMOL3, "GAUSSIAN": AbinsModules.LoadGAUSSIAN}
rdr = ab_initio_loaders[self._ab_initio_program](input_ab_initio_filename=self._vibrational_or_phonon_data_file)
ab_initio_data = rdr.get_formatted_data()
prog_reporter.report("Vibrational/phonon data has been read.")
# 3) calculate S
s_calculator = AbinsModules.CalculateS.init(filename=self._vibrational_or_phonon_data_file,
temperature=self._temperature,
sample_form=self._sample_form, abins_data=ab_initio_data,
instrument=self._instrument,
quantum_order_num=self._num_quantum_order_events,
bin_width=self._bin_width)
s_data = s_calculator.get_formatted_data()
prog_reporter.report("Dynamical structure factors have been determined.")
# 4) get atoms for which S should be plotted
self._extracted_ab_initio_data = ab_initio_data.get_atoms_data().extract()
num_atoms = len(self._extracted_ab_initio_data)
all_atms_smbls = list(set([self._extracted_ab_initio_data["atom_%s" % atom]["symbol"]
for atom in range(num_atoms)]))
all_atms_smbls.sort()
if len(self._atoms) == 0: # case: all atoms
atoms_symbol = all_atms_smbls
else: # case selected atoms
if len(self._atoms) != len(set(self._atoms)): # only different types
raise ValueError("Not all user defined atoms are unique.")
for atom_symbol in self._atoms:
if atom_symbol not in all_atms_smbls:
raise ValueError("User defined atom not present in the system.")
atoms_symbol = self._atoms
prog_reporter.report("Atoms, for which dynamical structure factors should be plotted, have been determined.")
# at the moment only types of atom, e.g, for benzene three options -> 1) C, H; 2) C; 3) H
# 5) create workspaces for atoms in interest
workspaces = []
if self._sample_form == "Powder":
workspaces.extend(self._create_partial_s_per_type_workspaces(atoms_symbols=atoms_symbol, s_data=s_data))
prog_reporter.report("Workspaces with partial dynamical structure factors have been constructed.")
# 6) Create a workspace with sum of all atoms if required
if self._sum_contributions:
total_atom_workspaces = []
for ws in workspaces:
if "total" in ws:
total_atom_workspaces.append(ws)
total_workspace = self._create_total_workspace(partial_workspaces=total_atom_workspaces)
workspaces.insert(0, total_workspace)
prog_reporter.report("Workspace with total S has been constructed.")
# 7) add experimental data if available to the collection of workspaces
if self._experimental_file != "":
workspaces.insert(0, self._create_experimental_data_workspace().name())
prog_reporter.report("Workspace with the experimental data has been constructed.")
GroupWorkspaces(InputWorkspaces=workspaces, OutputWorkspace=self._out_ws_name)
# 8) save workspaces to ascii_file
num_workspaces = mtd[self._out_ws_name].getNumberOfEntries()
for wrk_num in range(num_workspaces):
wrk = mtd[self._out_ws_name].getItem(wrk_num)
SaveAscii(InputWorkspace=Scale(wrk, 1.0/self._bin_width, "Multiply"),
Filename=wrk.name() + ".dat", Separator="Space", WriteSpectrumID=False)
prog_reporter.report("All workspaces have been saved to ASCII files.")
# 9) set OutputWorkspace
self.setProperty('OutputWorkspace', self._out_ws_name)
prog_reporter.report("Group workspace with all required dynamical structure factors has been constructed.")
def _create_workspaces(self, atoms_symbols=None, s_data=None):
"""
        Creates both partial and total workspaces for all requested types of atoms.
:param atoms_symbols: list of atom types for which S should be created
:param s_data: dynamical factor data of type SData
        :returns: workspaces for the list of atom types, with S for each particular type of atom
"""
s_data_extracted = s_data.extract()
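        # s_atom_data accumulates the S contributions for one atom type, one block per quantum order event.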
shape = [self._num_quantum_order_events]
shape.extend(list(s_data_extracted["atom_0"]["s"]["order_1"].shape))
s_atom_data = np.zeros(shape=tuple(shape), dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
shape.pop(0)
num_atoms = len([key for key in s_data_extracted.keys() if "atom" in key])
temp_s_atom_data = np.copy(s_atom_data)
result = []
masses = {}
for i in range(num_atoms):
symbol = self._extracted_ab_initio_data["atom_%s" % i]["symbol"]
mass = self._extracted_ab_initio_data["atom_%s" % i]["mass"]
if symbol not in masses:
masses[symbol] = set()
masses[symbol].add(mass)
one_m = AbinsModules.AbinsConstants.ONLY_ONE_MASS
eps = AbinsModules.AbinsConstants.MASS_EPS
# convert set to list to fix order
for s in masses:
masses[s] = sorted(list(set(masses[s])))
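        # An element counts as isotopically substituted if it has more than one mass, or a mass that differs
        # from the standard atomic mass.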
for symbol in atoms_symbols:
sub = len(masses[symbol]) > one_m or abs(Atom(symbol=symbol).mass - masses[symbol][0]) > eps
for m in masses[symbol]:
result.extend(self._atom_type_s(num_atoms=num_atoms, mass=m, s_data_extracted=s_data_extracted,
element_symbol=symbol, temp_s_atom_data=temp_s_atom_data,
s_atom_data=s_atom_data, substitution=sub))
return result
def _atom_type_s(self, num_atoms=None, mass=None, s_data_extracted=None, element_symbol=None, temp_s_atom_data=None,
s_atom_data=None, substitution=None):
"""
Helper function for calculating S for the given type of atom
:param num_atoms: number of atoms in the system
:param s_data_extracted: data with all S
:param element_symbol: label for the type of atom
:param temp_s_atom_data: helper array to store S
:param s_atom_data: stores all S for the given type of atom
:param substitution: True if isotope substitution and False otherwise
"""
atom_workspaces = []
s_atom_data.fill(0.0)
element = Atom(symbol=element_symbol)
for atom in range(num_atoms):
eps = AbinsModules.AbinsConstants.MASS_EPS
if (self._extracted_ab_initio_data["atom_%s" % atom]["symbol"] == element_symbol and
abs(self._extracted_ab_initio_data["atom_%s" % atom]["mass"] - mass) < eps):
temp_s_atom_data.fill(0.0)
for order in range(AbinsModules.AbinsConstants.FUNDAMENTALS,
self._num_quantum_order_events + AbinsModules.AbinsConstants.S_LAST_INDEX):
order_indx = order - AbinsModules.AbinsConstants.PYTHON_INDEX_SHIFT
temp_s_order = s_data_extracted["atom_%s" % atom]["s"]["order_%s" % order]
temp_s_atom_data[order_indx] = temp_s_order
s_atom_data += temp_s_atom_data # sum S over the atoms of the same type
total_s_atom_data = np.sum(s_atom_data, axis=0)
nucleons_number = int(round(mass))
if substitution:
atom_workspaces.append(self._create_workspace(atom_name=str(nucleons_number) + element_symbol,
s_points=np.copy(total_s_atom_data),
optional_name="_total", protons_number=element.z_number,
nucleons_number=nucleons_number))
atom_workspaces.append(self._create_workspace(atom_name=str(nucleons_number) + element_symbol,
s_points=np.copy(s_atom_data),
protons_number=element.z_number,
nucleons_number=nucleons_number))
else:
atom_workspaces.append(self._create_workspace(atom_name=element_symbol,
s_points=np.copy(total_s_atom_data),
optional_name="_total", protons_number=element.z_number))
atom_workspaces.append(self._create_workspace(atom_name=element_symbol,
s_points=np.copy(s_atom_data),
protons_number=element.z_number))
return atom_workspaces
def _create_partial_s_per_type_workspaces(self, atoms_symbols=None, s_data=None):
"""
Creates workspaces for all types of atoms. Each workspace stores quantum order events for S for the given
        type of atom. It also stores the total workspace for the given type of atom.
:param atoms_symbols: list of atom types for which quantum order events of S should be calculated
:param s_data: dynamical factor data of type SData
        :returns: workspaces for the list of atom types; each workspace contains quantum order events of
S for the particular atom type
"""
return self._create_workspaces(atoms_symbols=atoms_symbols, s_data=s_data)
def _fill_s_workspace(self, s_points=None, workspace=None, protons_number=None, nucleons_number=None):
"""
Puts S into workspace(s).
:param s_points: dynamical factor for the given atom
:param workspace: workspace to be filled with S
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
"""
if self._instrument.get_name() in AbinsModules.AbinsConstants.ONE_DIMENSIONAL_INSTRUMENTS:
# only FUNDAMENTALS
if s_points.shape[0] == AbinsModules.AbinsConstants.FUNDAMENTALS:
self._fill_s_1d_workspace(s_points=s_points[0], workspace=workspace, protons_number=protons_number,
nucleons_number=nucleons_number)
# total workspaces
elif len(s_points.shape) == AbinsModules.AbinsConstants.ONE_DIMENSIONAL_SPECTRUM:
self._fill_s_1d_workspace(s_points=s_points, workspace=workspace, protons_number=protons_number,
nucleons_number=nucleons_number)
# quantum order events (fundamentals or overtones + combinations for the given order)
else:
dim = s_points.shape[0]
partial_wrk_names = []
for n in range(dim):
seed = "quantum_event_%s" % (n + 1)
wrk_name = workspace + "_" + seed
partial_wrk_names.append(wrk_name)
self._fill_s_1d_workspace(s_points=s_points[n], workspace=wrk_name, protons_number=protons_number,
nucleons_number=nucleons_number)
GroupWorkspaces(InputWorkspaces=partial_wrk_names, OutputWorkspace=workspace)
def _fill_s_1d_workspace(self, s_points=None, workspace=None, protons_number=None, nucleons_number=None):
"""
Puts 1D S into workspace.
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
:param s_points: dynamical factor for the given atom
:param workspace: workspace to be filled with S
"""
if protons_number is not None:
s_points = s_points * self._scale * self._get_cross_section(protons_number=protons_number,
nucleons_number=nucleons_number)
dim = 1
length = s_points.size
wrk = WorkspaceFactory.create("Workspace2D", NVectors=dim, XLength=length + 1, YLength=length)
for i in range(dim):
wrk.getSpectrum(i).setDetectorID(i + 1)
wrk.setX(0, self._bins)
wrk.setY(0, s_points)
AnalysisDataService.addOrReplace(workspace, wrk)
# Set correct units on workspace
self._set_workspace_units(wrk=workspace)
def _get_cross_section(self, protons_number=None, nucleons_number=None):
"""
Calculates cross section for the given element.
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
:returns: cross section for that element
"""
if nucleons_number is not None:
try:
atom = Atom(a_number=nucleons_number, z_number=protons_number)
# isotopes are not implemented for all elements so use different constructor in that cases
except RuntimeError:
atom = Atom(z_number=protons_number)
else:
atom = Atom(z_number=protons_number)
cross_section = None
if self._scale_by_cross_section == 'Incoherent':
cross_section = atom.neutron()["inc_scatt_xs"]
elif self._scale_by_cross_section == 'Coherent':
cross_section = atom.neutron()["coh_scatt_xs"]
elif self._scale_by_cross_section == 'Total':
cross_section = atom.neutron()["tot_scatt_xs"]
return cross_section
def _create_total_workspace(self, partial_workspaces=None):
"""
Sets workspace with total S.
:param partial_workspaces: list of workspaces which should be summed up to obtain total workspace
:returns: workspace with total S from partial_workspaces
"""
total_workspace = self._out_ws_name + "_total"
if isinstance(mtd[partial_workspaces[0]], WorkspaceGroup):
local_partial_workspaces = mtd[partial_workspaces[0]].names()
else:
local_partial_workspaces = partial_workspaces
if len(local_partial_workspaces) > 1:
# get frequencies
ws = mtd[local_partial_workspaces[0]]
# initialize S
s_atoms = np.zeros_like(ws.dataY(0))
# collect all S
for partial_ws in local_partial_workspaces:
if self._instrument.get_name() in AbinsModules.AbinsConstants.ONE_DIMENSIONAL_INSTRUMENTS:
s_atoms += mtd[partial_ws].dataY(0)
# create workspace with S
self._fill_s_workspace(s_atoms, total_workspace)
        # Otherwise just repackage the workspace we have as the total
else:
CloneWorkspace(InputWorkspace=local_partial_workspaces[0], OutputWorkspace=total_workspace)
return total_workspace
def _create_workspace(self, atom_name=None, s_points=None, optional_name="", protons_number=None,
nucleons_number=None):
"""
        Creates workspace for the given frequencies and s_points with S data. After the workspace is created it is rebinned,
scaled by cross-section factor and optionally multiplied by the user defined scaling factor.
:param atom_name: symbol of atom for which workspace should be created
:param s_points: S(Q, omega)
:param optional_name: optional part of workspace name
:returns: workspace for the given frequency and S data
        :param protons_number: number of protons in the given type of atom
:param nucleons_number: number of nucleons in the given type of atom
"""
ws_name = self._out_ws_name + "_" + atom_name + optional_name
self._fill_s_workspace(s_points=s_points, workspace=ws_name, protons_number=protons_number,
nucleons_number=nucleons_number)
return ws_name
def _create_experimental_data_workspace(self):
"""
Loads experimental data into workspaces.
:returns: workspace with experimental data
"""
experimental_wrk = Load(self._experimental_file)
self._set_workspace_units(wrk=experimental_wrk.name())
return experimental_wrk
def _set_workspace_units(self, wrk=None):
"""
Sets x and y units for a workspace.
        :param wrk: workspace whose units should be set
"""
mtd[wrk].getAxis(0).setUnit("DeltaE_inWavenumber")
mtd[wrk].setYUnitLabel("S /Arbitrary Units")
mtd[wrk].setYUnit("Arbitrary Units")
def _check_advanced_parameter(self):
"""
Checks if parameters from AbinsParameters.py are valid. If any parameter is invalid then RuntimeError is thrown
with meaningful message.
"""
message = " in AbinsParameters.py. "
self._check_general_resolution(message)
self._check_tosca_parameters(message)
self._check_folder_names(message)
self._check_rebining(message)
self._check_threshold(message)
self._check_chunk_size(message)
self._check_threads(message)
def _check_general_resolution(self, message_end=None):
"""
Checks general parameters used in construction resolution functions.
:param message_end: closing part of the error message.
"""
# check fwhm
fwhm = AbinsModules.AbinsParameters.fwhm
if not (isinstance(fwhm, float) and 0.0 < fwhm < 10.0):
raise RuntimeError("Invalid value of fwhm" + message_end)
# check delta_width
delta_width = AbinsModules.AbinsParameters.delta_width
if not (isinstance(delta_width, float) and 0.0 < delta_width < 1.0):
raise RuntimeError("Invalid value of delta_width" + message_end)
def _check_tosca_parameters(self, message_end=None):
"""
Checks TOSCA parameters.
:param message_end: closing part of the error message.
"""
# TOSCA final energy in cm^-1
final_energy = AbinsModules.AbinsParameters.tosca_final_neutron_energy
if not (isinstance(final_energy, float) and final_energy > 0.0):
raise RuntimeError("Invalid value of final_neutron_energy for TOSCA" + message_end)
angle = AbinsModules.AbinsParameters.tosca_cos_scattering_angle
if not isinstance(angle, float):
raise RuntimeError("Invalid value of cosines scattering angle for TOSCA" + message_end)
resolution_const_a = AbinsModules.AbinsParameters.tosca_a
if not isinstance(resolution_const_a, float):
raise RuntimeError("Invalid value of constant A for TOSCA (used by the resolution TOSCA function)" +
message_end)
resolution_const_b = AbinsModules.AbinsParameters.tosca_b
if not isinstance(resolution_const_b, float):
raise RuntimeError("Invalid value of constant B for TOSCA (used by the resolution TOSCA function)" +
message_end)
resolution_const_c = AbinsModules.AbinsParameters.tosca_c
if not isinstance(resolution_const_c, float):
raise RuntimeError("Invalid value of constant C for TOSCA (used by the resolution TOSCA function)" +
message_end)
def _check_folder_names(self, message_end=None):
"""
Checks folders names.
:param message_end: closing part of the error message.
"""
folder_names = []
ab_initio_group = AbinsModules.AbinsParameters.ab_initio_group
if not isinstance(ab_initio_group, str) or ab_initio_group == "":
raise RuntimeError("Invalid name for folder in which the ab initio data should be stored.")
folder_names.append(ab_initio_group)
powder_data_group = AbinsModules.AbinsParameters.powder_data_group
if not isinstance(powder_data_group, str) or powder_data_group == "":
raise RuntimeError("Invalid value of powder_data_group" + message_end)
elif powder_data_group in folder_names:
raise RuntimeError("Name for powder_data_group already used by as name of another folder.")
folder_names.append(powder_data_group)
crystal_data_group = AbinsModules.AbinsParameters.crystal_data_group
if not isinstance(crystal_data_group, str) or crystal_data_group == "":
raise RuntimeError("Invalid value of crystal_data_group" + message_end)
elif crystal_data_group in folder_names:
raise RuntimeError("Name for crystal_data_group already used as a name of another folder.")
s_data_group = AbinsModules.AbinsParameters.s_data_group
if not isinstance(s_data_group, str) or s_data_group == "":
raise RuntimeError("Invalid value of s_data_group" + message_end)
elif s_data_group in folder_names:
raise RuntimeError("Name for s_data_group already used as a name of another folder.")
def _check_rebining(self, message_end=None):
"""
Checks rebinning parameters.
:param message_end: closing part of the error message.
"""
pkt_per_peak = AbinsModules.AbinsParameters.pkt_per_peak
if not (isinstance(pkt_per_peak, six.integer_types) and 1 <= pkt_per_peak <= 1000):
raise RuntimeError("Invalid value of pkt_per_peak" + message_end)
min_wavenumber = AbinsModules.AbinsParameters.min_wavenumber
if not (isinstance(min_wavenumber, float) and min_wavenumber >= 0.0):
raise RuntimeError("Invalid value of min_wavenumber" + message_end)
max_wavenumber = AbinsModules.AbinsParameters.max_wavenumber
if not (isinstance(max_wavenumber, float) and max_wavenumber > 0.0):
raise RuntimeError("Invalid number of max_wavenumber" + message_end)
if min_wavenumber > max_wavenumber:
raise RuntimeError("Invalid energy window for rebinning.")
def _check_threshold(self, message_end=None):
"""
Checks threshold for frequencies.
:param message_end: closing part of the error message.
"""
freq_threshold = AbinsModules.AbinsParameters.frequencies_threshold
if not (isinstance(freq_threshold, float) and freq_threshold >= 0.0):
raise RuntimeError("Invalid value of frequencies_threshold" + message_end)
# check s threshold
s_absolute_threshold = AbinsModules.AbinsParameters.s_absolute_threshold
if not (isinstance(s_absolute_threshold, float) and s_absolute_threshold > 0.0):
raise RuntimeError("Invalid value of s_absolute_threshold" + message_end)
s_relative_threshold = AbinsModules.AbinsParameters.s_relative_threshold
if not (isinstance(s_relative_threshold, float) and s_relative_threshold > 0.0):
raise RuntimeError("Invalid value of s_relative_threshold" + message_end)
def _check_chunk_size(self, message_end=None):
"""
        Checks the optimal chunk size.
:param message_end: closing part of the error message.
"""
optimal_size = AbinsModules.AbinsParameters.optimal_size
if not (isinstance(optimal_size, six.integer_types) and optimal_size > 0):
raise RuntimeError("Invalid value of optimal_size" + message_end)
def _check_threads(self, message_end=None):
"""
Checks number of threads
:param message_end: closing part of the error message.
"""
if PATHOS_FOUND:
threads = AbinsModules.AbinsParameters.threads
if not (isinstance(threads, six.integer_types) and 1 <= threads <= mp.cpu_count()):
raise RuntimeError("Invalid number of threads for parallelisation over atoms" + message_end)
def _validate_ab_initio_file_extension(self, filename_full_path=None, expected_file_extension=None):
"""
Checks consistency between name of ab initio program and extension.
        :param filename_full_path: full path of the file to check
        :param expected_file_extension: file extension expected for the selected ab initio program
        :returns: dictionary with entries "Invalid" and "Comment" describing any error
"""
ab_initio_program = self.getProperty("AbInitioProgram").value
msg_err = "Invalid %s file. " % filename_full_path
msg_rename = "Please rename your file and try again."
# check extension of a file
found_filename_ext = os.path.splitext(filename_full_path)[1]
if found_filename_ext.lower() != expected_file_extension:
return dict(Invalid=True,
Comment=msg_err + "Output from ab initio program " + ab_initio_program + " is expected." +
" The expected extension of file is ." + expected_file_extension +
". Found: " + found_filename_ext + ". " + msg_rename)
else:
return dict(Invalid=False, Comment="")
def _validate_dmol3_input_file(self, filename_full_path=None):
"""
Method to validate input file for DMOL3 ab initio program.
:param filename_full_path: full path of a file to check.
        :returns: dictionary with validation result ("Invalid" flag and "Comment" message)
"""
logger.information("Validate DMOL3 file with vibrational data.")
return self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".outmol")
def _validate_gaussian_input_file(self, filename_full_path=None):
"""
Method to validate input file for GAUSSIAN ab initio program.
:param filename_full_path: full path of a file to check.
        :returns: dictionary with validation result ("Invalid" flag and "Comment" message)
"""
logger.information("Validate GAUSSIAN file with vibration data.")
return self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".log")
def _validate_crystal_input_file(self, filename_full_path=None):
"""
Method to validate input file for CRYSTAL ab initio program.
:param filename_full_path: full path of a file to check.
        :returns: dictionary with validation result ("Invalid" flag and "Comment" message)
"""
logger.information("Validate CRYSTAL file with vibrational or phonon data.")
return self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".out")
def _validate_castep_input_file(self, filename_full_path=None):
"""
Check if ab initio input vibrational or phonon file has been produced by CASTEP. Currently the crucial
keywords in the first few lines are checked (to be modified if a better validation is found...)
:param filename_full_path: full path of a file to check
        :returns: Dictionary with two entries: "Invalid" and "Comment". "Invalid" can be True or False. "Comment" is an
                  empty string if the file is valid, otherwise it describes the problem.
"""
logger.information("Validate CASTEP file with vibrational or phonon data.")
msg_err = "Invalid %s file. " % filename_full_path
output = self._validate_ab_initio_file_extension(filename_full_path=filename_full_path,
expected_file_extension=".phonon")
if output["Invalid"]:
return output
        # check the structure of the header part of the file.
        # Fortran convention is followed here: letter case does not matter
with open(filename_full_path) as castep_file:
line = self._get_one_line(castep_file)
if not self._compare_one_line(line, "beginheader"): # first line is BEGIN header
return dict(Invalid=True, Comment=msg_err + "The first line should be 'BEGIN header'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofions"):
return dict(Invalid=True, Comment=msg_err + "The second line should include 'Number of ions'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofbranches"):
return dict(Invalid=True, Comment=msg_err + "The third line should include 'Number of branches'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofwavevectors"):
return dict(Invalid=True, Comment=msg_err + "The fourth line should include 'Number of wavevectors'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line,
pattern="frequenciesin"):
return dict(Invalid=True, Comment=msg_err + "The fifth line should be 'Frequencies in'.")
return output
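    # Illustrative note (an editorial addition, derived from the checks above rather than from any
    # CASTEP specification): after lower-casing and removing spaces, the validator expects a header
    # of the shape
    #
    #   BEGIN header
    #   Number of ions              <n>
    #   Number of branches          <n>
    #   Number of wavevectors       <n>
    #   Frequencies in              <unit>
    #
    # which _get_one_line() reduces to 'beginheader', 'numberofions', 'numberofbranches',
    # 'numberofwavevectors' and 'frequenciesin' before the substring test in _compare_one_line().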
def _get_one_line(self, file_obj=None):
"""
:param file_obj: file object from which reading is done
        :returns: the first non-empty line, lower-cased and with spaces removed (empty string at end of file)
"""
        line = file_obj.readline().replace(" ", "").lower()
        # skip blank lines; readline() returns an empty string only at end of file
        while line and line.strip() == "":
            line = file_obj.readline().replace(" ", "").lower()
        return line
def _compare_one_line(self, one_line, pattern):
"""
        Compares a line, given as a string, with a pattern.
        :param one_line: line in the form of a string to be compared
:param pattern: string which should be present in the line after removing white spaces and setting all
letters to lower case
        :returns: True if the pattern is present in the line, otherwise False
"""
return one_line and pattern in one_line.replace(" ", "")
def _get_properties(self):
"""
Loads all properties to object's attributes.
"""
self._ab_initio_program = self.getProperty("AbInitioProgram").value
self._vibrational_or_phonon_data_file = self.getProperty("VibrationalOrPhononFile").value
self._experimental_file = self.getProperty("ExperimentalFile").value
self._temperature = self.getProperty("TemperatureInKelvin").value
self._bin_width = self.getProperty("BinWidthInWavenumber").value
self._scale = self.getProperty("Scale").value
self._sample_form = self.getProperty("SampleForm").value
instrument_name = self.getProperty("Instrument").value
if instrument_name in AbinsModules.AbinsConstants.ALL_INSTRUMENTS:
self._instrument_name = instrument_name
instrument_producer = AbinsModules.InstrumentProducer()
self._instrument = instrument_producer.produce_instrument(name=self._instrument_name)
else:
raise ValueError("Unknown instrument %s" % instrument_name)
self._atoms = self.getProperty("Atoms").value
self._sum_contributions = self.getProperty("SumContributions").value
# conversion from str to int
self._num_quantum_order_events = int(self.getProperty("QuantumOrderEventsNumber").value)
self._scale_by_cross_section = self.getPropertyValue('ScaleByCrossSection')
self._out_ws_name = self.getPropertyValue('OutputWorkspace')
self._calc_partial = (len(self._atoms) > 0)
        # the energy grid runs from
        # AbinsModules.AbinsParameters.min_wavenumber
        # to AbinsModules.AbinsParameters.max_wavenumber
        # with the user-defined bin width self._bin_width (upper edge exclusive)
step = self._bin_width
start = AbinsModules.AbinsParameters.min_wavenumber + step / 2.0
stop = AbinsModules.AbinsParameters.max_wavenumber + step / 2.0
self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
AlgorithmFactory.subscribe(Abins)
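# --- Illustrative sketch (editorial addition, not part of the Mantid algorithm above) ---
# A minimal example of the rebinning grid built in _get_properties(): bin centres run from
# min_wavenumber to max_wavenumber in steps of the chosen bin width, shifted by half a bin.
# The argument defaults below are assumptions for illustration, not AbinsParameters values.
def _example_bins(min_wavenumber=0.0, max_wavenumber=4000.0, bin_width=1.0):
    start = min_wavenumber + bin_width / 2.0
    stop = max_wavenumber + bin_width / 2.0
    return np.arange(start=start, stop=stop, step=bin_width)
# e.g. _example_bins(0.0, 100.0, 10.0) -> array([  5.,  15., ...,  95.])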
| gpl-3.0 | 8,994,032,376,073,273,000 | 47.804762 | 120 | 0.609791 | false |
Oli76/rwslib | rwslib/builder_constants.py | 1 | 6274 | # -*- coding: utf-8 -*-
__author__ = 'isparks'
import enum
class DataType(enum.Enum):
"""ODM Data Types"""
Text = 'text'
Integer = 'integer'
Float = 'float'
Date = 'date'
DateTime = 'datetime'
Time = 'time'
String = 'string' # Used only by codelists
class QueryStatusType(enum.Enum):
"""MdsolQuery action type"""
Open = "Open"
Cancelled = "Cancelled"
Answered = "Answered"
Forwarded = "Forwarded"
Closed = "Closed"
class StepType(enum.Enum):
"""Edit/Derivation step types"""
CustomFunction = "CustomFunction"
IsEmpty = "IsEmpty"
IsNotEmpty = "IsNotEmpty"
Contains = "Contains"
StartsWith = "StartsWith"
IsLessThan = "IsLessThan"
IsLessThanOrEqualTo = "IsLessThanOrEqualTo"
IsGreaterThan = "IsGreaterThan"
IsGreaterThanOrEqualTo = "IsGreaterThanOrEqualTo"
IsEqualTo = "IsEqualTo"
IsNonConformant = "IsNonConformant"
IsNotEqualTo = "IsNotEqualTo"
InLocalLabRange = "InLocalLabRange"
LengthIsLessThan = "LengthIsLessThan"
LengthIsLessThanOrEqualTo = "LengthIsLessThanOrEqualTo"
LengthIsGreaterThan = "LengthIsGreaterThan"
LengthIsGreaterThanOrEqualTo = "LengthIsGreaterThanOrEqualTo"
LengthIsEqualTo = "LengthIsEqualTo"
Or = "Or"
And = "And"
Not = "Not"
Now = "Now"
IsPresent = "IsPresent"
IsActive = "IsActive"
Add = "Add"
Subtract = "Subtract"
Multiply = "Multiply"
Divide = "Divide"
AddDay = "AddDay"
AddMonth = "AddMonth"
AddYear = "AddYear"
AddSec = "AddSec"
AddMin = "AddMin"
AddHour = "AddHour"
DaySpan = "DaySpan"
TimeSpan = "TimeSpan"
Age = "Age"
StringAdd = "StringAdd"
Space = "Space"
ALL_STEPS = [StepType.CustomFunction,
StepType.IsEmpty,
StepType.IsNotEmpty,
StepType.Contains,
StepType.StartsWith,
StepType.IsLessThan,
StepType.IsLessThanOrEqualTo,
StepType.IsGreaterThan,
StepType.IsGreaterThanOrEqualTo,
StepType.IsEqualTo,
StepType.IsNonConformant,
StepType.IsNotEqualTo,
StepType.InLocalLabRange,
StepType.LengthIsLessThan,
StepType.LengthIsLessThanOrEqualTo,
StepType.LengthIsGreaterThan,
StepType.LengthIsGreaterThanOrEqualTo,
StepType.LengthIsEqualTo,
StepType.Or,
StepType.And,
StepType.Not,
StepType.Now,
StepType.IsPresent,
StepType.IsActive,
StepType.Add,
StepType.Subtract,
StepType.Multiply,
StepType.Divide,
StepType.AddDay,
StepType.AddMonth,
StepType.AddYear,
StepType.AddSec,
StepType.AddMin,
StepType.AddHour,
StepType.DaySpan,
StepType.TimeSpan,
StepType.Age,
StepType.StringAdd]
# Note: Missing 2015 additions to edit check step functions.
VALID_DERIVATION_STEPS = [
StepType.Age,
StepType.Subtract,
StepType.Multiply,
StepType.Divide,
StepType.AddDay,
StepType.AddMonth,
StepType.AddYear,
StepType.AddSec,
StepType.AddMin,
StepType.AddHour,
StepType.DaySpan,
StepType.TimeSpan,
StepType.Now,
StepType.StringAdd,
StepType.CustomFunction,
StepType.Space,
StepType.Add
]
class ActionType(enum.Enum):
OpenQuery = "OpenQuery"
RequireReview = "RequireReview"
RequireVerification = "RequireVerification"
AddComment = "AddComment"
AddDeviation = "AddDeviation"
CustomFunction = "CustomFunction"
PlaceSticky = "PlaceSticky"
AddForm = "AddForm"
AddMatrix = "AddMatrix"
MrgMatrix = "MrgMatrix"
OldMrgMatrix = "OldMrgMatrix"
SetNonconformant = "SetNonconformant"
SendMessage = "SendMessage"
SetDataPoint = "SetDataPoint"
SetTimeZero = "SetTimeZero"
SetTimeForward = "SetTimeForward"
SetSubjectStatus = "SetSubjectStatus"
SetSubjectName = "SetSubjectName"
UpdateFormName = "UpdateFormName"
UpdateFolderName = "UpdateFolderName"
SetRecordDate = "SetRecordDate"
SetDataPageDate = "SetDataPageDate"
SetInstanceDate = "SetInstanceDate"
SetSubjectDate = "SetSubjectDate"
SetDataPointVisible = "SetDataPointVisible"
SetSecondarySubjectName = "SetSecondarySubjectName"
SetFormRequiresSignature = "SetFormRequiresSignature"
SetFolderRequiresSignature = "SetFolderRequiresSignature"
SetSubjectRequiresSignature = "SetSubjectRequiresSignature"
SetDynamicSearchList = "SetDynamicSearchList"
ALL_ACTIONS = [
ActionType.OpenQuery,
ActionType.RequireReview,
ActionType.RequireVerification,
ActionType.AddComment,
ActionType.AddDeviation,
ActionType.CustomFunction,
ActionType.PlaceSticky,
ActionType.AddForm,
ActionType.AddMatrix,
ActionType.MrgMatrix,
ActionType.OldMrgMatrix,
ActionType.SetNonconformant,
ActionType.SendMessage,
ActionType.SetDataPoint,
ActionType.SetTimeZero,
ActionType.SetTimeForward,
ActionType.SetSubjectStatus,
ActionType.SetSubjectName,
ActionType.UpdateFormName,
ActionType.UpdateFolderName,
ActionType.SetRecordDate,
ActionType.SetDataPageDate,
ActionType.SetInstanceDate,
ActionType.SetSubjectDate,
ActionType.SetDataPointVisible,
ActionType.SetSecondarySubjectName,
ActionType.SetFormRequiresSignature,
ActionType.SetFolderRequiresSignature,
ActionType.SetSubjectRequiresSignature,
ActionType.SetDynamicSearchList
]
class RangeCheckComparatorType(enum.Enum):
LessThanEqualTo = 'LE'
GreaterThanEqualTo = 'GE'
class RangeCheckType(enum.Enum):
Soft = 'Soft'
Hard = 'Hard'
class ControlType(enum.Enum):
CheckBox = 'CheckBox'
Text = 'Text'
DateTime = 'DateTime'
DropDownList = 'DropDownList'
SearchList = 'SearchList'
RadioButton = 'RadioButton'
RadioButtonVertical = 'RadioButton (Vertical)'
FileUpload = 'File Upload'
LongText = 'LongText'
SignaturePage = 'Signature page'
SignatureFolder = 'Signature folder'
SignatureSubject = 'Signature subject'
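# --- Illustrative sketch (editorial addition, not part of rwslib itself) ---
# The classes above are plain enum.Enum members, so they can be inspected with
# standard-library operations only; no rwslib builder API is assumed here.
if __name__ == "__main__":
    # each member carries the literal string emitted into the ODM/ALS XML
    assert DataType.Integer.value == "integer"
    # look an action up by its XML name
    assert ActionType("OpenQuery") is ActionType.OpenQuery
    # derivation steps overlap the edit-check steps except for StepType.Space
    assert set(VALID_DERIVATION_STEPS) - set(ALL_STEPS) == {StepType.Space}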
| mit | -2,006,527,805,749,792,300 | 27.261261 | 65 | 0.669908 | false |
litong01/python-monasca | kiloeyes/v2/elasticsearch/versions.py | 1 | 1877 | # Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import falcon
from kiloeyes.common import resource_api
from kiloeyes.openstack.common import log
try:
import ujson as json
except ImportError:
import json
LOG = log.getLogger(__name__)
UPDATED = str(datetime.datetime(2014, 1, 1, 0, 0, 0))
class VersionDispatcher(object):
def __init__(self, global_conf):
LOG.debug('initializing V2API!')
super(VersionDispatcher, self).__init__()
@resource_api.Restify('/', method='get')
def get_versions(self, req, res):
res.body = json.dumps([{
"id": "v2.0",
"links": [{"rel": "self",
"href": req.uri}],
"status": "CURRENT",
"updated": UPDATED}])
res.status = getattr(falcon, 'HTTP_200')
@resource_api.Restify('/{version_id}', method='get')
def get_version_by_id(self, req, res, version_id):
if version_id in ['v2.0', '2.0', '2']:
res.body = json.dumps({
"id": "v2.0",
"links": [{"rel": "self",
"href": req.uri}],
"status": "CURRENT",
"updated": UPDATED})
res.status = getattr(falcon, 'HTTP_200')
else:
res.body = ''
res.status = getattr(falcon, 'HTTP_501')
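# --- Illustrative sketch (editorial addition, not part of kiloeyes) ---
# VersionDispatcher only reads req.uri and writes res.body / res.status, so it can be
# exercised with simple stand-in objects, assuming the resource_api.Restify decorator
# leaves the methods callable as plain functions (an assumption, not verified here).
class _FakeReq(object):
    uri = 'http://localhost:8080/'
class _FakeRes(object):
    body = ''
    status = ''
if __name__ == '__main__':
    dispatcher = VersionDispatcher(global_conf=None)
    req, res = _FakeReq(), _FakeRes()
    dispatcher.get_versions(req, res)
    LOG.debug('%s %s' % (res.status, res.body))  # expected status: falcon HTTP_200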
| apache-2.0 | -6,706,174,373,474,287,000 | 31.362069 | 75 | 0.595631 | false |
alex-ip/agdc | agdc/testdb.py | 1 | 9300 | #!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
"""
Command line interface to test db server utilities.
"""
from __future__ import absolute_import
from __future__ import print_function
import argparse
import os
import sys
import logging
import re
from . import dbutil
#
# Temporary test database pattern
#
# This is the regular expression used to identify a test database.
#
# The current pattern looks for a name containing 'test' and ending
# in an underscore followed by a 9 digit number.
#
TESTDB_PATTERN = r".*test.*_\d{9}$"
#
# Default database file
#
# This is the path to the empty hypercube dump used as a base for newly
# created databases.
#
DEFAULT_DBFILE = os.path.join(dbutil.TEST_RESOURCES_ROOT,
'databases/hypercube_empty.sql')
#
# Temporary directory.
#
# This is used for a temporary copy of the config file.
#
#TEMP_DIR = dbutil.temp_directory()
TEMP_DIR = './temp'
#
# Set up logging
#
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
LOGGER = logging.getLogger()
#
# Argument parser setup functions
#
def command_line_parser():
"""Return the top level parser."""
description = "Run utility commands on the test database server."
parser = argparse.ArgumentParser(description=description)
subparser_factory = parser.add_subparsers(title="subcommands")
add_create_subcommand(subparser_factory)
add_save_subcommand(subparser_factory)
add_drop_subcommand(subparser_factory)
add_list_subcommand(subparser_factory)
add_cleanup_subcommand(subparser_factory)
# add_dbupdate_subcommand(subparser_factory)
return parser
def add_create_subcommand(subparser_factory):
"""Add a subparser for the create subcommand."""
create_help = "Create and load a database from an sql dump file."
subparser = subparser_factory.add_parser('create', help=create_help,
description=create_help)
dbname_help = "The name of the database to be created."
subparser.add_argument('dbname', help=dbname_help)
dbfile_help = ("An sql database dump to be loaded into the new " +
"database. If not given, an empty hypercube database " +
"will be loaded.")
subparser.add_argument('dbfile', help=dbfile_help, nargs='?',
default=DEFAULT_DBFILE)
subparser.set_defaults(subcommand=run_create_subcommand)
def add_save_subcommand(subparser_factory):
"""Add a subparser for the save subcommand."""
save_help = "Save a database to an sql dump file."
subparser = subparser_factory.add_parser('save', help=save_help,
description=save_help)
dbname_help = "The name of the database to be saved."
subparser.add_argument('dbname', help=dbname_help)
dbfile_help = "The sql dump file to save to."
subparser.add_argument('dbfile', help=dbfile_help)
subparser.set_defaults(subcommand=run_save_subcommand)
def add_drop_subcommand(subparser_factory):
"""Add a subparser for the drop subcommand."""
drop_help = "Drop a database from the test server."
subparser = subparser_factory.add_parser('drop', help=drop_help,
description=drop_help)
dbname_help = "The name of the database to drop."
subparser.add_argument('dbname', help=dbname_help)
subparser.set_defaults(subcommand=run_drop_subcommand)
def add_list_subcommand(subparser_factory):
"""Add a subparser for the list subcommand."""
list_help = "List the databases on the test server."
subparser = subparser_factory.add_parser('list', help=list_help,
description=list_help)
subparser.set_defaults(subcommand=run_list_subcommand)
def add_cleanup_subcommand(subparser_factory):
"""Add a subparser for the cleanup subcommand."""
cleanup_help = "Drop all temporary test databases."
description = (cleanup_help + " Note that running " +
"this command may cause tests currently running to fail.")
subparser = subparser_factory.add_parser('cleanup', help=cleanup_help,
description=description)
subparser.set_defaults(subcommand=run_cleanup_subcommand)
def add_dbupdate_subcommand(subparser_factory):
"""Add a subparser for the dbupdate subcommand."""
dbupdate_help = "Run dbupdater.py to catalog a dataset or datasets."
description = (dbupdate_help + " This will create an acquisition_record " +
"and a dataset_record if they do not already exist.")
subparser = subparser_factory.add_parser('dbupdate', help=dbupdate_help,
description=description)
dbname_help = "The name of the database to update."
subparser.add_argument('dbname', help=dbname_help)
source_dir_help = "The source directory for the datasets."
subparser.add_argument('source_dir', help=source_dir_help)
subparser.set_defaults(subcommand=run_dbupdate_subcommand)
#
# Subcommand functions
#
def run_create_subcommand(args):
"""Run the create subcommand."""
LOGGER.debug("Running create subcommand:")
LOGGER.debug(" dbname = %s", args.dbname)
LOGGER.debug(" dbfile = %s", args.dbfile)
dbutil.TESTSERVER.create(args.dbname, "", args.dbfile)
def run_save_subcommand(args):
"""Run the save subcommand."""
LOGGER.debug("Running save subcommand:")
LOGGER.debug(" dbname = %s", args.dbname)
LOGGER.debug(" dbfile = %s", args.dbfile)
dbutil.TESTSERVER.save(args.dbname, "", args.dbfile)
def run_drop_subcommand(args):
"""Run the drop subcommand."""
LOGGER.debug("Running drop subcommand:")
LOGGER.debug(" dbname = %s", args.dbname)
dbutil.TESTSERVER.drop(args.dbname)
def run_list_subcommand(dummy_args):
"""Run the list subcommand."""
LOGGER.debug("Running list subcommand:")
dblist = dbutil.TESTSERVER.dblist()
for dbname in sorted(dblist):
print(dbname)
def run_cleanup_subcommand(dummy_args):
"""Run the cleanup subcommand."""
LOGGER.debug("Running cleanup subcommand:")
dblist = dbutil.TESTSERVER.dblist()
test_dblist = [db for db in dblist if re.match(TESTDB_PATTERN, db)]
print("Dropping temporary test databases:")
if test_dblist:
for dbname in test_dblist:
print(" %s" % dbname)
dbutil.TESTSERVER.drop(dbname)
else:
print(" nothing to do.")
def run_dbupdate_subcommand(args):
"""Run the dbupdate subcommand."""
raise NotImplementedError
# def run_dbupdate_subcommand(args):
# """Run the dbupdate subcommand."""
# LOGGER.debug("Running dbupdate subcommand:")
# LOGGER.debug(" dbname = %s", args.dbname)
# LOGGER.debug(" source_dir = %s", args.source_dir)
# config_file_name = dbutil.random_name("test_datacube") + ".conf"
# config_file_path = dbutil.make_config_file(args.dbname, TEMP_DIR,
# config_file_name)
# dbupdater_cmd = ["python",
# "dbupdater.py",
# "--debug",
# "--config=%s" % config_file_path,
# "--source=%s" % args.source_dir,
# "--removedblist",
# "--followsymlinks"]
#     result = execute(dbupdater_cmd, shell=False)
#
# Main program
#
if __name__ == '__main__':
ARGS = command_line_parser().parse_args()
ARGS.subcommand(ARGS)
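# --- Usage sketch (editorial addition; the database names below are made up) ---
# The subcommands wired up above map onto invocations such as:
#   python testdb.py create test_hypercube_000000001            # loads DEFAULT_DBFILE
#   python testdb.py create test_hypercube_000000001 dump.sql   # loads a specific dump
#   python testdb.py save   test_hypercube_000000001 backup.sql
#   python testdb.py drop   test_hypercube_000000001
#   python testdb.py list
#   python testdb.py cleanup    # drops every database matching TESTDB_PATTERN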
| bsd-3-clause | 2,740,898,887,131,654,000 | 31.978723 | 86 | 0.655161 | false |
goodtune/vitriolic | touchtechnology/news/forms.py | 1 | 1627 | from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from modelforms.forms import ModelForm
from touchtechnology.common.forms.mixins import (
BootstrapFormControlMixin, SuperUserSlugMixin,
)
from touchtechnology.news.models import Article, Category, Translation
class ArticleForm(SuperUserSlugMixin, ModelForm):
def __init__(self, *args, **kwargs):
super(ArticleForm, self).__init__(*args, **kwargs)
if not self.fields["categories"].queryset.count():
self.fields.pop("categories", None)
self.fields["image"].required = getattr(
settings, "TOUCHTECHNOLOGY_NEWS_IMAGE_REQUIRED", True
)
class Meta:
model = Article
fields = (
"headline",
"image",
"abstract",
"copy",
"published",
"slug",
"slug_locked",
"byline",
"keywords",
"categories",
"is_active",
)
class CategoryForm(SuperUserSlugMixin, ModelForm):
class Meta:
model = Category
fields = (
"title",
"short_title",
"slug",
"slug_locked",
"is_active",
"hidden_from_navigation",
)
class TranslationForm(BootstrapFormControlMixin, ModelForm):
class Meta:
model = Translation
fields = (
"locale",
"headline",
"abstract",
"copy",
)
locale = forms.ChoiceField(choices=settings.LANGUAGES, label=_("Language"))
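# --- Illustrative sketch (editorial addition, not part of touchtechnology.news) ---
# A minimal example of binding data to TranslationForm; it assumes Django settings
# (including LANGUAGES and a database) are configured, as inside a running project.
def _translation_form_example():
    form = TranslationForm(data={
        "locale": settings.LANGUAGES[0][0],   # first configured language code
        "headline": "Headline in translation",
        "abstract": "Short abstract.",
        "copy": "Body copy.",
    })
    return form.is_valid(), form.errors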
| bsd-3-clause | 2,439,485,887,015,281,000 | 26.116667 | 79 | 0.564229 | false |
dylanh333/android-unmkbootimg | vendor/android-tools/toolbox/generate-input.h-labels.py | 4 | 2801 | #!/usr/bin/env python
#
# Copyright (C) 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=bad-indentation,bad-continuation
from __future__ import print_function
import os
import re
import sys
input_prop_list = []
ev_list = []
syn_list = []
key_list = []
rel_list = []
abs_list = []
sw_list = []
msc_list = []
led_list = []
rep_list = []
snd_list = []
mt_tool_list = []
ff_status_list = []
ff_list = []
r = re.compile(r'#define\s+(\S+)\s+((?:0x)?\d+)')
for arg in sys.argv[1:]:
with open(arg, 'r') as f:
for line in f:
m = r.match(line)
if m:
name = m.group(1)
if name.startswith("INPUT_PROP_"):
input_prop_list.append(name)
elif name.startswith("EV_"):
ev_list.append(name)
elif name.startswith("SYN_"):
syn_list.append(name)
elif name.startswith("KEY_") or name.startswith("BTN_"):
key_list.append(name)
elif name.startswith("REL_"):
rel_list.append(name)
elif name.startswith("ABS_"):
abs_list.append(name)
elif name.startswith("SW_"):
sw_list.append(name)
elif name.startswith("MSC_"):
msc_list.append(name)
elif name.startswith("LED_"):
led_list.append(name)
elif name.startswith("REP_"):
rep_list.append(name)
elif name.startswith("SND_"):
snd_list.append(name)
elif name.startswith("MT_TOOL_"):
mt_tool_list.append(name)
elif name.startswith("FF_STATUS_"):
ff_status_list.append(name)
elif name.startswith("FF_"):
ff_list.append(name)
def Dump(struct_name, values):
print('static struct label %s[] = {' % (struct_name))
for value in values:
print(' LABEL(%s),' % (value))
print(' LABEL_END,')
print('};')
Dump("input_prop_labels", input_prop_list)
Dump("ev_labels", ev_list)
Dump("syn_labels", syn_list)
Dump("key_labels", key_list)
Dump("rel_labels", rel_list)
Dump("abs_labels", abs_list)
Dump("sw_labels", sw_list)
Dump("msc_labels", msc_list)
Dump("led_labels", led_list)
Dump("rep_labels", rep_list)
Dump("snd_labels", snd_list)
Dump("mt_tool_labels", mt_tool_list)
Dump("ff_status_labels", ff_status_list)
Dump("ff_labels", ff_list)
| mit | -6,136,553,381,846,212,000 | 28.177083 | 74 | 0.622992 | false |