repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
bolkedebruin/airflow | airflow/contrib/hooks/qubole_check_hook.py | 1 | 1179 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.qubole.hooks.qubole_check`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.qubole.hooks.qubole_check import QuboleCheckHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.qubole.hooks.qubole_check`.",
DeprecationWarning, stacklevel=2
)
| apache-2.0 | 3,712,620,564,980,570,600 | 39.655172 | 91 | 0.765055 | false |
sivy/boxpub | boxpub/__init__.py | 1 | 6959 | # _______ _______ __ __ _______ __ __ _______
# | _ || || |_| || || | | || _ |
# | |_| || _ || || _ || | | || |_| |
# | || | | | | | | |_| || |_| || |
# | _ | | |_| | | | | ___|| || _ |
# | |_| || || _ || | | || |_| |
# |_______||_______||__| |__||___| |_______||_______|
#
# Copyright (c) 2014 Steve Ivy <[email protected]>
#
import logging
import imp
import sys
from datetime import datetime
import jinja2
import markdown
import re
import requests
import dropbox
import json
from dropbox import client, session
# from dropbox.rest import ErrorResponse
from werkzeug.routing import BaseConverter
from flask import Flask, request, make_response
from postutils import split_markdown, process_markdown
CONFIG_FILE = '/etc/boxpub/config.py'
def load_config(config_file):
try:
config = imp.load_source('config', config_file)
return config
except IOError:
        logging.critical('Could not load config at %s.' % config_file)
sys.exit(1)
CONFIG = load_config(CONFIG_FILE)
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(module)s.%(funcName)s (%(lineno)d) %(levelname)s: %(message)s'
)
log = logging.getLogger('boxpub')
formatter = logging.Formatter(
    '%(asctime)s %(module)s.%(funcName)s (%(lineno)d) %(levelname)s: %(message)s')
fh = logging.FileHandler(CONFIG.LOGFILE)
fh.setLevel(getattr(logging, CONFIG.LOGLEVEL.upper()))
fh.setFormatter(formatter)
log.addHandler(fh)
boxpub = Flask('boxpub')
boxpub.debug = True
def render_template(template_string, context):
template_globals = {
'HOST': request.host,
# 'PAGE_URL_FULL': request.path_url,
'QUERY_STRING': request.query_string,
'URL': request.url,
'PATH': request.path,
'settings': CONFIG,
'config': CONFIG,
'site': CONFIG.SITE_DATA,
'time': datetime.now(),
}
template_globals.update(context)
jinja_environment = jinja2.Environment(
extensions=['jinja2.ext.autoescape'])
template = jinja_environment.from_string(template_string)
resp_body = template.render(template_globals)
return resp_body
def render_file_with_template(target_file, target_template):
"""
"""
client = dropbox.client.DropboxClient(CONFIG.DROPBOX_PRIVATE_TOKEN)
file_response, dropbox_meta = client.get_file_and_metadata(
target_file)
file_content = file_response.read()
f = process_markdown(
target_file, file_content)
log.debug(f)
if 'meta' in f:
fmeta = f['meta']
fmeta.update(dropbox_meta)
f['meta'] = fmeta
else:
f['meta'] = dropbox_meta
# data['published'] = data['modified']
# data['created'] = data['modified']
# merge 'meta' values with other values
f.update(f['meta'])
# fix title
if 'Title' in f:
f['title'] = f['Title']
# permalink
f['permalink'] = url_for_path(f['path'])
template_response, meta = client.get_file_and_metadata(
'templates/%s' % target_template)
template_content = template_response.read()
page_content = render_template(template_content, {
'page': f,
'post': f,
})
return page_content
def url_for_path(path):
if 'posts' in path:
year, month, day, filename = re.match(
'/posts/([\d]{4})-([\d]{2})-([\d]{2})-([\w-]+)\.md',
path).groups()
return "/%s/%s/%s/%s" % (year, month, day, filename)
elif 'page' in path:
filename = re.match(
'/pages/([\w-]+)\.md',
path).group(1)
return "/pages/%s" % filename
class RegexConverter(BaseConverter):
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
boxpub.url_map.converters['regex'] = RegexConverter
############################################################
# web handlers
#
@boxpub.route('/webhooks/dropbox', methods=['GET'])
def dropbox_webhook_verify():
log.info('Dropbox verification request')
return request.args.get('challenge')
@boxpub.route('/webhooks/dropbox', methods=['POST'])
def dropbox_webhook_handle():
log.info('Dropbox post request')
url = CONFIG.SITE_DATA['url']
try:
log.info('PURGING site index')
purge_resp = requests.request('PURGE', url)
log.debug(purge_resp.text)
resp = make_response(purge_resp.text)
    except Exception as e:
        log.exception(e)
        resp = make_response("ERR: " + str(e))
log.debug(resp)
return resp
@boxpub.route('/')
def blog_index_handle(template='index.html', content_type='text/html'):
log.debug('blog_index_handle()')
target_file = "posts"
client = dropbox.client.DropboxClient(CONFIG.DROPBOX_PRIVATE_TOKEN)
dropbox_response = client.metadata(
target_file, list=True)
files = dropbox_response['contents']
files = sorted(
files,
key=lambda f: f['path'],
reverse=True)
files = files[:10]
log.debug(files)
for f in files:
log.debug(f['path'])
file_response, dropbox_meta = client.get_file_and_metadata(
f['path'])
f.update(dropbox_meta)
log.debug(f['path'])
file_content = file_response.read()
fdata = process_markdown(
target_file, file_content)
log.debug(fdata)
f.update(fdata)
log.debug(f['path'])
# fix title
f.update(f['meta'])
if 'Title' in f:
f['title'] = f['Title']
# permalink
f['permalink'] = url_for_path(f['path'])
log.debug(f)
# log.debug(files)
template_response, meta = client.get_file_and_metadata(
'templates/%s' % template)
template_content = template_response.read()
page_content = render_template(template_content, {
'posts': files,
})
resp = make_response(page_content)
resp.headers["Content-Type"] = content_type
return resp
@boxpub.route('/atom.xml')
def blog_feed_handle(template='atom.xml'):
return blog_index_handle(template, 'application/atom+xml')
@boxpub.route('/page/<page>')
def blog_page_handle(page, template='post.html'):
log.debug('blog_page_handle()')
target_file = "/pages/%s.md" % (page)
page_content = render_file_with_template(target_file, template)
return page_content
@boxpub.route('/<regex("[\d]{4}"):year>/<regex("[\d]{2}"):month>/<regex("[\d]{2}"):day>/<filename>')
def blog_post_handle(year, month, day, filename, template='post.html'):
log.debug('blog_post_handle()')
log.info('Dropbox post request')
target_file = "/posts/%s-%s-%s-%s.md" % (year, month, day, filename)
page_content = render_file_with_template(target_file, template)
return page_content
if __name__ == "__main__":
boxpub.run(host='0.0.0.0')
| mpl-2.0 | 7,306,741,248,592,084,000 | 24.39781 | 100 | 0.574652 | false |
data-refinery/data_refinery | workers/data_refinery_workers/processors/test_smasher.py | 1 | 49277 | import csv
import json
import os
import shutil
import sys
import zipfile
from io import StringIO
from django.core.management import call_command
from django.test import TestCase, tag
from data_refinery_common.models import (
SurveyJob,
ProcessorJob,
OriginalFile,
ProcessorJobOriginalFileAssociation,
ComputationalResult,
ComputedFile,
Experiment,
Organism,
Sample,
SampleAnnotation,
SampleResultAssociation,
ExperimentSampleAssociation,
Dataset,
ProcessorJobDatasetAssociation,
SampleComputedFileAssociation,
ComputationalResultAnnotation
)
from data_refinery_workers.processors import smasher
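# Test fixture: builds one GSE51081 experiment with two samples that share
# smashable PCL computed files, plus a Dataset and a ProcessorJob wired
# together for the smasher to consume.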
def prepare_job():
pj = ProcessorJob()
pj.pipeline_applied = "SMASHER"
pj.save()
experiment = Experiment()
experiment.accession_code = "GSE51081"
experiment.save()
result = ComputationalResult()
result.save()
homo_sapiens = Organism.get_object_for_name("HOMO_SAPIENS")
sample = Sample()
sample.accession_code = 'GSM1237810'
sample.title = 'GSM1237810'
sample.organism = homo_sapiens
sample.save()
sample_annotation = SampleAnnotation()
sample_annotation.data = {'hi': 'friend'}
sample_annotation.sample = sample
sample_annotation.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1237810_T09-1084.PCL"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sample = Sample()
sample.accession_code = 'GSM1237812'
sample.title = 'GSM1237812'
sample.organism = homo_sapiens
sample.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1237812_S97-PURE.PCL"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1237812_S97-PURE.DAT"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = False
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE51081': ['GSM1237810', 'GSM1237812']}
ds.aggregate_by = 'EXPERIMENT' # [ALL or SPECIES or EXPERIMENT]
ds.scale_by = 'STANDARD' # [NONE or MINMAX or STANDARD or ROBUST]
ds.email_address = "[email protected]"
#ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = pj
pjda.dataset = ds
pjda.save()
return pj
class SmasherTestCase(TestCase):
@tag("smasher")
def test_smasher(self):
""" Main tester. """
job = prepare_job()
anno_samp = Sample.objects.get(accession_code='GSM1237810')
self.assertTrue('hi' in anno_samp.to_metadata_dict()['refinebio_annotations'][0].keys())
relations = ProcessorJobDatasetAssociation.objects.filter(processor_job=job)
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
job_context_check = {}
job_context_check["dataset"] = dataset
job_context_check["samples"] = dataset.get_samples()
job_context_check["experiments"] = dataset.get_experiments()
self.assertEqual(len(job_context_check['samples']), 2)
self.assertEqual(len(job_context_check['experiments']), 1)
# Smoke test while we're here..
dataset.get_samples_by_experiment()
dataset.get_samples_by_species()
dataset.get_aggregated_samples()
# XXX: agg_type 'SPECIES' hangs on Linux, not OSX.
# Don't know why yet.
# for ag_type in ['ALL', 'EXPERIMENT', 'SPECIES']:
for ag_type in ['ALL', 'EXPERIMENT']:
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.aggregate_by = ag_type
dataset.save()
print ("Smashing " + ag_type)
final_context = smasher.smash(job.pk, upload=False)
# Make sure the file exists and is a valid size
self.assertNotEqual(os.path.getsize(final_context['output_file']), 0)
self.assertEqual(final_context['dataset'].is_processed, True)
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.is_processed = False
dataset.save()
# Cleanup
os.remove(final_context['output_file'])
job.start_time = None
job.end_time = None
job.save()
for scale_type in ['NONE', 'MINMAX', 'STANDARD', 'ROBUST']:
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.aggregate_by = 'EXPERIMENT'
dataset.scale_by = scale_type
dataset.save()
print ("Smashing " + scale_type)
final_context = smasher.smash(job.pk, upload=False)
# Make sure the file exists and is a valid size
self.assertNotEqual(os.path.getsize(final_context['output_file']), 0)
self.assertEqual(final_context['dataset'].is_processed, True)
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.is_processed = False
dataset.save()
# Cleanup
os.remove(final_context['output_file'])
job.start_time = None
job.end_time = None
job.save()
# Stats
for scale_type in ['MINMAX', 'STANDARD']:
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.aggregate_by = 'EXPERIMENT'
dataset.scale_by = scale_type
dataset.save()
print("###")
print("# " + scale_type)
print('###')
final_context = smasher.smash(job.pk, upload=False)
final_frame = final_context['final_frame']
# Sanity test that these frames can be computed upon
final_frame.mean(axis=1)
final_frame.min(axis=1)
final_frame.max(axis=1)
final_frame.std(axis=1)
final_frame.median(axis=1)
zf = zipfile.ZipFile(final_context['output_file'])
namelist = zf.namelist()
self.assertFalse(True in final_frame.index.str.contains('AFFX-'))
self.assertTrue('GSE51081/metadata_GSE51081.tsv' in namelist)
self.assertTrue('aggregated_metadata.json' in namelist)
self.assertTrue('README.md' in namelist)
self.assertTrue('LICENSE.TXT' in namelist)
self.assertTrue('GSE51081/GSE51081.tsv' in namelist)
os.remove(final_context['output_file'])
job.start_time = None
job.end_time = None
job.save()
for scale_type in ['MINMAX', 'STANDARD']:
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.aggregate_by = 'SPECIES'
dataset.scale_by = scale_type
dataset.save()
print("###")
print("# " + scale_type)
print('###')
final_context = smasher.smash(job.pk, upload=False)
final_frame = final_context['final_frame']
# Sanity test that these frames can be computed upon
final_frame.mean(axis=1)
final_frame.min(axis=1)
final_frame.max(axis=1)
final_frame.std(axis=1)
final_frame.median(axis=1)
zf = zipfile.ZipFile(final_context['output_file'])
namelist = zf.namelist()
self.assertTrue('HOMO_SAPIENS/metadata_HOMO_SAPIENS.tsv' in namelist)
self.assertTrue('aggregated_metadata.json' in namelist)
self.assertTrue('README.md' in namelist)
self.assertTrue('LICENSE.TXT' in namelist)
self.assertTrue('HOMO_SAPIENS/HOMO_SAPIENS.tsv' in namelist)
os.remove(final_context['output_file'])
job.start_time = None
job.end_time = None
job.save()
for scale_type in ['MINMAX', 'STANDARD']:
dataset = Dataset.objects.filter(id__in=relations.values('dataset_id')).first()
dataset.aggregate_by = 'ALL'
dataset.scale_by = scale_type
dataset.save()
print("###")
print("# " + scale_type)
print('###')
final_context = smasher.smash(job.pk, upload=False)
final_frame = final_context['final_frame']
# Sanity test that these frames can be computed upon
final_frame.mean(axis=1)
final_frame.min(axis=1)
final_frame.max(axis=1)
final_frame.std(axis=1)
final_frame.median(axis=1)
zf = zipfile.ZipFile(final_context['output_file'])
namelist = zf.namelist()
self.assertTrue('ALL/metadata_ALL.tsv' in namelist)
self.assertTrue('aggregated_metadata.json' in namelist)
self.assertTrue('README.md' in namelist)
self.assertTrue('LICENSE.TXT' in namelist)
self.assertTrue('ALL/ALL.tsv' in namelist)
os.remove(final_context['output_file'])
job.start_time = None
job.end_time = None
job.save()
@tag("smasher")
def test_get_results(self):
""" Test our ability to collect the appropriate samples. """
sample = Sample()
sample.accession_code = 'GSM45588'
sample.save()
result = ComputationalResult()
result.save()
computed_file1 = ComputedFile()
computed_file1.filename = "oh_boy.txt"
computed_file1.result = result
computed_file1.size_in_bytes = 123
computed_file1.is_smashable = True
computed_file1.save()
computed_file2 = ComputedFile()
computed_file2.filename = "gee_whiz.bmp"
computed_file2.result = result
computed_file2.size_in_bytes = 123
computed_file2.is_smashable = False
computed_file2.save()
assoc = SampleResultAssociation()
assoc.sample = sample
assoc.result = result
assoc.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file1
assoc.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file2
assoc.save()
computed_files = sample.get_result_files()
self.assertEqual(computed_files.count(), 2)
@tag("smasher")
def test_fail(self):
""" Test our ability to fail """
result = ComputationalResult()
result.save()
sample = Sample()
sample.accession_code = 'XXX'
sample.title = 'XXX'
sample.organism = Organism.get_object_for_name("HOMO_SAPIENS")
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
computed_file = ComputedFile()
computed_file.filename = "NOT_REAL.PCL"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE51081': ['XXX']}
ds.aggregate_by = 'EXPERIMENT'
ds.scale_by = 'MINMAX'
ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
dsid = ds.id
job = ProcessorJob()
job.pipeline_applied = "SMASHER"
job.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = ds
pjda.save()
final_context = smasher.smash(job.pk, upload=False)
ds = Dataset.objects.get(id=dsid)
print(ds.failure_reason)
print(final_context['dataset'].failure_reason)
self.assertNotEqual(final_context['unsmashable_files'], [])
@tag("smasher")
def test_no_smash_all_diff_species(self):
""" Smashing together with 'ALL' with different species is a really weird behavior.
This test isn't really testing a normal case, just make sure that it's marking the
unsmashable files.
"""
job = ProcessorJob()
job.pipeline_applied = "SMASHER"
job.save()
experiment = Experiment()
experiment.accession_code = "GSE51081"
experiment.save()
result = ComputationalResult()
result.save()
homo_sapiens = Organism.get_object_for_name("HOMO_SAPIENS")
sample = Sample()
sample.accession_code = 'GSM1237810'
sample.title = 'GSM1237810'
sample.organism = homo_sapiens
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1237810_T09-1084.PCL"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
result = ComputationalResult()
result.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
experiment = Experiment()
experiment.accession_code = "GSE51084"
experiment.save()
mus_mus = Organism.get_object_for_name("MUS_MUSCULUS")
sample = Sample()
sample.accession_code = 'GSM1238108'
sample.title = 'GSM1238108'
sample.organism = homo_sapiens
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1238108-tbl-1.txt"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE51081': ['GSM1237810'], 'GSE51084': ['GSM1238108']}
ds.aggregate_by = 'ALL'
ds.scale_by = 'STANDARD'
ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = ds
pjda.save()
final_context = smasher.smash(job.pk, upload=False)
dsid = ds.id
ds = Dataset.objects.get(id=dsid)
print(ds.failure_reason)
print(final_context['dataset'].failure_reason)
self.assertEqual(final_context['unsmashable_files'], ['GSM1238108'])
@tag("smasher")
def test_no_smash_dupe(self):
""" """
job = ProcessorJob()
job.pipeline_applied = "SMASHER"
job.save()
experiment = Experiment()
experiment.accession_code = "GSE51081"
experiment.save()
result = ComputationalResult()
result.save()
homo_sapiens = Organism.get_object_for_name("HOMO_SAPIENS")
sample = Sample()
sample.accession_code = 'GSM1237810'
sample.title = 'GSM1237810'
sample.organism = homo_sapiens
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1237810_T09-1084.PCL"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
result = ComputationalResult()
result.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sample = Sample()
sample.accession_code = 'GSM1237811'
sample.title = 'GSM1237811'
sample.organism = homo_sapiens
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
result = ComputationalResult()
result.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE51081': ['GSM1237810', 'GSM1237811']}
ds.aggregate_by = 'ALL'
ds.scale_by = 'STANDARD'
ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = ds
pjda.save()
final_context = smasher.smash(job.pk, upload=False)
dsid = ds.id
ds = Dataset.objects.get(id=dsid)
self.assertTrue(ds.success)
for column in final_context['original_merged'].columns:
self.assertTrue('_x' not in column)
@tag("smasher")
def test_no_smash_dupe_two(self):
""" Tests the SRP051449 case, where the titles collide. Also uses a real QN target file."""
job = ProcessorJob()
job.pipeline_applied = "SMASHER"
job.save()
experiment = Experiment()
experiment.accession_code = "SRP051449"
experiment.save()
result = ComputationalResult()
result.save()
danio_rerio = Organism.get_object_for_name("DANIO_RERIO")
sample = Sample()
sample.accession_code = 'SRR1731761'
sample.title = 'Danio rerio'
sample.organism = danio_rerio
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "SRR1731761_output_gene_lengthScaledTPM.tsv"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
result = ComputationalResult()
result.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sample = Sample()
sample.accession_code = 'SRR1731762'
sample.title = 'Danio rerio'
sample.organism = danio_rerio
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "SRR1731762_output_gene_lengthScaledTPM.tsv"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
result = ComputationalResult()
result.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'SRP051449': ['SRR1731761', 'SRR1731762']}
ds.aggregate_by = 'SPECIES'
ds.scale_by = 'NONE'
ds.email_address = "[email protected]"
ds.quantile_normalize = True
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = job
pjda.dataset = ds
pjda.save()
cr = ComputationalResult()
cr.save()
computed_file = ComputedFile()
computed_file.filename = "danio_target.tsv"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = cr
computed_file.size_in_bytes = 123
computed_file.is_smashable = False
computed_file.save()
cra = ComputationalResultAnnotation()
cra.data = {'organism_id': danio_rerio.id, 'is_qn': True}
cra.result = cr
cra.save()
final_context = smasher.smash(job.pk, upload=False)
self.assertTrue(final_context['success'])
@tag("smasher")
def test_log2(self):
pj = ProcessorJob()
pj.pipeline_applied = "SMASHER"
pj.save()
# Has non-log2 data:
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE44421
# ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE44nnn/GSE44421/miniml/GSE44421_family.xml.tgz
experiment = Experiment()
experiment.accession_code = "GSE44421"
experiment.save()
result = ComputationalResult()
result.save()
homo_sapiens = Organism.get_object_for_name("HOMO_SAPIENS")
sample = Sample()
sample.accession_code = 'GSM1084806'
sample.title = 'GSM1084806'
sample.organism = homo_sapiens
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1084806-tbl-1.txt"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sample = Sample()
sample.accession_code = 'GSM1084807'
sample.title = 'GSM1084807'
sample.organism = homo_sapiens
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1084807-tbl-1.txt"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE44421': ['GSM1084806', 'GSM1084807']}
ds.aggregate_by = 'EXPERIMENT'
ds.scale_by = 'MINMAX'
ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = pj
pjda.dataset = ds
pjda.save()
final_context = smasher.smash(pj.pk, upload=False)
ds = Dataset.objects.get(id=ds.id)
self.assertTrue(final_context['success'])
@tag("smasher")
def test_dualtech_smash(self):
""" """
pj = ProcessorJob()
pj.pipeline_applied = "SMASHER"
pj.save()
# MICROARRAY TECH
experiment = Experiment()
experiment.accession_code = "GSE1487313"
experiment.save()
result = ComputationalResult()
result.save()
gallus_gallus = Organism.get_object_for_name("GALLUS_GALLUS")
sample = Sample()
sample.accession_code = 'GSM1487313'
sample.title = 'GSM1487313'
sample.organism = gallus_gallus
sample.technology="MICROARRAY"
sample.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "GSM1487313_liver.PCL"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
# RNASEQ TECH
experiment2 = Experiment()
experiment2.accession_code = "SRS332914"
experiment2.save()
result2 = ComputationalResult()
result2.save()
sample2 = Sample()
sample2.accession_code = 'SRS332914'
sample2.title = 'SRS332914'
sample2.organism = gallus_gallus
sample2.technology = "RNA-SEQ"
sample2.save()
sra2 = SampleResultAssociation()
sra2.sample = sample2
sra2.result = result2
sra2.save()
esa2 = ExperimentSampleAssociation()
esa2.experiment = experiment2
esa2.sample = sample2
esa2.save()
computed_file2 = ComputedFile()
computed_file2.filename = "SRP149598_gene_lengthScaledTPM.tsv"
computed_file2.absolute_file_path = "/home/user/data_store/PCL/" + computed_file2.filename
computed_file2.result = result2
computed_file2.size_in_bytes = 234
computed_file2.is_smashable = True
computed_file2.save()
assoc2 = SampleComputedFileAssociation()
assoc2.sample = sample2
assoc2.computed_file = computed_file2
assoc2.save()
# CROSS-SMASH BY SPECIES
ds = Dataset()
ds.data = {'GSE1487313': ['GSM1487313'], 'SRX332914': ['SRS332914']}
ds.aggregate_by = 'SPECIES'
ds.scale_by = 'STANDARD'
ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = pj
pjda.dataset = ds
pjda.save()
self.assertTrue(ds.is_cross_technology())
final_context = smasher.smash(pj.pk, upload=False)
self.assertTrue(os.path.exists(final_context['output_file']))
os.remove(final_context['output_file'])
self.assertEqual(len(final_context['final_frame'].columns), 2)
# THEN BY EXPERIMENT
ds.aggregate_by = 'EXPERIMENT'
ds.save()
dsid = ds.id
ds = Dataset.objects.get(id=dsid)
pj.start_time = None
pj.end_time = None
pj.save()
final_context = smasher.smash(pj.pk, upload=False)
self.assertTrue(os.path.exists(final_context['output_file']))
os.remove(final_context['output_file'])
self.assertEqual(len(final_context['final_frame'].columns), 1)
# THEN BY ALL
ds.aggregate_by = 'ALL'
ds.save()
dsid = ds.id
ds = Dataset.objects.get(id=dsid)
pj.start_time = None
pj.end_time = None
pj.save()
final_context = smasher.smash(pj.pk, upload=False)
self.assertTrue(os.path.exists(final_context['output_file']))
self.assertEqual(len(final_context['final_frame'].columns), 2)
@tag("smasher")
def test_sanity_imports(self):
""" Sci imports can be tricky, make sure this works. """
import numpy
import scipy
import matplotlib
import pandas
import sklearn
import sympy
@tag("smasher")
def test_get_synced_files(self):
""" """
result = ComputationalResult()
result.save()
computed_file = ComputedFile()
computed_file.s3_key = "all_the_things.jpg"
computed_file.s3_bucket = "data-refinery-test-assets"
computed_file.filename = "all_the_things.jpg"
computed_file.absolute_file_path = "/home/user/data_store/PCL/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 9001
computed_file.is_smashable = False
computed_file.sha1 = "36cf21c08d461f74ddb0f2edb6257afee309c4a4"
computed_file.save()
# Make sure it's not there
try:
os.remove("/home/user/data_store/PCL/" + computed_file.filename)
except OSError:
pass
# We do this twice, once to get from S3 and once to get from local disk.
afp = computed_file.get_synced_file_path(force=True)
self.assertTrue(os.path.exists(afp))
afp = computed_file.get_synced_file_path(force=True)
self.assertTrue(os.path.exists(afp))
@tag("smasher")
def test_notify(self):
ds = Dataset()
ds.data = {'GSM1487313': ['GSM1487313'], 'SRS332914': ['SRS332914']}
ds.aggregate_by = 'SPECIES'
ds.scale_by = 'STANDARD'
ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pj = ProcessorJob()
pj.pipeline_applied = "SMASHER"
pj.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = pj
pjda.dataset = ds
pjda.save()
job_context = {}
job_context['job'] = pj
job_context['dataset'] = ds
job_context['upload'] = True
job_context['result_url'] = 'https://s3.amazonaws.com/data-refinery-test-assets/all_the_things.jpg'
final_context = smasher._notify(job_context)
self.assertTrue(final_context.get('success', True))
class CompendiaTestCase(TestCase):
"""Testing management commands are hard. Since there is always an explicit
sys.exit (which is really an Exception), we have to do weird stdio rerouting
to capture the result. Really, these are just sanity tests.
"""
@tag("smasher")
def test_call_create(self):
old_stderr = sys.stderr
old_stdout = sys.stdout
csio_err = StringIO()
csio_out = StringIO()
sys.stderr = csio_err
sys.stdout = csio_out
self.assertRaises(BaseException, call_command, 'create_compendia')
sys.stderr = old_stderr
sys.stdout = old_stdout
@tag("smasher")
def test_fetch_create(self):
old_stderr = sys.stderr
old_stdout = sys.stdout
csio_err = StringIO()
csio_out = StringIO()
sys.stderr = csio_err
sys.stdout = csio_out
self.assertRaises(BaseException, call_command, 'fetch_compendia')
sys.stderr = old_stderr
sys.stdout = old_stdout
class AggregationTestCase(TestCase):
"""Test the tsv file generation."""
def setUp(self):
self.metadata = {
'experiments': {
"E-GEOD-44719": {
"accession_code": "E-GEOD-44719",
"sample_titles": [ "IFNa DC_LB016_IFNa", "undefined_sample" ]
}
},
'samples': {
"IFNa DC_LB016_IFNa": { # Sample #1 is an ArrayExpress sample
"refinebio_title": "IFNa DC_LB016_IFNa",
"refinebio_accession_code": "E-GEOD-44719-GSM1089311",
"refinebio_source_database": "ARRAY_EXPRESS",
"refinebio_organism": "fake_species",
############# Annotations will be de-composed. #############
"refinebio_annotations": [
# annotation #1
{
"detected_platform": "illuminaHumanv3",
"detection_percentage": 98.44078,
"mapped_percentage": 100.0
},
# annotation #2
{
"assay": { "name": "GSM1089311" },
# Special field that will be taken out as separate columns
"characteristic": [
{ "category": "cell population",
"value": "IFNa DC"
},
{ "category": "dose", # also available in "variable"
"value": "1 mL"
},
{ "category": "donor id",
"value": "LB016"
}
],
# Another special field in Array Express sample
"variable": [
{ "name": "dose", # also available in "characteristic"
"value": "1 mL"
},
{ "name": "stimulation",
"value": "IFNa"
}
],
# "source" field in Array Express sample annotation will be
# skipped in tsv file.
'source': {
'name': 'GSM1288968 1',
'comment': [
{ 'name': 'Sample_source_name',
'value': 'pineal glands at CT18, after light exposure'
},
{ 'name': 'Sample_title',
'value': 'Pineal_Light_CT18'
}
]
},
# For single-key object whose key is "name",
# the key will be ignored in tsv file.
"extract": { "name": "GSM1089311 extract 1" }
}
] # end of annotations
}, # end of sample #1
"Bone.Marrow_OA_No_ST03": { # Sample #2 is a GEO sample
"refinebio_title": "Bone.Marrow_OA_No_ST03",
"refinebio_accession_code": "GSM1361050",
"refinebio_source_database": "GEO",
"refinebio_organism": "homo_sapiens",
"refinebio_annotations": [
{
"channel_count": [ "1" ],
# Special field that will be taken out as separate columns
"characteristics_ch1": [
"tissue: Bone Marrow",
"disease: OA",
"serum: Low Serum"
],
# For single-element array, the element will
# be saved directly in tsv file.
"contact_address": [ "Crown Street" ],
"contact_country": [ "United Kingdom" ],
"data_processing": [ "Data was processed and normalized" ],
"geo_accession": [ "GSM1361050" ],
}
] # end of annotations
} # end of sample #2
} # end of "samples"
}
self.smash_path = "/tmp/"
@tag("smasher")
def test_columns(self):
columns = smasher._get_tsv_columns(self.metadata['samples'])
self.assertEqual(len(columns), 21)
self.assertEqual(columns[0], 'refinebio_accession_code')
self.assertTrue('refinebio_accession_code' in columns)
self.assertTrue('cell population' in columns)
self.assertTrue('dose' in columns)
self.assertTrue('stimulation' in columns)
self.assertTrue('serum' in columns)
@tag("smasher")
def test_all_samples(self):
"""Check tsv file that includes all sample metadata."""
job_context = {
'dataset': Dataset.objects.create(aggregate_by='ALL')
}
smasher._write_tsv_json(job_context, self.metadata, self.smash_path)
tsv_filename = self.smash_path + "ALL/metadata_ALL.tsv"
self.assertTrue(os.path.isfile(tsv_filename))
with open(tsv_filename) as tsv_file:
reader = csv.DictReader(tsv_file, delimiter='\t')
for row_num, row in enumerate(reader):
if row['refinebio_accession_code'] == 'E-GEOD-44719-GSM1089311':
self.assertEqual(row['cell population'], 'IFNa DC') # ArrayExpress specific
self.assertEqual(row['dose'], '1 mL') # ArrayExpress specific
self.assertFalse('source' in row) # ArrayExpress specific
self.assertEqual(row['detection_percentage'], '98.44078')
self.assertEqual(row["extract"], "GSM1089311 extract 1")
elif row['refinebio_accession_code'] == 'GSM1361050':
self.assertEqual(row['tissue'], 'Bone Marrow') # GEO specific
self.assertEqual(row['refinebio_organism'], 'homo_sapiens')
self.assertEqual(row["contact_address"], "Crown Street")
self.assertEqual(row_num, 1) # only two data rows in tsv file
os.remove(tsv_filename)
@tag("smasher")
def test_experiment(self):
"""Check tsv file that is aggregated by experiment."""
job_context = {
'dataset': Dataset.objects.create(aggregate_by='EXPERIMENT')
}
smasher._write_tsv_json(job_context, self.metadata, self.smash_path)
tsv_filename = self.smash_path + "E-GEOD-44719/metadata_E-GEOD-44719.tsv"
self.assertTrue(os.path.isfile(tsv_filename))
with open(tsv_filename) as tsv_file:
reader = csv.DictReader(tsv_file, delimiter='\t')
for row_num, row in enumerate(reader):
self.assertEqual(row['refinebio_accession_code'], 'E-GEOD-44719-GSM1089311')
self.assertEqual(row['cell population'], 'IFNa DC') # ArrayExpress specific
self.assertEqual(row['dose'], '1 mL') # ArrayExpress specific
self.assertEqual(row['detection_percentage'], '98.44078')
self.assertEqual(row_num, 0) # only one data row in tsv file
os.remove(tsv_filename)
@tag("smasher")
def test_species(self):
"""Check tsv file that is aggregated by species."""
job_context = {
'dataset': Dataset.objects.create(aggregate_by='SPECIES'),
'input_files': {
'homo_sapiens': [], # only the key matters in this test
'fake_species': [] # only the key matters in this test
}
}
# Generate two TSV files, one should include only "GSM1361050",
# and the other should include only "E-GEOD-44719-GSM1089311".
smasher._write_tsv_json(job_context, self.metadata, self.smash_path)
# Test tsv file of "homo_sapiens"
tsv_filename = self.smash_path + "homo_sapiens/metadata_homo_sapiens.tsv"
self.assertTrue(os.path.isfile(tsv_filename))
with open(tsv_filename) as tsv_file:
reader = csv.DictReader(tsv_file, delimiter='\t')
for row_num, row in enumerate(reader):
self.assertEqual(row['refinebio_accession_code'], 'GSM1361050')
self.assertEqual(row['tissue'], 'Bone Marrow') # GEO specific
self.assertEqual(row['refinebio_organism'], 'homo_sapiens')
self.assertEqual(row_num, 0) # only one data row in tsv file
os.remove(tsv_filename)
# Test json file of "homo_sapiens"
json_filename = self.smash_path + "homo_sapiens/metadata_homo_sapiens.json"
self.assertTrue(os.path.isfile(json_filename))
with open(json_filename) as json_fp:
species_metadada = json.load(json_fp)
self.assertEqual(species_metadada['species'], 'homo_sapiens')
self.assertEqual(len(species_metadada['samples']), 1)
self.assertEqual(species_metadada['samples'][0]['refinebio_accession_code'],
'GSM1361050')
#os.remove(json_filename)
# Test tsv file of "fake_species"
tsv_filename = self.smash_path + "fake_species/metadata_fake_species.tsv"
self.assertTrue(os.path.isfile(tsv_filename))
with open(tsv_filename) as tsv_file:
reader = csv.DictReader(tsv_file, delimiter='\t')
for row_num, row in enumerate(reader):
self.assertEqual(row['refinebio_accession_code'], 'E-GEOD-44719-GSM1089311')
self.assertEqual(row['cell population'], 'IFNa DC') # ArrayExpress specific
self.assertEqual(row['dose'], '1 mL') # ArrayExpress specific
self.assertEqual(row['detection_percentage'], '98.44078')
self.assertEqual(row_num, 0) # only one data row in tsv file
os.remove(tsv_filename)
# Test json file of "fake_species"
json_filename = self.smash_path + "fake_species/metadata_fake_species.json"
self.assertTrue(os.path.isfile(json_filename))
with open(json_filename) as json_fp:
species_metadada = json.load(json_fp)
self.assertEqual(species_metadada['species'], 'fake_species')
self.assertEqual(len(species_metadada['samples']), 1)
self.assertEqual(species_metadada['samples'][0]['refinebio_accession_code'],
'E-GEOD-44719-GSM1089311')
os.remove(json_filename)
@tag("smasher")
def test_bad_overlap(self):
pj = ProcessorJob()
pj.pipeline_applied = "SMASHER"
pj.save()
experiment = Experiment()
experiment.accession_code = "GSE51081"
experiment.save()
result = ComputationalResult()
result.save()
homo_sapiens = Organism.get_object_for_name("HOMO_SAPIENS")
sample = Sample()
sample.accession_code = 'GSM1237810'
sample.title = 'GSM1237810'
sample.organism = homo_sapiens
sample.save()
sample_annotation = SampleAnnotation()
sample_annotation.data = {'hi': 'friend'}
sample_annotation.sample = sample
sample_annotation.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
computed_file = ComputedFile()
computed_file.filename = "big.PCL"
computed_file.absolute_file_path = "/home/user/data_store/BADSMASH/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sample = Sample()
sample.accession_code = 'GSM1237812'
sample.title = 'GSM1237812'
sample.organism = homo_sapiens
sample.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
computed_file = ComputedFile()
computed_file.filename = "small.PCL"
computed_file.absolute_file_path = "/home/user/data_store/BADSMASH/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE51081': ['GSM1237810', 'GSM1237812']}
ds.aggregate_by = 'ALL' # [ALL or SPECIES or EXPERIMENT]
ds.scale_by = 'NONE' # [NONE or MINMAX or STANDARD or ROBUST]
ds.email_address = "[email protected]"
#ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = pj
pjda.dataset = ds
pjda.save()
final_context = smasher.smash(pj.pk, upload=False)
ds = Dataset.objects.get(id=ds.id)
pj = ProcessorJob()
pj.pipeline_applied = "SMASHER"
pj.save()
# Now, make sure the bad can't zero this out.
sample = Sample()
sample.accession_code = 'GSM999'
sample.title = 'GSM999'
sample.organism = homo_sapiens
sample.save()
esa = ExperimentSampleAssociation()
esa.experiment = experiment
esa.sample = sample
esa.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
sra = SampleResultAssociation()
sra.sample = sample
sra.result = result
sra.save()
computed_file = ComputedFile()
computed_file.filename = "bad.PCL"
computed_file.absolute_file_path = "/home/user/data_store/BADSMASH/" + computed_file.filename
computed_file.result = result
computed_file.size_in_bytes = 123
computed_file.is_smashable = True
computed_file.save()
assoc = SampleComputedFileAssociation()
assoc.sample = sample
assoc.computed_file = computed_file
assoc.save()
ds = Dataset()
ds.data = {'GSE51081': ['GSM1237810', 'GSM1237812', 'GSM999']}
ds.aggregate_by = 'ALL' # [ALL or SPECIES or EXPERIMENT]
ds.scale_by = 'NONE' # [NONE or MINMAX or STANDARD or ROBUST]
ds.email_address = "[email protected]"
#ds.email_address = "[email protected]"
ds.quantile_normalize = False
ds.save()
pjda = ProcessorJobDatasetAssociation()
pjda.processor_job = pj
pjda.dataset = ds
pjda.save()
final_context = smasher.smash(pj.pk, upload=False)
ds = Dataset.objects.get(id=ds.id)
self.assertEqual(len(final_context['final_frame']), 4)
| bsd-3-clause | 7,144,830,882,378,763,000 | 33.054596 | 107 | 0.577734 | false |
pwyliu/clancy | clancy/engage.py | 1 | 1892 | import getpass
from .utils import goodquit_json, read_file
from .redoctober import api_call
def engage(args, password):
"""
Construct payloads and POST to Red October
"""
if args['create']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('create', args, payload))
elif args['delegate']:
payload = {
'Name': args['--user'], 'Password': password,
'Time': args['--time'], 'Uses': args['--uses']
}
goodquit_json(api_call('delegate', args, payload))
elif args['encrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Minimum': args['--min'], 'Owners': args['--owners'].split(','),
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('encrypt', args, payload))
elif args['decrypt']:
payload = {
'Name': args['--user'], 'Password': password,
'Data': (args['--str'] if args['--file'] is None
else read_file(args['--file']))
}
goodquit_json(api_call('decrypt', args, payload))
elif args['summary']:
payload = {'Name': args['--user'], 'Password': password}
goodquit_json(api_call('summary', args, payload))
elif args['change-password']:
args['newpass'] = getpass.getpass('New Password: ')
payload = {
'Name': args['--user'], 'Password': password,
'NewPassword': args['newpass']
}
goodquit_json(api_call('password', args, payload))
elif args['modify']:
payload = {
'Name': args['--user'], 'Password': password,
'Command': args['--action'], 'ToModify': args['--target']
}
goodquit_json(api_call('modify', args, payload))
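# Minimal usage sketch (hypothetical docopt-style args dict; only the keys
# read above are required, and 'alice' is a made-up user):
#   args = {'create': False, 'delegate': False, 'encrypt': False,
#           'decrypt': False, 'summary': True, 'change-password': False,
#           'modify': False, '--user': 'alice'}
#   engage(args, getpass.getpass())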
| mit | 5,020,975,873,342,050,000 | 32.785714 | 76 | 0.523784 | false |
erigones/ludolph-skeleton | ludolph_skeleton/hello_world.py | 1 | 1567 | """
This file is part of Ludolph: Skeleton plugin
Copyright (C) 2015-2017 Erigones, s. r. o.
See the LICENSE file for copying permission.
"""
import time
from ludolph.command import CommandError, command
from ludolph.plugins.plugin import LudolphPlugin
from . import __version__
class HelloWorld(LudolphPlugin):
"""
Ludolph: Skeleton, Hello World plugin commands.
Sample plugin with 3 commands. Each showing how you can use Ludolph decorators in your plugins.
"""
__version__ = __version__
# noinspection PyUnusedLocal
@command
def hello_world(self, msg):
"""
Hello World greeting.
Usage: hello-world
"""
return 'Hi, I am the Hello World plugin reply!'
# noinspection PyUnusedLocal
@command(stream_output=True)
def hello_repeat(self, msg, *args):
"""
Hello World plugin parameters repeater with streaming output.
Repeat all parameters passed to command, each in separate reply message.
First parameter is required.
Usage: hello-repeat [param1] [param2] [param3] [paramN]
"""
if not args:
raise CommandError('You gave me nothing to repeat :(')
for arg in args:
yield 'I have received parameter: "%s"' % arg
time.sleep(0.3)
# noinspection PyUnusedLocal
@command(admin_required=True)
def hello_admin(self, msg):
"""
Hello Admin greeting (admin only).
Usage: hello-admin
"""
return 'Hi, I am the Hello Admin plugin reply!'
| mit | 1,480,646,476,157,939,000 | 26.491228 | 99 | 0.634971 | false |
tobykurien/MakerDroid | assetsrc/pycam.mp3/src/pycam/Exporters/EMCToolExporter.py | 1 | 1454 | # -*- coding: utf-8 -*-
"""
$Id: EMCToolExporter.py 629 2010-08-23 16:53:06Z sumpfralle $
Copyright 2010 Lars Kruse <[email protected]>
This file is part of PyCAM.
PyCAM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PyCAM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PyCAM. If not, see <http://www.gnu.org/licenses/>.
"""
import os
class EMCToolExporter:
def __init__(self, tools):
self.tools = tools
def get_tool_definition_string(self):
result = []
#result.append(self.HEADER_ROW)
for index in range(len(self.tools)):
tool = self.tools[index]
# use an arbitrary length
tool_length = tool["tool_radius"] * 10
line = "T%d P%d D%f Z-%f ;%s" % (index + 1, index + 1,
2 * tool["tool_radius"], tool_length, tool["name"])
result.append(line)
# add the dummy line for the "last" tool
result.append("T99999 P99999 Z+0.100000 ;dummy tool")
return os.linesep.join(result)
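# Minimal usage sketch (hypothetical tool list; only the "tool_radius" and
# "name" keys read above are required):
#   exporter = EMCToolExporter([{"tool_radius": 1.5, "name": "end mill"}])
#   print(exporter.get_tool_definition_string())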
| gpl-3.0 | -6,490,145,569,136,249,000 | 32.813953 | 71 | 0.662999 | false |
jcmgray/xarray | xarray/tests/test_formatting.py | 1 | 5879 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from xarray.core import formatting
from xarray.core.pycompat import PY3
from . import TestCase, raises_regex
class TestFormatting(TestCase):
def test_get_indexer_at_least_n_items(self):
cases = [
((20,), (slice(10),)),
((3, 20,), (0, slice(10))),
((2, 10,), (0, slice(10))),
((2, 5,), (slice(2), slice(None))),
((1, 2, 5,), (0, slice(2), slice(None))),
((2, 3, 5,), (0, slice(2), slice(None))),
((1, 10, 1,), (0, slice(10), slice(None))),
((2, 5, 1,), (slice(2), slice(None), slice(None))),
((2, 5, 3,), (0, slice(4), slice(None))),
((2, 3, 3,), (slice(2), slice(None), slice(None))),
]
for shape, expected in cases:
actual = formatting._get_indexer_at_least_n_items(shape, 10)
assert expected == actual
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
self.assertItemsEqual(expected, actual)
with raises_regex(ValueError, 'at least one item'):
formatting.first_n_items(array, 0)
def test_last_item(self):
array = np.arange(100)
reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
expected = np.array([99])
for r in reshape:
result = formatting.last_item(array.reshape(r))
assert result == expected
def test_format_item(self):
cases = [
(pd.Timestamp('2000-01-01T12'), '2000-01-01T12:00:00'),
(pd.Timestamp('2000-01-01'), '2000-01-01'),
(pd.Timestamp('NaT'), 'NaT'),
(pd.Timedelta('10 days 1 hour'), '10 days 01:00:00'),
(pd.Timedelta('-3 days'), '-3 days +00:00:00'),
(pd.Timedelta('3 hours'), '0 days 03:00:00'),
(pd.Timedelta('NaT'), 'NaT'),
('foo', "'foo'"),
(u'foo', "'foo'" if PY3 else "u'foo'"),
(b'foo', "b'foo'" if PY3 else "'foo'"),
(1, '1'),
(1.0, '1.0'),
]
for item, expected in cases:
actual = formatting.format_item(item)
assert expected == actual
def test_format_items(self):
cases = [
(np.arange(4) * np.timedelta64(1, 'D'),
'0 days 1 days 2 days 3 days'),
(np.arange(4) * np.timedelta64(3, 'h'),
'00:00:00 03:00:00 06:00:00 09:00:00'),
(np.arange(4) * np.timedelta64(500, 'ms'),
'00:00:00 00:00:00.500000 00:00:01 00:00:01.500000'),
(pd.to_timedelta(['NaT', '0s', '1s', 'NaT']),
'NaT 00:00:00 00:00:01 NaT'),
(pd.to_timedelta(['1 day 1 hour', '1 day', '0 hours']),
'1 days 01:00:00 1 days 00:00:00 0 days 00:00:00'),
([1, 2, 3], '1 2 3'),
]
for item, expected in cases:
actual = ' '.join(formatting.format_items(item))
assert expected == actual
def test_format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 13)
expected = '0 1 2 3 4 ...'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 11)
expected = '0.0 1.0 ...'
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 1)
expected = '0.0 ...'
assert expected == actual
actual = formatting.format_array_flat(np.arange(3), 5)
expected = '0 1 2'
assert expected == actual
actual = formatting.format_array_flat(np.arange(4.0), 11)
expected = '0.0 1.0 ...'
assert expected == actual
actual = formatting.format_array_flat(np.arange(4), 0)
expected = '0 ...'
assert expected == actual
def test_pretty_print(self):
assert formatting.pretty_print('abcdefghij', 8) == 'abcde...'
assert formatting.pretty_print(u'ß', 1) == u'ß'
def test_maybe_truncate(self):
assert formatting.maybe_truncate(u'ß', 10) == u'ß'
def test_format_timestamp_out_of_bounds(self):
from datetime import datetime
date = datetime(1300, 12, 1)
expected = '1300-12-01'
result = formatting.format_timestamp(date)
assert result == expected
date = datetime(2300, 12, 1)
expected = '2300-12-01'
result = formatting.format_timestamp(date)
assert result == expected
def test_attribute_repr(self):
short = formatting.summarize_attr(u'key', u'Short string')
long = formatting.summarize_attr(u'key', 100 * u'Very long string ')
newlines = formatting.summarize_attr(u'key', u'\n\n\n')
tabs = formatting.summarize_attr(u'key', u'\t\t\t')
assert short == ' key: Short string'
assert len(long) <= 80
assert long.endswith(u'...')
assert u'\n' not in newlines
assert u'\t' not in tabs
def test_set_numpy_options():
original_options = np.get_printoptions()
with formatting.set_numpy_options(threshold=10):
assert len(repr(np.arange(500))) < 200
# original options are restored
assert np.get_printoptions() == original_options
def test_short_array_repr():
cases = [
np.random.randn(500),
np.random.randn(20, 20),
np.random.randn(5, 10, 15),
np.random.randn(5, 10, 15, 3),
]
# number of lines:
# for default numpy repr: 167, 140, 254, 248
# for short_array_repr: 1, 7, 24, 19
for array in cases:
num_lines = formatting.short_array_repr(array).count('\n') + 1
assert num_lines < 30
| apache-2.0 | -2,457,367,356,922,792,000 | 34.606061 | 76 | 0.538553 | false |
j-be/vj-aerome-scent-controller | aerome_scent_controller.py | 1 | 1957 | import logging
import serial
import array
class AeromeScentController (object):
BLOCK_BEGIN = [0x1b]
BLOCK_END = [0x0d]
ACTIVATE_CONTROLLER = [0xe0, 0xe1, 0xe2, 0xe3, 0x0d]
ALL_VALVES_HOLD = [0xee, 0xef]
FLUSH_VALVE_ON = [0x26]
FLUSH_VALVE_OFF = [0xa6]
SCENT_VALVE_ON = 0x40
SCENT_VALVE_OFF = 0xC0
def __init__(self, serial_port_name):
self.serial_port_name = serial_port_name
def _init_serial(self):
self.log = logging.getLogger("aeromeScentController")
try:
# Init Serial port
self.serial_port = serial.Serial(self.serial_port_name, timeout=1, baudrate=9600)
self.serial_port.flushInput()
self.serial_port.flushOutput()
except OSError as error:
self.serial_port = None
self.log.error("Cannot initialize. Reason: %s", error)
except serial.serialutil.SerialException as error:
self.serial_port = None
self.log.error("Cannot initialize. Reason: %s", error)
self.log.debug("Serial: %s", self.serial_port)
def initialize_controller(self):
self._init_serial()
self._send_block(self.ALL_VALVES_HOLD)
self._send_message(self.ACTIVATE_CONTROLLER)
self._send_block(self.ALL_VALVES_HOLD)
def open_valve(self, valve_id):
self._send_block(self.FLUSH_VALVE_ON + [self.SCENT_VALVE_ON + valve_id])
def close_valve(self, valve_id):
self._send_block(self.FLUSH_VALVE_OFF + [self.SCENT_VALVE_OFF + valve_id])
def _send_block(self, block_content):
block = []
block += self.BLOCK_BEGIN
block += block_content
block += self.BLOCK_END
self._send_message(block)
def _send_message(self, message):
msg_str = array.array('B', message).tostring()
self.log.debug("Sending: " + ''.join(format(x, '02x') for x in message))
#self.serial_port.write(msg_str)
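# Minimal usage sketch (the serial device name is an assumption; note that the
# actual serial write above is commented out in this snapshot):
#   controller = AeromeScentController('/dev/ttyUSB0')
#   controller.initialize_controller()
#   controller.open_valve(1)   # opens the flush valve plus scent valve 1
#   controller.close_valve(1)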
| mit | 5,824,470,294,240,700,000 | 31.081967 | 93 | 0.615738 | false |
dmsurti/reynolds-blender | reynolds_blender/fvschemes.py | 1 | 6197 | #------------------------------------------------------------------------------
# Reynolds-Blender | The Blender add-on for Reynolds, an OpenFoam toolbox.
#------------------------------------------------------------------------------
# Copyright|
#------------------------------------------------------------------------------
# Deepak Surti ([email protected])
# Prabhu R (IIT Bombay, [email protected])
# Shivasubramanian G (IIT Bombay, [email protected])
#------------------------------------------------------------------------------
# License
#
# This file is part of reynolds-blender.
#
# reynolds-blender is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# reynolds-blender is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with reynolds-blender. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------------
# -----------
# bpy imports
# -----------
import bpy, bmesh
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
IntVectorProperty,
FloatVectorProperty,
CollectionProperty
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
UIList
)
from bpy.path import abspath
from mathutils import Matrix, Vector
# --------------
# python imports
# --------------
import operator
import os
import pathlib
# ------------------------
# reynolds blender imports
# ------------------------
from reynolds_blender.gui.register import register_classes, unregister_classes
from reynolds_blender.gui.attrs import set_scene_attrs, del_scene_attrs
from reynolds_blender.gui.custom_operator import create_custom_operators
from reynolds_blender.gui.renderer import ReynoldsGUIRenderer
# ----------------
# reynolds imports
# ----------------
from reynolds.dict.parser import ReynoldsFoamDict
from reynolds.foam.cmd_runner import FoamCmdRunner
# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------
def generate_laplacianFoam_fvschemes(fvschemes, scene):
fvschemes['ddtSchemes']['default'] = scene.ddt_schemes_default
fvschemes['gradSchemes']['default'] = scene.grad_schemes_default
fvschemes['gradSchemes']['grad(T)'] = scene.grad_schemes_grad_T
fvschemes['divSchemes']['default'] = scene.div_schemes_default
fvschemes['laplacianSchemes']['default'] = scene.lap_schemes_default
fvschemes['laplacianSchemes']['laplacian(DT,T)'] = scene.lap_schemes_dt_t
fvschemes['interpolationSchemes']['default'] = scene.interp_schemes_default
fvschemes['snGradSchemes']['default'] = scene.sngrad_schemes_default
fvschemes['fluxRequired']['default'] = scene.flux_required_default
fvschemes['fluxRequired']['T'] = scene.flux_required_t
def generate_icoFoam_fvschemes(fvschemes, scene):
fvschemes['ddtSchemes']['default'] = scene.ddt_schemes_default
fvschemes['gradSchemes']['default'] = scene.grad_schemes_default
fvschemes['gradSchemes']['grad(p)'] = scene.grad_schemes_grad_p
fvschemes['divSchemes']['default'] = scene.div_schemes_default
fvschemes['divSchemes']['div(phi,U)'] = scene.div_schemes_phi_U
fvschemes['laplacianSchemes']['default'] = scene.lap_schemes_default
fvschemes['interpolationSchemes']['default'] = scene.interp_schemes_default
fvschemes['snGradSchemes']['default'] = scene.sngrad_schemes_default
# ------------------------------------------------------------------------
# Panel
# ------------------------------------------------------------------------
class FVSchemesOperator(bpy.types.Operator):
bl_idname = "reynolds.of_fvschemes"
bl_label = "FVSchemes"
@classmethod
def poll(cls, context):
return True
def execute(self, context):
scene = context.scene
print('Generate fvschemes for solver: ' + scene.solver_name)
abs_case_dir_path = bpy.path.abspath(scene.case_dir_path)
fvschemes = ReynoldsFoamDict('fvSchemes.foam', solver_name=scene.solver_name)
if scene.solver_name == 'laplacianFoam':
generate_laplacianFoam_fvschemes(fvschemes, scene)
elif scene.solver_name == 'icoFoam':
generate_icoFoam_fvschemes(fvschemes, scene)
system_dir = os.path.join(abs_case_dir_path, "system")
if not os.path.exists(system_dir):
os.makedirs(system_dir)
fvschemes_file_path = os.path.join(system_dir, "fvSchemes")
with open(fvschemes_file_path, "w+") as f:
f.write(str(fvschemes))
return {'FINISHED'}
# Return True to force redraw
def check(self, context):
return True
def invoke(self, context, event):
scene = context.scene
return context.window_manager.invoke_props_dialog(self, width=1000)
def draw(self, context):
layout = self.layout
scene = context.scene
gui_renderer = ReynoldsGUIRenderer(scene, layout,
scene.solver_name + 'Schemes.yaml')
gui_renderer.render()
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
register_classes(__name__)
def unregister():
unregister_classes(__name__)
if __name__ == "__main__":
register()
| gpl-3.0 | -8,835,433,186,925,909,000 | 39.503268 | 85 | 0.555591 | false |
maximinus/SPQR | setup.py | 1 | 2499 | #!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# setup code for game
# for now, you can only set the game resolution
import sys, pygame
from pygame.locals import *
from scripts import spqr_defines as SPQR
from scripts import spqr_gui as SGFX
from scripts import spqr_window as SWINDOW
from scripts import spqr_widgets as SWIDGET
from scripts import spqr_ybuild as SYAML
SCREEN_WIDTH = 285
SCREEN_HEIGHT = 192
def setupWindow():
SYAML.createWindow("../data/layouts/setup_window.yml")
def setupWindow2():
# get a fullsize window, and add the options to it
window = SWINDOW.CWindow(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT,
"", False, "main-window")
window.fillWindowImage()
# an optionmenu, a label, a seperator and 2 buttons
label1 = SWIDGET.buildLabel("Resolution")
label1.rect.x = 29
label1.rect.y = 26
label2 = SWIDGET.buildLabel("Play Music")
label2.rect.x = 32
label2.rect.y = 64
music = SWIDGET.CCheckBox(140, 64, True)
label3 = SWIDGET.buildLabel("Show intro")
label3.rect.x = 30
label3.rect.y = 100
intro = SWIDGET.CCheckBox(140, 100, True)
options = SWIDGET.COptionMenu(124, 20, ["800x600", "1024x768", "Fullscreen"])
options.describe = "opt-Resolution"
sepbar = SWIDGET.CSeperator(6 ,label1.rect.y + 106, SCREEN_WIDTH - 9)
ok_button = SWIDGET.CButton(165, 148, "OK")
ok_button.callbacks.mouse_lclk = okClick
cancel_button = SWIDGET.CButton(50, 148, "Cancel")
cancel_button.callbacks.mouse_lclk = cancelClick
for i in [options, label1, label2, music, label3, intro,
sepbar, ok_button, cancel_button]:
i.active = True
window.addWidget(i)
# only 1 window, set it modal
window.modal = True
SGFX.gui.addWindow(window)
if __name__ == "__main__":
SGFX.gui.mainInit(SCREEN_WIDTH, SCREEN_HEIGHT, False, False)
setupWindow()
SGFX.gui.updateGUI()
SGFX.gui.mainLoop()
| gpl-3.0 | 7,500,770,195,348,641,000 | 30.632911 | 78 | 0.732693 | false |
alexweav/Learny-McLearnface | LearnyMcLearnface/NeuralNetwork.py | 1 | 5358 | # -*- coding: utf-8 -*-
"""
Created on Fri May 06 14:34:21 2016
@author: Alexander Weaver
"""
import numpy as np
from . import Layers as layers
from . import Utils as utils
class NeuralNetwork(object):
"""
Initializes a neural network.
Takes a dictionary of initialization options.
"""
def __init__(self, options):
self.input_dim = options['input_dim']
self.data_type = options.setdefault('data_type', np.float32)
self.init_scheme = options.setdefault('init_scheme', 'xavier')
self.layers = []
self.num_layers = 0
"""
Adds a layer to the neural network.
The layer must be of a valid type, and is associated with a dictionary.
If the layer has any special options or hyperparameters, these are indicated in the dictionary.
Otherwise, the dictionary is empty.
"""
def add_layer(self, layer_type, params):
if not self.layers:
in_dim = self.input_dim
else:
in_dim = self.layers[-1].out_dim
if 'weight_scale' in params:
weight_scale = params['weight_scale']
elif self.init_scheme == 'xavier':
weight_scale = 1./np.sqrt(in_dim)
if layer_type == 'SoftmaxLoss':
layer = layers.SoftmaxLossLayer(in_dim)
self.layers.append(layer)
elif layer_type == 'SVMLoss':
layer = layers.SVMLossLayer(in_dim)
self.layers.append(layer)
elif layer_type == 'Affine':
layer = layers.AffineLayer(in_dim, params['neurons'], weight_scale, self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Batchnorm':
layer = layers.BatchnormLayer(in_dim, params['decay'], self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Dropout':
if 'seed' in params:
layer = layers.DropoutLayer(in_dim, params['dropout_param'], seed=params['seed'])
else:
layer = layers.DropoutLayer(in_dim, params['dropout_param'])
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'PReLU':
layer = layers.PReLULayer(in_dim, self.data_type)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'ReLU':
layer = layers.ReLULayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Sigmoid':
layer = layers.SigmoidLayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
elif layer_type == 'Tanh':
layer = layers.TanhLayer(in_dim)
self.layers.append(layer)
self.num_layers += 1
else:
raise InvalidLayerException('Invalid layer: ' + layer_type)
"""
Performs forward propagation on the network, pushing a tensor through each layer in sequence.
Does not perform final layer classification.
"""
def forward(self, X, train=False):
X = X.astype(self.data_type)
forward_tensor = X
for layer in self.layers:
if layer == self.layers[-1]:
return forward_tensor
if isinstance(layer, layers.DropoutLayer) or isinstance(layer, layers.BatchnormLayer) and train:
forward_tensor = layer.forward_train(forward_tensor)
else:
forward_tensor = layer.forward(forward_tensor)
"""
Performs forward propagation, and performs final layer classification.
Returns an NxC matrix of class scores per given example.
"""
def classify(self, X):
X = X.astype(self.data_type)
scores = self.forward(X)
return self.layers[-1].evaluate(scores)
"""
Given a set of training examples and their corresponding scores, performs forward propagation and then
returns the final layer classifier loss and the derivative of that loss function.
"""
def loss(self, X, y, reg_param=0.0):
X = X.astype(self.data_type)
scores = self.forward(X, train=True)
loss, dx = self.layers[-1].loss(scores, y)
squared_sum = 0.0
for layer in self.layers:
if isinstance(layer, layers.AffineLayer):
squared_sum += np.sum(layer.W * layer.W)
loss += 0.5 * reg_param * squared_sum
return loss, dx
"""
Takes a set of training examples and corresponding scores.
Performs forward propagation, executes the final layer classifier loss function.
Then, performs backpropagation on the network and saves intermediate derivatives to the respective layers.
Returns the classifier loss and its derivative for progress reporting purposes.
"""
def backward(self, X, y, reg_param=0.0):
X = X.astype(self.data_type)
loss, dx = self.loss(X, y, reg_param)
for layer in reversed(self.layers):
if layer == self.layers[-1]:
continue
dx = layer.backward(dx)
if isinstance(layer, layers.AffineLayer):
layer.dW += reg_param * layer.W
return loss, dx
class InvalidLayerException(Exception):
pass | mit | -489,052,098,223,998,340 | 37.553957 | 110 | 0.591639 | false |
DailyActie/Surrogate-Model | 01-codes/scipy-master/scipy/ndimage/morphology.py | 1 | 80212 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from . import _nd_image
from . import _ni_support
from . import filters
__all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion',
'binary_dilation', 'binary_opening', 'binary_closing',
'binary_hit_or_miss', 'binary_propagation', 'binary_fill_holes',
'grey_erosion', 'grey_dilation', 'grey_opening', 'grey_closing',
'morphological_gradient', 'morphological_laplace', 'white_tophat',
'black_tophat', 'distance_transform_bf', 'distance_transform_cdt',
'distance_transform_edt']
def _center_is_true(structure, origin):
structure = numpy.array(structure)
coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape,
origin)])
return bool(structure[coor])
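# Note: the helper above reports whether the structuring element is True at
# its (possibly shifted) centre; e.g. _center_is_true(numpy.ones((3, 3)),
# [0, 0]) inspects element (1, 1).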
def iterate_structure(structure, iterations, origin=None):
"""
Iterate a structure by dilating it with itself.
Parameters
----------
structure : array_like
Structuring element (an array of bools, for example), to be dilated with
itself.
iterations : int
number of dilations performed on the structure with itself
origin : optional
If origin is None, only the iterated structure is returned. If
not, a tuple of the iterated structure and the modified origin is
returned.
Returns
-------
iterate_structure : ndarray of bools
A new structuring element obtained by dilating `structure`
(`iterations` - 1) times with itself.
See also
--------
generate_binary_structure
Examples
--------
>>> from scipy import ndimage
>>> struct = ndimage.generate_binary_structure(2, 1)
>>> struct.astype(int)
array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]])
>>> ndimage.iterate_structure(struct, 2).astype(int)
array([[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0]])
>>> ndimage.iterate_structure(struct, 3).astype(int)
array([[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0]])
"""
structure = numpy.asarray(structure)
if iterations < 2:
return structure.copy()
ni = iterations - 1
shape = [ii + ni * (ii - 1) for ii in structure.shape]
pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
slc = [slice(pos[ii], pos[ii] + structure.shape[ii], None)
for ii in range(len(shape))]
out = numpy.zeros(shape, bool)
    out[tuple(slc)] = structure != 0
out = binary_dilation(out, structure, iterations=ni)
if origin is None:
return out
else:
origin = _ni_support._normalize_sequence(origin, structure.ndim)
origin = [iterations * o for o in origin]
return out, origin
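# A minimal sketch of the ``origin`` return path documented above: when
# ``origin`` is not None, the dilated structure is returned together with the
# origin scaled by ``iterations``:
#
#     >>> from scipy import ndimage
#     >>> struct = ndimage.generate_binary_structure(2, 1)
#     >>> out, origin = ndimage.iterate_structure(struct, 3, origin=(1, 1))
#     >>> origin
#     [3, 3]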
def generate_binary_structure(rank, connectivity):
"""
Generate a binary structure for binary morphological operations.
Parameters
----------
rank : int
Number of dimensions of the array to which the structuring element
will be applied, as returned by `np.ndim`.
connectivity : int
`connectivity` determines which elements of the output array belong
to the structure, i.e. are considered as neighbors of the central
element. Elements up to a squared distance of `connectivity` from
the center are considered neighbors. `connectivity` may range from 1
(no diagonal elements are neighbors) to `rank` (all elements are
neighbors).
Returns
-------
output : ndarray of bools
Structuring element which may be used for binary morphological
operations, with `rank` dimensions and all dimensions equal to 3.
See also
--------
iterate_structure, binary_dilation, binary_erosion
Notes
-----
`generate_binary_structure` can only create structuring elements with
dimensions equal to 3, i.e. minimal dimensions. For larger structuring
elements, that are useful e.g. for eroding large objects, one may either
use `iterate_structure`, or create directly custom arrays with
numpy functions such as `numpy.ones`.
Examples
--------
>>> from scipy import ndimage
>>> struct = ndimage.generate_binary_structure(2, 1)
>>> struct
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> a = np.zeros((5,5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> b = ndimage.binary_dilation(a, structure=struct).astype(a.dtype)
>>> b
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(b, structure=struct).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
>>> struct = ndimage.generate_binary_structure(2, 2)
>>> struct
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> struct = ndimage.generate_binary_structure(3, 1)
>>> struct # no diagonal elements
array([[[False, False, False],
[False, True, False],
[False, False, False]],
[[False, True, False],
[ True, True, True],
[False, True, False]],
[[False, False, False],
[False, True, False],
[False, False, False]]], dtype=bool)
"""
if connectivity < 1:
connectivity = 1
if rank < 1:
if connectivity < 1:
return numpy.array(0, dtype=bool)
else:
return numpy.array(1, dtype=bool)
output = numpy.fabs(numpy.indices([3] * rank) - 1)
output = numpy.add.reduce(output, 0)
return numpy.asarray(output <= connectivity, dtype=bool)
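# As the Notes above point out, generate_binary_structure only builds elements
# of width 3; a hedged sketch of the two usual ways to obtain a larger one:
#
#     >>> from scipy import ndimage
#     >>> big_square = np.ones((5, 5), dtype=bool)   # flat 5x5 element
#     >>> big_diamond = ndimage.iterate_structure(
#     ...     ndimage.generate_binary_structure(2, 1), 2)
#
# ``big_square`` keeps diagonal neighbours, while ``big_diamond`` is the
# connectivity-1 element dilated with itself.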
def _binary_erosion(input, structure, iterations, mask, output,
border_value, origin, invert, brute_force):
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
else:
structure = numpy.asarray(structure)
structure = structure.astype(bool)
if structure.ndim != input.ndim:
raise RuntimeError('structure and input must have same dimensionality')
if not structure.flags.contiguous:
structure = structure.copy()
    if numpy.prod(structure.shape, axis=0) < 1:
raise RuntimeError('structure must not be empty')
if mask is not None:
mask = numpy.asarray(mask)
if mask.shape != input.shape:
raise RuntimeError('mask and input must have equal sizes')
origin = _ni_support._normalize_sequence(origin, input.ndim)
cit = _center_is_true(structure, origin)
if isinstance(output, numpy.ndarray):
if numpy.iscomplexobj(output):
raise TypeError('Complex output type not supported')
else:
output = bool
output, return_value = _ni_support._get_output(output, input)
if iterations == 1:
_nd_image.binary_erosion(input, structure, mask, output,
border_value, origin, invert, cit, 0)
return return_value
elif cit and not brute_force:
changed, coordinate_list = _nd_image.binary_erosion(input,
structure, mask, output, border_value, origin, invert, cit,
1)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
if mask is not None:
msk = numpy.asarray(mask)
msk = mask.astype(numpy.int8)
if msk is mask:
msk = mask.copy()
mask = msk
if not structure.flags.contiguous:
structure = structure.copy()
_nd_image.binary_erosion2(output, structure, mask, iterations - 1,
origin, invert, coordinate_list)
return return_value
else:
tmp_in = numpy.zeros(input.shape, bool)
if return_value is None:
tmp_out = output
else:
tmp_out = numpy.zeros(input.shape, bool)
if not iterations & 1:
tmp_in, tmp_out = tmp_out, tmp_in
changed = _nd_image.binary_erosion(input, structure, mask,
tmp_out, border_value, origin, invert, cit, 0)
ii = 1
while (ii < iterations) or (iterations < 1) and changed:
tmp_in, tmp_out = tmp_out, tmp_in
changed = _nd_image.binary_erosion(tmp_in, structure, mask,
tmp_out, border_value, origin, invert, cit, 0)
ii += 1
if return_value is not None:
return tmp_out
def binary_erosion(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False):
"""
Multi-dimensional binary erosion with a given structuring element.
Binary erosion is a mathematical morphology operation used for image
processing.
Parameters
----------
input : array_like
Binary image to be eroded. Non-zero (True) elements form
the subset to be eroded.
structure : array_like, optional
Structuring element used for the erosion. Non-zero elements are
considered True. If no structuring element is provided, an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The erosion is repeated `iterations` times (one, by default).
If iterations is less than 1, the erosion is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
Returns
-------
binary_erosion : ndarray of bools
Erosion of the input by the structuring element.
See also
--------
grey_erosion, binary_dilation, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Erosion [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for shrinking the shapes in an image. The binary
erosion of an image by a structuring element is the locus of the points
where a superimposition of the structuring element centered on the point
is entirely contained in the set of non-zero elements of the image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 2:5] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_erosion(a).astype(a.dtype)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> #Erosion removes objects smaller than the structure
>>> ndimage.binary_erosion(a, structure=np.ones((5,5))).astype(a.dtype)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 0, brute_force)
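# The ``border_value`` argument is not exercised by the examples above; as a
# hedged sketch, values outside the array are treated as ``border_value``
# during the erosion, so with border_value=1 shapes touching the image border
# are not eaten away from that side:
#
#     >>> from scipy import ndimage
#     >>> a = np.ones((3, 5), dtype=bool)
#     >>> ndimage.binary_erosion(a, border_value=1).all()
#     True
#     >>> ndimage.binary_erosion(a, border_value=0)[0].any()  # top row eroded
#     False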
def binary_dilation(input, structure=None, iterations=1, mask=None,
output=None, border_value=0, origin=0, brute_force=False):
"""
Multi-dimensional binary dilation with the given structuring element.
Parameters
----------
input : array_like
Binary array_like to be dilated. Non-zero (True) elements form
the subset to be dilated.
structure : array_like, optional
Structuring element used for the dilation. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one.
iterations : {int, float}, optional
The dilation is repeated `iterations` times (one, by default).
If iterations is less than 1, the dilation is repeated until the
result does not change anymore.
mask : array_like, optional
If a mask is given, only those elements with a True value at
the corresponding mask element are modified at each iteration.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
Returns
-------
binary_dilation : ndarray of bools
Dilation of the input by the structuring element.
See also
--------
grey_dilation, binary_erosion, binary_closing, binary_opening,
generate_binary_structure
Notes
-----
Dilation [1]_ is a mathematical morphology operation [2]_ that uses a
structuring element for expanding the shapes in an image. The binary
dilation of an image by a structuring element is the locus of the points
covered by the structuring element, when its center lies within the
non-zero points of the image.
References
----------
.. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5))
>>> a[2, 2] = 1
>>> a
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a)
array([[False, False, False, False, False],
[False, False, True, False, False],
[False, True, True, True, False],
[False, False, True, False, False],
[False, False, False, False, False]], dtype=bool)
>>> ndimage.binary_dilation(a).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> # 3x3 structuring element with connectivity 1, used by default
>>> struct1 = ndimage.generate_binary_structure(2, 1)
>>> struct1
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # 3x3 structuring element with connectivity 2
>>> struct2 = ndimage.generate_binary_structure(2, 2)
>>> struct2
array([[ True, True, True],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> ndimage.binary_dilation(a, structure=struct1).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct2).astype(a.dtype)
array([[ 0., 0., 0., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 0., 0., 0.]])
>>> ndimage.binary_dilation(a, structure=struct1,\\
... iterations=2).astype(a.dtype)
array([[ 0., 0., 1., 0., 0.],
[ 0., 1., 1., 1., 0.],
[ 1., 1., 1., 1., 1.],
[ 0., 1., 1., 1., 0.],
[ 0., 0., 1., 0., 0.]])
"""
input = numpy.asarray(input)
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
origin = _ni_support._normalize_sequence(origin, input.ndim)
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure.shape[ii] & 1:
origin[ii] -= 1
return _binary_erosion(input, structure, iterations, mask,
output, border_value, origin, 1, brute_force)
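# Implementation note: dilation reuses the erosion kernel with the ``invert``
# flag set, which is why the structuring element is reflected and the origin
# mirrored above before delegating to _binary_erosion.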
def binary_opening(input, structure=None, iterations=1, output=None,
origin=0):
"""
Multi-dimensional binary opening with the given structuring element.
The *opening* of an input image by a structuring element is the
*dilation* of the *erosion* of the image by the structuring element.
Parameters
----------
input : array_like
Binary array_like to be opened. Non-zero (True) elements form
the subset to be opened.
structure : array_like, optional
Structuring element used for the opening. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one (i.e., only
nearest neighbors are connected to the center, diagonally-connected
elements are not considered neighbors).
iterations : {int, float}, optional
The erosion step of the opening, then the dilation step are each
repeated `iterations` times (one, by default). If `iterations` is
less than 1, each operation is repeated until the result does
not change anymore.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_opening : ndarray of bools
Opening of the input by the structuring element.
See also
--------
grey_opening, binary_closing, binary_erosion, binary_dilation,
generate_binary_structure
Notes
-----
*Opening* [1]_ is a mathematical morphology operation [2]_ that
consists in the succession of an erosion and a dilation of the
input with the same structuring element. Opening therefore removes
objects smaller than the structuring element.
Together with *closing* (`binary_closing`), opening can be used for
noise removal.
References
----------
.. [1] http://en.wikipedia.org/wiki/Opening_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5,5), dtype=int)
>>> a[1:4, 1:4] = 1; a[4, 4] = 1
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
>>> # Opening removes small objects
>>> ndimage.binary_opening(a, structure=np.ones((3,3))).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Opening can also smooth corners
>>> ndimage.binary_opening(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
>>> # Opening is the dilation of the erosion of the input
>>> ndimage.binary_erosion(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
>>> ndimage.binary_dilation(ndimage.binary_erosion(a)).astype(int)
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure is None:
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_erosion(input, structure, iterations, None, None, 0,
origin)
return binary_dilation(tmp, structure, iterations, None, output, 0,
origin)
def binary_closing(input, structure=None, iterations=1, output=None,
origin=0):
"""
Multi-dimensional binary closing with the given structuring element.
The *closing* of an input image by a structuring element is the
*erosion* of the *dilation* of the image by the structuring element.
Parameters
----------
input : array_like
Binary array_like to be closed. Non-zero (True) elements form
the subset to be closed.
structure : array_like, optional
Structuring element used for the closing. Non-zero elements are
considered True. If no structuring element is provided an element
is generated with a square connectivity equal to one (i.e., only
nearest neighbors are connected to the center, diagonally-connected
elements are not considered neighbors).
iterations : {int, float}, optional
The dilation step of the closing, then the erosion step are each
repeated `iterations` times (one, by default). If iterations is
        less than 1, each operation is repeated until the result does
not change anymore.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_closing : ndarray of bools
Closing of the input by the structuring element.
See also
--------
grey_closing, binary_opening, binary_dilation, binary_erosion,
generate_binary_structure
Notes
-----
*Closing* [1]_ is a mathematical morphology operation [2]_ that
consists in the succession of a dilation and an erosion of the
input with the same structuring element. Closing therefore fills
holes smaller than the structuring element.
Together with *opening* (`binary_opening`), closing can be used for
noise removal.
References
----------
.. [1] http://en.wikipedia.org/wiki/Closing_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5,5), dtype=int)
>>> a[1:-1, 1:-1] = 1; a[2,2] = 0
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Closing removes small holes
>>> ndimage.binary_closing(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Closing is the erosion of the dilation of the input
>>> ndimage.binary_dilation(a).astype(int)
array([[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0]])
>>> ndimage.binary_erosion(ndimage.binary_dilation(a)).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 2:5] = 1; a[1:3,3] = 0
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # In addition to removing holes, closing can also
>>> # coarsen boundaries with fine hollows.
>>> ndimage.binary_closing(a).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_closing(a, structure=np.ones((2,2))).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure is None:
rank = input.ndim
structure = generate_binary_structure(rank, 1)
tmp = binary_dilation(input, structure, iterations, None, None, 0,
origin)
return binary_erosion(tmp, structure, iterations, None, output, 0,
origin)
def binary_hit_or_miss(input, structure1=None, structure2=None,
output=None, origin1=0, origin2=None):
"""
Multi-dimensional binary hit-or-miss transform.
The hit-or-miss transform finds the locations of a given pattern
inside the input image.
Parameters
----------
input : array_like (cast to booleans)
Binary image where a pattern is to be detected.
structure1 : array_like (cast to booleans), optional
Part of the structuring element to be fitted to the foreground
(non-zero elements) of `input`. If no value is provided, a
structure of square connectivity 1 is chosen.
structure2 : array_like (cast to booleans), optional
Second part of the structuring element that has to miss completely
the foreground. If no value is provided, the complementary of
`structure1` is taken.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin1 : int or tuple of ints, optional
Placement of the first part of the structuring element `structure1`,
by default 0 for a centered structure.
origin2 : int or tuple of ints, optional
Placement of the second part of the structuring element `structure2`,
by default 0 for a centered structure. If a value is provided for
`origin1` and not for `origin2`, then `origin2` is set to `origin1`.
Returns
-------
binary_hit_or_miss : ndarray
Hit-or-miss transform of `input` with the given structuring
element (`structure1`, `structure2`).
See also
--------
ndimage.morphology, binary_erosion
References
----------
.. [1] http://en.wikipedia.org/wiki/Hit-or-miss_transform
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1, 1] = 1; a[2:4, 2:4] = 1; a[4:6, 4:6] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> structure1 = np.array([[1, 0, 0], [0, 1, 1], [0, 1, 1]])
>>> structure1
array([[1, 0, 0],
[0, 1, 1],
[0, 1, 1]])
>>> # Find the matches of structure1 in the array a
>>> ndimage.binary_hit_or_miss(a, structure1=structure1).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # Change the origin of the filter
>>> # origin1=1 is equivalent to origin1=(1,1) here
>>> ndimage.binary_hit_or_miss(a, structure1=structure1,\\
... origin1=1).astype(int)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
input = numpy.asarray(input)
if structure1 is None:
structure1 = generate_binary_structure(input.ndim, 1)
if structure2 is None:
structure2 = numpy.logical_not(structure1)
origin1 = _ni_support._normalize_sequence(origin1, input.ndim)
if origin2 is None:
origin2 = origin1
else:
origin2 = _ni_support._normalize_sequence(origin2, input.ndim)
tmp1 = _binary_erosion(input, structure1, 1, None, None, 0, origin1,
0, False)
inplace = isinstance(output, numpy.ndarray)
result = _binary_erosion(input, structure2, 1, None, output, 0,
origin2, 1, False)
if inplace:
numpy.logical_not(output, output)
numpy.logical_and(tmp1, output, output)
else:
numpy.logical_not(result, result)
return numpy.logical_and(tmp1, result)
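# The examples above only pass ``structure1``; as a hedged sketch, an explicit
# miss-structure can be supplied as well, e.g. to pick out isolated pixels
# (set pixels whose 4-neighbourhood is entirely background) in some binary
# image ``a``:
#
#     >>> hit = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
#     >>> miss = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])
#     >>> isolated = ndimage.binary_hit_or_miss(a, structure1=hit,
#     ...                                       structure2=miss)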
def binary_propagation(input, structure=None, mask=None,
output=None, border_value=0, origin=0):
"""
Multi-dimensional binary propagation with the given structuring element.
Parameters
----------
input : array_like
Binary image to be propagated inside `mask`.
structure : array_like, optional
Structuring element used in the successive dilations. The output
may depend on the structuring element, especially if `mask` has
        several connected components. If no structuring element is
provided, an element is generated with a squared connectivity equal
to one.
mask : array_like, optional
Binary mask defining the region into which `input` is allowed to
propagate.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
border_value : int (cast to 0 or 1), optional
Value at the border in the output array.
origin : int or tuple of ints, optional
Placement of the filter, by default 0.
Returns
-------
binary_propagation : ndarray
Binary propagation of `input` inside `mask`.
Notes
-----
This function is functionally equivalent to calling binary_dilation
    with the number of iterations less than one: iterative dilation until
the result does not change anymore.
The succession of an erosion and propagation inside the original image
can be used instead of an *opening* for deleting small objects while
keeping the contours of larger objects untouched.
References
----------
.. [1] http://cmm.ensmp.fr/~serra/cours/pdf/en/ch6en.pdf, slide 15.
.. [2] http://www.qi.tnw.tudelft.nl/Courses/FIP/noframes/fip-Morpholo.html#Heading102
Examples
--------
>>> from scipy import ndimage
>>> input = np.zeros((8, 8), dtype=int)
>>> input[2, 2] = 1
>>> mask = np.zeros((8, 8), dtype=int)
>>> mask[1:4, 1:4] = mask[4, 4] = mask[6:8, 6:8] = 1
>>> input
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> mask
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1]])
>>> ndimage.binary_propagation(input, mask=mask).astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_propagation(input, mask=mask,\\
... structure=np.ones((3,3))).astype(int)
array([[0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]])
>>> # Comparison between opening and erosion+propagation
>>> a = np.zeros((6,6), dtype=int)
>>> a[2:5, 2:5] = 1; a[0, 0] = 1; a[5, 5] = 1
>>> a
array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1]])
>>> ndimage.binary_opening(a).astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> b = ndimage.binary_erosion(a)
>>> b.astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]])
>>> ndimage.binary_propagation(b, mask=a).astype(int)
array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0]])
"""
return binary_dilation(input, structure, -1, mask, output,
border_value, origin)
def binary_fill_holes(input, structure=None, output=None, origin=0):
"""
Fill the holes in binary objects.
Parameters
----------
input : array_like
n-dimensional binary array with holes to be filled
structure : array_like, optional
Structuring element used in the computation; large-size elements
make computations faster but may miss holes separated from the
background by thin regions. The default element (with a square
connectivity equal to one) yields the intuitive result where all
holes in the input have been filled.
output : ndarray, optional
Array of the same shape as input, into which the output is placed.
By default, a new array is created.
origin : int, tuple of ints, optional
Position of the structuring element.
Returns
-------
out : ndarray
Transformation of the initial image `input` where holes have been
filled.
See also
--------
binary_dilation, binary_propagation, label
Notes
-----
The algorithm used in this function consists in invading the complementary
of the shapes in `input` from the outer boundary of the image,
using binary dilations. Holes are not connected to the boundary and are
therefore not invaded. The result is the complementary subset of the
invaded region.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((5, 5), dtype=int)
>>> a[1:4, 1:4] = 1
>>> a[2,2] = 0
>>> a
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> ndimage.binary_fill_holes(a).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
>>> # Too big structuring element
>>> ndimage.binary_fill_holes(a, structure=np.ones((5,5))).astype(int)
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]])
"""
mask = numpy.logical_not(input)
tmp = numpy.zeros(mask.shape, bool)
inplace = isinstance(output, numpy.ndarray)
if inplace:
binary_dilation(tmp, structure, -1, mask, output, 1, origin)
numpy.logical_not(output, output)
else:
output = binary_dilation(tmp, structure, -1, mask, None, 1,
origin)
numpy.logical_not(output, output)
return output
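# Note on the implementation above: the holes are filled by flood filling the
# background from the image border (a binary_dilation of an empty image with
# iterations=-1, border_value=1 and the complement of the input as mask) and
# then taking the logical complement of the invaded region.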
def grey_erosion(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Calculate a greyscale erosion, using either a structuring element,
or a footprint corresponding to a flat structuring element.
Grayscale erosion is a mathematical morphology operation. For the
simple case of a full and flat structuring element, it can be viewed
as a minimum filter over a sliding window.
Parameters
----------
input : array_like
Array over which the grayscale erosion is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
erosion. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale erosion. Non-zero values give the set of
neighbors of the center over which the minimum is chosen.
structure : array of ints, optional
Structuring element used for the grayscale erosion. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the erosion may be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
output : ndarray
Grayscale erosion of `input`.
See also
--------
binary_erosion, grey_dilation, grey_opening, grey_closing
generate_binary_structure, ndimage.minimum_filter
Notes
-----
The grayscale erosion of an image input by a structuring element s defined
over a domain E is given by:
(input+s)(x) = min {input(y) - s(x-y), for y in E}
In particular, for structuring elements defined as
s(y) = 0 for y in E, the grayscale erosion computes the minimum of the
input image inside a sliding window defined by E.
Grayscale erosion [1]_ is a *mathematical morphology* operation [2]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Erosion_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[1:6, 1:6] = 3
>>> a[4,4] = 2; a[2,3] = 1
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 1, 3, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 3, 3, 3, 2, 3, 0],
[0, 3, 3, 3, 3, 3, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_erosion(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 3, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> footprint = ndimage.generate_binary_structure(2, 1)
>>> footprint
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> # Diagonally-connected elements are not considered neighbors
>>> ndimage.grey_erosion(a, size=(3,3), footprint=footprint)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 3, 1, 2, 0, 0],
[0, 0, 3, 2, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
return filters._min_or_max_filter(input, size, footprint, structure,
output, mode, cval, origin, 1)
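# For a flat, full structuring element the formula in the Notes reduces to a
# plain minimum filter; a hedged equivalence sketch:
#
#     >>> from scipy import ndimage
#     >>> x = np.random.rand(5, 7)
#     >>> np.allclose(ndimage.grey_erosion(x, size=(3, 3)),
#     ...             ndimage.minimum_filter(x, size=(3, 3)))
#     True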
def grey_dilation(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Calculate a greyscale dilation, using either a structuring element,
or a footprint corresponding to a flat structuring element.
Grayscale dilation is a mathematical morphology operation. For the
simple case of a full and flat structuring element, it can be viewed
as a maximum filter over a sliding window.
Parameters
----------
input : array_like
Array over which the grayscale dilation is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
dilation. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale dilation. Non-zero values give the set of
neighbors of the center over which the maximum is chosen.
structure : array of ints, optional
Structuring element used for the grayscale dilation. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the dilation may be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_dilation : ndarray
Grayscale dilation of `input`.
See also
--------
binary_dilation, grey_erosion, grey_closing, grey_opening
generate_binary_structure, ndimage.maximum_filter
Notes
-----
The grayscale dilation of an image input by a structuring element s defined
over a domain E is given by:
(input+s)(x) = max {input(y) + s(x-y), for y in E}
In particular, for structuring elements defined as
s(y) = 0 for y in E, the grayscale dilation computes the maximum of the
input image inside a sliding window defined by E.
Grayscale dilation [1]_ is a *mathematical morphology* operation [2]_.
References
----------
.. [1] http://en.wikipedia.org/wiki/Dilation_%28morphology%29
.. [2] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> a[4,4] = 2; a[2,3] = 3
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, footprint=np.ones((3,3)))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> s = ndimage.generate_binary_structure(2,1)
>>> s
array([[False, True, False],
[ True, True, True],
[False, True, False]], dtype=bool)
>>> ndimage.grey_dilation(a, footprint=s)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 1, 3, 2, 1, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.grey_dilation(a, size=(3,3), structure=np.ones((3,3)))
array([[1, 1, 1, 1, 1, 1, 1],
[1, 2, 4, 4, 4, 2, 1],
[1, 2, 4, 4, 4, 2, 1],
[1, 2, 4, 4, 4, 3, 1],
[1, 2, 2, 3, 3, 3, 1],
[1, 2, 2, 3, 3, 3, 1],
[1, 1, 1, 1, 1, 1, 1]])
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
if structure is not None:
structure = numpy.asarray(structure)
structure = structure[tuple([slice(None, None, -1)] *
structure.ndim)]
if footprint is not None:
footprint = numpy.asarray(footprint)
footprint = footprint[tuple([slice(None, None, -1)] *
footprint.ndim)]
input = numpy.asarray(input)
origin = _ni_support._normalize_sequence(origin, input.ndim)
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if footprint is not None:
sz = footprint.shape[ii]
elif structure is not None:
sz = structure.shape[ii]
elif numpy.isscalar(size):
sz = size
else:
sz = size[ii]
if not sz & 1:
origin[ii] -= 1
return filters._min_or_max_filter(input, size, footprint, structure,
output, mode, cval, origin, 0)
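# Mirroring the note after grey_erosion: for a flat, full structuring element
# the dilation is a plain maximum filter; a hedged sketch:
#
#     >>> from scipy import ndimage
#     >>> x = np.random.rand(5, 7)
#     >>> np.allclose(ndimage.grey_dilation(x, size=(3, 3)),
#     ...             ndimage.maximum_filter(x, size=(3, 3)))
#     True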
def grey_opening(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional greyscale opening.
A greyscale opening consists in the succession of a greyscale erosion,
and a greyscale dilation.
Parameters
----------
input : array_like
Array over which the grayscale opening is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
opening. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale opening.
structure : array of ints, optional
Structuring element used for the grayscale opening. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the opening may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_opening : ndarray
Result of the grayscale opening of `input` with `structure`.
See also
--------
binary_opening, grey_dilation, grey_erosion, grey_closing
generate_binary_structure
Notes
-----
The action of a grayscale opening with a flat structuring element amounts
    to smoothing high local maxima, whereas binary opening erases small objects.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(36).reshape((6,6))
>>> a[3, 3] = 50
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 50, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]])
>>> ndimage.grey_opening(a, size=(3,3))
array([[ 0, 1, 2, 3, 4, 4],
[ 6, 7, 8, 9, 10, 10],
[12, 13, 14, 15, 16, 16],
[18, 19, 20, 22, 22, 22],
[24, 25, 26, 27, 28, 28],
[24, 25, 26, 27, 28, 28]])
>>> # Note that the local maximum a[3,3] has disappeared
"""
tmp = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
return grey_dilation(tmp, size, footprint, structure, output, mode,
cval, origin)
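# A hedged property sketch: with a flat structuring element the opening never
# exceeds the input (it clips peaks but cannot create new ones):
#
#     >>> from scipy import ndimage
#     >>> x = np.random.rand(6, 6)
#     >>> (ndimage.grey_opening(x, size=(3, 3)) <= x).all()
#     True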
def grey_closing(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional greyscale closing.
A greyscale closing consists in the succession of a greyscale dilation,
and a greyscale erosion.
Parameters
----------
input : array_like
Array over which the grayscale closing is to be computed.
size : tuple of ints
Shape of a flat and full structuring element used for the grayscale
closing. Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the grayscale closing.
structure : array of ints, optional
Structuring element used for the grayscale closing. `structure`
may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the closing may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
grey_closing : ndarray
Result of the grayscale closing of `input` with `structure`.
See also
--------
binary_closing, grey_dilation, grey_erosion, grey_opening,
generate_binary_structure
Notes
-----
The action of a grayscale closing with a flat structuring element amounts
    to smoothing deep local minima, whereas binary closing fills small holes.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(36).reshape((6,6))
>>> a[3,3] = 0
>>> a
array([[ 0, 1, 2, 3, 4, 5],
[ 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17],
[18, 19, 20, 0, 22, 23],
[24, 25, 26, 27, 28, 29],
[30, 31, 32, 33, 34, 35]])
>>> ndimage.grey_closing(a, size=(3,3))
array([[ 7, 7, 8, 9, 10, 11],
[ 7, 7, 8, 9, 10, 11],
[13, 13, 14, 15, 16, 17],
[19, 19, 20, 20, 22, 23],
[25, 25, 26, 27, 28, 29],
[31, 31, 32, 33, 34, 35]])
>>> # Note that the local minimum a[3,3] has disappeared
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
return grey_erosion(tmp, size, footprint, structure, output, mode,
cval, origin)
def morphological_gradient(input, size=None, footprint=None,
structure=None, output=None, mode="reflect",
cval=0.0, origin=0):
"""
Multi-dimensional morphological gradient.
The morphological gradient is calculated as the difference between a
dilation and an erosion of the input with a given structuring element.
Parameters
----------
input : array_like
        Array over which to compute the morphological gradient.
size : tuple of ints
Shape of a flat and full structuring element used for the mathematical
morphology operations. Optional if `footprint` or `structure` is
provided. A larger `size` yields a more blurred gradient.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the morphology operations. Larger footprints
give a more blurred morphological gradient.
structure : array of ints, optional
Structuring element used for the morphology operations.
`structure` may be a non-flat structuring element.
output : array, optional
        An array used for storing the output of the morphological gradient
may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
morphological_gradient : ndarray
Morphological gradient of `input`.
See also
--------
grey_dilation, grey_erosion, ndimage.gaussian_gradient_magnitude
Notes
-----
For a flat structuring element, the morphological gradient
computed at a given point corresponds to the maximal difference
between elements of the input among the elements covered by the
structuring element centered on the point.
References
----------
.. [1] http://en.wikipedia.org/wiki/Mathematical_morphology
Examples
--------
>>> from scipy import ndimage
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> ndimage.morphological_gradient(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> # The morphological gradient is computed as the difference
>>> # between a dilation and an erosion
>>> ndimage.grey_dilation(a, size=(3,3)) -\\
... ndimage.grey_erosion(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 0, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> a = np.zeros((7,7), dtype=int)
>>> a[2:5, 2:5] = 1
>>> a[4,4] = 2; a[2,3] = 3
>>> a
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 3, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 2, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]])
>>> ndimage.morphological_gradient(a, size=(3,3))
array([[0, 0, 0, 0, 0, 0, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 3, 3, 1, 0],
[0, 1, 3, 2, 3, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 1, 1, 2, 2, 2, 0],
[0, 0, 0, 0, 0, 0, 0]])
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(input, size, footprint, structure, output, mode,
cval, origin)
return numpy.subtract(tmp, output, output)
else:
return (tmp - grey_erosion(input, size, footprint, structure,
None, mode, cval, origin))
def morphological_laplace(input, size=None, footprint=None,
structure=None, output=None,
mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional morphological laplace.
Parameters
----------
input : array_like
Input.
size : int or sequence of ints, optional
See `structure`.
footprint : bool or ndarray, optional
See `structure`.
structure : structure, optional
Either `size`, `footprint`, or the `structure` must be provided.
output : ndarray, optional
An output array can optionally be provided.
mode : {'reflect','constant','nearest','mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled.
For 'constant' mode, values beyond borders are set to be `cval`.
Default is 'reflect'.
cval : scalar, optional
Value to fill past edges of input if mode is 'constant'.
Default is 0.0
origin : origin, optional
The origin parameter controls the placement of the filter.
Returns
-------
morphological_laplace : ndarray
Output
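    Examples
    --------
    A minimal sketch, assuming ``numpy`` is imported as ``np`` as in the other
    examples: for a flat structuring element the morphological Laplace equals
    dilation + erosion - 2 * input.
    >>> from scipy import ndimage
    >>> a = np.zeros((5, 5), dtype=int)
    >>> a[2, 2] = 3
    >>> lap = ndimage.morphological_laplace(a, size=(3, 3))
    >>> expected = (ndimage.grey_dilation(a, size=(3, 3))
    ...             + ndimage.grey_erosion(a, size=(3, 3)) - 2 * a)
    >>> np.array_equal(lap, expected)
    True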
"""
tmp1 = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(input, size, footprint, structure, output, mode,
cval, origin)
numpy.add(tmp1, output, output)
numpy.subtract(output, input, output)
return numpy.subtract(output, input, output)
else:
tmp2 = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
numpy.add(tmp1, tmp2, tmp2)
numpy.subtract(tmp2, input, tmp2)
numpy.subtract(tmp2, input, tmp2)
return tmp2
def white_tophat(input, size=None, footprint=None, structure=None,
output=None, mode="reflect", cval=0.0, origin=0):
"""
Multi-dimensional white tophat filter.
Parameters
----------
input : array_like
Input.
size : tuple of ints
Shape of a flat and full structuring element used for the filter.
Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of elements of a flat structuring element
used for the white tophat filter.
structure : array of ints, optional
Structuring element used for the filter. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the filter may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'.
Default is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default is 0.
Returns
-------
output : ndarray
Result of the filter of `input` with `structure`.
See also
--------
black_tophat
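    Examples
    --------
    A minimal sketch, assuming ``numpy`` is imported as ``np``: with a flat
    structuring element, the white tophat is the input minus its grayscale
    opening.
    >>> from scipy import ndimage
    >>> a = np.zeros((5, 5), dtype=int)
    >>> a[2, 2] = 5
    >>> np.array_equal(ndimage.white_tophat(a, size=(3, 3)),
    ...                a - ndimage.grey_opening(a, size=(3, 3)))
    True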
"""
tmp = grey_erosion(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_dilation(tmp, size, footprint, structure, output, mode, cval,
origin)
return numpy.subtract(input, output, output)
else:
tmp = grey_dilation(tmp, size, footprint, structure, None, mode,
cval, origin)
return input - tmp
def black_tophat(input, size=None, footprint=None,
structure=None, output=None, mode="reflect",
cval=0.0, origin=0):
"""
Multi-dimensional black tophat filter.
Parameters
----------
input : array_like
Input.
size : tuple of ints, optional
Shape of a flat and full structuring element used for the filter.
Optional if `footprint` or `structure` is provided.
footprint : array of ints, optional
Positions of non-infinite elements of a flat structuring element
used for the black tophat filter.
structure : array of ints, optional
Structuring element used for the filter. `structure`
may be a non-flat structuring element.
output : array, optional
An array used for storing the output of the filter may be provided.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The `mode` parameter determines how the array borders are
handled, where `cval` is the value when mode is equal to
'constant'. Default is 'reflect'
cval : scalar, optional
Value to fill past edges of input if `mode` is 'constant'. Default
is 0.0.
origin : scalar, optional
The `origin` parameter controls the placement of the filter.
Default 0
Returns
-------
black_tophat : ndarray
Result of the filter of `input` with `structure`.
See also
--------
white_tophat, grey_opening, grey_closing
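    Examples
    --------
    A minimal sketch, assuming ``numpy`` is imported as ``np``: with a flat
    structuring element, the black tophat is the grayscale closing minus the
    input, so it highlights dark spots such as the pit below.
    >>> from scipy import ndimage
    >>> a = np.full((5, 5), 5, dtype=int)
    >>> a[2, 2] = 0
    >>> np.array_equal(ndimage.black_tophat(a, size=(3, 3)),
    ...                ndimage.grey_closing(a, size=(3, 3)) - a)
    True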
"""
tmp = grey_dilation(input, size, footprint, structure, None, mode,
cval, origin)
if isinstance(output, numpy.ndarray):
grey_erosion(tmp, size, footprint, structure, output, mode, cval,
origin)
return numpy.subtract(output, input, output)
else:
tmp = grey_erosion(tmp, size, footprint, structure, None, mode,
cval, origin)
return tmp - input
def distance_transform_bf(input, metric="euclidean", sampling=None,
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Distance transform function by a brute force algorithm.
    This function calculates the distance transform of the `input`, by
    replacing each foreground (non-zero) element with its
    shortest distance to the background (any zero-valued element).
In addition to the distance transform, the feature transform can
be calculated. In this case the index of the closest background
element is returned along the first axis of the result.
Parameters
----------
input : array_like
Input
metric : str, optional
Three types of distance metric are supported: 'euclidean', 'taxicab'
and 'chessboard'.
sampling : {int, sequence of ints}, optional
This parameter is only used in the case of the euclidean `metric`
distance transform.
The sampling along each axis can be given by the `sampling` parameter
which should be a sequence of length equal to the input rank, or a
single number in which the `sampling` is assumed to be equal along all
axes.
return_distances : bool, optional
The `return_distances` flag can be used to indicate if the distance
transform is returned.
The default is True.
return_indices : bool, optional
The `return_indices` flags can be used to indicate if the feature
transform is returned.
The default is False.
distances : float64 ndarray, optional
Optional output array to hold distances (if `return_distances` is
True).
indices : int64 ndarray, optional
Optional output array to hold indices (if `return_indices` is True).
Returns
-------
distances : ndarray
Distance array if `return_distances` is True.
indices : ndarray
Indices array if `return_indices` is True.
Notes
-----
    This function employs a slow brute force algorithm; see also the
function distance_transform_cdt for more efficient taxicab and
chessboard algorithms.
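    Examples
    --------
    A minimal sketch, assuming ``numpy`` is imported as ``np``: for the
    'taxicab' metric the brute-force result agrees with the chamfer-based
    `distance_transform_cdt`.
    >>> from scipy import ndimage
    >>> a = np.zeros((5, 5), dtype=int)
    >>> a[1:4, 1:4] = 1
    >>> np.array_equal(ndimage.distance_transform_bf(a, metric='taxicab'),
    ...                ndimage.distance_transform_cdt(a, metric='taxicab'))
    True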
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
tmp1 = numpy.asarray(input) != 0
struct = generate_binary_structure(tmp1.ndim, tmp1.ndim)
tmp2 = binary_dilation(tmp1, struct)
tmp2 = numpy.logical_xor(tmp1, tmp2)
tmp1 = tmp1.astype(numpy.int8) - tmp2.astype(numpy.int8)
metric = metric.lower()
if metric == 'euclidean':
metric = 1
elif metric in ['taxicab', 'cityblock', 'manhattan']:
metric = 2
elif metric == 'chessboard':
metric = 3
else:
raise RuntimeError('distance metric not supported')
if sampling is not None:
sampling = _ni_support._normalize_sequence(sampling, tmp1.ndim)
sampling = numpy.asarray(sampling, dtype=numpy.float64)
if not sampling.flags.contiguous:
sampling = sampling.copy()
if return_indices:
ft = numpy.zeros(tmp1.shape, dtype=numpy.int32)
else:
ft = None
if return_distances:
if distances is None:
if metric == 1:
dt = numpy.zeros(tmp1.shape, dtype=numpy.float64)
else:
dt = numpy.zeros(tmp1.shape, dtype=numpy.uint32)
else:
if distances.shape != tmp1.shape:
raise RuntimeError('distances array has wrong shape')
if metric == 1:
if distances.dtype.type != numpy.float64:
raise RuntimeError('distances array must be float64')
else:
if distances.dtype.type != numpy.uint32:
raise RuntimeError('distances array must be uint32')
dt = distances
else:
dt = None
_nd_image.distance_transform_bf(tmp1, metric, sampling, dt, ft)
if return_indices:
if isinstance(indices, numpy.ndarray):
if indices.dtype.type != numpy.int32:
                raise RuntimeError('indices must be of int32 type')
if indices.shape != (tmp1.ndim,) + tmp1.shape:
raise RuntimeError('indices has wrong shape')
tmp2 = indices
else:
tmp2 = numpy.indices(tmp1.shape, dtype=numpy.int32)
ft = numpy.ravel(ft)
for ii in range(tmp2.shape[0]):
rtmp = numpy.ravel(tmp2[ii, ...])[ft]
rtmp.shape = tmp1.shape
tmp2[ii, ...] = rtmp
ft = tmp2
# construct and return the result
result = []
if return_distances and not isinstance(distances, numpy.ndarray):
result.append(dt)
if return_indices and not isinstance(indices, numpy.ndarray):
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
def distance_transform_cdt(input, metric='chessboard',
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Distance transform for chamfer type of transforms.
Parameters
----------
input : array_like
Input
metric : {'chessboard', 'taxicab'}, optional
The `metric` determines the type of chamfering that is done. If the
        `metric` is equal to 'taxicab', a structure is generated using
        generate_binary_structure with a squared distance equal to 1. If
        the `metric` is equal to 'chessboard', a structure is generated
using generate_binary_structure with a squared distance equal to
the dimensionality of the array. These choices correspond to the
common interpretations of the 'taxicab' and the 'chessboard'
distance metrics in two dimensions.
The default for `metric` is 'chessboard'.
return_distances, return_indices : bool, optional
The `return_distances`, and `return_indices` flags can be used to
indicate if the distance transform, the feature transform, or both
must be returned.
If the feature transform is returned (``return_indices=True``),
the index of the closest background element is returned along
the first axis of the result.
The `return_distances` default is True, and the
`return_indices` default is False.
distances, indices : ndarrays of int32, optional
The `distances` and `indices` arguments can be used to give optional
output arrays that must be the same shape as `input`.
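    Examples
    --------
    A minimal sketch, assuming ``numpy`` is imported as ``np``: each non-zero
    element is replaced by its chamfer distance to the nearest zero.
    >>> from scipy import ndimage
    >>> a = np.zeros((5, 5), dtype=int)
    >>> a[1:4, 1:4] = 1
    >>> d = ndimage.distance_transform_cdt(a, metric='chessboard')
    >>> int(d[2, 2]), int(d[1, 1])
    (2, 1)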
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
ft_inplace = isinstance(indices, numpy.ndarray)
dt_inplace = isinstance(distances, numpy.ndarray)
input = numpy.asarray(input)
if metric in ['taxicab', 'cityblock', 'manhattan']:
rank = input.ndim
metric = generate_binary_structure(rank, 1)
elif metric == 'chessboard':
rank = input.ndim
metric = generate_binary_structure(rank, rank)
else:
try:
metric = numpy.asarray(metric)
        except Exception:
raise RuntimeError('invalid metric provided')
for s in metric.shape:
if s != 3:
raise RuntimeError('metric sizes must be equal to 3')
if not metric.flags.contiguous:
metric = metric.copy()
if dt_inplace:
if distances.dtype.type != numpy.int32:
raise RuntimeError('distances must be of int32 type')
if distances.shape != input.shape:
raise RuntimeError('distances has wrong shape')
dt = distances
dt[...] = numpy.where(input, -1, 0).astype(numpy.int32)
else:
dt = numpy.where(input, -1, 0).astype(numpy.int32)
rank = dt.ndim
if return_indices:
sz = numpy.product(dt.shape, axis=0)
ft = numpy.arange(sz, dtype=numpy.int32)
ft.shape = dt.shape
else:
ft = None
_nd_image.distance_transform_op(metric, dt, ft)
dt = dt[tuple([slice(None, None, -1)] * rank)]
if return_indices:
ft = ft[tuple([slice(None, None, -1)] * rank)]
_nd_image.distance_transform_op(metric, dt, ft)
dt = dt[tuple([slice(None, None, -1)] * rank)]
if return_indices:
ft = ft[tuple([slice(None, None, -1)] * rank)]
ft = numpy.ravel(ft)
if ft_inplace:
if indices.dtype.type != numpy.int32:
                raise RuntimeError('indices must be of int32 type')
if indices.shape != (dt.ndim,) + dt.shape:
raise RuntimeError('indices has wrong shape')
tmp = indices
else:
tmp = numpy.indices(dt.shape, dtype=numpy.int32)
for ii in range(tmp.shape[0]):
rtmp = numpy.ravel(tmp[ii, ...])[ft]
rtmp.shape = dt.shape
tmp[ii, ...] = rtmp
ft = tmp
# construct and return the result
result = []
if return_distances and not dt_inplace:
result.append(dt)
if return_indices and not ft_inplace:
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
def distance_transform_edt(input, sampling=None,
return_distances=True, return_indices=False,
distances=None, indices=None):
"""
Exact euclidean distance transform.
In addition to the distance transform, the feature transform can
be calculated. In this case the index of the closest background
element is returned along the first axis of the result.
Parameters
----------
input : array_like
Input data to transform. Can be any type but will be converted
into binary: 1 wherever input equates to True, 0 elsewhere.
sampling : float or int, or sequence of same, optional
Spacing of elements along each dimension. If a sequence, must be of
length equal to the input rank; if a single number, this is used for
all axes. If not specified, a grid spacing of unity is implied.
return_distances : bool, optional
Whether to return distance matrix. At least one of
return_distances/return_indices must be True. Default is True.
return_indices : bool, optional
Whether to return indices matrix. Default is False.
distances : ndarray, optional
Used for output of distance array, must be of type float64.
indices : ndarray, optional
Used for output of indices, must be of type int32.
Returns
-------
distance_transform_edt : ndarray or list of ndarrays
Either distance matrix, index matrix, or a list of the two,
depending on `return_x` flags and `distance` and `indices`
input parameters.
Notes
-----
The euclidean distance transform gives values of the euclidean
distance::
        y_i = sqrt(sum_{i=1..n} (x[i] - b[i])**2)
where b[i] is the background point (value 0) with the smallest
Euclidean distance to input points x[i], and n is the
number of dimensions.
Examples
--------
>>> from scipy import ndimage
>>> a = np.array(([0,1,1,1,1],
... [0,0,1,1,1],
... [0,1,1,1,1],
... [0,1,1,1,0],
... [0,1,1,0,0]))
>>> ndimage.distance_transform_edt(a)
array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
[ 0. , 0. , 1. , 2. , 2. ],
[ 0. , 1. , 1.4142, 1.4142, 1. ],
[ 0. , 1. , 1.4142, 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
With a sampling of 2 units along x, 1 along y:
>>> ndimage.distance_transform_edt(a, sampling=[2,1])
array([[ 0. , 1. , 2. , 2.8284, 3.6056],
[ 0. , 0. , 1. , 2. , 3. ],
[ 0. , 1. , 2. , 2.2361, 2. ],
[ 0. , 1. , 2. , 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
Asking for indices as well:
>>> edt, inds = ndimage.distance_transform_edt(a, return_indices=True)
>>> inds
array([[[0, 0, 1, 1, 3],
[1, 1, 1, 1, 3],
[2, 2, 1, 3, 3],
[3, 3, 4, 4, 3],
[4, 4, 4, 4, 4]],
[[0, 0, 1, 1, 4],
[0, 1, 1, 1, 4],
[0, 0, 1, 4, 4],
[0, 0, 3, 3, 4],
[0, 0, 3, 3, 4]]])
With arrays provided for inplace outputs:
>>> indices = np.zeros(((np.ndim(a),) + a.shape), dtype=np.int32)
>>> ndimage.distance_transform_edt(a, return_indices=True, indices=indices)
array([[ 0. , 1. , 1.4142, 2.2361, 3. ],
[ 0. , 0. , 1. , 2. , 2. ],
[ 0. , 1. , 1.4142, 1.4142, 1. ],
[ 0. , 1. , 1.4142, 1. , 0. ],
[ 0. , 1. , 1. , 0. , 0. ]])
>>> indices
array([[[0, 0, 1, 1, 3],
[1, 1, 1, 1, 3],
[2, 2, 1, 3, 3],
[3, 3, 4, 4, 3],
[4, 4, 4, 4, 4]],
[[0, 0, 1, 1, 4],
[0, 1, 1, 1, 4],
[0, 0, 1, 4, 4],
[0, 0, 3, 3, 4],
[0, 0, 3, 3, 4]]])
"""
if (not return_distances) and (not return_indices):
msg = 'at least one of distances/indices must be specified'
raise RuntimeError(msg)
ft_inplace = isinstance(indices, numpy.ndarray)
dt_inplace = isinstance(distances, numpy.ndarray)
# calculate the feature transform
input = numpy.atleast_1d(numpy.where(input, 1, 0).astype(numpy.int8))
if sampling is not None:
sampling = _ni_support._normalize_sequence(sampling, input.ndim)
sampling = numpy.asarray(sampling, dtype=numpy.float64)
if not sampling.flags.contiguous:
sampling = sampling.copy()
if ft_inplace:
ft = indices
if ft.shape != (input.ndim,) + input.shape:
raise RuntimeError('indices has wrong shape')
if ft.dtype.type != numpy.int32:
raise RuntimeError('indices must be of int32 type')
else:
ft = numpy.zeros((input.ndim,) + input.shape,
dtype=numpy.int32)
_nd_image.euclidean_feature_transform(input, sampling, ft)
# if requested, calculate the distance transform
if return_distances:
dt = ft - numpy.indices(input.shape, dtype=ft.dtype)
dt = dt.astype(numpy.float64)
if sampling is not None:
for ii in range(len(sampling)):
dt[ii, ...] *= sampling[ii]
numpy.multiply(dt, dt, dt)
if dt_inplace:
dt = numpy.add.reduce(dt, axis=0)
if distances.shape != dt.shape:
                raise RuntimeError('distances has wrong shape')
if distances.dtype.type != numpy.float64:
                raise RuntimeError('distances must be of float64 type')
numpy.sqrt(dt, distances)
else:
dt = numpy.add.reduce(dt, axis=0)
dt = numpy.sqrt(dt)
# construct and return the result
result = []
if return_distances and not dt_inplace:
result.append(dt)
if return_indices and not ft_inplace:
result.append(ft)
if len(result) == 2:
return tuple(result)
elif len(result) == 1:
return result[0]
else:
return None
| mit | 3,503,458,796,533,075,000 | 35.862132 | 119 | 0.558495 | false |
kuzmoyev/Google-Calendar-Simple-API | tests/test_attendee.py | 1 | 2820 | from unittest import TestCase
from gcsa.attendee import Attendee, ResponseStatus
from gcsa.serializers.attendee_serializer import AttendeeSerializer
class TestAttendeeSerializer(TestCase):
def test_to_json(self):
attendee = Attendee(
email='[email protected]',
display_name='Guest',
comment='I do not know him',
optional=True,
additional_guests=2,
response_status=ResponseStatus.NEEDS_ACTION
)
attendee_json = AttendeeSerializer.to_json(attendee)
self.assertEqual(attendee.email, attendee_json['email'])
self.assertEqual(attendee.display_name, attendee_json['displayName'])
self.assertEqual(attendee.comment, attendee_json['comment'])
self.assertEqual(attendee.optional, attendee_json['optional'])
self.assertNotIn('resource', attendee_json)
self.assertEqual(attendee.additional_guests, attendee_json['additionalGuests'])
self.assertEqual(attendee.response_status, attendee_json['responseStatus'])
def test_to_object(self):
attendee_json = {
'email': '[email protected]',
'displayName': 'Guest2',
'comment': 'I do not know him either',
'optional': True,
'resource': True,
'additionalGuests': 1,
'responseStatus': ResponseStatus.ACCEPTED
}
attendee = AttendeeSerializer.to_object(attendee_json)
self.assertEqual(attendee_json['email'], attendee.email)
self.assertEqual(attendee_json['displayName'], attendee.display_name)
self.assertEqual(attendee_json['comment'], attendee.comment)
self.assertEqual(attendee_json['optional'], attendee.optional)
self.assertEqual(attendee_json['resource'], attendee.is_resource)
self.assertEqual(attendee_json['additionalGuests'], attendee.additional_guests)
self.assertEqual(attendee_json['responseStatus'], attendee.response_status)
attendee_json_str = """{
"email": "[email protected]",
"displayName": "Guest3",
"comment": "Who are these people?",
"optional": true,
"resource": false,
"additionalGuests": 66,
"responseStatus": "tentative"
}"""
serializer = AttendeeSerializer(attendee_json_str)
attendee = serializer.get_object()
self.assertEqual(attendee.email, "[email protected]")
self.assertEqual(attendee.display_name, "Guest3")
self.assertEqual(attendee.comment, "Who are these people?")
self.assertEqual(attendee.optional, True)
self.assertEqual(attendee.is_resource, False)
self.assertEqual(attendee.additional_guests, 66)
self.assertEqual(attendee.response_status, "tentative")
| mit | -4,699,221,430,244,430,000 | 40.470588 | 87 | 0.648582 | false |
ParanoidNemo/twolame | cloud.py | 1 | 1896 | #! /usr/bin/env python
# Copyright (C) 2015 by Andrea Calzavacca <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import os, sys, re
import configparser
import rc
from spam import clouds
from spam import beshell
from spam import methods
rc_file = os.path.join(beshell.Theme.path(), 'twolamerc')
format_file = os.path.join(beshell.Theme.path(), 'twolame', 'cloud_one.format')
rc.get_rc(rc_file)
css = os.path.join(beshell.Theme.path(), 'style.css.d', rc.CSS)
cloud_info = []
if rc.RCLONE == '1':
parser = configparser.ConfigParser()
rclone_cfg = rc.RCLONE_CONFIG_FILE
read_cfg = parser.read(rclone_cfg)
services_list = []
for item in parser.sections():
service = parser.get(item, 'type')
l = clouds.Rclone.space_info(item, service)
d = methods.create_dict(l)
outstring = methods.insert_data(methods.format_string(format_file), d)
cloud_info.append(outstring)
if rc.MEGA == '1':
l = clouds.Mega.space_info(rc.SIZE)
d = methods.create_dict(l)
outstring = methods.insert_data(methods.format_string(format_file), d)
cloud_info.append(outstring)
info = methods.create_dict(cloud_info)
info['{x}'] = css
| gpl-3.0 | 2,340,330,728,297,430,000 | 30.6 | 79 | 0.699367 | false |
mauriceyap/ccm-assistant | tests/test_alexa_main.py | 1 | 4199 | import unittest
from mock import patch
import alexa_main
TEST_APPLICATION_ID = "aPpLiCaTiOnId12345"
TEST_REQUEST_ID = "rEqUeStId123"
class TestAlexaMain(unittest.TestCase):
def setUp(self):
alexa_main.config.APPLICATION_ID = TEST_APPLICATION_ID
@patch("alexa_main.events.on_session_started")
@patch("alexa_main.events.on_launch")
def test_lambda_handler_throws_error_with_invalid_session_id(self, on_launch,
on_session_started):
test_invalid_application_id = "iAmAnInvalidId00000"
test_session_event_with_invalid_id = {
'session': {
'application': {
'applicationId': test_invalid_application_id
},
'new': False
},
'request': {
'requestId': TEST_REQUEST_ID,
'type': 'LaunchRequest'
},
'context': {}
}
test_context_only_event_with_invalid_id = {
'request': {},
'context': {
'System': {
'application': {
'applicationId': test_invalid_application_id
}
}
}
}
with self.assertRaises(ValueError) as cm_session_event:
alexa_main.lambda_handler(test_session_event_with_invalid_id, None)
with self.assertRaises(ValueError) as cm_context_event:
alexa_main.lambda_handler(test_context_only_event_with_invalid_id, None)
self.assertEqual(cm_session_event.exception.message, "Invalid Application ID")
self.assertEqual(cm_context_event.exception.message, "Invalid Application ID")
on_session_started.assert_not_called()
on_launch.assert_not_called()
@patch("alexa_main.events.on_session_started")
@patch("alexa_main.events.on_launch")
def test_lambda_handler_on_session_started_launch_request(self, on_launch, on_session_started):
test_session_obj = {
'application': {
'applicationId': TEST_APPLICATION_ID},
'new': True
}
test_event = {
'session': test_session_obj,
'request': {
'requestId': TEST_REQUEST_ID,
'type': 'LaunchRequest'
},
'context': {}
}
alexa_main.lambda_handler(test_event, None)
on_session_started.assert_called_once_with({
'requestId': TEST_REQUEST_ID,
}, test_session_obj)
on_launch.assert_called_once()
@patch("alexa_main.events.on_intent")
def test_lambda_handler_intent_request(self, on_intent):
test_request_obj = {
'requestId': TEST_REQUEST_ID,
'type': 'IntentRequest'
}
test_session_obj = {
'application': {
'applicationId': TEST_APPLICATION_ID,
},
'new': True
}
test_context_obj = {
"System": {},
"AudioPlayer": {}
}
test_event = {
'session': test_session_obj,
'request': test_request_obj,
'context': test_context_obj
}
alexa_main.lambda_handler(test_event, None)
on_intent.assert_called_once_with(test_request_obj, test_session_obj, test_context_obj)
@patch("alexa_main.events.on_session_ended")
def test_lambda_handler_session_ended_request(self, on_session_ended):
test_request_obj = {
'requestId': TEST_REQUEST_ID,
'type': 'SessionEndedRequest'
}
test_session_obj = {
'application': {
'applicationId': TEST_APPLICATION_ID,
},
'new': False
}
test_context_obj = {
"System": {},
"AudioPlayer": {}
}
test_event = {
'session': test_session_obj,
'request': test_request_obj,
'context': test_context_obj
}
alexa_main.lambda_handler(test_event, None)
on_session_ended.assert_called_once_with(test_request_obj, test_session_obj)
| mit | -4,778,385,089,163,808,000 | 32.062992 | 99 | 0.535127 | false |
adobe/avmplus | halfmoon/templates/templates.py | 1 | 36785 | #!/usr/bin/env python
# -*- Mode: Python; indent-tabs-mode: nil -*-
# vi: set ts=2 sw=2 expandtab:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import StringIO
from hrdefs import hrdefs
from sexp import parse
from ast import *
from mangle import *
# -----------------------------------------------------
#
# generate template builder code
#
# format argument value
#
def fmtArg(expr):
if expr.kind == 'TypeConst':
# return 'tb.typeConst(%s)' % expr.value.cg()
return expr.value.cgTraits()
elif expr.kind == 'NumConst':
if type(expr.value) == int:
return 'tb.intConst(%i)' % expr.value
elif type(expr.value) == float:
return 'tb.doubleConst(%d)' % expr.value
else:
raise ParseError("unsupported constant '%s'" % expr.value)
elif expr.kind == 'VarRef':
vartype = expr.type()
if vartype == int_type:
return 'tb.intConst(%s)' % expr.value
elif vartype == number_type:
return 'tb.doubleConst(%s)' % expr.value
else:
raise ParseError("var refs of type '%s' not yet supported" % vartype.dump())
else:
return expr.dump()
# generate IfCall code.
# optionally bind the result to a local with the given name.
# numresults specifies the number of results generated by the
# ifcall--we use this only if we generate a merge label.
# merge_label specifies an existing label to use instead of
# generating one. (TODO param count agreement is assumed--verify.)
#
# if we generated a merge label, return its C++ name, otherwise None.
#
def fmtIfCall(ifcall, lhsname, numresults, use_merge_label, indent):
args = [ifcall.condexpr] + ifcall.args
# argc = len(args)
# build arglist expr. collect varargs in local array if needed
# fixc = 1
# varc = argc - fixc
# if varc > 0:
# vaname = '%s_args' % (lhsname if lhsname else '')
# varargs = ', '.join([fmtArg(args[i]) for i in range(fixc, argc)])
# print '%sDef* %s[] = { %s };' % (indent, vaname, varargs)
# arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + [str(varc), vaname])
# else:
# arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + ['0', 'NULL'])
arglist = fmtArglist(lhsname, ifcall.base, args, indent)
# create IfInstr and add to IR
repname = "IfInstr"
lhs = '' if lhsname is None else '%s* %s = (%s*)' % (repname, lhsname, repname)
print '%s%stb.addInstr(new%s(%s));' % (indent, lhs, repname, arglist)
# an (if ...) may have a mixture of (goto ...) and plain exprs at its leaves.
# we need to generate a synthetic label for any plain exprs to jump to.
need_merge = not(ifcall.iftrue.allPathsEscape() and
ifcall.iffalse.allPathsEscape())
# for any of our paths which terminate without an explicit goto,
# we need to generate a goto to a synthetic merge label.
# our caller may have supplied a candidate in use_merge_label;
# if not, generate one now.
#
if need_merge:
if use_merge_label is None:
# create LabelInstr, but don't add to IR
merge_label = lhsname + '_merge'
print '%sLabelInstr* %s = newLabelInstr(%i);' % (indent, merge_label, numresults)
else:
merge_label = use_merge_label
else:
merge_label = None
print ''
fmtArm(ifcall.iftrue, lhsname, merge_label, indent)
fmtArm(ifcall.iffalse, lhsname, merge_label, indent)
return merge_label if use_merge_label is None else None
# helper - generate arm code
#
def fmtArm(arm, lhsname, merge_label, indent):
repname = 'ArmInstr'
armname = lhsname + '_' + arm.name
print '%s{ // %s %s arm' % (indent, lhsname, arm.name)
indent += ' '
# create ArmInstr, add to IR, save to local
# note: "(void)arm;" prevents unused variable warnings
print '%s%s* %s = (%s*)tb.addInstr(%s->arm(%s)); (void)%s;' % (
indent, repname, armname, repname, lhsname, arm.name, armname)
# create a local for each arm param
for i in range(0, len(arm.parnames)):
parname = arm.parnames[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, parname, armname, i, parname)
print ''
# generate arm body statements, up to final instruction
genTemStmts(arm, indent)
# if needed, generate a final goto to synthetic label
if not arm.allPathsEscape():
body = arm.body
retinstr = body[len(body) - 1]
repname = 'GotoInstr'
gotoname = armname + '_exit'
print '%s%s* %s = newGotoStmt(%s); ' % (
indent, repname, gotoname, merge_label)
for i in range(0, len(retinstr.expr.args)):
print '%s%s->args[%i] = %s;' % (
indent, gotoname, i, fmtArg(retinstr.expr.args[i]))
print '%stb.addInstr(%s); ' % (indent, gotoname)
# close the c++ block for this arm
indent = indent[:len(indent) - 2]
print '%s}' % indent
print ''
# helper - indicates the presence of a fixed-arg factory
# method for variable-arg instructions of a given shape.
# NOTE: carnal knowledge of InstrFactory API.
#
def hasFixedArgFactoryMethod(repname, argc):
return (repname == 'StopInstr' and argc == 2) or \
(repname == 'CallStmt2' and (argc == 4 or argc == 3)) or \
(repname == 'CallStmt3' and argc == 5) or \
(repname == 'IfInstr' and (argc == 0 or argc == 1 or argc == 2))
# generate argument list code. For instructions with fixed input
# shape, and common instances of some variable-input instructions,
# a single-shot factory method is available. In these cases we build
# a simple expression list of args.
#
# Other variable-input instructions have factory methods that take
# a count and an array for their variable args (they may still have
# a nonzero number of fixed args; these come first). For these, we
# generate code to create an array of args and assign it to a local,
# then return an expression list containing an arg count and a reference
# to the local.
#
def fmtArglist(lhsname, base, args, indent):
rep = getRep(base)
fixc = rep.shape[EFFECT_IN] + rep.shape[DATA_IN] # fixed arg count
argc = len(args)
varc = argc - fixc
if rep.isvarin() and not hasFixedArgFactoryMethod(rep.name, len(args)):
# build arglist expr. collect va_value in local array if needed
if varc > 0:
va_name = '%s_args' % (lhsname if lhsname else '')
va_value = ', '.join([fmtArg(args[i]) for i in range(fixc, argc)])
print '%sDef* %s[] = { %s };' % (indent, va_name, va_value)
arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + [str(varc), va_name])
else:
arglist = ', '.join([fmtArg(args[i]) for i in range(0, fixc)] + ['0', 'NULL'])
else:
arglist = ', '.join([fmtArg(arg) for arg in args]) # simple arg list
return arglist
# generate Call code.
# optionally bind the result to a local with the given name.
#
def fmtCall(call, defs, indent, lhsname = None):
base, args = call.base, call.args
# format argument list, possibly generating locals along the way
arglist = fmtArglist(lhsname, base, args, indent)
# add kind arg if needed
rep = getRep(base)
if getRepCount(rep, defs) > 1:
arglist = base.hrname() + ', ' + arglist
# create and add instr, maybe store to local
repname = rep.name
lhs = '' if lhsname is None else '%s* %s = (%s*)' % (repname, lhsname, repname)
print '%s%stb.addInstr(new%s(%s));' % (indent, lhs, repname, arglist)
# format call to access the given output of a call.
# Instr API has effect_out(), value_out() for instrs which
# have only one effect or data output (respectively),
# and effectOut(i), value_out(i) for the > 1 cases. Here
# we take an index into the combined list of outputs.
#
def fmtAccessor(call, i):
effects = [t.isEffect() for t in call.types()]
effect = effects[i]
fmtix = '' if effects.count(effect) == 1 else '%s' % effects[:i].count(effect)
return '%s(%s)' % ('effect_out' if effect else 'value_out', fmtix)
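# For example (sketch only): if call.types() describes one effect output
# followed by two data outputs, then fmtAccessor(call, 0) yields 'effect_out()'
# and fmtAccessor(call, 2) yields 'value_out(1)', i.e. the second data output.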
# emit user-defined labelled (local) definitions for the given template.
# syntactic restrictions on templates say that gotos must always terminate
# execution paths, which makes our job here simple--all user-defined labelled
# defs end by jumping to a common endpoint label, given here by endlabelname.
#
def genLabelDefSection(tem, end_label_name, indent):
for label in tem.labels.values():
labelname = 'label_%s' % label.name.lstrip('@')
print '%s{ // label %s in %s' % (indent, labelname, tem.name)
indent += ' '
print '%stb.addInstr(%s);' % (indent, labelname)
# extract label params
for i in range(0, len(label.parnames)):
pname = label.parnames[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, pname, labelname, i, pname)
print ''
# label body
genTemStmts(label, indent)
# build goto end from return
retinstr = label.body[len(label.body) - 1]
repname = 'GotoInstr'
gotoname = labelname + '_exit'
print '%s%s* %s = newGotoStmt(%s);' % (
indent, repname, gotoname, end_label_name)
for i in range(0, len(retinstr.expr.args)):
print '%s%s->args[%i] = %s;' % (
indent, gotoname, i, fmtArg(retinstr.expr.args[i]))
print '%stb.addInstr(%s); ' % (indent, gotoname)
indent = indent[:len(indent) - 2]
print '%s}' % indent
print ''
# generate builder code from template statements
# note that stmtlist is currently either the template body, or
# the template body without the final (return) statement.
#
def genTemStmts(tem, indent):
# first, create label instrs. we need to have them around for gotos,
# but they don't go into the IR until the end of the template.
# note that if we have any labels, we'll need common endpoint label
if len(tem.labels) > 0:
print '%s// labels defined in %s, plus final endpoint' % (
indent, tem.name)
for label in tem.labels.values():
labelname = 'label_%s' % label.name.lstrip('@')
print '%sLabelInstr* %s = newLabelInstr(%i);' % (
indent, labelname, label.numParams())
# endpoint label
# note: we get the number of label params from the number
# of returned results.
end_label = 'label_%s' % tem.genLocalName('end')
print '%sLabelInstr* %s = newLabelInstr(%i);' % (
indent, end_label, tem.body[len(tem.body) - 1].expr.base.numParams())
print ''
else:
end_label = None
# emit mainline statement list
# for stmt in tem.body[:len(tem.body) - 1]:
for i in range(0, len(tem.body) - 1):
stmt = tem.body[i]
if stmt.kind == 'LocalDefs':
# lhs = ...
rhs = stmt.expr
if rhs.kind == 'Call':
# lhs = if/goto/call
basename = rhs.base.name
iname = tem.genLocalName(basename) if len(stmt.names) > 0 else None
if basename == 'if':
# if this if-stmt is the terminal statement of the template,
# and we have user-defined labelled defs, then the if can use our
# end_label as a post-if merge point, if needed.
use_merge_label = end_label if i == len(tem.body) - 2 else None
# fmt_ifcall will return the name of merge label to generate
new_merge_label = fmtIfCall(rhs, iname, len(stmt.names), use_merge_label, indent)
# add generated merge label to IR and extract vars
if new_merge_label:
print '%s// %s merge label, defs' % (indent, iname)
print '%stb.addInstr(%s);' % (indent, new_merge_label)
# create C++ local for each LocalDefs binding
for i in range(0, len(stmt.names)):
varname = stmt.names[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, varname, new_merge_label, i, varname)
elif basename.startswith('@'):
# goto label
labelname = 'label_%s' % rhs.base.name.lstrip('@')
repname = 'GotoInstr'
gotoname = tem.genLocalName('goto')
print '%sGotoInstr* %s = newGotoStmt(%s);' % (
indent, gotoname, labelname)
for i in range(0, len(rhs.args)):
print '%s%s->args[%i] = %s;' % (
indent, gotoname, i, fmtArg(rhs.args[i]))
print '%stb.addInstr(%s);' % (indent, gotoname)
else:
# call
fmtCall(rhs, defs, indent, iname)
# create local for each LocalDefs binding
for i in range(0, len(stmt.names)):
labelname = stmt.names[i]
accessor = fmtAccessor(rhs, i)
print '%sDef* %s = %s->%s; (void)%s;' % (
indent, labelname, iname, accessor, labelname)
print ''
else:
# lhs = non-call
labelname = stmt.names[0]
ldef = stmt.defs[stmt.names[0]]
print '%sDef* %s = %s;' % (indent, labelname, fmtArg(ldef))
print ''
elif stmt.kind == 'Call':
# otherwise it's just an unbound call, no lhs
# NOTE: this doesn't happen at the moment
fmtCall(stmt, defs, indent)
else:
# nothing else at the top level of a template body
raise ParseError('unknown statement type in body list: %s' % stmt.dump())
# wrap it up
# if needed, emit defined label code, plus final endpoint
if len(tem.labels) > 0:
genLabelDefSection(tem, end_label, indent)
# add endpoint label
print '%s// common endpoint block' % indent
print '%stb.addInstr(%s);' % (indent, end_label)
    # create C++ local for each LocalDefs binding from the
# terminal statement
term_stmt = tem.body[len(tem.body) - 2]
for i in range(0, len(term_stmt.names)):
varname = term_stmt.names[i]
print '%sDef* %s = &%s->params[%i]; (void)%s;' % (
indent, varname, end_label, i, varname)
print ''
# finally, add return instr for top-level templates
# TODO verify handling of extracted vars (above) in nested tems
if tem.parent is None:
ret_stmt = tem.body[len(tem.body) - 1]
fmtCall(ret_stmt.expr, defs, indent)
# generate builder switch case for a template
#
def genTemBuilderCase(tem):
hrname = tem.hrname()
print ' case %s: {' % hrname
print '/***'
print tem.dump()
print '***/'
print ''
print ' const Type* in_types[] = { %s };' % ', '.join([t.cgType() for t in tem.partypes])
print ' tb.start(%s, %i, in_types);' % (hrname, len(tem.partypes))
print ''
for i in range(0, len(tem.parnames)):
pname = tem.parnames[i]
print ' Def* %s = tb.paramRef(%i); (void)%s;' % (pname, i, pname)
print ''
genTemStmts(tem, ' ')
# emit labels
# emit return
print ''
print ' break;'
print ' }'
print ''
# generate template builder function impl
def genTemBuilderCases(defs):
for d in templates_only(defs):
genTemBuilderCase(d)
# -----------------------------------------------------
#
# generate type signature builder code
#
# generate input signature builder function
#
def genInputSigBuilder(defs):
print '/// return input type signature for given instruction'
print '///'
print 'const Type** InstrFactory::buildInputSignature(InstrKind kind) {'
print ' switch (kind) {'
for d in defs:
hrname = d.hrname()
print ' case %s: {' % hrname
print ' /* %s */' % d.dumpSig()
siglen = len(d.partypes)
if siglen == 0:
print ' return NULL;'
elif siglen == 1:
print ' return copySig(%s);' % d.partypes[0].cgType()
else:
print ' const Type* input_sig[] = { %s };' % ', '.join([t.cgType() for t in d.partypes])
print ' return copySig(%i, input_sig);' % len(d.partypes)
print ' }'
print ' default: {'
print ' assert(false && "unsupported opcode");'
print ' return NULL;'
print ' }'
print ' } // switch'
print '}'
# generate output signature builder function
#
def genOutputSigBuilder(defs):
print '/// return output type signature for given instruction'
print '///'
print 'const Type** InstrFactory::buildOutputSignature(InstrKind kind) {'
print ' switch (kind) {'
for d in defs:
hrname = d.hrname()
print ' case %s: {' % hrname
print ' /* %s */' % d.dumpSig()
siglen = len(d.rettypes)
if siglen == 0:
print ' return NULL;'
elif siglen == 1:
print ' return copySig(%s);' % d.rettypes[0].cgType()
else:
print ' const Type* output_sig[] = { %s };' % ', '.join([t.cgType() for t in d.rettypes])
print ' return copySig(%i, output_sig);' % len(d.rettypes)
print ' }'
print ' default: {'
print ' assert(false && "unsupported opcode");'
print ' return NULL;'
print ' }'
print ' } // switch'
print '}'
def genSigBuildersImpl(defs):
genInputSigBuilder(defs)
print ''
genOutputSigBuilder(defs)
# ------------------------------------------------------
#
# generate predicate methods
#
# helper: sorted list of all distinct reps
def allReps():
repset = set(replist +
instr_rep_overrides.values() +
shape_rep_overrides.values())
return sorted(repset, key=lambda rep: (rep.shape, rep.name))
# generate is-shape function for a given RepInfo
def genIsShape(defs, shapeinfo, proto = False):
print '/// true if given InstrKind is instance of %s' % shapeinfo.name
if proto:
print 'static bool is%s(InstrKind k);' % shapeinfo.name
else:
print 'bool InstrFactory::is%s(InstrKind k) {' % shapeinfo.name
print ' return instr_attrs[k].shape == %s;' % shapeinfo.enum()
print '}'
print ''
# generate has-template pred
def genHasTemplate(defs, proto = False):
print '/// true if given InstrKind has a template'
if proto:
print 'static bool hasTemplate(InstrKind k);'
else:
print 'bool InstrFactory::hasTemplate(InstrKind k) {'
print ' return instr_attrs[k].hastem;'
print '}'
print ''
# generate InstrFactory predicate impls
def genPredsImpl(defs):
genHasTemplate(defs, False)
for sh in allReps():
genIsShape(defs, sh)
# generate InstrFactory predicate protos
def genPredsProto(defs):
genHasTemplate(defs, True)
for sh in allReps():
genIsShape(defs, sh, True)
# --------------------------------------------------------
#
# generate shape and instr enums
#
# helper: return map of rep names to counts.
# CAUTION - relies on unique rep names. Can fail, but not silently.
# CAUTION - cache assumes single def list over lifetime of CG
rep_counts = None
def getRepCount(rep, defs):
global rep_counts
if rep_counts is None:
repnames = [r.name for r in allReps()]
defrepnames = [getRep(d).name for d in defs]
rep_counts = dict(zip(repnames, [defrepnames.count(repname) for repname in repnames]))
return rep_counts[rep.name]
#
def shapeData(sh):
return "%i, %i, %s" % (
sh[0] + sh[1], sh[2] + sh[3], vararg_names[sh[4]])
#
def genEnums(defs, proto = False):
reps = allReps()
if proto:
print '/// High level intermediate representation (HR) opcodes'
print '///'
print 'enum InstrKind {'
for i in range(0, len(defs)):
d = defs[i]
print ' %s, %s// %s %s' % (
d.hrname(), ' ' * max(0, 24 - len(d.hrname())),
getRep(d).name, 'template' if d.isTemplate() else '')
print ' HR_MAX = %s + 1' % defs[len(defs) - 1].hrname()
print '};'
print ''
print '/// VarargKind designates variability in at most one'
print "/// of an instruction's four argument groups."
print '///'
print 'enum VarargKind {'
for i in [DATA_IN, DATA_OUT, NONE]:
print ' %s,' % vararg_names[i]
print ' VARARGKIND_MAX = %s' % vararg_names[NONE]
print '};'
print ''
print '/// ShapeRep describes the representation of an instruction shape.'
print '/// Note that when varargs are specified, the corresponding'
print '/// member gives a minimum, rather than exact, quantity.'
print '/// For example, a ShapeRep with vararg == %s and datain == 2' % vararg_names[DATA_IN]
print '/// describes instructions with *at least* 2 data inputs.'
print '///'
print 'struct ShapeRep {'
print ' int num_uses; // number of Use inputs'
print ' int num_defs; // number of Def outputs'
print ' VarargKind vararg; // vararg position, if any'
print '};'
print ''
print '/// InstrShape is an enumeration of HR instruction shapes.'
print '/// The representation details of each InstrShape s is described by'
print '/// shape_reps[s].'
print '///'
print 'enum InstrShape {'
for i in range(0, len(reps)):
rep = reps[i]
shapedump = shapeData(rep.shape)
print ' %s, %s// %s %s%i instrs' % (rep.enum(),
' ' * max(0, 24 - len(rep.enum())), shapedump,
' ' * max(0, 24 - len(shapedump)), getRepCount(rep, defs))
print ' SHAPE_MAX = %s + 1' % reps[len(reps) - 1].enum()
print '};'
print ''
if proto:
print '/// shape_reps[] gives the representations of'
print '/// the shapes enumerated by InstrShape.'
print '///'
print 'extern const ShapeRep shape_reps[SHAPE_MAX];'
print ''
else:
print '/// shape_reps[] gives the representations of'
print '/// the shapes enumerated by InstrShape.'
print '///'
print 'extern const ShapeRep shape_reps[SHAPE_MAX] = {'
for rep in reps:
sh = rep.shape
print ' { %s }, %s// %s' % (shapeData(sh),
' ' * max(0, 10 - len(vararg_names[sh[4]])), rep.enum())
print '};'
print ''
if proto:
print '/// InstrAttrs contains attributes specific to (and universal'
print '/// across all instances of) a particular HR instruction.'
print '///'
print 'struct InstrAttrs {'
print ' const char* name; // printable name'
print ' InstrShape shape; // shape (const)'
print ' bool hastem; // true if instruction has a template (const)'
print '};'
print ''
print '/// instr_attrs describes the instructions enumerated in InstrKind.'
print '///'
print 'extern const InstrAttrs instr_attrs[HR_MAX];'
print ''
else:
print '/// instr_attrs describes the instructions enumerated in InstrKind.'
print '///'
print 'extern const InstrAttrs instr_attrs[HR_MAX] = {'
for d in defs:
print ' { "%s", %s%s, %s },' % (d.name, ' ' * max(0, 24 - len(d.hrname())),
getRep(d).enum(), 'true' if d.isTemplate() else 'false')
print '};'
print ''
# generate enum declarations
def genEnumsProto(defs):
genEnums(defs, True)
# generate enum data definitions
def genEnumsImpl(defs):
genEnums(defs, False)
# -----------------------------------------------------
#
# generate kind-driven dispatcher infrastructure
#
# generate kind adapter methods
#
def genKindAdapterMethods(defs):
for d in defs:
rep = getRep(d)
print ('RETURN_TYPE do_%s(%s* i) { return static_cast<SELF_CLASS*>(this)->do_default(i); }'
% (d.name, rep.name))
# generate dispatch function switch cases
#
def genKindAdapterCases(defs):
for d in defs:
hrname = d.hrname()
rep = getRep(d)
print 'case %s: ' % hrname
print ' return a->do_%s(cast<%s>(instr));' % (d.name, rep.name)
# -----------------------------------------------------
#
# generate shape-driven dispatcher infrastructure
#
# generate shape adapter methods
#
def genShapeAdapterMethods(defs):
reps = allReps()
for rep in reps:
print ('RETURN_TYPE do_%s(%s* i) { return static_cast<SELF_CLASS*>(this)->do_default(i); }'
% (rep.name, rep.name))
# generate dispatch function switch cases
#
def genShapeAdapterCases(defs):
reps = allReps()
for rep in reps:
print 'case %s: ' % rep.enum()
print ' return a->do_%s(cast<%s>(instr));' % (rep.name, rep.name)
# -----------------------------------------------------
#
# generate C++ definitions of runtime helper functions
#
cpp_type_map = {
'Atom': 'Atom',
'Boolean': 'BoolKind',
'Class': 'ClassClosure*',
'Env': 'MethodEnv*',
'Int': 'int32_t',
'Method': 'MethodInfo*',
'Name': 'const Multiname*',
'Namespace': 'Namespace*',
'Number': 'double',
'Ord': 'int',
'String': 'String*',
'Effect': 'void',
'Traits': 'Traits*',
'Uint': 'uint32_t',
'Object': 'Atom',
'ScriptObject': 'ScriptObject*',
'Array': 'ArrayObject*',
'VectorInt': 'IntVectorObject*',
'VectorUInt': 'UIntVectorObject*',
'VectorDouble': 'DoubleVectorObject*',
'Function': 'ClassClosure*',
'Bot': 'void',
}
def cpp_typename(t):
name = t.name
if name.endswith('~'):
name = name[0:len(name)-1]
return cpp_type_map[name] if name in cpp_type_map else '?'+name
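# Small illustrative sketch of the mapping above; it is not called anywhere,
# and the fake type class below exists only so the example is self-contained.
def _cpp_typename_demo():
  class _FakeType(object):
    def __init__(self, name):
      self.name = name
  # 'Int' -> 'int32_t'; the trailing '~' of 'Number~' is stripped, giving
  # 'double'; unknown names come back flagged with a leading '?'.
  return [cpp_typename(_FakeType(n)) for n in ('Int', 'Number~', 'Mystery')]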
# true if the shape for d treats the last fixed arg as the first vararg
def has_extra_vararg(d):
return getRep(d).name.startswith('CallStmt')
def make_argsig(d):
sig = [cpp_typename(t) for t in d.partypes if cpp_typename(t) != 'void']
if d.isvarin:
vartype = sig[len(sig)-1]
fixc = getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)
sig = sig[0:fixc] + ['int'] + ['%s*' % vartype]
return ['MethodFrame*'] + sig
def ret_type(d):
sig = [t for t in d.rettypes if not t.isEffect()]
return sig[0] if len(sig) == 1 else None
def make_ret_ctype(d):
t = ret_type(d)
return cpp_typename(t) if t else 'void'
# Make a list of just primitive instructions
def protos_only(defs):
return [d for d in defs if not(d.isTemplate())]
# return a list of all template instructions
def templates_only(defs):
return [d for d in defs if d.isTemplate()]
# Exclude instructions with TopData or Top in their signature,
# or any instruction with 2+ data outputs.
def do_generate_stub(d):
fullsig = [t.name for t in d.partypes + d.rettypes]
return not ('TopData' in fullsig or 'Top' in fullsig or 'State' in fullsig)\
and d.shape[DATA_OUT] in range(2)
# Generate a class with C++ prototypes for each stub.
def gen_stub_protos(defs):
protos = protos_only(defs)
stubs = [d for d in protos if do_generate_stub(d)]
print "namespace halfmoon {"
print "using namespace avmplus;"
print "struct Stubs {"
print " static const int stub_count = %d;" % len(protos)
print
for d in stubs:
print ' // %s' % d.dumpSig()
arg_sig = make_argsig(d)
ret_ctype = make_ret_ctype(d)
print ' static %s do_%s(%s);' % (ret_ctype, d.name, ', '.join(arg_sig))
print
print "};"
print
print "/* One-line implementations, for copy/paste convenience:"
for d in stubs:
arg_sig = make_argsig(d)
ret_ctype = make_ret_ctype(d)
ret_stmt = 'return 0; ' if ret_ctype != 'void' else ''
print ' %s Stubs::do_%s(%s) { assert(false && "%s not implemented"); %s}' %\
(ret_ctype, d.name, ', '.join(arg_sig), d.name, ret_stmt)
print "*/"
print "}"
# Map C++ type names to nanojit::ArgType enums.
def lir_argtype(ctype):
if ctype == 'void':
return 'ARGTYPE_V'
if ctype == 'double':
return 'ARGTYPE_D'
if ctype == 'uint32_t':
return 'ARGTYPE_UI'
if ctype in ['int', 'int32_t', 'BoolKind']:
return 'ARGTYPE_I'
return 'ARGTYPE_P'
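# For example (sketch only): lir_argtype('double') yields 'ARGTYPE_D',
# lir_argtype('uint32_t') yields 'ARGTYPE_UI', lir_argtype('BoolKind') yields
# 'ARGTYPE_I', and pointer-like C++ types such as 'String*' fall through to
# 'ARGTYPE_P'.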
# Generate the LIR typesig builder expression by mapping the C++
# parameter types to LIR ArgType enums.
def lir_typesig(d):
argtypes = [lir_argtype(s) for s in make_argsig(d)]
sigtypes = [lir_argtype(make_ret_ctype(d))] + argtypes
return 'CallInfo::typeSig%d(%s)' % (len(argtypes), ', '.join(sigtypes))
# an opcode is pure if it has no side effects. Since side-effect
# types are mapped to C++ 'void', we scan for void.
def lir_ispure(d):
return 1 if 'void' not in [cpp_typename(t) for t in d.partypes] else 0
def lir_accset(d):
return 'ACCSET_NONE' if lir_ispure(d) else 'ACCSET_ALL'
# generate a table of nanojit CallInfo structures; one for each stub.
def gen_stub_lirtable(defs):
protos = protos_only(defs)
print "namespace halfmoon {"
print "const nanojit::CallInfo LirEmitter::lir_table[] = {"
for d in protos:
if do_generate_stub(d):
print ' { (uintptr_t)&Stubs::do_%s, %s, ABI_CDECL, %d, %s verbose_only(, "%s")},' %\
(d.name, lir_typesig(d), lir_ispure(d), lir_accset(d), d.name)
else:
print ' { 0, 0, ABI_CDECL, 0, ACCSET_NONE verbose_only(, "%s")},' % d.name
print "};"
print
print "const int LirEmitter::stub_fixc[] = {"
for d in protos:
fixc = (getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)) if d.isvarin\
else -1 # -1 means stub has fixed arg count despite variadic shape
print ' %d, // %s' % (fixc, d.name)
print "};"
print "}"
# generate a table of LLVMEmitter StubInfo structures; one for each stub.
def gen_stub_llvmtable(defs):
return gen_stub_llvmtable_common(defs, 32)
def gen_stub_llvmtable64(defs):
return gen_stub_llvmtable_common(defs, 64)
def gen_stub_llvmtable_common(defs,arch):
protos = protos_only(defs)
type_strings = []
type_strings_cxx = {}
def type_string_index(ret, args):
type_string = ";".join([make_llvm_type_string(nm, getAvmMangleTypedefs(arch)) for nm in [ret] + args])
if type_string in type_strings:
return type_strings.index(type_string)
type_strings.append(type_string)
type_strings_cxx[type_string] = '%s ()(%s)' % (ret, ', '.join(args))
return len(type_strings)-1
save_stdout = sys.stdout
sys.stdout = buffer = StringIO.StringIO()
for scheme in mangleSchemes:
kindIndex = 0;
    # We need to print 2 different stub tables:
    # one to handle mangled function names when the target OS is Mac or iOS,
    # and the other to handle mangled function names when the target OS is Windows.
    # The tables are named based on the target OS on which the packaged app will be running.
print "const LLVMModule::StubInfo %sllvm_stub_table[%d] = {" % (scheme.getCppLatch(),len(protos))
print
for d in protos:
print ' // %d: %s' % (kindIndex, d.dumpSig())
kindIndex = kindIndex+1
fixc = (getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)) if d.isvarin\
else -1 # -1 means stub has fixed arg count despite variadic shape
if do_generate_stub(d):
arg_sig = make_argsig(d)
ret_ctype = make_ret_ctype(d)
fn_name = 'halfmoon::Stubs::do_'+d.name
func_attrs = Attribute.STATIC | Attribute.PUBLIC | Attribute.CDECL
mgl_name = scheme.mangle(fn_name, ret_ctype, arg_sig, func_attrs, getAvmMangleTypedefs(arch))
print ' // %s %s(%s)' % (ret_ctype, fn_name, ', '.join(arg_sig))
print ' { "%s", "%s", llvm_stub_types[%d], %s, %d },' % ( d.name, mgl_name,
type_string_index(ret_ctype, arg_sig), 'true' if lir_ispure(d) else 'false', fixc)
print
else:
print ' { "%s", 0, 0, false, %d },' % (d.name, fixc)
print
print "};"
print
sys.stdout = save_stdout
print "namespace compile_abc {"
print "static const int llvm_stub_count = %d;" % len(protos)
print "static const char* llvm_stub_types[%d] = {" % len(type_strings)
typeIndex = 0
for t in type_strings:
print ' // %d: %s' % (typeIndex, type_strings_cxx[t])
typeIndex = typeIndex+1
print ' "%s",' % (t)
print
print "};"
print
print buffer.getvalue()
print "}"
# return the interpreter getter expression for type t
interp_getter_name = {
'double' : 'interp->getDouble',
'int' : 'interp->getOrdinal',
'int32_t' : 'interp->getInt',
'uint32_t' : 'interp->getUint',
'BoolKind' : 'interp->getBool',
'String*' : 'interp->getString',
'Namespace*' : 'interp->getNs',
'Atom' : 'interp->getAtom',
'Traits*' : 'interp->getTraits',
'MethodEnv*' : 'interp->getEnv',
'MethodInfo*' : 'interp->getMethod',
'const Multiname*' : 'interp->getName',
'ScriptObject*' : 'interp->getObject',
'ArrayObject*' : '(ArrayObject*)interp->getObject',
'IntVectorObject*' : '(IntVectorObject*)interp->getObject',
'UIntVectorObject*' : '(UIntVectorObject*)interp->getObject',
'DoubleVectorObject*' : '(DoubleVectorObject*)interp->getObject',
'ClassClosure*' : '(ClassClosure*)interp->getObject',
}
def interp_getter(t):
return interp_getter_name[cpp_typename(t)]
# return the Interpreter Value constructor name for the return type of d
def interp_value(d):
ct = cpp_typename(ret_type(d))
return 'AtomValue' if ct == 'Atom'\
else 'OrdValue' if ct == 'int'\
else 'Value'
# generate a class of helper functions for the interpreter. Each one
# unpacks arguments, invokes the stub, then saves the result Value.
# var-in instructions are handled by passing a count and pointer to
# values. void stubs are handled by not saving the result.
def gen_stub_callers(defs):
protos = protos_only(defs)
stubs = [d for d in protos if do_generate_stub(d)]
print "namespace halfmoon {"
print "class StubCaller {"
print " public:"
for d in stubs:
exprs = ['%s(instr->use(%d))' % (interp_getter(d.partypes[i]), i) for i in range(len(d.partypes))\
if cpp_typename(d.partypes[i]) != 'void']
print ' // %s' % d.dumpSig()
print ' static void do_%s(Interpreter* interp, %s* instr) {' % (d.name, getRep(d).name)
if d.isvarin:
fixc = getRep(d).shape[DATA_IN] - (1 if has_extra_vararg(d) else 0)
var_type = d.partypes[len(d.partypes)-1]
var_ctype = cpp_typename(var_type)
vargetter = interp_getter(var_type)
print ' int argc = instr->arg_count();'
print ' Use* arg_uses = instr->args();'
print ' %s* args = (%s*)interp->args_out_;' % (var_ctype, var_ctype)
print ' for (int i = 0; i < argc; ++i)'
print ' args[i] = %s(arg_uses[i]);' % (vargetter)
exprs = exprs[0:fixc] + ['argc, args']
exprs = ['&interp->frame_'] + exprs
arg_expr = ',\n '.join(exprs)
ret_ctype = make_ret_ctype(d)
if ret_ctype == 'void':
print ' Stubs::do_%s(%s);' % (d.name, arg_expr)
print ' (void)interp;'
else:
print ' interp->resultVal(instr->value_out()) = %s(Stubs::do_%s(%s));' %\
(interp_value(d), d.name, arg_expr)
if len(exprs) == 1 and ret_ctype == 'void':
print ' (void)instr;'
print ' }'
print
print "};"
print
# generate a table with pointers to the helper functions, indexed by InstrKind
print "const Interpreter::StubCall Interpreter::stub_table[] = {"
for d in protos:
if do_generate_stub(d):
print ' (StubCall)&StubCaller::do_%s,' % d.name
else:
print ' 0, // %s' % d.name
print "};"
print
print "}"
# End generation of helpers for stubs
# --------------------------------------------------------
#
# generator harness and helpers
#
def printheader():
print '///'
print '/// generated by templates.py -- do not edit'
print '///'
print
gendir = "../generated"
def genfile(defs, gen, filename):
if not(os.path.exists(gendir)):
os.mkdir(gendir)
f = open('%s/%s' % (gendir, filename), 'wb')
try:
sys.stdout = f
printheader()
gen(defs)
finally:
f.close()
sys.stdout = sys.__stdout__
# group defs into primitives-then-templates
def sortdefs(defs):
return protos_only(defs) + templates_only(defs)
def gendefs(defs):
defs = sortdefs(defs)
genfile(defs, genEnumsProto, "InstrFactory_defs_proto.hh")
genfile(defs, genEnumsImpl, "InstrFactory_defs_impl.hh")
genfile(defs, genPredsProto, "InstrFactory_preds_proto.hh")
genfile(defs, genPredsImpl, "InstrFactory_preds_impl.hh")
genfile(defs, genSigBuildersImpl, "InstrFactory_signatures_impl.hh")
genfile(defs, genTemBuilderCases, "InstrFactory_buildTemplate_cases.hh")
genfile(defs, genKindAdapterMethods, "KindAdapter_methods.hh")
genfile(defs, genKindAdapterCases, "KindAdapter_cases.hh")
genfile(defs, genShapeAdapterMethods, "ShapeAdapter_methods.hh")
genfile(defs, genShapeAdapterCases, "ShapeAdapter_cases.hh")
genfile(defs, gen_stub_protos, "Stub_protos.hh")
genfile(defs, gen_stub_lirtable, "Stub_lirtable.hh")
genfile(defs, gen_stub_llvmtable, "Stub_llvmtable.hh")
genfile(defs, gen_stub_llvmtable64, "Stub_llvmtable_64.hh")
genfile(defs, gen_stub_callers, "Stub_callers.hh")
def trace(s):
save = sys.stdout
sys.stdout = sys.__stdout__
print s
sys.stdout = save
# -----------------------------------------------------
#
# main
#
# dump processed defs
def dump(defs):
for d in defs:
print '\n' + d.dump()
# generator functions callable from the command line
gens = {
'defs': gendefs, # generate code
'dump': dump # dump internal reps
}
if len(sys.argv) > 1 and sys.argv[1] in gens:
gen = gens[sys.argv[1]]
else:
print "Error: must specify defs or dump as command-line argument"
sys.exit(1)
try:
sexprs = [sexpr for sexpr in parse(hrdefs) if isValid(sexpr)]
defs = [toDef(sexpr) for sexpr in sexprs]
process(defs)
gen(defs)
except ParseError as e:
print 'parse error: %s' % e.message()
sys.exit(1)
| mpl-2.0 | 1,539,823,929,326,712,300 | 33.15506 | 106 | 0.617453 | false |
tshi04/machine-learning-codes | headGAN-ff/model.py | 1 | 2150 | import re
import numpy as np
import tensorflow as tf
class discriminator(object):
def __init__(self):
self.name = 'keydis'
def __call__(self, input_data, reuse=False):
with tf.variable_scope(self.name) as self.ds:
if reuse:
self.ds.reuse_variables()
nf_len1 = 3
nf_filter = 32
input_data = tf.transpose(input_data,[0,3,2,1])
wdvec_dim = int(input_data.shape[1])
seq_len = int(input_data.shape[2])
input_channel = int(input_data.shape[3])
w_conv1 = tf.Variable(tf.truncated_normal([wdvec_dim,nf_len1,input_channel,nf_filter],stddev=0.1),name='w_conv1')
b_conv1 = tf.Variable(tf.truncated_normal([nf_filter]),name='b_conv1')
h_conv1 = tf.nn.conv2d(input=input_data,filter=w_conv1,strides=[1,1,1,1],padding='VALID',name='conv1')
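            # The filter spans the whole word-vector dimension, so with VALID
            # padding the convolution collapses that axis and slides only along
            # the sequence: output is [batch, 1, seq_len-nf_len1+1, nf_filter].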
h_conv1 = tf.add(h_conv1,b_conv1,name='h_conv1')
h_flat = tf.reshape(h_conv1,[-1,(seq_len-nf_len1+1)*nf_filter],name='h_flat')
w_fc1 = tf.Variable(tf.truncated_normal([(seq_len-nf_len1+1)*nf_filter,1],stddev=0.1),name='w_fc1')
logits = tf.matmul(h_flat,w_fc1)
return logits
@property
def vars(self):
return tf.contrib.framework.get_variables(self.ds)
class generator(object):
def __init__(self):
self.name = 'keygen'
def __call__(self, input_data, reuse=False):
with tf.variable_scope(self.name) as self.gs:
if reuse:
self.gs.reuse_variables()
nf_len1 = 20 # filter length
nf_filter = 100 # number of filters
wdvec_dim = int(input_data.shape[1])
input_channel = int(input_data.shape[3])
w_conv1 = tf.Variable(tf.truncated_normal([wdvec_dim,nf_len1,input_channel,nf_filter],stddev=0.1),name='w_conv1')
b_conv1 = tf.Variable(tf.truncated_normal([nf_filter]),name='b_conv1')
h_conv1 = tf.nn.conv2d(input=input_data, filter=w_conv1,strides=[1,1,20,1],padding='VALID',name='conv1')
h_conv1 = tf.add(h_conv1,b_conv1,name='h_conv1')
return h_conv1
@property
def vars(self):
return tf.contrib.framework.get_variables(self.gs)
| gpl-3.0 | 1,230,414,954,405,514,500 | 38.090909 | 118 | 0.61814 | false |
Zanzibar82/streamondemand.test | servers_sports/ucaster.py | 1 | 3368 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand - XBMC Plugin
# Conector para ucaster
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
DEBUG = config.get_setting("debug")
def find_url_play(data, headers):
logger.info("[ucaster.py] find_url_play")
'''
<script type='text/javascript'> width=726, height=500, channel='danysportscucu', g='1';</script><script type='text/javascript' src='http://www.ucaster.eu/static/scripts/ucaster.js'></script>
<script type="text/javascript">
<!--//--><![CDATA[// ><!--
width=610, height=470, channel='tashsport02', g='1';
//--><!]]>
</script><script type="text/javascript" src="http://www.ucaster.eu/static/scripts/ucaster.js"></script>
'''
fid = scrapertools.find_single_match (data, "channel='([^']+)'[^<]+</script><script type='text/javascript' src='http://www.ucaster.eu/static/scripts/ucaster.js'")
if fid == '':
fid = scrapertools.find_single_match (data, "channel='([^']+)'[^<]+<[^<]+</script><script type=['\"]text/javascript['\"] src=['\"]http://www.ucaster.eu/static/scripts/ucaster.js['\"]")
if fid == '':
return ''
pageurl = 'http://www.embeducaster.com/embedded/%s/1/726/500' % fid #http://www.embeducaster.com/embedded/danysportscucu/1/726/500
data2 = scrapertools.cachePage(pageurl, headers=headers)
if (DEBUG): logger.info("data2="+data2)
'''
<div class="player_div" align="center">
<span>
<script type="text/javascript" src="/static/scripts/swfobject.js"></script>
<div id="flashcontent">
<strong>You need to upgrade your Flash Player in order to watch movies from ucaster.eu</strong>
</div>
<script type="text/javascript">
var so = new SWFObject("/static/scripts/fplayer.swf", "fplayer", "726", "500", "9");
so.addParam('allowfullscreen','true');
so.addParam('allowscriptaccess','always');
so.addParam('wmode','transparent');
so.addParam('FlashVars', 'id=78955&s=danysportscucu&g=1&a=1&l=Dany Rojadirecta.me');
so.write("flashcontent");
</script>
</span>
</div>
'''
data3 = scrapertools.cache_page('http://www.embeducaster.com:1935/loadbalancer',headers=headers)
rtmpurl = 'rtmp://' + scrapertools.find_single_match (data3, "redirect=(.*)") + '/live'
idvalue, svalue = scrapertools.find_single_match (data2, "'FlashVars', 'id=([^&]+)&s=([^&]+)")
swfurl = 'http://www.embeducaster.com' + scrapertools.find_single_match (data2, 'new SWFObject\("([^"]+)"')
url = '%s playpath=%s?id=%s swfUrl=%s swfVfy=1 conn=S:OK live=1 pageUrl=%s' % (rtmpurl, svalue, idvalue, swfurl, pageurl)
#url = '%s playpath=%s?id=%s swfUrl=%s conn=S:OK live=1 timeout=20 pageUrl=%s --live' % (rtmpurl, svalue, idvalue, swfurl, pageurl)
return url
| gpl-3.0 | -695,613,796,193,651,600 | 48.529412 | 192 | 0.554632 | false |
Ultimaker/Cura | plugins/UltimakerMachineActions/UMOUpgradeSelection.py | 1 | 1985 | # Copyright (c) 2017 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Settings.ContainerRegistry import ContainerRegistry
from cura.MachineAction import MachineAction
from PyQt5.QtCore import pyqtSlot, pyqtSignal, pyqtProperty
from UM.i18n import i18nCatalog
from UM.Application import Application
catalog = i18nCatalog("cura")
from cura.Settings.CuraStackBuilder import CuraStackBuilder
class UMOUpgradeSelection(MachineAction):
"""The Ultimaker Original can have a few revisions & upgrades.
This action helps with selecting them, so they are added as a variant.
"""
def __init__(self):
super().__init__("UMOUpgradeSelection", catalog.i18nc("@action", "Select upgrades"))
self._qml_url = "UMOUpgradeSelectionMachineAction.qml"
def _reset(self):
self.heatedBedChanged.emit()
heatedBedChanged = pyqtSignal()
@pyqtProperty(bool, notify = heatedBedChanged)
def hasHeatedBed(self):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
return global_container_stack.getProperty("machine_heated_bed", "value")
@pyqtSlot(bool)
def setHeatedBed(self, heated_bed = True):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if global_container_stack:
# Make sure there is a definition_changes container to store the machine settings
definition_changes_container = global_container_stack.definitionChanges
if definition_changes_container == ContainerRegistry.getInstance().getEmptyInstanceContainer():
definition_changes_container = CuraStackBuilder.createDefinitionChangesContainer(
global_container_stack, global_container_stack.getId() + "_settings")
definition_changes_container.setProperty("machine_heated_bed", "value", heated_bed)
self.heatedBedChanged.emit()
| lgpl-3.0 | 2,034,681,844,096,167,200 | 42.152174 | 107 | 0.723929 | false |
zeal4u/FCA_Faceted_Search | bin/web_backend.py | 1 | 2649 | # -*- coding: utf-8 -*-
__author__ = 'jsz'
__version__ = 0.1
import web
import json
from search_engine import SearchService
from search_engine import FacetEncoder
from models import BookEncoder
# APIs exposed to front end
urls = (
"/keyWordsSearch","KeyWordsSearch",
"/facetSearch","FacetSearch",
"/expandSearch","ExpandSearch",
"/defineSearch","DefineSearch",
"/historySummary","HistorySummary",
"/(.*)", "Index"
)
app = web.application(urls, globals())
class Index:
def GET(self, url):
u"""
        :param url: required by the catch-all route pattern; omitting the parameter raises an exception
"""
raise web.redirect('/static/web-angular/app/index.html')
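# Serialize a SearchResult for the front end: facets and content are run
# through their custom JSON encoders (FacetEncoder / BookEncoder) via a
# dumps/loads round trip, then the combined dict is dumped as one document.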
def json_encoder(search_result):
result = {
'facets': json.loads(json.dumps(search_result.facets, cls=FacetEncoder)),
'content': json.loads(json.dumps(search_result.content, cls=BookEncoder))
}
return json.dumps(result)
class KeyWordsSearch:
def GET(self):
u"""
        Backend endpoint for keyword search.
        :param key_words: String, keywords passed from the front end to search with
        :return: SearchResult, result set of the keyword search
"""
key_words_str = web.input()['key_words']
key_words = key_words_str.split()
ip = web.ctx.ip
service = SearchService.get_service_by_ip(ip)
search_result = service.key_words_search(key_words)
return json_encoder(search_result)
class FacetSearch:
def GET(self):
u"""
        Faceted search endpoint.
        :param new_attr: int, newly added facet attribute
        :return: SearchResult, result set of the faceted search
"""
new_attr = int(web.input()['new_attr'])
ip = web.ctx.ip
service = SearchService.get_service_by_ip(ip)
search_result = service.facet_search(new_attr)
return json_encoder(search_result)
class ExpandSearch:
def GET(self):
u"""
        Generalize the search from the current query node.
        :param degree: float, degree of generalization in the range 0~1
        :return: SearchResult, result set of the generalized search
"""
return "ExpandSearch"
class DefineSearch:
def GET(self):
u"""
        Refine the search from the current query node.
        :param degree: float, degree of refinement in the range 0~1
        :return: SearchResult, result set of the refined search
"""
return "DefineSearch"
class HistorySummary:
def GET(self):
u"""
        Recommend results based on the query history.
        :return: SearchResult, result set recommended from the search history
"""
return "HistorySummary"
if __name__ == '__main__':
app.run()
| mit | 5,996,236,848,290,108,000 | 21.155963 | 81 | 0.575983 | false |
and3rson/isc | examples/test_client.py | 1 | 2134 | #!/usr/bin/env python3.6
from isc.client import Client
from threading import Thread, Event
from time import time
from random import random
import logging
ITERATIONS = 1
CONN_POOL_SIZE = 1
COUNT = 1000
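# Benchmark knobs: COUNT worker threads share CONN_POOL_SIZE client
# connections round-robin, each issuing ITERATIONS add() calls with wait=0.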
class Process(Thread):
def __init__(self, client):
super(Process, self).__init__()
self.proceed_evt = Event()
self.client = client
self.timediff = 0
def run(self):
self.proceed_evt.wait()
for i in range(0, ITERATIONS):
start = time()
self.client.example.add(random(), random(), wait=0)
self.timediff += int((time() - start) * 1000)
def create_client():
client = Client(exchange='isctest')
client.set_logging_level(logging.INFO)
client.start()
return client
"""
client = create_client()
client.example.start_tracking()
"""
print('Creating', CONN_POOL_SIZE, 'connections')
clients = []
events = []
for _ in range(CONN_POOL_SIZE):
event = Event()
client = create_client()
clients.append(client)
events.append(event)
client.on_connect += event.set
for i, (client, event) in enumerate(zip(clients, events)):
print('Waiting for client', i, 'to become ready')
event.wait()
print('Client', i, 'ready')
print('Creating', COUNT, 'requesters')
threads = []
for i in range(0, COUNT):
threads.append(Process(clients[i % CONN_POOL_SIZE]))
print('Starting workers')
for thread in threads:
thread.start()
print('Starting attack ({} requests per worker)...'.format(ITERATIONS))
for thread in threads:
thread.proceed_evt.set()
start = time()
for thread in threads:
thread.join()
timediff = int((time() - start) * 1000)
print('Done in {}ms'.format(timediff))
print('avg: {}ms, min: {}ms, max: {}ms'.format(
sum([thread.timediff / ITERATIONS for thread in threads]) / len(threads),
min([thread.timediff / ITERATIONS for thread in threads]),
max([thread.timediff / ITERATIONS for thread in threads])
))
"""
print('Final server summary:')
summary = client.example.get_summary()
for line in summary:
print(line)
"""
for client in clients:
client.stop()
| gpl-3.0 | 511,173,291,182,243,840 | 20.77551 | 77 | 0.64761 | false |
Hoikas/korman | korman/exporter/logger.py | 1 | 2584 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import os.path
import sys
class ExportAnalysis:
"""This is used to collect artist action items from the export process. You can warn about
portability issues, possible oversights, etc. The benefit here is that the user doesn't have
to look through all of the gobbledygook in the export log.
"""
_porting = []
_warnings = []
def save(self):
# TODO
pass
def port(self, message, indent=0):
self._porting.append(message)
print(" " * indent, end="")
print("PORTING: {}".format(message))
def warn(self, message, indent=0):
self._warnings.append(message)
print(" " * indent, end="")
print("WARNING: {}".format(message))
class ExportLogger:
"""Yet Another Logger(TM)"""
def __init__(self, ageFile):
# Make the log file name from the age file path -- this ensures we're not trying to write
# the log file to the same directory Blender.exe is in, which might be a permission error
path, ageFile = os.path.split(ageFile)
ageName, _crap = os.path.splitext(ageFile)
fn = os.path.join(path, "{}_export.log".format(ageName))
self._file = open(fn, "w")
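        # Mirror the underlying file object's attributes onto the logger so it
        # can stand in wherever a file-like object is expected.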
for i in dir(self._file):
if not hasattr(self, i):
setattr(self, i, getattr(self._file, i))
def __enter__(self):
self._stdout, sys.stdout = sys.stdout, self._file
self._stderr, sys.stderr = sys.stderr, self._file
def __exit__(self, type, value, traceback):
sys.stdout = self._stdout
sys.stderr = self._stderr
def flush(self):
self._file.flush()
self._stdout.flush()
self._stderr.flush()
def write(self, str):
self._file.write(str)
self._stdout.write(str)
def writelines(self, seq):
self._file.writelines(seq)
self._stdout.writelines(seq)
| gpl-3.0 | -5,733,352,604,971,628,000 | 32.558442 | 99 | 0.627709 | false |
rwl/openpowersystem | ucte/state_variables/sv_shunt_compensator_sections.py | 1 | 3822 | #------------------------------------------------------------------------------
# Copyright (C) 2009 Richard Lincoln
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation; version 2 dated June, 1991.
#
# This software is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANDABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#------------------------------------------------------------------------------
""" State variable for the number of sections in service for a shunt compensator. A SvShuntCompensator is always associated with any instance of ShuntCompensator. The sections or continuousSections values are specified depending upon the value of the associated RegulatingControl.discrete attribute. If no RegulatingControl is associated, then the ShuntCompensator is treated as discrete. In discrete mode, the 'sections' attribute must be present. In the not 'discrete' mode (continuous mode) the 'continuousSections' attribute must be present. In the case the Terminal.connected value is 'false' the specificed number of sections is not meaningful to the powerflow solution and powerflow implementations should interpret this as zero injection. Note that an SvShuntCompensatorSections should be supplied even for ShuntCompensators whose Terminal.connected status is 'false' to keep total number of ShuntCompensator and SvShuntCompensatorSection objects in the model the same.
"""
# <<< imports
# @generated
from ucte.state_variables.state_variable import StateVariable
from google.appengine.ext import db
# >>> imports
class SvShuntCompensatorSections(StateVariable):
""" State variable for the number of sections in service for a shunt compensator. A SvShuntCompensator is always associated with any instance of ShuntCompensator. The sections or continuousSections values are specified depending upon the value of the associated RegulatingControl.discrete attribute. If no RegulatingControl is associated, then the ShuntCompensator is treated as discrete. In discrete mode, the 'sections' attribute must be present. In the not 'discrete' mode (continuous mode) the 'continuousSections' attribute must be present. In the case the Terminal.connected value is 'false' the specificed number of sections is not meaningful to the powerflow solution and powerflow implementations should interpret this as zero injection. Note that an SvShuntCompensatorSections should be supplied even for ShuntCompensators whose Terminal.connected status is 'false' to keep total number of ShuntCompensator and SvShuntCompensatorSection objects in the model the same.
"""
# <<< sv_shunt_compensator_sections.attributes
# @generated
# The number of sections in service as a continous variable.
continuous_sections = db.FloatProperty()
# >>> sv_shunt_compensator_sections.attributes
# <<< sv_shunt_compensator_sections.references
# @generated
# The shunt compensator for which the state applies.
shunt_compensator = db.ReferenceProperty(db.Model,
collection_name="_sv_shunt_compensator_sections_set") # sv_shunt_compensator_sections
# >>> sv_shunt_compensator_sections.references
# <<< sv_shunt_compensator_sections.operations
# @generated
# >>> sv_shunt_compensator_sections.operations
# EOF -------------------------------------------------------------------------
| agpl-3.0 | -7,719,362,087,847,217,000 | 72.5 | 996 | 0.736002 | false |
Kotaimen/stonemason | stonemason/pyramid/geo/tms.py | 1 | 10943 | # -*- encoding: utf-8 -*-
"""
stonemason.pyramid.geo.geosys
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Geographic system
"""
__author__ = 'kotaimen'
__date__ = '3/20/15'
import collections
from osgeo import osr
from osgeo import ogr
osr.UseExceptions()
ogr.UseExceptions()
from stonemason.pyramid import Pyramid
from stonemason.pyramid import TileIndex, MetaTileIndex
class TileMapError(RuntimeError):
pass
_Envelope = collections.namedtuple('Envelope', 'left bottom right top')
class Envelope(_Envelope):
"""A rectangular area on the projection surface, defined by two corner
points ``(left, bottom, right, top)``.
"""
@staticmethod
def from_ogr(e):
"""Create a envelope from a call from :func:`ogr.Geometry.GetEnvelope()`,
which is defined by ``(minx, maxx, miny, maxy)``.
"""
return Envelope(e[0], e[2], e[1], e[3])
def to_geometry(self, srs=None):
"""Convert the envelope to a :class:`ogr.Geometry` instance, with
specified spatial reference system"""
left, bottom, right, top = self
bbox = (left, bottom, right, bottom,
right, top, left, top, left, bottom)
wkt = 'POLYGON((%.9f %.9f, %.9f %.9f, %.9f %.9f, %.9f %.9f, %.9f %.9f))' % bbox
return ogr.CreateGeometryFromWkt(wkt, srs)
class TileMapSystem(object):
"""Defines geographic attributes of a `pyramid` tile map system.
>>> from stonemason.pyramid import Pyramid, MetaTileIndex
>>> from stonemason.pyramid.geo import TileMapSystem
>>> pyramid = Pyramid(geogcs='EPSG:4326', projcs='EPSG:3857')
>>> tms = TileMapSystem(pyramid)
>>> tms.geogcs # doctest: +ELLIPSIS
<osgeo.osr.SpatialReference; proxy of <Swig Object of type 'OSRSpatialReferenceShadow *' at ...> >
>>> tms.pyramid.geogcs
'+proj=longlat +datum=WGS84 +no_defs '
>>> tms.forward_projection # doctest: +ELLIPSIS
<osgeo.osr.CoordinateTransformation; proxy of <Swig Object of type 'OSRCoordinateTransformationShadow *' at ...> >
>>> index = MetaTileIndex(4, 12, 12, 8)
>>> tms.calc_tile_envelope(index)
Envelope(left=0.0, bottom=-20037508.34, right=20037508.34, top=0.0)
.. note:: `TileMapSystem` uses `GDAL <http://www.gdal.org/>`_ for spatial
calculations, the actual list of supported spatial references and
coordinate transforms depends on `GDAL` installation and may vary
between distributions.
.. seealso:: `Geometry`_, `SpatialReference`_, `CoordinateTransformation`_
.. _Geometry: http://gdal.org/python/osgeo.ogr.Geometry-class.html
.. _SpatialReference: http://gdal.org/python/osgeo.osr.SpatialReference-class.html
.. _CoordinateTransformation: http://gdal.org/python/osgeo.osr.CoordinateTransformation-class.html
:param pyramid: The `pyramid` defines the tile map system, the following
attributes are used to create `TileMapSystem`:
``Pyramid.geogcs``
Geographic coordinate system, can be any string supported by
:func:`~osgeo.ogr.SpatialReference.SetFromUserInput`.
``Pyramid.projcs``
Projection coordinate system, can be any string supported by
:func:`~osgeo.ogr.SpatialReference.SetFromUserInput`.
When set to ``None``, `TileMapSystem` will try to figure
out one from ``geogcs``.
``Pyramid.geogbounds``
Boundary of the map in geography coordinate system. Specified using
envelope ``(min_lon, min_lat, max_lon, max_lat)``.
The envelope is not considered as a ogr simple geometry and may
behaviour incorrectly for some GCS if it crosses meridian line.
``Pyramid.projbounds``
Boundary of the map in projection coordinate system. Specified using
envelope ``(left, bottom, right, top)``. When set to ``None``,
this will be calculated by projecting ``geogbounds`` form ``geogcs``
to ``projcs``. Note this calculation may fail or give a incorrect
result due to limitations in GDAL.
:type pyramid: :class:`~stonemason.pyramid.Pyramid`
"""
def __init__(self, pyramid):
assert isinstance(pyramid, Pyramid)
self._projcs = None
self._geogcs = None
self._forward_projection = None
self._backward_projection = None
self._geogbounds = None
self._projbounds = None
self._init_spatial_ref(pyramid)
self._init_projections(pyramid)
self._init_bounds(pyramid)
# construct a normalized pyramid from calculations above
self._pyramid = Pyramid(
levels=pyramid.levels,
stride=pyramid.stride,
projcs=self._projcs.ExportToProj4(),
geogcs=self._geogcs.ExportToProj4(),
geogbounds=Envelope.from_ogr(self._geogbounds.GetEnvelope()),
projbounds=Envelope.from_ogr(self._projbounds.GetEnvelope()),
)
@property
def projcs(self):
"""Projection coordinate system.
:rtype: :class:`osgeo.osr.SpatialReference`
"""
return self._projcs
@property
def geogcs(self):
"""Geographic coordinate system.
:rtype: :class:`osgeo.osr.SpatialReference`
"""
return self._geogcs
@property
def forward_projection(self):
"""Defines coordinate transformation from geographic coordinate
system to projection coordinate system.
:rtype: :class:`osgeo.osr.CoordinateTransformation`
"""
return self._forward_projection
@property
def backward_projection(self):
"""Defines coordinate transformation from projection coordinate
system to geographic coordinate system.
:rtype: :class:`osgeo.osr.CoordinateTransformation`
"""
return self._backward_projection
@property
def geog_bounds(self):
"""Bounds of the tile map system in geometry coordinate system.
:rtype: :class:`osgeo.osr.Geometry`
"""
return self._geogbounds
@property
def proj_bounds(self):
"""Bounds of the tile map system in projection coordinate system.
:rtype: :class:`osgeo.osr.Geometry`
"""
return self._projbounds
@property
def pyramid(self):
"""Normalized pyramid object.
:rtype: :class:`~stonemason.pyramid.Pyramid`
"""
return self._pyramid
def _init_spatial_ref(self, pyramid):
# create projection coordinate system from Pyramid
self._projcs = osr.SpatialReference()
self._projcs.SetFromUserInput(pyramid.projcs)
# must be a map projection
if not self._projcs.IsProjected():
raise TileMapError('Not a projection coordinate system.')
# create geographic coordinate system from Pyramid
self._geogcs = osr.SpatialReference()
if pyramid.geogcs is not None:
self._geogcs.SetFromUserInput(pyramid.geogcs)
else:
# try figure out geogcs of the projection if its not specified
code = self._projcs.GetAuthorityCode('geogcs')
authority = self._projcs.GetAuthorityName('geogcs')
if code is None or authority is None:
raise TileMapError("Cannot figure out geogcs automaticlly.")
self._geogcs.SetFromUserInput('%s:%s' % (authority, code))
# XXX: Fix up wkt +over issue
# By default PROJ.4 wraps output longitudes in the range -180 to 180.
# The +over switch can be used to disable the default wrapping which
# is done at a low level.
projcs = self._projcs.ExportToProj4()
if '+over' not in projcs.split():
projcs += ' +over'
self._projcs.ImportFromProj4(projcs)
geogcs = self._geogcs.ExportToProj4()
if '+over' not in geogcs.split():
geogcs += ' +over'
self._geogcs.ImportFromProj4(geogcs)
def _init_projections(self, pyramid):
self._forward_projection = osr.CoordinateTransformation(self._geogcs,
self._projcs)
        self._backward_projection = osr.CoordinateTransformation(self._projcs,
                                                                  self._geogcs)
def _init_bounds(self, pyramid):
self._geogbounds = Envelope(*pyramid.geogbounds) \
.to_geometry(self._geogcs)
if pyramid.projbounds is None:
geobounds = self._geogbounds.Clone()
geobounds.Transform(self._forward_projection)
self._projbounds = geobounds
else:
self._projbounds = Envelope(*pyramid.projbounds) \
.to_geometry(self._projcs)
def _calc_max_bbox(self):
envelope = self.proj_bounds.GetEnvelope()
min_x, max_x, min_y, max_y = envelope
size_x = abs(max_x - min_x)
size_y = abs(max_y - min_y)
scale = max([size_x, size_y])
# fit projection bounds to a square box, if necessary
if size_x > size_y:
offset_x = min_x
offset_y = min_y - (size_x - size_y) / 2
elif size_x < size_y:
offset_x = min_x - (size_y - size_x) / 2
offset_y = min_y
else:
offset_x = min_x
offset_y = min_y
return offset_x, offset_y, scale
def calc_tile_envelope(self, index):
""" Calculates envelope of given `TileIndex` of `MetaTileIndex` under
projection coordinate system.
:param index: Given tile index or metatile index
:type index: :class:`~stonemason.pyramid.TileIndex` or
:class:`~stonemason.pyramid.MetaTileIndex`
:return: Calculated envelope
:rtype: :class:`~stonemason.pyramid.geo.Envelope`
"""
# just convert metatile index to higher level tile index
if isinstance(index, MetaTileIndex):
index = index.to_tile_index()
assert isinstance(index, TileIndex)
# XXX: should've cached this
offset_x, offset_y, scale = self._calc_max_bbox()
z, x, y = index.z, index.x, index.y
norm_factor = 2. ** z
norm_min_x = x / norm_factor
norm_max_x = (x + 1) / norm_factor
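        # Tile rows are numbered from the top of the map down while projected
        # y grows upward, so the y axis is flipped when normalising.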
norm_min_y = 1 - (y + 1) / norm_factor
norm_max_y = 1 - y / norm_factor
envelope = Envelope(norm_min_x * scale + offset_x,
norm_min_y * scale + offset_y,
norm_max_x * scale + offset_x,
norm_max_y * scale + offset_y)
return envelope
def __repr__(self):
return '''GeographicSystem
projcs: %s
geogcs: %s
projbounds: %s
geogbounds: %s
)''' % (self._projcs.ExportToWkt(),
self._geogcs.ExportToWkt(),
self._projbounds.ExportToWkt(),
self._geogbounds.ExportToWkt())
| mit | 1,514,051,397,815,786,000 | 34.186495 | 118 | 0.607512 | false |
asciinema/asciinema | asciinema/__main__.py | 1 | 5439 | import locale
import argparse
import os
import sys
from asciinema import __version__
import asciinema.config as config
from asciinema.commands.auth import AuthCommand
from asciinema.commands.record import RecordCommand
from asciinema.commands.play import PlayCommand
from asciinema.commands.cat import CatCommand
from asciinema.commands.upload import UploadCommand
def positive_float(value):
value = float(value)
if value <= 0.0:
raise argparse.ArgumentTypeError("must be positive")
return value
def maybe_str(v):
if v is not None:
return str(v)
def main():
if locale.nl_langinfo(locale.CODESET).upper() not in ['US-ASCII', 'UTF-8', 'UTF8']:
print("asciinema needs an ASCII or UTF-8 character encoding to run. Check the output of `locale` command.")
sys.exit(1)
try:
cfg = config.load()
except config.ConfigError as e:
sys.stderr.write(str(e) + '\n')
sys.exit(1)
# create the top-level parser
parser = argparse.ArgumentParser(
description="Record and share your terminal sessions, the right way.",
epilog="""example usage:
Record terminal and upload it to asciinema.org:
\x1b[1masciinema rec\x1b[0m
Record terminal to local file:
\x1b[1masciinema rec demo.cast\x1b[0m
Record terminal and upload it to asciinema.org, specifying title:
\x1b[1masciinema rec -t "My git tutorial"\x1b[0m
Record terminal to local file, limiting idle time to max 2.5 sec:
\x1b[1masciinema rec -i 2.5 demo.cast\x1b[0m
Replay terminal recording from local file:
\x1b[1masciinema play demo.cast\x1b[0m
Replay terminal recording hosted on asciinema.org:
\x1b[1masciinema play https://asciinema.org/a/difqlgx86ym6emrmd8u62yqu8\x1b[0m
Print full output of recorded session:
\x1b[1masciinema cat demo.cast\x1b[0m
For help on a specific command run:
\x1b[1masciinema <command> -h\x1b[0m""",
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--version', action='version', version='asciinema %s' % __version__)
subparsers = parser.add_subparsers()
# create the parser for the "rec" command
parser_rec = subparsers.add_parser('rec', help='Record terminal session')
parser_rec.add_argument('--stdin', help='enable stdin recording, disabled by default', action='store_true', default=cfg.record_stdin)
parser_rec.add_argument('--append', help='append to existing recording', action='store_true', default=False)
parser_rec.add_argument('--raw', help='save only raw stdout output', action='store_true', default=False)
parser_rec.add_argument('--overwrite', help='overwrite the file if it already exists', action='store_true', default=False)
parser_rec.add_argument('-c', '--command', help='command to record, defaults to $SHELL', default=cfg.record_command)
parser_rec.add_argument('-e', '--env', help='list of environment variables to capture, defaults to ' + config.DEFAULT_RECORD_ENV, default=cfg.record_env)
parser_rec.add_argument('-t', '--title', help='title of the asciicast')
parser_rec.add_argument('-i', '--idle-time-limit', help='limit recorded idle time to given number of seconds', type=positive_float, default=maybe_str(cfg.record_idle_time_limit))
parser_rec.add_argument('-y', '--yes', help='answer "yes" to all prompts (e.g. upload confirmation)', action='store_true', default=cfg.record_yes)
parser_rec.add_argument('-q', '--quiet', help='be quiet, suppress all notices/warnings (implies -y)', action='store_true', default=cfg.record_quiet)
parser_rec.add_argument('filename', nargs='?', default='', help='filename/path to save the recording to')
parser_rec.set_defaults(cmd=RecordCommand)
# create the parser for the "play" command
parser_play = subparsers.add_parser('play', help='Replay terminal session')
parser_play.add_argument('-i', '--idle-time-limit', help='limit idle time during playback to given number of seconds', type=positive_float, default=maybe_str(cfg.play_idle_time_limit))
parser_play.add_argument('-s', '--speed', help='playback speedup (can be fractional)', type=positive_float, default=cfg.play_speed)
parser_play.add_argument('filename', help='local path, http/ipfs URL or "-" (read from stdin)')
parser_play.set_defaults(cmd=PlayCommand)
# create the parser for the "cat" command
parser_cat = subparsers.add_parser('cat', help='Print full output of terminal session')
parser_cat.add_argument('filename', help='local path, http/ipfs URL or "-" (read from stdin)')
parser_cat.set_defaults(cmd=CatCommand)
# create the parser for the "upload" command
parser_upload = subparsers.add_parser('upload', help='Upload locally saved terminal session to asciinema.org')
parser_upload.add_argument('filename', help='filename or path of local recording')
parser_upload.set_defaults(cmd=UploadCommand)
# create the parser for the "auth" command
parser_auth = subparsers.add_parser('auth', help='Manage recordings on asciinema.org account')
parser_auth.set_defaults(cmd=AuthCommand)
# parse the args and call whatever function was selected
args = parser.parse_args()
if hasattr(args, 'cmd'):
command = args.cmd(args, cfg, os.environ)
code = command.execute()
sys.exit(code)
else:
parser.print_help()
sys.exit(1)
if __name__ == '__main__':
main()
| gpl-3.0 | 3,624,383,145,244,699,000 | 46.295652 | 188 | 0.706012 | false |
elsantodel90/RAAGo | aago_ranking/users/admin.py | 1 | 1070 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
class MyUserCreationForm(UserCreationForm):
error_message = UserCreationForm.error_messages.update(
{
'duplicate_username': 'This username has already been taken.'
}
)
class Meta(UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data['username']
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
form = MyUserChangeForm
add_form = MyUserCreationForm
| gpl-3.0 | -776,621,858,203,700,000 | 25.75 | 78 | 0.703738 | false |
caktus/ibid | ibid/plugins/core.py | 1 | 11725 | # Copyright (c) 2008-2010, Michael Gorven, Stefano Rivera
# Released under terms of the MIT/X/Expat Licence. See COPYING for details.
import re
from datetime import datetime, timedelta
from random import choice
import logging
import ibid
from ibid.config import IntOption, ListOption, DictOption
from ibid.plugins import Processor, handler
from ibid.plugins.identity import identify
class Addressed(Processor):
priority = -1500
addressed = False
names = ListOption('names', 'Names to respond to', [ibid.config['botname']])
verbs = ListOption('verbs', u'Verbs to ignore', ('is', 'has', 'was', 'might', 'may', 'would', 'will', "isn't", "hasn't", "wasn't", "wouldn't", "won't", 'can', "can't", 'did', "didn't", 'said', 'says', 'should', "shouldn't", 'does', "doesn't"))
def setup(self):
names = '|'.join(re.escape(x) for x in self.names)
verbs = '|'.join(re.escape(x) for x in self.verbs)
self.patterns = [
re.compile(r'^\s*(?P<nick>%s)' % names
+ r'(?:\s*[:;.?>!,-]+\s+|\s+|\s*[,:]\s*)(?P<body>.*)',
re.I | re.DOTALL),
# "hello there, bot"-style addressing. But we want to be sure that
# there wasn't normal addressing too:
re.compile(r'^(?:\S+:.*|(?P<body>.*),\s*(?P<nick>%s))[\s?!.]*$' % names,
re.I | re.DOTALL)
]
self.verb_pattern = re.compile(r'^(?:%s)\s+(?:%s)\s+' % (names, verbs),
re.I | re.DOTALL)
@handler
def handle_addressed(self, event):
if 'addressed' not in event:
event.addressed = False
if self.verb_pattern.match(event.message['stripped']):
return
for pattern in self.patterns:
matches = pattern.search(event.message['stripped'])
if matches and matches.group('nick'):
new_message = matches.group('body')
event.addressed = matches.group('nick')
event.message['clean'] = new_message
event.message['deaddressed'] = \
pattern.search(event.message['raw']).group('body')
class Strip(Processor):
priority = -1600
addressed = False
event_types = (u'message', u'action', u'notice')
pattern = re.compile(r'^\s*(.*?)\s*[?!.]*\s*$', re.DOTALL)
@handler
def handle_strip(self, event):
if isinstance(event.message, basestring):
event.message = {'raw': event.message, 'deaddressed': event.message,}
event.message['clean'] = event.message['stripped'] \
= self.pattern.search(event.message['raw']).group(1)
class Ignore(Processor):
priority = -1500
addressed = False
event_types = (u'message', u'action', u'notice', u'invite')
nicks = ListOption('ignore', 'List of nicks to ignore', [])
@handler
def handle_ignore(self, event):
for who in self.nicks:
if event.sender['nick'] == who:
event.processed = True
class IgnorePublic(Processor):
priority = -1490
@handler
def ignore_public(self, event):
if event.public and not ibid.auth.authorise(event, u'publicresponse'):
event.addresponse(
u"Sorry, I'm not allowed to talk to you in public. "
'Ask me by private message.'
)
class Address(Processor):
priority = 1600
processed = True
addressed = False
event_types = (u'message', u'action', u'notice', u'state', u'invite')
acknowledgements = ListOption('acknowledgements', 'Responses for positive acknowledgements',
(u'Okay', u'Sure', u'Done', u'Righto', u'Alrighty', u'Yessir'))
refusals = ListOption('refusals', 'Responses for negative acknowledgements',
(u'No', u"I won't", u"Shan't", u"I'm sorry, but I can't do that"))
@handler
def address(self, event):
for response in event.responses:
if isinstance(response['reply'], bool):
if response:
response['reply'] = choice(self.acknowledgements)
else:
response['reply'] = choice(self.refusals)
if (response.get('address', False)
and not response.get('action', False)
and not response.get('notice', False)
and event.public):
response['reply'] = ('%s: %s' % (
event.sender['nick'], response['reply']))
class Timestamp(Processor):
priority = -1900
def process(self, event):
event.time = datetime.utcnow()
class Complain(Processor):
priority = 950
processed = True
event_types = (u'message', u'action', u'invite')
complaints = DictOption('complaints', 'Complaint responses', {
'nonsense': (
u'Huh?', u'Sorry...',
u'Excuse me?', u'*blink*', u'What?',
),
'notauthed': (
u"I'm not your bitch", u"Just do it yourself",
u"I'm not going to listen to you", u"You're not the boss of me",
),
'exception': (
u"I'm not feeling too well", u"That didn't go down very well. Burp.",
u"That didn't seem to agree with me",
),
'network': (
u'The tubes are clogged!', u"I can't reach that site",
u"That site seems to be down",
),
})
@handler
def complain(self, event):
if 'complain' in event and not event.responses:
event.addresponse(choice(self.complaints[event.complain]))
elif event.processed:
return
else:
event.addresponse(choice(self.complaints['nonsense']))
class RateLimit(Processor):
priority = -1000
event_types = (u'message', u'action', u'notice')
limit_time = IntOption('limit_time', 'Time period over which to measure messages', 10)
limit_messages = IntOption('limit_messages', 'Number of messages to allow during the time period', 5)
messages = {}
@handler
def ratelimit(self, event):
if event.identity not in self.messages:
self.messages[event.identity] = [event.time]
else:
self.messages[event.identity].append(event.time)
self.messages[event.identity] = filter(
lambda x: event.time - x < timedelta(seconds=self.limit_time),
self.messages[event.identity])
if len(self.messages[event.identity]) > self.limit_messages:
if event.public:
event.addresponse(u'Geez, give me some time to think!', address=False)
else:
event.processed = True
class Format(Processor):
priority = 2000
def _truncate(self, line, length):
if length is not None:
eline = line.encode('utf-8')
if len(eline) > length:
# horizontal ellipsis = 3 utf-8 bytes
return eline[:length-3].decode('utf-8', 'ignore') \
+ u'\N{horizontal ellipsis}'
return line
def process(self, event):
filtered = []
for response in event.responses:
source = response['source'].lower()
supports = ibid.sources[source].supports
maxlen = ibid.sources[source].truncation_point(response, event)
if response.get('action', False) and 'action' not in supports:
response['reply'] = u'*%s*' % response['reply']
conflate = response.get('conflate', True)
# Expand response into multiple single-line responses:
if (not conflate and 'multiline' not in supports):
for line in response['reply'].split('\n'):
r = {'reply': self._truncate(line, maxlen)}
for k in response.iterkeys():
                        if k != 'reply':
r[k] = response[k]
filtered.append(r)
# Expand response into multiple multi-line responses:
elif (not conflate and 'multiline' in supports
and maxlen is not None):
message = response['reply']
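                # Repeatedly cut the message at the byte limit, preferring to
                # break at a newline, punctuation or space near the cut point.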
while len(message.encode('utf-8')) > maxlen:
splitpoint = len(message.encode('utf-8')[:maxlen] \
.decode('utf-8', 'ignore'))
parts = [message[:splitpoint].rstrip(),
message[splitpoint:].lstrip()]
for sep in u'\n.;:, ':
if sep in u'\n ':
search = message[:splitpoint+1]
else:
search = message[:splitpoint]
if sep in search:
splitpoint = search.rindex(sep)
parts = [message[:splitpoint+1].rstrip(),
message[splitpoint+1:]]
break
r = {'reply': parts[0]}
for k in response.iterkeys():
                        if k != 'reply':
r[k] = response[k]
filtered.append(r)
message = parts[1]
response['reply'] = message
filtered.append(response)
else:
line = response['reply']
# Remove any characters that make no sense on IRC-like sources:
if 'multiline' not in supports:
line = line.expandtabs(1) \
.replace('\n', conflate == True
and u' ' or conflate or u'')
response['reply'] = self._truncate(line, maxlen)
filtered.append(response)
event.responses = filtered
class UnicodeWarning(Processor):
priority = 1950
def setup(self):
self.log = logging.getLogger('plugins.unicode')
def process(self, object):
if isinstance(object, dict):
for value in object.values():
self.process(value)
elif isinstance(object, list):
for value in object:
self.process(value)
elif isinstance(object, str):
self.log.warning(u'Found a non-unicode string: %r' % object)
class ChannelTracker(Processor):
priority = -1550
addressed = False
event_types = (u'state', u'source')
@handler
def track(self, event):
if event.type == u'source':
if event.status == u'disconnected':
ibid.channels.pop(event.source, None)
elif event.status == u'left':
ibid.channels[event.source].pop(event.channel, None)
elif event.public:
if event.state == u'online' and hasattr(event, 'othername'):
oldid = identify(event.session, event.source, event.othername)
for channel in ibid.channels[event.source].values():
if oldid in channel:
channel.remove(oldid)
channel.add(event.identity)
elif event.state == u'online':
ibid.channels[event.source][event.channel].add(event.identity)
elif event.state == u'offline' and not hasattr(event, 'othername'):
if event.channel:
ibid.channels[event.source][event.channel].remove(event.identity)
else:
for channel in ibid.channels[event.source].values():
channel.discard(event.identity)
# vi: set et sta sw=4 ts=4:
| gpl-3.0 | -4,911,617,517,799,839,000 | 37.316993 | 247 | 0.532367 | false |
openaid-IATI/deprecated-version-OIPA-v2 | iati/data/migrations/0035_auto__add_indicatorcitydata.py | 1 | 35113 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
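# South schema migration for the 'data' app: forwards() creates the
# data_indicatorcitydata table backing the new IndicatorCityData model and
# backwards() drops it again. With South installed this is normally applied
# with `python manage.py migrate data` (shown here as the usual South
# invocation, not a command taken from this repository).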
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'IndicatorCityData'
db.create_table('data_indicatorcitydata', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('indicator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.Indicator'])),
('city', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['data.City'])),
('value', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('year', self.gf('django.db.models.fields.IntegerField')(max_length=5)),
))
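        # Fire South's create signal so post-creation hooks (e.g. content
        # types / permissions normally set up on syncdb) can run for the
        # new model.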
db.send_create_signal('data', ['IndicatorCityData'])
def backwards(self, orm):
# Deleting model 'IndicatorCityData'
db.delete_table('data_indicatorcitydata')
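    # Frozen ORM state auto-generated by South: a snapshot of every model in
    # the 'data' app at the time this migration was written, so it replays
    # against this historical schema rather than the current models.py. For
    # reference only, the table created above corresponds roughly to a model
    # like the sketch below (inferred from the frozen 'data.indicatorcitydata'
    # entry, not code taken from this project):
    #
    #     class IndicatorCityData(models.Model):
    #         indicator = models.ForeignKey(Indicator)
    #         city = models.ForeignKey(City)
    #         value = models.FloatField(null=True, blank=True)
    #         year = models.IntegerField(max_length=5)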
models = {
'data.activitystatistics': {
'Meta': {'object_name': 'ActivityStatistics'},
'iati_identifier': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.IATIActivity']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_budget': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '20', 'decimal_places': '2', 'blank': 'True'})
},
'data.activitystatustype': {
'Meta': {'object_name': 'ActivityStatusType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '8', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']", 'null': 'True', 'blank': 'True'})
},
'data.aidtype': {
'Meta': {'object_name': 'AidType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.budget': {
'Meta': {'object_name': 'Budget'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period_end': ('django.db.models.fields.DateField', [], {}),
'period_start': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'})
},
'data.city': {
'Meta': {'object_name': 'City'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'data.collaborationtype': {
'Meta': {'object_name': 'CollaborationType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55', 'primary_key': 'True'})
},
'data.country': {
'Meta': {'object_name': 'Country'},
'country_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'dac_country_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dac_region_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dac_region_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'iso': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
'iso2': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'iso3': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
'data.countrystatistics': {
'Meta': {'object_name': 'CountryStatistics'},
'country': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Country']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'total_activities': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'data.currencytype': {
'Meta': {'object_name': 'CurrencyType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3', 'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']", 'null': 'True', 'blank': 'True'})
},
'data.financetype': {
'Meta': {'object_name': 'FinanceType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.flowtype': {
'Meta': {'object_name': 'FlowType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.iatiactivity': {
'Meta': {'object_name': 'IATIActivity'},
'activity_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.ActivityStatusType']", 'null': 'True', 'blank': 'True'}),
'collaboration_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CollaborationType']", 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'default_aid_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.AidType']", 'null': 'True', 'blank': 'True'}),
'default_finance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FinanceType']", 'null': 'True', 'blank': 'True'}),
'default_flow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FlowType']", 'null': 'True', 'blank': 'True'}),
'default_tied_status_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.TiedAidStatusType']", 'null': 'True', 'blank': 'True'}),
'end_actual': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_planned': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'iati_identifier': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reporting_organisation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Organisation']"}),
'start_actual': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_planned': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'data.iatiactivitybudget': {
'Meta': {'object_name': 'IATIActivityBudget', '_ormbases': ['data.Budget']},
'budget_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Budget']", 'unique': 'True', 'primary_key': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"})
},
'data.iatiactivitycontact': {
'Meta': {'object_name': 'IATIActivityContact'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailing_address': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'organisation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'person_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'telephone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'data.iatiactivitycountry': {
'Meta': {'object_name': 'IATIActivityCountry'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.iatiactivitydescription': {
'Meta': {'object_name': 'IATIActivityDescription'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'})
},
'data.iatiactivitydocument': {
'Meta': {'object_name': 'IATIActivityDocument'},
'format': ('django.db.models.fields.CharField', [], {'max_length': '55', 'null': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'data.iatiactivitypolicymarker': {
'Meta': {'object_name': 'IATIActivityPolicyMarker'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'significance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.SignificanceType']", 'null': 'True', 'blank': 'True'}),
'vocabulary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.VocabularyType']", 'null': 'True', 'blank': 'True'})
},
'data.iatiactivityregion': {
'Meta': {'object_name': 'IATIActivityRegion'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Region']"})
},
'data.iatiactivitysector': {
'Meta': {'object_name': 'IATIActivitySector'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sectors'", 'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'percentage': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Sector']"})
},
'data.iatiactivitytitle': {
'Meta': {'object_name': 'IATIActivityTitle'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Language']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'data.iatiactivitywebsite': {
'Meta': {'object_name': 'IATIActivityWebsite'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
'data.iatitransaction': {
'Meta': {'object_name': 'IATITransaction', '_ormbases': ['data.Transaction']},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'transaction_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Transaction']", 'unique': 'True', 'primary_key': 'True'})
},
'data.indicator': {
'Meta': {'object_name': 'Indicator'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'primary_key': 'True'})
},
'data.indicatorcitydata': {
'Meta': {'object_name': 'IndicatorCityData'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.City']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Indicator']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
'data.indicatordata': {
'Meta': {'object_name': 'IndicatorData'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Indicator']"}),
'value': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {'max_length': '5'})
},
'data.language': {
'Meta': {'object_name': 'Language'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'data.organisation': {
'Meta': {'object_name': 'Organisation'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'org_name_lang': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '25', 'primary_key': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.organisationstatistics': {
'Meta': {'object_name': 'OrganisationStatistics'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organisation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['data.Organisation']", 'unique': 'True'}),
'total_activities': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'data.otheridentifier': {
'Meta': {'object_name': 'OtherIdentifier'},
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner_ref': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'data.participatingorganisation': {
'Meta': {'object_name': 'ParticipatingOrganisation'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'org_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'org_name_lang': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.planneddisbursement': {
'Meta': {'object_name': 'PlannedDisbursement'},
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']"}),
'iati_activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.IATIActivity']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'period_end': ('django.db.models.fields.DateField', [], {}),
'period_start': ('django.db.models.fields.DateField', [], {})
},
'data.region': {
'Meta': {'object_name': 'Region'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.sector': {
'Meta': {'object_name': 'Sector'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '55', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'vocabulary_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.VocabularyType']", 'null': 'True', 'blank': 'True'})
},
'data.significancetype': {
'Meta': {'object_name': 'SignificanceType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.tiedaidstatustype': {
'Meta': {'object_name': 'TiedAidStatusType'},
'code': ('django.db.models.fields.IntegerField', [], {'max_length': '5', 'primary_key': 'True'})
},
'data.transaction': {
'Meta': {'object_name': 'Transaction'},
'aid_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.AidType']", 'null': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.CurrencyType']", 'null': 'True', 'blank': 'True'}),
'disbursement_channel': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'finance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FinanceType']", 'null': 'True', 'blank': 'True'}),
'flow_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.FlowType']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider_org': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provider_org'", 'to': "orm['data.Organisation']"}),
'receiver_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'receiver_org'", 'null': 'True', 'to': "orm['data.Organisation']"}),
'tied_aid_status_type': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'transaction_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'transaction_type': ('django.db.models.fields.CharField', [], {'max_length': '55'}),
'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
'value_date': ('django.db.models.fields.DateField', [], {})
},
'data.typedeprivationcity': {
'Meta': {'object_name': 'TypeDeprivationCity'},
'extra_type_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'four_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.UnHabitatIndicatorCity']"}),
'is_matrix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'non_slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'one_shelter_deprivation': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rural': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'three_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'total': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'two_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type_deprivation': ('django.db.models.fields.IntegerField', [], {}),
'urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'data.typedeprivationcountry': {
'Meta': {'object_name': 'TypeDeprivationCountry'},
'extra_type_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'four_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.UnHabitatIndicatorCountry']"}),
'is_matrix': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'non_slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'one_shelter_deprivation': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rural': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_household': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'three_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'total': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'two_shelter_deprivations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'type_deprivation': ('django.db.models.fields.IntegerField', [], {}),
'urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'data.unhabitatindicatorcity': {
'Meta': {'object_name': 'UnHabitatIndicatorCity'},
'avg_annual_rate_change_urban_agglomerations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bottle_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.City']"}),
'composting_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'connection_to_electricity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_4_dimensions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_5_dimensions': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_environment_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_equity_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_infrastructure_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_productivity_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'cpi_quality_of_live_index': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'diarrhea_had_ari': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'diarrhea_last_two_weeks': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_female_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_male_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'fever_last_two_weeks': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'has_telephone': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'improved_floor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_flush_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_pit_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_spring_surface_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'perc_malnourished': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'perc_measles': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'piped_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_with_slab_or_covered_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_without_slab': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_urban_agglomerations': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'protected_well': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'public_tap_pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rainwater': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_proportion_living_urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sufficient_living': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'under_five_mortality_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'urban_population': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'urban_slum_population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {}),
'year_plus_range': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.unhabitatindicatorcountry': {
'Meta': {'object_name': 'UnHabitatIndicatorCountry'},
'avg_annual_rate_change_percentage_urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'avg_annual_rate_change_total_population': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'bottle_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'composting_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'connection_to_electricity': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['data.Country']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'enrollment_female_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'enrollment_male_primary_education': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'has_telephone': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'improved_floor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_flush_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_pit_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_spring_surface_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_toilet': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'improved_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'piped_water': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_with_slab_or_covered_latrine': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pit_latrine_without_slab': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_rural_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_urban_area': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pop_urban_percentage': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'protected_well': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'public_tap_pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'pump_borehole': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rainwater': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'slum_proportion_living_urban': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sufficient_living': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'under_five_mortality_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'urban_population': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'urban_slum_population': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.IntegerField', [], {}),
'year_plus_range': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'data.vocabularytype': {
'Meta': {'object_name': 'VocabularyType'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'primary_key': 'True'})
}
}
complete_apps = ['data'] | agpl-3.0 | -1,100,802,931,386,025,900 | 82.208531 | 182 | 0.552018 | false |
delftrobotics/keras-retinanet | tests/utils/test_transform.py | 1 | 5871 | import numpy as np
from numpy.testing import assert_almost_equal
from math import pi
from keras_retinanet.utils.transform import (
colvec,
transform_aabb,
rotation, random_rotation,
translation, random_translation,
scaling, random_scaling,
shear, random_shear,
random_flip,
random_transform,
random_transform_generator,
change_transform_origin,
)
def test_colvec():
assert np.array_equal(colvec(0), np.array([[0]]))
assert np.array_equal(colvec(1, 2, 3), np.array([[1], [2], [3]]))
assert np.array_equal(colvec(-1, -2), np.array([[-1], [-2]]))
def test_rotation():
assert_almost_equal(colvec( 1, 0, 1), rotation(0.0 * pi).dot(colvec(1, 0, 1)))
assert_almost_equal(colvec( 0, 1, 1), rotation(0.5 * pi).dot(colvec(1, 0, 1)))
assert_almost_equal(colvec(-1, 0, 1), rotation(1.0 * pi).dot(colvec(1, 0, 1)))
assert_almost_equal(colvec( 0, -1, 1), rotation(1.5 * pi).dot(colvec(1, 0, 1)))
assert_almost_equal(colvec( 1, 0, 1), rotation(2.0 * pi).dot(colvec(1, 0, 1)))
assert_almost_equal(colvec( 0, 1, 1), rotation(0.0 * pi).dot(colvec(0, 1, 1)))
assert_almost_equal(colvec(-1, 0, 1), rotation(0.5 * pi).dot(colvec(0, 1, 1)))
assert_almost_equal(colvec( 0, -1, 1), rotation(1.0 * pi).dot(colvec(0, 1, 1)))
assert_almost_equal(colvec( 1, 0, 1), rotation(1.5 * pi).dot(colvec(0, 1, 1)))
assert_almost_equal(colvec( 0, 1, 1), rotation(2.0 * pi).dot(colvec(0, 1, 1)))
def test_random_rotation():
prng = np.random.RandomState(0)
for i in range(100):
assert_almost_equal(1, np.linalg.det(random_rotation(-i, i, prng)))
def test_translation():
assert_almost_equal(colvec( 1, 2, 1), translation(colvec( 0, 0)).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec( 4, 6, 1), translation(colvec( 3, 4)).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec(-2, -2, 1), translation(colvec(-3, -4)).dot(colvec(1, 2, 1)))
def assert_is_translation(transform, min, max):
assert transform.shape == (3, 3)
assert np.array_equal(transform[:, 0:2], np.eye(3, 2))
assert transform[2, 2] == 1
assert np.greater_equal(transform[0:2, 2], min).all()
assert np.less( transform[0:2, 2], max).all()
def test_random_translation():
prng = np.random.RandomState(0)
min = (-10, -20)
max = (20, 10)
for i in range(100):
assert_is_translation(random_translation(min, max, prng), min, max)
def test_shear():
assert_almost_equal(colvec( 1, 2, 1), shear(0.0 * pi).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec(-1, 0, 1), shear(0.5 * pi).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec( 1, -2, 1), shear(1.0 * pi).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec( 3, 0, 1), shear(1.5 * pi).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec( 1, 2, 1), shear(2.0 * pi).dot(colvec(1, 2, 1)))
def assert_is_shear(transform):
assert transform.shape == (3, 3)
assert np.array_equal(transform[:, 0], [1, 0, 0])
assert np.array_equal(transform[:, 2], [0, 0, 1])
assert transform[2, 1] == 0
# sin^2 + cos^2 == 1
assert_almost_equal(1, transform[0, 1] ** 2 + transform[1, 1] ** 2)
def test_random_shear():
prng = np.random.RandomState(0)
for i in range(100):
assert_is_shear(random_shear(-pi, pi, prng))
def test_scaling():
assert_almost_equal(colvec(1.0, 2, 1), scaling(colvec(1.0, 1.0)).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec(0.0, 2, 1), scaling(colvec(0.0, 1.0)).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec(1.0, 0, 1), scaling(colvec(1.0, 0.0)).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec(0.5, 4, 1), scaling(colvec(0.5, 2.0)).dot(colvec(1, 2, 1)))
def assert_is_scaling(transform, min, max):
assert transform.shape == (3, 3)
assert np.array_equal(transform[2, :], [0, 0, 1])
assert np.array_equal(transform[:, 2], [0, 0, 1])
assert transform[1, 0] == 0
assert transform[0, 1] == 0
assert np.greater_equal(np.diagonal(transform)[:2], min).all()
assert np.less( np.diagonal(transform)[:2], max).all()
def test_random_scaling():
prng = np.random.RandomState(0)
min = (0.1, 0.2)
max = (20, 10)
for i in range(100):
assert_is_scaling(random_scaling(min, max, prng), min, max)
def assert_is_flip(transform):
assert transform.shape == (3, 3)
assert np.array_equal(transform[2, :], [0, 0, 1])
assert np.array_equal(transform[:, 2], [0, 0, 1])
assert transform[1, 0] == 0
assert transform[0, 1] == 0
assert abs(transform[0, 0]) == 1
assert abs(transform[1, 1]) == 1
def test_random_flip():
prng = np.random.RandomState(0)
for i in range(100):
assert_is_flip(random_flip(0.5, 0.5, prng))
def test_random_transform():
prng = np.random.RandomState(0)
for i in range(100):
transform = random_transform(prng=prng)
assert np.array_equal(transform, np.identity(3))
for i, transform in zip(range(100), random_transform_generator(prng=np.random.RandomState())):
assert np.array_equal(transform, np.identity(3))
def test_transform_aabb():
assert np.array_equal([1, 2, 3, 4], transform_aabb(np.identity(3), [1, 2, 3, 4]))
assert_almost_equal([-3, -4, -1, -2], transform_aabb(rotation(pi), [1, 2, 3, 4]))
assert_almost_equal([ 2, 4, 4, 6], transform_aabb(translation([1, 2]), [1, 2, 3, 4]))
def test_change_transform_origin():
assert np.array_equal(change_transform_origin(translation([3, 4]), [1, 2]), translation([3, 4]))
assert_almost_equal(colvec(1, 2, 1), change_transform_origin(rotation(pi), [1, 2]).dot(colvec(1, 2, 1)))
assert_almost_equal(colvec(0, 0, 1), change_transform_origin(rotation(pi), [1, 2]).dot(colvec(2, 4, 1)))
assert_almost_equal(colvec(0, 0, 1), change_transform_origin(scaling([0.5, 0.5]), [-2, -4]).dot(colvec(2, 4, 1)))
| apache-2.0 | -2,944,925,750,333,459,000 | 37.880795 | 117 | 0.615909 | false |
ctu-geoforall-lab-sandbox/qgis-aerogen-plugin | aerogen.py | 1 | 7702 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
AeroGen
A QGIS plugin
AeroGen Plugin
-------------------
begin : 2017-04-24
git sha : $Format:%H$
copyright : (C) 2017 by CTU GeoForAll Lab
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication, Qt
from PyQt4.QtGui import QAction, QIcon, QToolButton
# Initialize Qt resources from file resources.py
import resources
# Import the code for the DockWidget
from aerogen_dockwidget import AeroGenDockWidget
import os.path
class AeroGen:
"""QGIS Plugin Implementation."""
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'AeroGen_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&AeroGen')
# add plugin icon into plugin toolbar
self.toolButton = QToolButton()
#print "** INITIALIZING AeroGen"
self.pluginIsActive = False
self.dockwidget = None
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('AeroGen', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolButton.setDefaultAction(action)
self.iface.addToolBarWidget(self.toolButton)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/AeroGen/icon.png'
self.add_action(
icon_path,
text=self.tr(u'AeroGen'),
callback=self.run,
parent=self.iface.mainWindow())
#--------------------------------------------------------------------------
def onClosePlugin(self):
"""Cleanup necessary items here when plugin dockwidget is closed"""
#print "** CLOSING AeroGen"
# disconnects
self.dockwidget.closingPlugin.disconnect(self.onClosePlugin)
# remove this statement if dockwidget is to remain
# for reuse if plugin is reopened
        # Commented out the next statement since it causes QGIS crashes
# when closing the docked window:
# self.dockwidget = None
self.pluginIsActive = False
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
#print "** UNLOAD AeroGen"
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&AeroGen'),
action)
self.iface.removeToolBarIcon(action)
#--------------------------------------------------------------------------
def run(self):
"""Run method that loads and starts the plugin"""
if not self.pluginIsActive:
self.pluginIsActive = True
#print "** STARTING AeroGen"
# dockwidget may not exist if:
# first run of plugin
# removed on close (see self.onClosePlugin method)
            if self.dockwidget is None:
# Create the dockwidget (after translation) and keep reference
self.dockwidget = AeroGenDockWidget()
# connect to provide cleanup on closing of dockwidget
self.dockwidget.closingPlugin.connect(self.onClosePlugin)
# show the dockwidget
# TODO: fix to allow choice of dock location
self.iface.addDockWidget(Qt.LeftDockWidgetArea, self.dockwidget)
self.dockwidget.show()
| gpl-3.0 | 1,069,905,357,386,615,600 | 32.198276 | 79 | 0.550247 | false |
chiefenne/PyAero | src/SplineRefine.py | 1 | 13180 | import copy
import numpy as np
from scipy import interpolate
from PySide2 import QtGui, QtCore
from Utils import Utils
import GraphicsItemsCollection as gic
import GraphicsItem
import ContourAnalysis as ca
import logging
logger = logging.getLogger(__name__)
class SplineRefine:
def __init__(self):
# get MainWindow instance (overcomes handling parents)
self.mainwindow = QtCore.QCoreApplication.instance().mainwindow
def doSplineRefine(self, tolerance=172.0, points=150, ref_te=3,
ref_te_n=6, ref_te_ratio=3.0):
logger.debug('Arrived in doSplineRefine')
# get raw coordinates
x, y = self.mainwindow.airfoil.raw_coordinates
# interpolate a spline through the raw contour points
# constant point distribution used here
        # typically the nose radius is poorly resolved by that
self.spline_data = self.spline(x, y, points=points, degree=3)
# refine the contour in order to meet the tolerance
# this keeps the constant distribution but refines around the nose
spline_data = copy.deepcopy(self.spline_data)
self.refine(spline_data, tolerance=tolerance)
# redo spline on refined contour
# spline only evaluated at refined contour points (evaluate=True)
coo, u, t, der1, der2, tck = self.spline_data
x, y = coo
self.spline_data = self.spline(x, y, points=points, degree=3,
evaluate=True)
# refine the trailing edge of the spline
self.refine_te(ref_te, ref_te_n, ref_te_ratio)
# add spline data to airfoil object
self.mainwindow.airfoil.spline_data = self.spline_data
# add splined and refined contour to the airfoil contourGroup
# makeSplineMarkers call within makeContourSpline
self.mainwindow.airfoil.makeContourSpline()
# get LE radius, etc.
spline_data = self.mainwindow.airfoil.spline_data
curvature_data = ca.ContourAnalysis.getCurvature(spline_data)
rc, xc, yc, xle, yle, le_id = \
ca.ContourAnalysis.getLeRadius(spline_data, curvature_data)
self.makeLeCircle(rc, xc, yc, xle, yle)
logger.info('Leading edge radius: {:11.8f}'.format(rc))
logger.info('Leading edge circle tangent at point: {}'.format(le_id))
def makeLeCircle(self, rc, xc, yc, xle, yle):
        # delete existing LE circle ItemGroup from scene
if hasattr(self.mainwindow.airfoil, 'le_circle'):
self.mainwindow.scene.removeItem(self.mainwindow.airfoil.le_circle)
del self.mainwindow.airfoil.le_circle
# put LE circle, center and tangent point in a list
circles = list()
circle = gic.GraphicsCollection()
circle.pen.setColor(QtGui.QColor(0, 150, 0, 255))
circle.pen.setWidthF(0.3)
# no pen thickness change when zoomed
circle.pen.setCosmetic(True)
circle.brush.setColor(QtGui.QColor(10, 200, 10, 150))
circle.Circle(xc, yc, rc)
circle = GraphicsItem.GraphicsItem(circle)
circles.append(circle)
circle = gic.GraphicsCollection()
circle.pen.setColor(QtGui.QColor(255, 0, 0, 255))
circle.pen.setWidthF(0.3)
# no pen thickness change when zoomed
circle.pen.setCosmetic(True)
circle.brush.setColor(QtGui.QColor(255, 0, 0, 255))
circle.Circle(xc, yc, 0.0002)
circle = GraphicsItem.GraphicsItem(circle)
circles.append(circle)
circle = gic.GraphicsCollection()
circle.pen.setColor(QtGui.QColor(255, 0, 0, 255))
circle.pen.setWidthF(1.6)
# no pen thickness change when zoomed
circle.pen.setCosmetic(True)
circle.brush.setColor(QtGui.QColor(255, 0, 0, 255))
circle.Circle(xle, yle, 0.0002)
circle = GraphicsItem.GraphicsItem(circle)
circles.append(circle)
self.mainwindow.airfoil.le_circle = \
self.mainwindow.scene.createItemGroup(circles)
self.mainwindow.airfoil.le_circle.setZValue(110)
self.mainwindow.centralwidget.cb7.setChecked(True)
self.mainwindow.centralwidget.cb7.setEnabled(True)
def spline(self, x, y, points=200, degree=2, evaluate=False):
"""Interpolate spline through given points
Args:
            points (int, optional): Number of points on the spline
degree (int, optional): Degree of the spline
evaluate (bool, optional): If True, evaluate spline just at
the coordinates of the knots
"""
# interpolate B-spline through data points
# returns knots of control polygon
# tck ... tuple (t,c,k) containing the vector of knots,
# the B-spline coefficients, and the degree of the spline.
# u ... array of the parameters for each knot
# NOTE: s=0.0 is important as no smoothing should be done on the spline
# after interpolating it
tck, u = interpolate.splprep([x, y], s=0.0, k=degree)
# number of points on interpolated B-spline (parameter t)
t = np.linspace(0.0, 1.0, points)
# if True, evaluate spline just at the coordinates of the knots
if evaluate:
t = u
# evaluate B-spline at given parameters
# der=0: returns point coordinates
coo = interpolate.splev(t, tck, der=0)
# evaluate 1st derivative at given parameters
der1 = interpolate.splev(t, tck, der=1)
# evaluate 2nd derivative at given parameters
der2 = interpolate.splev(t, tck, der=2)
spline_data = [coo, u, t, der1, der2, tck]
return spline_data
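    # Illustrative sketch of the scipy calls used above (the sample arrays
    # below are hypothetical, not taken from the plugin):
    #
    #   import numpy as np
    #   from scipy import interpolate
    #   x = np.cos(np.linspace(0.0, np.pi, 20))        # crude airfoil-like abscissae
    #   y = 0.1 * np.sin(np.linspace(0.0, np.pi, 20))  # crude ordinates
    #   tck, u = interpolate.splprep([x, y], s=0.0, k=3)
    #   t = np.linspace(0.0, 1.0, 200)
    #   xs, ys = interpolate.splev(t, tck, der=0)      # splined coordinates
    #   dxs, dys = interpolate.splev(t, tck, der=1)    # 1st derivatives w.r.t. t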
def refine(self, spline_data, tolerance=170.0, recursions=0):
"""Recursive refinement with respect to angle criterion (tol).
If angle between two adjacent line segments is less than tol,
a recursive refinement of the contour is performed until
tol is met.
Args:
tol (float, optional): Angle between two adjacent contour segments
recursions (int, optional): NO USER INPUT HERE
Needed just for level information
during recursions
"""
# self.spline_data = [coo, u, t, der1, der2, tck]
xx, yy = spline_data[0]
t = spline_data[2]
tck = spline_data[5]
logger.debug('\nPoints before refining: {} \n'.format(len(xx)))
xn = copy.deepcopy(xx)
yn = copy.deepcopy(yy)
tn = copy.deepcopy(t)
j = 0
refinements = 0
first = True
refined = dict()
for i in range(len(xx) - 2):
refined[i] = False
# angle between two contour line segments
a = np.array([xx[i], yy[i]])
b = np.array([xx[i + 1], yy[i + 1]])
c = np.array([xx[i + 2], yy[i + 2]])
angle = Utils.angle_between(a - b, c - b, degree=True)
if angle < tolerance:
logger.debug('Refining between segments {} {},'
.format(i, i + 1))
logger.debug('Tol={0:5.1f}, Angle={1:05.1f}\n'
.format(tolerance, angle))
refined[i] = True
refinements += 1
# parameters for new points
t1 = (t[i] + t[i + 1]) / 2.
t2 = (t[i + 1] + t[i + 2]) / 2.
# coordinates of new points
p1 = interpolate.splev(t1, tck, der=0)
p2 = interpolate.splev(t2, tck, der=0)
# insert points and their parameters into arrays
if i > 0 and not refined[i - 1]:
xn = np.insert(xn, i + 1 + j, p1[0])
yn = np.insert(yn, i + 1 + j, p1[1])
tn = np.insert(tn, i + 1 + j, t1)
j += 1
xn = np.insert(xn, i + 2 + j, p2[0])
yn = np.insert(yn, i + 2 + j, p2[1])
tn = np.insert(tn, i + 2 + j, t2)
j += 1
if first and recursions > 0:
logger.debug('Recursion level: {} \n'.format(recursions))
first = False
logger.debug('Points after refining: {}'.format(len(xn)))
# update coordinate array, including inserted points
spline_data[0] = (xn, yn)
# update parameter array, including parameters of inserted points
spline_data[2] = tn
# this is the recursion :)
if refinements > 0:
self.refine(spline_data, tolerance, recursions + 1)
# stopping from recursion if no refinements done in this recursion
else:
# update derivatives, including inserted points
spline_data[3] = interpolate.splev(tn, tck, der=1)
spline_data[4] = interpolate.splev(tn, tck, der=2)
logger.debug('No more refinements.')
logger.debug('\nTotal number of recursions: {}'
.format(recursions - 1))
# due to recursive call to refine, here no object can be returned
# instead use self to transfer data to the outer world :)
self.spline_data = copy.deepcopy(spline_data)
return
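    # Sketch of the angle criterion that drives the recursion above, assuming
    # Utils.angle_between returns the included angle of two vectors in degrees
    # (the sample points are hypothetical):
    #
    #   a, b, c = np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([2.0, 0.5])
    #   angle = Utils.angle_between(a - b, c - b, degree=True)
    #   # if angle < tolerance, midpoints (in spline parameter space) are
    #   # inserted on both neighbouring segments and refine() recurses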
def refine_te(self, ref_te, ref_te_n, ref_te_ratio):
"""Refine the airfoil contour at the trailing edge
Args:
ref_te (TYPE): Description
ref_te_n (TYPE): Description
ref_te_ratio (TYPE): Description
Returns:
TYPE: Description
"""
# get parameter of point to which refinement reaches
tref = self.spline_data[2][ref_te]
# calculate the new spacing at the trailing edge points
spacing = self.spacing(divisions=ref_te_n, ratio=ref_te_ratio,
thickness=tref)
# insert new points with the spacing into the airfoil contour data
x, y = self.spline_data[0]
t = self.spline_data[2]
tck = self.spline_data[5]
# remove points which will be refined
index = range(ref_te + 1)
x = np.delete(x, index)
y = np.delete(y, index)
t = np.delete(t, index)
index = range(len(x))[-(ref_te + 1):]
x = np.delete(x, index)
y = np.delete(y, index)
t = np.delete(t, index)
# add refined points
for s in spacing[::-1]:
# upper side
p = interpolate.splev(s, tck, der=0)
x = np.insert(x, 0, p[0])
y = np.insert(y, 0, p[1])
t = np.insert(t, 0, s)
# lower side
p = interpolate.splev(1. - s, tck, der=0)
x = np.append(x, p[0])
y = np.append(y, p[1])
t = np.append(t, 1. - s)
# update coordinate array, including inserted points
self.spline_data[0] = (x, y)
# update parameter array, including parameters of inserted points
self.spline_data[2] = t
# update derivatives, including inserted points
self.spline_data[3] = interpolate.splev(t, tck, der=1)
self.spline_data[4] = interpolate.splev(t, tck, der=2)
def spacing(self, divisions=10, ratio=1.0, thickness=1.0):
"""Calculate point distribution on a line
Args:
divisions (int, optional): Number of subdivisions
ratio (float, optional): Ratio of last to first subdivision size
            thickness (float, optional): Length of the line
        Returns:
            numpy.ndarray: Cumulative point positions from 0.0 to thickness
"""
if divisions == 1:
sp = [0.0, 1.0]
return np.array(sp)
growth = ratio**(1.0 / (float(divisions) - 1.0))
if growth == 1.0:
growth = 1.0 + 1.0e-10
s0 = 1.0
s = [s0]
for i in range(1, divisions + 1):
app = s0 * growth**i
s.append(app)
sp = np.array(s)
sp -= sp[0]
sp /= sp[-1]
sp *= thickness
return sp
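    # Example of the geometric growth produced by spacing() (hypothetical values):
    #
    #   sp = self.spacing(divisions=5, ratio=3.0, thickness=0.02)
    #   # -> 6 values from 0.0 to 0.02 (5 subdivisions); each subdivision is
    #   #    ratio**(1/(divisions-1)) ~ 1.32 times the previous one, so the
    #   #    last subdivision is 3.0 times the first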
def writeContour(self):
xr = self.raw_coordinates[0]
xc = self.coordinates[0]
yc = self.coordinates[1]
s = '# Spline with {0} points based on initial contour'.format(len(xc))
        s1 = ' ({0} points)\n'.format(len(xr))
info = s + s1
with open(self.name + '_spline_' + str(len(xc)) + '.dat', 'w') as f:
f.write('#\n')
f.write('# Airfoil: ' + self.name + '\n')
f.write('# Created from ' + self.filename + '\n')
f.write(info)
f.write('#\n')
for i in range(len(xc)):
data = '{:10.8f} {:10.8f} \n'.format(xc[i], yc[i])
f.write(data)
| mit | -7,586,054,523,396,383,000 | 34.611111 | 79 | 0.547117 | false |
PinguinoIDE/pinguino-ide | pinguino/qtgui/gide/bloques/inside/inside2_bool.py | 1 | 1665 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/yeison/Documentos/Desarrollo/Pinguino/GitHub/pinguino-ide/pinguino/qtgui/gide/bloques/inside/inside2_bool.ui'
#
# Created: Wed Mar 16 13:19:41 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide2 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(94, 34)
Form.setWindowTitle("")
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.frame = QtWidgets.QFrame(Form)
self.frame.setMinimumSize(QtCore.QSize(9, 34))
self.frame.setMaximumSize(QtCore.QSize(9, 34))
self.frame.setObjectName("frame")
self.gridLayout.addWidget(self.frame, 0, 0, 1, 1)
self.frame_2 = QtWidgets.QFrame(Form)
self.frame_2.setMinimumSize(QtCore.QSize(0, 34))
self.frame_2.setMaximumSize(QtCore.QSize(16777215, 34))
self.frame_2.setObjectName("frame_2")
self.gridLayout.addWidget(self.frame_2, 0, 1, 1, 1)
self.frame_3 = QtWidgets.QFrame(Form)
self.frame_3.setMinimumSize(QtCore.QSize(9, 34))
self.frame_3.setMaximumSize(QtCore.QSize(9, 34))
self.frame_3.setObjectName("frame_3")
self.gridLayout.addWidget(self.frame_3, 0, 2, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
pass
| gpl-2.0 | -4,333,316,980,457,685,500 | 38.642857 | 169 | 0.669069 | false |
emory-libraries/eulxml | eulxml/xmlmap/core.py | 1 | 28298 | # file eulxml/xmlmap/core.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import os
import warnings
import urllib
import time
from lxml import etree
from lxml.builder import ElementMaker
import six
from six.moves.urllib.request import urlopen
from eulxml.utils.compat import u
from eulxml.xmlmap.fields import Field
logger = logging.getLogger(__name__)
__all__ = ['XmlObject', 'parseUri', 'parseString', 'loadSchema',
'load_xmlobject_from_string', 'load_xmlobject_from_file',
'load_xslt']
# NB: When parsing XML in this module, we explicitly create a new parser
# each time. Without this, lxml 2.2.7 uses a global default parser. When
# parsing strings, lxml appears to set that parser into no-network mode,
# causing subsequent network-based parses to fail. Specifically, under
# lxml 2.2.7, the second call here fails::
#
# >>> etree.fromstring('<foo/>') # set global parser to no-network
# >>> etree.parse('http://www.w3.org/2001/xml.xsd') # fails in no-network mode
#
# If we simply construct a separate parser each time, parses will be
# marginally slower, but this lxml bug will not affect us.
#
# This lxml behavior has been logged as a bug:
# https://bugs.launchpad.net/lxml/+bug/673205
def parseUri(stream, uri=None):
"""Read an XML document from a URI, and return a :mod:`lxml.etree`
document."""
return etree.parse(stream, parser=_get_xmlparser(), base_url=uri)
def parseString(string, uri=None):
"""Read an XML document provided as a byte string, and return a
:mod:`lxml.etree` document. String cannot be a Unicode string.
Base_uri should be provided for the calculation of relative URIs."""
return etree.fromstring(string, parser=_get_xmlparser(), base_url=uri)
# internal cache for loaded schemas, so we only load each schema once
_loaded_schemas = {}
def loadSchema(uri, base_uri=None):
"""Load an XSD XML document (specified by filename or URL), and return a
:class:`lxml.etree.XMLSchema`.
"""
# uri to use for reporting errors - include base uri if any
if uri in _loaded_schemas:
return _loaded_schemas[uri]
error_uri = uri
if base_uri is not None:
error_uri += ' (base URI %s)' % base_uri
try:
logger.debug('Loading schema %s' % uri)
_loaded_schemas[uri] = etree.XMLSchema(etree.parse(uri,
parser=_get_xmlparser(),
base_url=base_uri))
return _loaded_schemas[uri]
except IOError as io_err:
# add a little more detail to the error message - but should still be an IO error
raise IOError('Failed to load schema %s : %s' % (error_uri, io_err))
except etree.XMLSchemaParseError as parse_err:
# re-raise as a schema parse error, but ensure includes details about schema being loaded
raise etree.XMLSchemaParseError('Failed to parse schema %s -- %s' % (error_uri, parse_err))
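# Example usage (illustrative; 'record.xml' is a placeholder file name):
#
#   schema = loadSchema('http://www.openarchives.org/OAI/2.0/oai_dc.xsd')
#   doc = etree.parse('record.xml', parser=_get_xmlparser())
#   schema.validate(doc)      # -> True or False
#
# Repeated calls with the same uri return the cached lxml.etree.XMLSchema.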
def load_xslt(filename=None, xsl=None):
'''Load and compile an XSLT document (specified by filename or string)
for repeated use in transforming XML.
'''
parser = _get_xmlparser()
if filename is not None:
xslt_doc = etree.parse(filename, parser=parser)
if xsl is not None:
xslt_doc = etree.fromstring(xsl, parser=parser)
return etree.XSLT(xslt_doc)
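# Example usage (illustrative; the stylesheet file name is a placeholder):
#
#   transform = load_xslt(filename='mods_to_dc.xsl')
#   result_tree = transform(some_doc)   # compiled XSLT, reusable across calls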
def _http_uri(uri):
return uri.startswith('http:') or uri.startswith('https:')
class _FieldDescriptor(object):
def __init__(self, field):
self.field = field
def __get__(self, obj, objtype):
if obj is None:
# NOTE: return the *field* here rather than self;
# allows sphinx autodocumentation to inspect the type properly
return self.field
return self.field.get_for_node(obj.node, obj.context)
def __set__(self, obj, value):
return self.field.set_for_node(obj.node, obj.context, value)
def __delete__(self, obj):
return self.field.delete_for_node(obj.node, obj.context)
class XmlObjectType(type):
"""
A metaclass for :class:`XmlObject`.
Analogous in principle to Django's ``ModelBase``, this metaclass
functions rather differently. While it'll likely get a lot closer over
time, we just haven't been growing ours long enough to demand all of the
abstractions built into Django's models. For now, we do three things:
1. take any :class:`~eulxml.xmlmap.fields.Field` members and convert
them to descriptors,
2. store all of these fields and all of the base classes' fields in a
``_fields`` dictionary on the class, and
3. if any local (non-parent) fields look like self-referential
:class:`eulxml.xmlmap.NodeField` objects then patch them up
to refer to the newly-created :class:`XmlObject`.
"""
def __new__(cls, name, bases, defined_attrs):
use_attrs = {}
fields = {}
recursive_fields = []
# inherit base fields first; that way current class field defs will
# override parents. note that since the parents already added fields
# from *their* parents (because they were built from XmlObjectType),
# we don't have to recurse.
for base in bases:
base_fields = getattr(base, '_fields', None)
if base_fields:
fields.update(base_fields)
base_xsd = getattr(base, 'XSD_SCHEMA', None)
schema_obj = None
for attr_name, attr_val in defined_attrs.items():
# XXX: not a fan of isinstance here. maybe use something like
# django's contribute_to_class?
if isinstance(attr_val, Field):
if isinstance(attr_val, SchemaField):
# special case: schema field will look at the schema and return appropriate field type
if 'XSD_SCHEMA' in defined_attrs or base_xsd:
# load schema_obj the first time we need it
if schema_obj is None:
# if xsd schema is directly defined, use that
if 'XSD_SCHEMA' in defined_attrs:
schema_obj = load_xmlobject_from_file(defined_attrs['XSD_SCHEMA'],
XsdSchema)
# otherwise, use nearest parent xsd
else:
schema_obj = load_xmlobject_from_file(base_xsd, XsdSchema)
attr_val = attr_val.get_field(schema_obj)
field = attr_val
fields[attr_name] = field
use_attrs[attr_name] = _FieldDescriptor(field)
# collect self-referential NodeFields so that we can resolve
# them once we've created the new class
node_class = getattr(field, 'node_class', None)
if isinstance(node_class, six.string_types):
if node_class in ('self', name):
recursive_fields.append(field)
else:
msg = ('Class %s has field %s with node_class %s, ' +
'but the only supported class names are ' +
'"self" and %s.') % (name, attr_val,
repr(node_class),
repr(name))
raise ValueError(msg)
# if a field 'foo' has a 'create_for_node' method, then add
# a 'create_foo' method to call it. generally this isn't
# helpful, but NodeField uses it.
if hasattr(attr_val, 'create_for_node'):
create_method_name = 'create_' + attr_name
create_method = cls._make_create_field(create_method_name, attr_val)
use_attrs[create_method_name] = create_method
else:
use_attrs[attr_name] = attr_val
use_attrs['_fields'] = fields
super_new = super(XmlObjectType, cls).__new__
new_class = super_new(cls, name, bases, use_attrs)
# patch self-referential NodeFields (collected above) with the
# newly-created class
for field in recursive_fields:
assert field.node_class in ('self', name)
field.node_class = new_class
return new_class
@staticmethod
def _make_create_field(field_name, field):
def create_field(xmlobject):
field.create_for_node(xmlobject.node, xmlobject.context)
create_field.__name__ = str(field_name)
return create_field
@six.python_2_unicode_compatible
class XmlObject(six.with_metaclass(XmlObjectType, object)):
"""
A Python object wrapped around an XML node.
Typical programs will define subclasses of :class:`XmlObject` with
various field members. Some programs will use
:func:`load_xmlobject_from_string` and :func:`load_xmlobject_from_file`
to create instances of these subclasses. Other programs will create them
directly, passing a node argument to the constructor. If the
subclass defines a :attr:`ROOT_NAME` then this node argument is
optional: Programs may then create instances directly with no
constructor arguments.
Programs can also pass an optional dictionary to the constructor to
specify namespaces for XPath evaluation.
If keyword arguments are passed in to the constructor, they will be used to
set initial values for the corresponding fields on the :class:`XmlObject`.
(Only currently supported for non-list fields.)
Custom equality/non-equality tests: two instances of :class:`XmlObject` are
considered equal if they point to the same lxml element node.
"""
node = None
"""The top-level xml node wrapped by the object"""
ROOT_NAME = None
"""A default root element name (without namespace prefix) used when an object
of this type is created from scratch."""
ROOT_NS = None
"""The default namespace used when an object of this type is created from
scratch."""
ROOT_NAMESPACES = {}
"""A dictionary whose keys are namespace prefixes and whose values are
namespace URIs. These namespaces are used to create the root element when an
object of this type is created from scratch; should include the namespace
and prefix for the root element, if it has one. Any additional namespaces
will be added to the root element."""
XSD_SCHEMA = None
"""URI or file path to the XSD schema associated with this :class:`XmlObject`,
if any. If configured, will be used for optional validation when calling
:meth:`load_xmlobject_from_string` and :meth:`load_xmlobject_from_file`,
and with :meth:`is_valid`.
"""
schema_validate = True
'''Override for schema validation; if a schema must be defined for
the use of :class:`xmlmap.fields.SchemaField` for a sub-xmlobject
that should not be validated, set to False.'''
@property
def xmlschema(self):
"""A parsed XSD schema instance of
:class:`lxml.etree.XMLSchema`; will be loaded the first time
it is requested on any instance of this class if XSD_SCHEMA is
set and xmlchema is None. If you wish to load and parse the
schema at class definition time, instead of at class instance
initialization time, you may want to define your schema in
your subclass like this::
XSD_SCHEMA = "http://www.openarchives.org/OAI/2.0/oai_dc.xsd"
xmlschema = xmlmap.loadSchema(XSD_SCHEMA)
"""
if self.XSD_SCHEMA:
return loadSchema(self.XSD_SCHEMA)
# NOTE: DTD and RNG validation could be handled similarly to XSD validation logic
def __init__(self, node=None, context=None, **kwargs):
if node is None:
node = self._build_root_element()
self.node = node
# FIXME: context probably needs work
# get namespaces from current node OR its parent (in case of an lxml 'smart' string)
if hasattr(node, 'nsmap'):
nsmap = node.nsmap
elif hasattr(node, 'getParent'):
            nsmap = node.getParent().nsmap
else:
nsmap = {}
# xpath has no notion of a default namespace - omit any namespace with no prefix
self.context = {'namespaces': dict([(prefix, ns) for prefix, ns
in six.iteritems(nsmap) if prefix])}
if context is not None:
self.context.update(context)
if hasattr(self, 'ROOT_NAMESPACES'):
# also include any root namespaces to guarantee that expected prefixes are available
self.context['namespaces'].update(self.ROOT_NAMESPACES)
for field, value in six.iteritems(kwargs):
# TODO (maybe): handle setting/creating list fields
setattr(self, field, value)
def _build_root_element(self):
opts = {}
if hasattr(self, 'ROOT_NS'):
opts['namespace'] = self.ROOT_NS
if hasattr(self, 'ROOT_NAMESPACES'):
opts['nsmap'] = self.ROOT_NAMESPACES
E = ElementMaker(**opts)
root = E(self.ROOT_NAME)
return root
def xsl_transform(self, filename=None, xsl=None, return_type=None, **params):
"""Run an xslt transform on the contents of the XmlObject.
XSLT can be passed in as an XSLT object generated by :meth:`load_xslt`
or as filename or string. If a params dictionary is specified, its items
will be passed as parameters to the XSL transformation, and any string
values will automatically be encoded as XSL string parameters.
.. Note::
If XSL is being used multiple times, it is recommended to
use :meth`:load_xslt` to load and compile the XSLT once.
:param filename: xslt filename (optional, one of file and xsl is required)
:param xsl: xslt as string OR compiled XSLT object as returned by
:meth:`load_xslt` (optional)
:param return_type: type of object to return; optional, defaults to
:class:`XmlObject`; specify unicode or string for text output
:returns: an instance of :class:`XmlObject` or the return_type specified
"""
# NOTE: converting _XSLTResultTree to XmlObject because of a bug in its unicode method
# - to output xml result, use serialize instead of unicode
if return_type is None:
return_type = XmlObject
# automatically encode any string params as XSLT string parameters
for key, val in six.iteritems(params):
if isinstance(val, six.string_types):
params[key] = etree.XSLT.strparam(val)
parser = _get_xmlparser()
# if a compiled xslt object is passed in, use that first
if xsl is not None and isinstance(xsl, etree.XSLT):
result = xsl(self.node, **params)
else:
# otherwise, load the xslt
if filename is not None:
xslt_doc = etree.parse(filename, parser=parser)
if xsl is not None:
xslt_doc = etree.fromstring(xsl, parser=parser)
# NOTE: there is a memory bug that results in malloc errors and
# segfaults when using the parsed etree.XSLT approach here.
# As a workaround, using the document xslt method instead.
if self.node == self.node.getroottree().getroot():
# if current node is root node, use entire document for transform
xmltree = self.node.getroottree()
else:
# otherwise, construct a temporary partial document from this node
partial_doc = etree.fromstring(self.serialize(), parser=parser)
xmltree = partial_doc.getroottree()
result = xmltree.xslt(xslt_doc, **params)
# If XSLT returns nothing, transform returns an _XSLTResultTree
# with no root node. Log a warning, and don't generate an
# empty xmlobject which will behave unexpectedly.
# text output does not include a root node, so check separately
if issubclass(return_type, six.string_types):
if result is None:
logger.warning("XSL transform generated an empty result")
return
else:
return return_type(result)
if result is None or result.getroot() is None:
logger.warning("XSL transform generated an empty result")
else:
# pass in root node, rather than the result tree object
return return_type(result.getroot())
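    # Hedged usage sketch (not part of the original source). Assuming an
    # XmlObject instance `doc` and a stylesheet file 'to_dc.xsl' (an
    # illustrative name, not shipped with this module):
    #   xslt = load_xslt(filename='to_dc.xsl')    # compile once, reuse many times
    #   result = doc.xsl_transform(xsl=xslt)      # XmlObject wrapping the result
    #   text = doc.xsl_transform(xsl=xslt, return_type=six.text_type)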
def __str__(self):
if isinstance(self.node, six.string_types):
return self.node
return self.node.xpath("normalize-space(.)")
def __string__(self):
if isinstance(self.node, six.string_types):
return self.node
return u(self).encode('ascii', 'xmlcharrefreplace')
def __eq__(self, other):
# consider two xmlobjects equal if they are pointing to the same xml node
if hasattr(other, 'node') and self.node == other.node:
return True
# consider two xmlobjects equal if they serialize the same
if hasattr(other, 'serialize') and self.serialize() == other.serialize():
return True
# NOTE: does not address "equivalent" xml, which is potentially very complex
return False
def __ne__(self, other):
return not self.__eq__(other)
def serialize(self, stream=None, pretty=False):
"""Serialize the contents of the XmlObject to a stream. Serializes
current node only; for the entire XML document, use :meth:`serializeDocument`.
If no stream is specified, returns a string.
:param stream: stream or other file-like object to write content to (optional)
:param pretty: pretty-print the XML output; boolean, defaults to False
:rtype: stream passed in or an instance of :class:`cStringIO.StringIO`
"""
return self._serialize(self.node, stream=stream, pretty=pretty)
def serializeDocument(self, stream=None, pretty=False):
"""Serialize the contents of the entire XML document (including Doctype
declaration, if there is one), with an XML declaration, for the current
XmlObject to a stream.
If no stream is specified, returns a string.
:param stream: stream or other file-like object to write content to (optional)
:param pretty: pretty-print the XML output; boolean, defaults to False
:rtype: stream passed in or an instance of :class:`cStringIO.StringIO`
"""
return self._serialize(self.node.getroottree(), stream=stream, pretty=pretty,
xml_declaration=True)
def _serialize(self, node, stream=None, pretty=False, xml_declaration=False):
# actual logic of xml serialization
if stream is None:
string_mode = True
stream = six.BytesIO()
else:
string_mode = False
# NOTE: etree c14n doesn't seem to like fedora info: URIs
stream.write(etree.tostring(node, encoding='UTF-8', pretty_print=pretty,
xml_declaration=xml_declaration))
if string_mode:
data = stream.getvalue()
stream.close()
return data
return stream
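    # Hedged usage sketch (not part of the original source):
    #   data = obj.serialize(pretty=True)         # bytes, since no stream was given
    #   with open('out.xml', 'wb') as out:        # 'out.xml' is an illustrative path
    #       obj.serializeDocument(stream=out)     # full document with XML declaration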
def is_valid(self):
"""Determine if the current document is valid as far as we can determine.
If there is a schema associated, check for schema validity. Otherwise,
return True.
:rtype: boolean
"""
# valid if there are no validation errors
return self.validation_errors() == []
def validation_errors(self):
"""Return a list of validation errors. Returns an empty list
if the xml is schema valid or no schema is defined. If a
schema is defined but :attr:`schema_validate` is False, schema
validation will be skipped.
Currently only supports schema validation.
:rtype: list
"""
# if we add other types of validation (DTD, RNG), incorporate them here
if self.xmlschema and self.schema_validate and not self.schema_valid():
return self.schema_validation_errors()
return []
def schema_valid(self):
"""Determine if the current document is schema-valid according to the
configured XSD Schema associated with this instance of :class:`XmlObject`.
:rtype: boolean
:raises: Exception if no XSD schema is defined for this XmlObject instance
"""
if self.xmlschema is not None:
# clear out errors so they are not duplicated by repeated
# validations on the same schema object
self.xmlschema._clear_error_log()
# NOTE: _clear_error_log is technically private, but I can't find
# any public method to clear the validation log.
return self.xmlschema.validate(self.node)
else:
raise Exception('No XSD schema is defined, cannot validate document')
def schema_validation_errors(self):
"""
        Retrieve any validation errors that occurred during schema validation
done via :meth:`is_valid`.
:returns: a list of :class:`lxml.etree._LogEntry` instances
:raises: Exception if no XSD schema is defined for this XmlObject instance
"""
if self.xmlschema is not None:
return self.xmlschema.error_log
else:
raise Exception('No XSD schema is defined, cannot return validation errors')
def is_empty(self):
"""
Returns True if the root node contains no child elements, no
attributes, and no text. Returns False if any are present.
"""
return len(self.node) == 0 and len(self.node.attrib) == 0 \
and not self.node.text and not self.node.tail # regular text or text after a node
""" April 2016. Removing Urllib2Resolver so we can support
loading local copies of schema and skip validation in get_xml_parser """
def _get_xmlparser(xmlclass=XmlObject, validate=False, resolver=None):
"""Initialize an instance of :class:`lxml.etree.XMLParser` with appropriate
settings for validation. If validation is requested and the specified
instance of :class:`XmlObject` has an XSD_SCHEMA defined, that will be used.
Otherwise, uses DTD validation. Switched resolver to None to skip validation.
"""
if validate:
if hasattr(xmlclass, 'XSD_SCHEMA') and xmlclass.XSD_SCHEMA is not None:
# If the schema has already been loaded, use that.
            # (since we are accessing the *class*, accessing 'xmlschema' returns a property,
# not the initialized schema object we actually want).
xmlschema = getattr(xmlclass, '_xmlschema', None)
# otherwise, load the schema
if xmlschema is None:
xmlschema = loadSchema(xmlclass.XSD_SCHEMA)
opts = {'schema': xmlschema}
else:
# if configured XmlObject does not have a schema defined, assume DTD validation
opts = {'dtd_validation': True}
else:
# If validation is not requested, then the parsing should fail
# only for well-formedness issues.
#
# Therefore, we must turn off collect_ids, otherwise lxml will
# have a problem with duplicate IDs as it collects
# them. However, the XML spec declares ID uniqueness as a
# validation constraint, not a well-formedness
# constraint. (See https://www.w3.org/TR/xml/#id.)
opts = {"collect_ids": False}
parser = etree.XMLParser(**opts)
if resolver is not None:
parser.resolvers.add(resolver)
return parser
def load_xmlobject_from_string(string, xmlclass=XmlObject, validate=False,
resolver=None):
"""Initialize an XmlObject from a string.
If an xmlclass is specified, construct an instance of that class instead
of :class:`~eulxml.xmlmap.XmlObject`. It should be a subclass of XmlObject.
The constructor will be passed a single node.
If validation is requested and the specified subclass of :class:`XmlObject`
has an XSD_SCHEMA defined, the parser will be configured to validate against
the specified schema. Otherwise, the parser will be configured to use DTD
validation, and expect a Doctype declaration in the xml content.
:param string: xml content to be loaded, as a string
:param xmlclass: subclass of :class:`~eulxml.xmlmap.XmlObject` to initialize
:param validate: boolean, enable validation; defaults to false
:rtype: instance of :class:`~eulxml.xmlmap.XmlObject` requested
"""
parser = _get_xmlparser(xmlclass=xmlclass, validate=validate, resolver=resolver)
element = etree.fromstring(string, parser)
return xmlclass(element)
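# Hedged usage sketch (not part of the original source), using the XsdType
# class defined further below; the XML snippet is illustrative only:
#   xsd_type = load_xmlobject_from_string(
#       '<simpleType xmlns="http://www.w3.org/2001/XMLSchema" name="status"/>',
#       xmlclass=XsdType)
#   xsd_type.name   # -> 'status'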
def load_xmlobject_from_file(filename, xmlclass=XmlObject, validate=False,
resolver=None):
"""Initialize an XmlObject from a file.
See :meth:`load_xmlobject_from_string` for more details; behaves exactly the
same, and accepts the same parameters, except that it takes a filename
instead of a string.
:param filename: name of the file that should be loaded as an xmlobject.
:meth:`etree.lxml.parse` will accept a file name/path, a file object, a
file-like object, or an HTTP or FTP url, however file path and URL are
recommended, as they are generally faster for lxml to handle.
"""
parser = _get_xmlparser(xmlclass=xmlclass, validate=validate, resolver=resolver)
tree = etree.parse(filename, parser)
return xmlclass(tree.getroot())
from eulxml.xmlmap.fields import *
# Import these for backward compatibility. Should consider deprecating these
# and asking new code to pull them from descriptor
# XSD schema xmlobjects - used in XmlObjectType to process SchemaFields
# FIXME: where should these actually go? depends on both XmlObject and fields
class XsdType(XmlObject):
ROOT_NAME = 'simpleType'
name = StringField('@name')
base = StringField('xs:restriction/@base')
restricted_values = StringListField('xs:restriction/xs:enumeration/@value')
def base_type(self):
# for now, only supports simple types - eventually, may want logic to
# traverse extended types to get to base XSD type
if ':' in self.base: # for now, ignore prefix (could be xsd, xs, etc. - how to know which?)
prefix, basetype = self.base.split(':')
else:
basetype = self.base
return basetype
class XsdSchema(XmlObject):
ROOT_NAME = 'schema'
ROOT_NS = 'http://www.w3.org/2001/XMLSchema'
ROOT_NAMESPACES = {'xs': ROOT_NS}
def get_type(self, name=None, xpath=None):
if xpath is None:
if name is None:
raise Exception("Must specify either name or xpath")
xpath = '//*[@name="%s"]' % name
result = self.node.xpath(xpath)
if len(result) == 0:
raise Exception("No Schema type definition found for xpath '%s'" % xpath)
elif len(result) > 1:
raise Exception("Too many schema type definitions found for xpath '%s' (found %d)" \
% (xpath, len(result)))
return XsdType(result[0], context=self.context) # pass in namespaces
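    # Hedged usage sketch (not part of the original source); the schema path
    # and type name below are illustrative only:
    #   schema = load_xmlobject_from_file('my_schema.xsd', XsdSchema)
    #   status = schema.get_type(name='statusType')      # returns an XsdType
    #   status.restricted_values                         # xs:enumeration values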
| apache-2.0 | 6,997,299,404,527,287,000 | 40.675994 | 106 | 0.63478 | false |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/freestyle/style_modules/blueprint_ellipses.py | 1 | 1827 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Filename : blueprint_ellipses.py
# Author : Emmanuel Turquin
# Date : 04/08/2005
# Purpose : Produces a blueprint using elliptic contour strokes
from freestyle import ChainPredicateIterator, ConstantThicknessShader, ContourUP1D, IncreasingColorShader, \
Operators, QuantitativeInvisibilityUP1D, SameShapeIdBP1D, TextureAssignerShader, TrueUP1D
from PredicatesU1D import pyHigherLengthUP1D
from logical_operators import AndUP1D, NotUP1D
from shaders import pyBluePrintEllipsesShader, pyPerlinNoise1DShader
upred = AndUP1D(QuantitativeInvisibilityUP1D(0), ContourUP1D())
bpred = SameShapeIdBP1D()
Operators.select(upred)
Operators.bidirectional_chain(ChainPredicateIterator(upred, bpred), NotUP1D(upred))
Operators.select(pyHigherLengthUP1D(200))
shaders_list = [
ConstantThicknessShader(5),
pyBluePrintEllipsesShader(3),
pyPerlinNoise1DShader(0.1, 10, 8),
TextureAssignerShader(4),
IncreasingColorShader(0.6, 0.3, 0.3, 0.7, 0.3, 0.3, 0.3, 0.1),
]
Operators.create(TrueUP1D(), shaders_list)
| gpl-3.0 | 5,571,504,790,626,471,000 | 42.5 | 108 | 0.762452 | false |
citiususc/construe | construe/knowledge/abstraction_patterns/rhythm/patterns.py | 1 | 5602 | # -*- coding: utf-8 -*-
# pylint: disable-msg= E1002, E1101
"""
Created on Wed Nov 21 09:04:17 2012
This file contains the definition of a set of very simple abstraction patterns
in order to perform rhythm interpretation on an ECG signal.
@author: T. Teijeiro
"""
import copy
import construe.knowledge.observables as o
from construe.knowledge.constants import (PW_DURATION, ST_INTERVAL,
N_PR_INTERVAL, N_QT_INTERVAL,
ASYSTOLE_RR, PQ_INTERVAL, QRS_DUR)
from construe.model import Interval as Iv
from construe.model.automata import PatternAutomata, ABSTRACTED, ENVIRONMENT
from construe.utils.units_helper import msec2samples as ms2sp
def _rstart_tconst(pattern, qrs):
"""
Temporal constraints for the Rhythm Start abstraction pattern.
"""
pattern.tnet.set_equal(qrs.time, pattern.hypothesis.time)
def _p_qrs_tconst(pattern, pwave):
"""
Temporal constraints of the P Wave wrt the corresponding QRS complex
"""
obseq = pattern.obs_seq
idx = pattern.get_step(pwave)
if idx == 0 or not isinstance(obseq[idx-1], o.QRS):
return
qrs = obseq[idx-1]
pattern.tnet.add_constraint(pwave.start, pwave.end, PW_DURATION)
#PR interval
pattern.tnet.add_constraint(pwave.start, qrs.start, N_PR_INTERVAL)
pattern.tnet.set_before(pwave.end, qrs.start)
def _t_qrs_tconst(pattern, twave):
"""
Temporal constraints of the T waves with the corresponding QRS complex
"""
obseq = pattern.obs_seq
idx = pattern.get_step(twave)
    #We find the qrs observation preceding this T wave.
try:
qrs = next(obseq[i] for i in range(idx-1, -1, -1)
if isinstance(obseq[i], o.QRS))
tnet = pattern.tnet
if idx > 0 and isinstance(obseq[idx-1], o.PWave):
pwave = obseq[idx-1]
tnet.add_constraint(pwave.end, twave.start, Iv(ST_INTERVAL.start,
PQ_INTERVAL.end + QRS_DUR.end))
#ST interval
tnet.add_constraint(qrs.end, twave.start, ST_INTERVAL)
#QT duration
tnet.add_constraint(qrs.start, twave.end, N_QT_INTERVAL)
except StopIteration:
pass
def _prev_rhythm_tconst(pattern, rhythm):
    """Temporal constraints of a cardiac rhythm with the preceding one."""
pattern.tnet.set_equal(pattern.hypothesis.start, rhythm.end)
def _asyst_prev_rhythm_tconst(pattern, rhythm):
    """Temporal constraints of an asystole with the preceding rhythm."""
pattern.tnet.set_equal(pattern.hypothesis.start, rhythm.end)
pattern.tnet.add_constraint(pattern.hypothesis.start,
pattern.hypothesis.end, ASYSTOLE_RR)
def _qrs1_tconst(pattern, qrs):
"""Temporal constraints of the first QRS in the asystole."""
pattern.tnet.set_equal(pattern.hypothesis.start, qrs.time)
pattern.tnet.set_before(qrs.end, pattern.hypothesis.end)
def _qrs2_tconst(pattern, qrs):
"""Temporal constraints of the delayed QRS in the asystole."""
pattern.tnet.set_equal(qrs.time, pattern.hypothesis.end)
if len(pattern.evidence[o.QRS]) > 1:
prev = pattern.evidence[o.QRS][0]
pattern.tnet.add_constraint(prev.time, qrs.time, ASYSTOLE_RR)
def _rhythmstart_gconst(pattern, _):
"""General constraints of the rhythm start pattern."""
    #We assume a starting mean rhythm of 75ppm, but the range allows from 65
#to 85bpm
pattern.hypothesis.meas = o.CycleMeasurements((ms2sp(800), ms2sp(200)),
(0, 0), (0, 0))
def _asystole_gconst(pattern, _):
"""General constraints of the asystole pattern."""
    #The rhythm information is copied from the preceding rhythm.
if pattern.evidence[o.Cardiac_Rhythm]:
rhythm = pattern.evidence[o.Cardiac_Rhythm][0]
pattern.hypothesis.meas = copy.copy(rhythm.meas)
RHYTHMSTART_PATTERN = PatternAutomata()
RHYTHMSTART_PATTERN.name = "Rhythm Start"
RHYTHMSTART_PATTERN.Hypothesis = o.RhythmStart
RHYTHMSTART_PATTERN.add_transition(0, 1, o.QRS, ABSTRACTED, _rstart_tconst,
_rhythmstart_gconst)
RHYTHMSTART_PATTERN.add_transition(1, 2, o.PWave, ABSTRACTED, _p_qrs_tconst)
RHYTHMSTART_PATTERN.add_transition(2, 3, o.TWave, ABSTRACTED, _t_qrs_tconst)
RHYTHMSTART_PATTERN.add_transition(1, 3, o.TWave, ABSTRACTED, _t_qrs_tconst)
RHYTHMSTART_PATTERN.add_transition(1, 3)
RHYTHMSTART_PATTERN.final_states.add(3)
RHYTHMSTART_PATTERN.abstractions[o.QRS] = (RHYTHMSTART_PATTERN.transitions[0],)
RHYTHMSTART_PATTERN.freeze()
ASYSTOLE_PATTERN = PatternAutomata()
ASYSTOLE_PATTERN.name = "Asystole"
ASYSTOLE_PATTERN.Hypothesis = o.Asystole
ASYSTOLE_PATTERN.add_transition(0, 1, o.Cardiac_Rhythm, ENVIRONMENT,
_asyst_prev_rhythm_tconst)
ASYSTOLE_PATTERN.add_transition(1, 2, o.QRS, ENVIRONMENT, _qrs1_tconst)
ASYSTOLE_PATTERN.add_transition(2, 3, o.QRS, ABSTRACTED, _qrs2_tconst,
_asystole_gconst)
ASYSTOLE_PATTERN.add_transition(3, 4, o.PWave, ABSTRACTED, _p_qrs_tconst)
ASYSTOLE_PATTERN.add_transition(4, 5, o.TWave, ABSTRACTED, _t_qrs_tconst)
ASYSTOLE_PATTERN.add_transition(3, 5, o.TWave, ABSTRACTED, _t_qrs_tconst)
ASYSTOLE_PATTERN.add_transition(3, 5)
ASYSTOLE_PATTERN.final_states.add(5)
ASYSTOLE_PATTERN.abstractions[o.QRS] = (ASYSTOLE_PATTERN.transitions[2],)
ASYSTOLE_PATTERN.freeze()
if __name__ == "__main__":
pass
| agpl-3.0 | 8,645,814,896,490,964,000 | 40.80597 | 79 | 0.661014 | false |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob/tests/test_large_block_blob.py | 1 | 16857 | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
from os import path, remove, sys, urandom
import platform
import unittest
import uuid
from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer
from azure.storage.blob import (
BlobServiceClient,
ContainerClient,
BlobClient,
ContentSettings
)
if sys.version_info >= (3,):
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
from _shared.testcase import GlobalStorageAccountPreparer
from devtools_testutils.storage import StorageTestCase
# ------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'largeblob'
LARGE_BLOB_SIZE = 12 * 1024 * 1024
LARGE_BLOCK_SIZE = 6 * 1024 * 1024
# ------------------------------------------------------------------------------
if platform.python_implementation() == 'PyPy':
pytest.skip("Skip tests for Pypy", allow_module_level=True)
class StorageLargeBlockBlobTest(StorageTestCase):
def _setup(self, storage_account, key):
# test chunking functionality by reducing the threshold
# for chunking and the size of each chunk, otherwise
# the tests would take too long to execute
self.bsc = BlobServiceClient(
self.account_url(storage_account, "blob"),
credential=key,
max_single_put_size=32 * 1024,
max_block_size=2 * 1024 * 1024,
min_large_block_upload_threshold=1 * 1024 * 1024)
self.config = self.bsc._config
self.container_name = self.get_resource_name('utcontainer')
if self.is_live:
try:
self.bsc.create_container(self.container_name)
except:
pass
def _teardown(self, file_name):
if path.isfile(file_name):
try:
remove(file_name)
except:
pass
# --Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_blob(self):
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
blob.upload_blob(b'')
return blob
def assertBlobEqual(self, container_name, blob_name, expected_data):
blob = self.bsc.get_blob_client(container_name, blob_name)
actual_data = blob.download_blob()
self.assertEqual(b"".join(list(actual_data.chunks())), expected_data)
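    # Hedged usage sketch (not part of the original tests) of how the helpers
    # above compose; names mirror the real test methods below:
    #   blob = self._create_blob()                 # empty block blob
    #   data = urandom(LARGE_BLOB_SIZE)
    #   blob.upload_blob(data, overwrite=True)
    #   self.assertBlobEqual(self.container_name, blob.blob_name, data)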
# --Test cases for block blobs --------------------------------------------
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_bytes_large(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'), urandom(LARGE_BLOCK_SIZE))
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_bytes_large_with_md5(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
urandom(LARGE_BLOCK_SIZE),
validate_content=True)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_stream_large(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
            resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
stream,
length=LARGE_BLOCK_SIZE)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_put_block_stream_large_with_md5(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob = self._create_blob()
# Act
for i in range(5):
stream = BytesIO(bytearray(LARGE_BLOCK_SIZE))
            resp = blob.stage_block(
'block {0}'.format(i).encode('utf-8'),
stream,
length=LARGE_BLOCK_SIZE,
validate_content=True)
self.assertIsNotNone(resp)
assert 'content_md5' in resp
assert 'content_crc64' in resp
assert 'request_id' in resp
# Assert
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'large_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, overwrite=True)
block_list = blob.get_block_list()
# Assert
        self.assertNotEqual(len(block_list), 0)
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_with_md5(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = "blob_from_path_with_md5.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, validate_content=True, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_non_parallel(self, resource_group, location, storage_account, storage_account_key):
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(self.get_random_bytes(100))
FILE_PATH = "blob_from_path_non_parallel.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_with_progress(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = "blob_from_path_with_progress.temp.dat"
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_path_with_properties(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'blob_from_path_with_properties.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_stream_chunked_upload(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'blob_from_stream_chunked_upload.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_creat_lrgblob_frm_stream_w_progress_chnkd_upload(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'stream_w_progress_chnkd_upload.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(response):
current = response.context['upload_stream_current']
total = response.context['data_stream_total']
if current is not None:
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, max_concurrency=2, raw_response_hook=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.config.max_block_size, progress)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_create_large_blob_from_stream_chunked_upload_with_count(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'chunked_upload_with_count.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, length=blob_size, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_creat_lrgblob_frm_strm_chnkd_uplod_w_count_n_props(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'plod_w_count_n_props.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(
stream, length=blob_size, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
@pytest.mark.live_test_only
@GlobalStorageAccountPreparer()
def test_creat_lrg_blob_frm_stream_chnked_upload_w_props(self, resource_group, location, storage_account, storage_account_key):
# parallel tests introduce random order of requests, can only run live
self._setup(storage_account, storage_account_key)
blob_name = self._get_blob_reference()
blob = self.bsc.get_blob_client(self.container_name, blob_name)
data = bytearray(urandom(LARGE_BLOB_SIZE))
FILE_PATH = 'creat_lrg_blob.temp.{}.dat'.format(str(uuid.uuid4()))
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings = ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
blob.upload_blob(stream, content_settings=content_settings, max_concurrency=2)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = blob.get_blob_properties()
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
self._teardown(FILE_PATH)
# ------------------------------------------------------------------------------ | mit | -1,553,217,074,981,763,000 | 40.117073 | 139 | 0.623599 | false |
hftools/hftools | hftools/file_formats/tests/test_common.py | 1 | 6853 | #-----------------------------------------------------------------------------
# Copyright (c) 2014, HFTools Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import os
import numpy as np
basepath = os.path.split(__file__)[0]
import hftools.file_formats.common as common
from hftools.testing import TestCase
from hftools.dataset import hfarray, DataBlock, DimSweep, DimMatrix_i,\
DimMatrix_j
class TestDateFunctions(TestCase):
def setUp(self):
pass
def test_conv_date(self):
self.assertEqual(common.conv_date("2014-05-08"),
np.datetime64("2014-05-08"))
def test_conv_date_error(self):
self.assertRaises(ValueError, common.conv_date, "not a date string")
def test_conv_datetime(self):
self.assertEqual(common.conv_date_time("2014-05-08 08:07:06"),
np.datetime64("2014-05-08 08:07:06"))
def test_conv_datetime_error(self):
self.assertRaises(ValueError,
common.conv_date_time,
"not a date string")
class TestProcessComment(TestCase):
def test_process_comment_1(self):
res = common.process_comment("Kalle [mV]: 1")
self.assertEqual(res["Kalle"], 0.001)
self.assertEqual(res["Kalle"].unit, "V")
def test_process_comment_2(self):
res = common.process_comment("Kalle: 1")
self.assertEqual(res["Kalle"], 1)
self.assertIsNone(res["Kalle"].unit)
def test_process_comment_3(self):
res = common.process_comment("Kalle: ap1")
self.assertEqual(res["Kalle"], "ap1")
def test_process_comment_4(self):
res = common.process_comment("Kalle ap1")
self.assertEqual(res, {})
def test_process_comment_5(self):
res = common.process_comment("Kalle [mV]: ap1")
self.assertEqual(res["Kalle"], "ap1")
class TestComments(TestCase):
def test_table_1(self):
c = common.Comments([])
self.assertEqual(c.table(), [])
def test_table_2(self):
c = common.Comments(["Kalle [V]: 1"])
self.assertEqual(c.table()[3:], ["Kalle 1" + " " * 70])
def test_table_3(self):
c = common.Comments(["Kalle [V]: a"])
self.assertEqual(c.table()[3:], ["Kalle a" + " " * 70])
class FormatUnitHeader(TestCase):
def test_format_unit_header_1(self):
res = common.format_unit_header("Power", hfarray([0.1], unit="W"))
self.assertEqual(res, "Power [W]")
def test_format_unit_header_2(self):
res = common.format_unit_header("Power", hfarray([0.1], unit=None))
self.assertEqual(res, "Power")
class FormatComplexHeader(TestCase):
def test_format_complex_header_1(self):
res = common.format_complex_header(["Z"],
[hfarray([0.1 + 1j], unit="Ohm")],
"%s",
"Re(%s)",
"Im(%s)"
)
self.assertEqual(res, ["Re(Z) [Ohm]", "Im(Z) [Ohm]"])
def test_format_complex_header_2(self):
res = common.format_complex_header(["Z"],
[hfarray([0.1 + 1j], unit=None)],
"%s",
"Re(%s)",
"Im(%s)"
)
self.assertEqual(res, ["Re(Z)", "Im(Z)"])
def test_format_complex_header_3(self):
res = common.format_complex_header(["Z"],
[hfarray([0.1], unit="Ohm")],
"%s",
"Re(%s)",
"Im(%s)"
)
self.assertEqual(res, ["Z [Ohm]"])
def test_format_complex_header_4(self):
res = common.format_complex_header(["Z"],
[hfarray([0.1], unit=None)],
"%s",
"Re(%s)",
"Im(%s)"
)
self.assertEqual(res, ["Z"])
def test_format_complex_header_5(self):
res = common.format_complex_header(["Z"],
[hfarray([0.1 + 1j], unit=None)],
"%s",
"%s",
None
)
self.assertEqual(res, ["Z"])
class TestNormalizeNames(TestCase):
def test_normalize_names_1(self):
db = DataBlock()
db.b = hfarray(1)
db["a1/a2 raw"] = hfarray(1)
res = common.normalize_names(db)
self.assertTrue("a12" in res.vardata)
def test_normalize_names_2(self):
db = DataBlock()
db.b = hfarray(1)
db["Mean(A)"] = hfarray(1)
res = common.normalize_names(db)
self.assertTrue("A" in res.vardata)
def test_normalize_names_3(self):
db = DataBlock()
db.b = hfarray(1)
db["a1/a2 raw"] = hfarray(1)
db["a12"] = hfarray(2)
res = common.normalize_names(db)
self.assertTrue("a1/a2 raw" in res.vardata)
self.assertTrue("a12" in res.vardata)
def test_normalize_names_error(self):
db = DataBlock()
db.b = hfarray(1)
db["Mean(A)"] = hfarray(1)
db["A"] = hfarray(2)
self.assertRaises(ValueError, common.normalize_names, db)
class Test_make_col_from_matrix(TestCase):
def test_make_col_from_matrix_1(self):
header = ["S", "P"]
dims = (DimSweep("f", 1), DimMatrix_i("i", 2), DimMatrix_j("j", 2), )
columns = [hfarray([[[11, 12], [21, 22]]], dims=dims),
hfarray([10], dims=dims[:1])]
res = common.make_col_from_matrix(header, columns, "%s%s%s")
self.assertEqual(res, (["S11", "S12", "S21", "S22", "P"],
[11, 12, 21, 22, 10]))
def test_make_col_from_matrix_2(self):
header = ["S"]
dims = (DimSweep("f", 1), DimMatrix_i("i", 2), DimMatrix_j("j", 2), )
columns = [hfarray([[[11, 12], [21, 22]]], dims=dims)]
res = common.make_col_from_matrix(header, columns, "%s%s%s",
fortranorder=True)
self.assertEqual(res, (["S11", "S21", "S12", "S22"],
[11, 21, 12, 22]))
| bsd-3-clause | 2,110,268,371,790,687,000 | 36.043243 | 78 | 0.468262 | false |
grow/pygrow | install.py | 1 | 7591 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
"""Standalone Grow SDK installer. Downloads Grow SDK and sets up command aliases."""
import argparse
import datetime
import json
import os
import platform
import re
import sys
import tempfile
import urllib
import urllib2
import zipfile
DOWNLOAD_URL_FORMAT = 'https://github.com/grow/grow/releases/download/{version}/{name}'
RELEASES_API = 'https://api.github.com/repos/grow/grow/releases'
RC_FILES = ['.bashrc', '.zshrc', '.bash_profile', '.profile']
RC_FILE_DEFAULT = '.bashrc'
BIN_PATH = '~/bin/grow'
# TODO: Remove when no longer checking for alias.
ALIAS_FILES = ['.bash_aliases', '.bash_profile', '.profile', '.bashrc']
ALIAS_RE = re.compile(r'^alias grow\=([\'"])(.*)\1$', re.MULTILINE)
if 'Linux' in platform.system():
PLATFORM = 'linux'
elif 'Darwin' in platform.system():
PLATFORM = 'mac'
else:
print('{} is not a supported platform. Please file an issue at '
'https://github.com/grow/grow/issues'.format(sys.platform))
sys.exit(-1)
def hai(text, *args):
print text.format(*args, **{
'red': '\033[0;31m',
'/red': '\033[0;m',
'green': '\033[0;32m',
'/green': '\033[0;m',
'yellow': '\033[0;33m',
'/yellow': '\033[0;m',
'white': '\033[0;37m',
'/white': '\033[0;m',
})
def orly(text, default=False):
resp = raw_input(text).strip().lower()
if resp == 'y':
return True
elif resp == 'n':
return False
return default
# TODO: Remove when no longer checking for alias.
def get_existing_aliases():
"""Find all existing aliases using the regex."""
files_to_alias = {}
for basename in ALIAS_FILES:
basepath = os.path.expanduser('~/{}'.format(basename))
if os.path.exists(basepath):
profile = open(basepath).read()
matches = re.findall(ALIAS_RE, profile)
if matches:
files_to_alias[basepath] = [x[1] for x in matches]
return files_to_alias
def get_rc_path():
for basename in RC_FILES:
basepath = os.path.expanduser('~/{}'.format(basename))
if os.path.exists(basepath):
return basepath
return os.path.expanduser('~/{}'.format(RC_FILE_DEFAULT))
def get_release_for_platform(releases, platform):
"""Find the latest release available for the platform."""
for release in releases:
for each_asset in release['assets']:
if platform in each_asset.get('name', '').lower():
return release
return None
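# Hedged sketch (not part of the original source) of the release JSON shape
# walked above; field names follow the public GitHub Releases API, values are
# illustrative:
#   [{"tag_name": "1.0.0",
#     "assets": [{"name": "Grow-SDK-Mac-1.0.0.zip", ...}, ...]},
#    ...]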
def has_bin_in_path(bin_path):
"""Determine if the binary path is part of the system paths."""
return bin_path in os.environ['PATH'].split(':')
def install(rc_path=None, bin_path=None, force=False):
"""Download and install the binary."""
resp = json.loads(urllib.urlopen(RELEASES_API).read())
try:
release = get_release_for_platform(resp, PLATFORM)
except KeyError:
hai('{red}There was a problem accessing the GitHub Releases API.{/red}')
if 'message' in resp:
hai('{red}{}{/red}', resp['message'])
sys.exit(-1)
if release is None:
print 'Not available for platform: {}.'.format(platform.system())
sys.exit(-1)
version = release['tag_name']
asset = None
for each_asset in release['assets']:
if PLATFORM in each_asset.get('name', '').lower():
asset = each_asset
break
download_url = DOWNLOAD_URL_FORMAT.format(
version=version, name=asset['name'])
bin_path = os.path.expanduser(bin_path or BIN_PATH)
bin_dir = os.path.dirname(bin_path)
rc_comment = '# Added by Grow SDK Installer ({})'.format(
datetime.datetime.now())
rc_path = os.path.expanduser(rc_path or get_rc_path())
rc_path_append = 'export PATH={}:$PATH'.format(bin_dir)
hai('{yellow}Welcome to the installer for Grow SDK v{}{/yellow}', version)
hai('{yellow}Release notes: {/yellow}https://github.com/grow/grow/releases/tag/{}', version)
hai('{yellow}[ ]{/yellow} {green}This script will install:{/green} {}', bin_path)
bin_in_path = has_bin_in_path(bin_dir)
if bin_in_path:
hai(
'{green}[✓] You already have the binary directory in PATH:{/green} {}',
bin_dir)
else:
hai(
'{yellow}[ ]{/yellow} {green}{} will be added to the PATH in:{/green} {}',
bin_dir, rc_path)
if not force:
try:
result = orly('Continue installation? [Y]es / [n]o: ', default=True)
except KeyboardInterrupt:
result = False
if not result:
hai('\n\r{red}Aborted installation.{/red}')
sys.exit(-1)
try:
os.makedirs(bin_dir)
except OSError:
# If the directory already exists, let it go.
pass
remote = urllib2.urlopen(download_url)
try:
hai('Downloading from {}'.format(download_url))
local, temp_path = tempfile.mkstemp()
with os.fdopen(local, 'w') as local_file:
while True:
content = remote.read(1048576) # 1MB.
if not content:
sys.stdout.write(' done!\n')
sys.stdout.flush()
break
local_file.write(content)
sys.stdout.write('.')
sys.stdout.flush()
remote.close()
with open(temp_path, 'rb') as fp:
zp = zipfile.ZipFile(fp)
try:
zp.extract('grow', os.path.dirname(bin_path))
except IOError as e:
if 'Text file busy' in str(e):
hai('Unable to overwrite {}. Try closing Grow and installing again.'.format(
bin_path))
hai('You can use the installer by running: curl https://install.grow.io | bash')
sys.exit(-1)
raise
hai('{green}[✓] Installed Grow SDK to:{/green} {}', bin_path)
stat = os.stat(bin_path)
os.chmod(bin_path, stat.st_mode | 0111)
finally:
os.remove(temp_path)
if not bin_in_path:
with open(rc_path, 'a') as fp:
fp.write('\n' + rc_comment + '\n')
fp.write(rc_path_append)
hai('{green}[✓] Added {} to path in:{/green} {}',
bin_path, rc_path)
hai('{green}[✓] All done. Grow v{} successfully installed.{/green}', version)
if not bin_in_path:
hai(' To use Grow: reload your shell session OR use `source {}`,', rc_path)
hai(' then type `grow` and press enter.')
# TODO: Remove when no longer checking for alias.
aliases = get_existing_aliases()
if aliases:
hai('{red}Aliases for grow detected in: {}{/red}', ', '.join(aliases.keys()))
hai(' {red}please remove the old aliases to prevent version conflicts.{/red}')
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--bin-path', default=None,
help='Where to install `grow` executable. Ex: ~/bin/grow')
parser.add_argument('--force', dest='force', action='store_true',
help='Whether to force install and bypass prompts.')
parser.add_argument('--rc-path', default=None,
help='Profile to update with PATH. Ex: ~/.bashrc')
parser.set_defaults(force=False)
return parser.parse_args()
def main():
args = parse_args()
install(rc_path=args.rc_path, bin_path=args.bin_path, force=args.force)
if __name__ == '__main__':
main()
| mit | -4,453,811,987,850,760,000 | 32.258772 | 100 | 0.576553 | false |
Angeldude/csound | tests/commandline/test.py | 1 | 9537 | #!/usr/bin/python
# Csound Test Suite
# By Steven Yi <stevenyi at gmail dot com>
import os
import sys
from testUI import TestApplication
from Tkinter import *
parserType = ""
showUIatClose = False
##csoundExecutable = r"C:/Users/new/csound-csound6-git/csound.exe "
csoundExecutable =""
class Test:
def __init__(self, fileName, description, expected=True):
self.fileName = fileName
        self.description = description
self.expected = expected
def showUI(results):
root = Tk()
app = TestApplication(master=root)
app.setResults(results)
app.mainloop()
root.destroy()
def showHelp():
message = """Csound Test Suite by Steven Yi<[email protected]>
Runs tests using new parser and shows return values of tests. Results
are written to results.txt file. To show the results using a UI, pass
in the command "--show-ui" like so:
./test.py --show-ui
The test suite defaults to using the new parser. To use the old parser for
the tests, use "--old-parser" in the command like so:
./test.py --show-ui --old-parser
"""
print message
def runTest():
runArgs = "-Wdo test.wav"
if (parserType == "--old-parser"):
print "Testing with old parser"
else:
print "Testing with new parser"
tests = [
["test1.csd", "Simple Test, Single Channel"],
["test2.csd", "Simple Test, 2 Channel"],
["test3.csd", "Simple Test, using i-rate variables, 2 Channel"],
["test4.csd", "Simple Test, using k-rate variables, 2 Channel"],
["test5.csd", "Simple Test, using global i-rate variables, 2 Channel"],
["test6.csd", "Testing Pfields"],
["test7.csd", "Testing expressions, no functions"],
["test8.csd", "Testing multi-part expressions, no functions"],
["test9.csd", "Unused Label (to test labels get parsed)"],
["test10.csd", "kgoto going to a label"],
["test11.csd", "if-kgoto going to a label, boolean expressions"],
["test12.csd", "Simple if-then statement"],
["test13.csd", "function call"],
["test14.csd", "polymorphic test, 0xffff (init)"],
["test15.csd", "pluck test, 0xffff (init)"],
["test16.csd", "Simple if-then with multiple statements in body"],
["test17.csd", "Simple if-then-else with multiple statements in body"],
["test18.csd", "if-then-elseif with no else block"],
["test19.csd", "if-elseif-else"],
["test20.csd", "if-elseif-else with inner if-elseif-else blocks"],
["test21.csd", "if-elseif-else with multiple elseif blocks"],
["test22.csd", "simple UDO"],
["test23.csd", "named instrument"],
## ["test24.csd", "la_i opcodes"],
["test43.csd", "mixed numbered and named instruments"],
["test25.csd", "polymorphic test, 0xfffd (peak)"],
["test26.csd", "polymorphic test, 0xfffc (divz)"],
["test27.csd", "polymorphic test, 0xfffb (chnget)"],
["test28.csd", "label test"],
["test29.csd", "bit operations test"],
["test30.csd", "multi-numbered instrument test"],
["test31.csd", "i-rate conditional test"],
["test32.csd", "continuation lines test"],
["test33.csd", "using named instrument from score (testing score strings)"],
["test34.csd", "tertiary conditional expressions"],
["test35.csd", "test of passign"],
["test36.csd", "opcode with all input args optional (passign)"],
["test37.csd", "Testing in and out"],
["test38.csd", "Testing simple macro"],
["test39.csd", "Testing macro with argument"],
["test40.csd", "Testing i^j"],
["test41.csd", "if statement with = instead of =="],
["test42.csd", "extended string"],
["test44.csd", "expected failure with in-arg given to in opcode", 1],
["test45.csd", "if-goto with expression in boolean comparison"],
["test46.csd", "if-then with expression in boolean comparison"],
["test47.csd", "until loop and t-variables"],
["test48.csd", "expected failure with variable used before defined", 1],
["test_instr0_labels.csd", "test labels in instr0 space"],
["test_string.csd", "test string assignment and printing"],
["test_sprintf.csd", "test string assignment and printing"],
["test_sprintf2.csd", "test string assignment and printing that causes reallocation"],
["test_label_within_if_block.csd", "test label within if block"],
["test_arrays.csd", "test k-array with single dimension, assignment to expression value"],
["test_arrays2.csd", "test gk-array with single dimension, assignment to expression value"],
["test_arrays3.csd", "test k-array with single dimension, assignment with number"],
["test_arrays_multi.csd", "test multi-dimensionsl k-array, assigment to number and expression"],
["test_arrays_string.csd", "test string-array"],
["test_arrays_string2.csd", "test simple string-array assignment"],
["test_asig_as_array.csd", "test using a-sig with array get/set syntax"],
["test_arrays_negative_dimension_fail.csd",
"test expected failure with negative dimension size and array", 1],
["test_empty_conditional_branches.csd", "tests that empty branches do not cause compiler issues"],
["test_empty_instr.csd", "tests that empty instruments do not cause compiler issues"],
["test_empty_udo.csd", "tests that empty UDOs do not cause compiler issues"],
["test_semantics_undefined_var.csd", "test undefined var", 1],
["test_invalid_expression.csd", "test expression", 1],
["test_invalid_ternary.csd", "test expression", 1],
["test_opcode_as_function.csd", "test expression"],
["test_fsig_udo.csd", "UDO with f-sig arg"],
["test_karrays_udo.csd", "UDO with k[] arg"],
["test_arrays_addition.csd", "test array arithmetic (i.e. k[] + k[]"],
["test_arrays_fns.csd", "test functions on arrays (i.e. tabgen)"],
["test_polymorphic_udo.csd", "test polymorphic udo"],
["test_udo_a_array.csd", "test udo with a-array"],
["test_udo_2d_array.csd", "test udo with 2d-array"],
["test_udo_string_array_join.csd", "test udo with S[] arg returning S"],
["test_array_function_call.csd", "test synthesizing an array arg from a function-call"],
]
arrayTests = [["arrays/arrays_i_local.csd", "local i[]"],
["arrays/arrays_i_global.csd", "global i[]"],
["arrays/arrays_k_local.csd", "local k[]"],
["arrays/arrays_k_global.csd", "global k[]"],
["arrays/arrays_a_local.csd", "local a[]"],
["arrays/arrays_a_global.csd", "global a[]"],
["arrays/arrays_S_local.csd", "local S[]"],
["arrays/arrays_S_global.csd", "global S[]"],
]
udoTests = [["udo/fail_no_xin.csd", "fail due to no xin", 1],
["udo/fail_no_xout.csd", "fail due to no xout", 1],
["udo/fail_invalid_xin.csd", "fail due to invalid xin", 1],
["udo/fail_invalid_xout.csd", "fail due to invalid xout", 1],
]
tests += arrayTests
tests += udoTests
output = ""
tempfile = "/tmp/csound_test_output.txt"
if(os.sep == '/' and os.name == 'nt'):
tempfile = 'csound_test_output.txt'
counter = 1
retVals = []
testPass = 0
testFail = 0
for t in tests:
filename = t[0]
desc = t[1]
expectedResult = (len(t) == 3) and 1 or 0
if(os.sep == '\\' or os.name == 'nt'):
executable = (csoundExecutable == "") and "..\csound.exe" or csoundExecutable
command = "%s %s %s %s 2> %s"%(executable, parserType, runArgs, filename, tempfile)
print command
retVal = os.system(command)
else:
executable = (csoundExecutable == "") and "../../csound" or csoundExecutable
command = "%s %s %s %s &> %s"%(executable, parserType, runArgs, filename, tempfile)
#print command
retVal = os.system(command)
out = ""
if (retVal == 0) == (expectedResult == 0):
testPass += 1
out = "[pass] - "
else:
testFail += 1
out = "[FAIL] - "
out += "Test %i: %s (%s)\n\tReturn Code: %i\tExpected: %d\n"%(counter, desc, filename, retVal, expectedResult
)
print out
output += "%s\n"%("=" * 80)
output += "Test %i: %s (%s)\nReturn Code: %i\n"%(counter, desc, filename, retVal)
output += "%s\n\n"%("=" * 80)
f = open(tempfile, "r")
csOutput = ""
for line in f:
csOutput += line
output += csOutput
f.close()
retVals.append(t + [retVal, csOutput])
output += "\n\n"
counter += 1
# print output
print "%s\n\n"%("=" * 80)
print "Tests Passed: %i\nTests Failed: %i\n"%(testPass, testFail)
f = open("results.txt", "w")
f.write(output)
f.flush()
f.close()
return retVals
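# Hedged usage sketch (not part of the original source); executable and plugin
# paths are illustrative only:
#   ./test.py --show-ui --csound-executable=/usr/local/bin/csound \
#             --opcode6dir64=/usr/local/lib/csound/plugins64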
if __name__ == "__main__":
if(len(sys.argv) > 1):
for arg in sys.argv:
if (arg == "--help"):
showHelp()
sys.exit(0)
elif arg == "--show-ui":
showUIatClose = True
elif arg == "--old-parser":
parserType = "--old-parser"
elif arg.startswith("--csound-executable="):
csoundExecutable = arg[20:]
print csoundExecutable
elif arg.startswith("--opcode6dir64="):
os.environ['OPCODE6DIR64'] = arg[15:]
print os.environ['OPCODE6DIR64']
results = runTest()
if (showUIatClose):
showUI(results)
| lgpl-2.1 | 6,264,556,847,959,010,000 | 37.301205 | 117 | 0.594527 | false |
ImEmJay/AlexaPi | src/alexapi/device_platforms/hyperion.py | 1 | 5251 | from __future__ import print_function
import json
import threading
import time
import websocket
import alexapi.bcolors as bcolors
from baseplatform import BasePlatform
class HyperionPlatform(BasePlatform):
def __init__(self, config):
if config['debug']:
print("Initializing Hyperion platform")
super(HyperionPlatform, self).__init__(config, 'hyperion')
self.host = self._pconfig['hyperion_json_host']
self.port = self._pconfig['hyperion_json_port']
self.print_prefix = '{}[Hyperion]{}'.format(bcolors.BOLD, bcolors.ENDC)
self.service = ''
self.setup_complete = False
self.socket = None
self.socket_thread = None
def setup(self):
self.service = "ws://%s:%s" % (self.host, self.port)
self.print_debug("Hyperion JSON Server - {}".format(self.service))
self.init_connection()
def after_setup(self, trigger_callback=None): # pylint: disable=unused-argument
self.setup_complete = True
self.check_connection()
def check_connection(self):
print('Checking Hyperion Connection')
if self.socket_status():
status = '{}OK{}'.format(bcolors.OKGREEN, bcolors.ENDC)
else:
status = '{}FAIL{}'.format(bcolors.FAIL, bcolors.ENDC)
print('Connection {}'.format(status))
def display_state(self, state):
return 'start' if state else 'stop'
def force_recording(self):
pass
def get_color(self, mode):
return self._pconfig["color_{}".format(mode)]
def handle_indicate(self, mode, state=False, flash=False):
self.print_debug("indicate %s %s" % (mode, self.display_state(state)))
flash = self.should_flash(mode)
if not state and not flash:
self.hyperion_clear()
if state:
self.hyperion_indicate(self.get_color(mode), flash)
def hyperion_clear(self):
if self._config['debug']:
print("Clearing Hyperion settings")
self.hyperion_send(self.hyperion_message('clear', True))
def hyperion_effect(self, color, flash=False):
effect = {'args': {'color': color}}
if flash:
effect['name'] = "Strobe white"
effect['args']['frequency'] = self._pconfig['flash_frequency']
else:
effect['name'] = "Knight rider"
effect['args']['speed'] = self._pconfig['hyperion_effect_speed']
return {'effect': effect}
def hyperion_indicate(self, color, flash=False, duration=False):
command = self._pconfig['hyperion_mode']
if flash and command == 'color':
command = 'effect'
if flash:
duration = self._pconfig['flash_duration']
options = self.hyperion_options(command, color, duration, flash)
if self.setup_complete:
self.hyperion_send(self.hyperion_message(command, True, options))
def hyperion_options(self, command, color, duration=False, flash=False):
options = {'color': color}
if command == 'effect':
options = self.hyperion_effect(color, flash)
if duration:
options['duration'] = duration
return options
def hyperion_message(self, command, priority=False, options=None):
message = options or {}
message['command'] = command
if priority:
message['priority'] = self._pconfig['hyperion_priority']
return message
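	# Hedged sketch (not part of the original source): with hyperion_priority
	# set to 100 in the config, hyperion_message('color', True,
	# {'color': [0, 255, 0], 'duration': 3000}) would yield
	# {'color': [0, 255, 0], 'duration': 3000, 'command': 'color', 'priority': 100}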
def hyperion_send(self, message):
if self._pconfig['verbose']:
self.print_debug("sending '{}'".format(json.dumps(message)))
if self.socket_status():
self.socket.send(json.dumps(message))
else:
print("Unable to send Hyperion state update")
self.init_connection()
def indicate_failure(self):
pass
def indicate_playback(self, state=True):
if self._pconfig['indicate_playback']:
self.handle_indicate('playback', state)
def indicate_processing(self, state=True):
self.handle_indicate('processing', state)
def indicate_recording(self, state=True):
self.handle_indicate('recording', state)
def indicate_success(self):
pass
def init_connection(self):
self.print_debug('Initializing connection')
if self._config['debug'] and self._pconfig['verbose']:
websocket.enableTrace(True)
self.socket = websocket.WebSocketApp(self.service,
on_message=self.on_socket_message,
on_close=self.on_socket_close,
on_error=self.on_socket_error)
self.socket_thread = threading.Thread(target=self.socket.run_forever)
self.socket_thread.daemon = True
self.socket_thread.start()
def on_socket_close(self, ws): # pylint: disable=unused-argument
self.print_debug('Closing connection')
def on_socket_error(self, ws, error): # pylint: disable=unused-argument
self.print_debug(error)
def on_socket_message(self, ws, message): # pylint: disable=unused-argument
message = json.loads(message)
if not message['success']:
self.print_error(message['error'])
if self._pconfig['verbose']:
self.print_debug("Received '{}'".format(message))
def print_debug(self, message):
if self._config['debug']:
print(time.asctime(), self.print_prefix, message)
def print_error(self, message):
error_prefix = '{}Error{}:'.format(bcolors.FAIL, bcolors.ENDC)
print(error_prefix, self.print_prefix, message)
def should_flash(self, mode):
return self._pconfig["flash_state_{}".format(mode)]
def socket_status(self):
if not self.socket:
return False
return self.socket.sock and self.socket.sock.connected
def cleanup(self):
if self._config['debug']:
print("Cleaning up Hyperion platform")
self.hyperion_clear()
if self.socket_status():
self.socket.close()
| mit | -990,850,973,479,044,700 | 29.352601 | 80 | 0.707103 | false |
gsobczyk/hamster | src/hamster/widgets/facttree.py | 1 | 23335 | # -*- coding: utf-8 -*-
# Copyright (C) 2008-2009, 2014 Toms Bauģis <toms.baugis at gmail.com>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import bisect
import cairo
from collections import defaultdict
from gi.repository import GObject as gobject
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import PangoCairo as pangocairo
from gi.repository import Pango as pango
from hamster.lib import datetime as dt
from hamster.lib import graphics
from hamster.lib import stuff
from hamster.lib.fact import Fact
class ActionRow(graphics.Sprite):
def __init__(self):
graphics.Sprite.__init__(self)
self.visible = False
self.restart = graphics.Icon("view-refresh-symbolic", size=18,
interactive=True,
mouse_cursor=gdk.CursorType.HAND1,
y=4)
self.add_child(self.restart)
self.width = 50 # Simon says
class TotalFact(Fact):
"""An extension of Fact that is used for daily totals.
Instances of this class are rendered differently than instances
of Fact.
A TotalFact doesn't have a meaningful start and an end, but a
total duration (delta).
FIXME: Ideally, we should have a common parent for Fact and Total Fact
so we don't need to have nonsensical start and end properties here.
"""
def __init__(self, activity, duration):
super().__init__(activity=activity, start=dt.datetime.now(), end=dt.datetime.now())
self.duration = duration
@property
def delta(self):
return self.duration
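# Illustrative sketch (not part of the original module): building the daily
# total row from one day's facts, mirroring what FactTree.set_facts() does
# below; the helper name and the `facts` argument are assumptions.
def _total_fact_for_day(facts):
    total = dt.timedelta()
    for fact in facts:
        total += fact.delta
    return TotalFact(_("Total"), total)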
class Label(object):
"""a much cheaper label that would be suitable for cellrenderer"""
def __init__(self, x=0, y=0, color=None):
self.x = x
self.y = y
self.color = color
self._label_context = cairo.Context(cairo.ImageSurface(cairo.FORMAT_A1, 0, 0))
self.layout = pangocairo.create_layout(self._label_context)
self.layout.set_font_description(pango.FontDescription(graphics._font_desc))
self.set_text("Hamster") # dummy
@property
def height(self):
"""Label height in pixels."""
return self.layout.get_pixel_size()[1]
def set_text(self, text):
self.text = text
self.layout.set_markup(text)
def get_text(self):
return self.text
def show(self, g, text=None, x=None, y=None):
"""Show the label.
If text is given, it overrides any previous set_text().
x and y can be passed to temporary override the position.
(self.x and self.y will not be changed)
"""
g.save_context()
# fallback to self.x
if x is None:
x = self.x
if y is None:
y = self.y
g.move_to(x, y)
if text is not None:
self.set_text(text)
if self.color:
g.set_color(self.color)
pangocairo.show_layout(g.context, self.layout)
g.restore_context()
class TagLabel(Label):
"""Tag label, with small text."""
def set_text(self, text):
Label.set_text(self, "<small>{}</small>".format(text))
class FactRow(object):
def __init__(self):
self.to_export = Label()
self.time_label = Label(x=30)
self.activity_label = Label(x=130)
self.category_label = Label()
self.description_label = Label()
self.tag_label = TagLabel()
self.duration_label = Label()
self.duration_label.layout.set_alignment(pango.Alignment.RIGHT)
self.duration_label.layout.set_width(90 * pango.SCALE)
self.width = 0
# margins (in pixels)
self.tag_row_margin_H = 2.5
self.tag_row_margin_V = 2.5
self.tag_inner_margin_H = 3
self.tag_inner_margin_V = 2
self.inter_tag_margin = 4
self.row_margin_H = 5
self.row_margin_V = 2
self.category_offset_V = self.category_label.height * 0.1
@property
def height(self):
res = self.activity_label.height + 2 * 3
if self.fact.description:
res += self.description_label.height
if self.fact.tags:
res += (self.tag_label.height
+ self.tag_inner_margin_V * 2
+ self.tag_row_margin_V * 2)
res += self.row_margin_V * 2
return res
def set_fact(self, fact):
"""Set current fact."""
self.fact = fact
time_label = fact.start_time.strftime("%H:%M -")
if fact.end_time:
time_label += fact.end_time.strftime(" %H:%M")
self.time_label.set_text(time_label)
self.to_export.set_text("🔸" if fact.exported else ("📤️" if fact.range.end else "⏳"))
self.activity_label.set_text(stuff.escape_pango(fact.activity))
category_text = " - {}".format(stuff.escape_pango(fact.category)) if fact.category else ""
self.category_label.set_text(category_text)
text = stuff.escape_pango(fact.description)
description_text = "<small><i>{}</i></small>".format(text) if fact.description else ""
self.description_label.set_text(description_text)
if fact.tags:
# for now, tags are on a single line.
# The first one is enough to determine the height.
self.tag_label.set_text(stuff.escape_pango(fact.tags[0]))
def _show_tags(self, g, color, bg):
label = self.tag_label
label.color = bg
g.save_context()
g.translate(self.tag_row_margin_H, self.tag_row_margin_V)
for tag in self.fact.tags:
label.set_text(stuff.escape_pango(tag))
w, h = label.layout.get_pixel_size()
rw = w + self.tag_inner_margin_H * 2
rh = h + self.tag_inner_margin_V * 2
g.rectangle(0, 0, rw, rh, 2)
g.fill(color, 0.5)
label.show(g, x=self.tag_inner_margin_H, y=self.tag_inner_margin_V)
g.translate(rw + self.inter_tag_margin, 0)
g.restore_context()
def show(self, g, colors, fact=None, is_selected=False):
"""Display the fact row.
If fact is given, the fact attribute is updated.
"""
g.save_context()
if fact is not None:
# before the selection highlight, to get the correct height
self.set_fact(fact)
color, bg = colors["normal"], colors["normal_bg"]
if is_selected:
color, bg = colors["selected"], colors["selected_bg"]
g.fill_area(0, 0, self.width, self.height, bg)
g.translate(self.row_margin_H, self.row_margin_V)
g.set_color(color)
# Do not show the start/end time for Totals
if not isinstance(self.fact, TotalFact):
self.time_label.show(g)
self.to_export.show(g)
self.activity_label.show(g, self.activity_label.get_text() if not isinstance(self.fact, TotalFact) else "<b>{}</b>".format(self.activity_label.get_text()))
if self.fact.category:
g.save_context()
category_color = graphics.ColorUtils.mix(bg, color, 0.57)
g.set_color(category_color)
x = self.activity_label.x + self.activity_label.layout.get_pixel_size()[0]
self.category_label.show(g, x=x, y=self.category_offset_V)
g.restore_context()
if self.fact.description or self.fact.tags:
g.save_context()
g.translate(self.activity_label.x, self.activity_label.height + 3)
if self.fact.tags:
self._show_tags(g, color, bg)
tag_height = (self.tag_label.height
+ self.tag_inner_margin_V * 2
+ self.tag_row_margin_V * 2)
g.translate(0, tag_height)
if self.fact.description:
self.description_label.show(g)
g.restore_context()
self.duration_label.show(g, self.fact.delta.format() if not isinstance(self.fact, TotalFact) else "<b>{}</b>".format(self.fact.delta.format()), x=self.width - 105)
g.restore_context()
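# Illustrative sketch (not part of the original module): FactRow is a painter,
# so callers set the fact first and then draw it; `g` is assumed to be a
# graphics.Graphics wrapper and `colors` the dict built in
# FactTree.on_enter_frame.
def _paint_facts(g, colors, fact_row, facts):
    for fact in facts:
        fact_row.set_fact(fact)
        fact_row.show(g, colors)
        g.translate(0, fact_row.height)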
class FactTree(graphics.Scene, gtk.Scrollable):
"""
The fact tree is a painter.
It does not change facts by itself, only sends signals.
Facts get updated only through `set_facts`.
It maintains scroll state and shows what we can see.
That means it does not show all the facts there are,
but rather only those that you can see.
    It's also a painter, as it reuses labels.
    Caching is futile; we do all the painting every time.
ASCII Art!
| Weekday | Start - End | Activity - category [actions]| Duration |
| Month, Day | | tags, description | |
| | Start - End | Activity - category | Duration |
| | | Total | Total Duration |
Inline edit?
"""
__gsignals__ = {
# enter or double-click, passes in current day and fact
'on-activate-row': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT, gobject.TYPE_PYOBJECT)),
'on-delete-called': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
'on-toggle-exported-row': (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,)),
}
hadjustment = gobject.property(type=gtk.Adjustment, default=None)
hscroll_policy = gobject.property(type=gtk.ScrollablePolicy, default=gtk.ScrollablePolicy.MINIMUM)
vadjustment = gobject.property(type=gtk.Adjustment, default=None)
vscroll_policy = gobject.property(type=gtk.ScrollablePolicy, default=gtk.ScrollablePolicy.MINIMUM)
def __init__(self):
graphics.Scene.__init__(self, style_class=gtk.STYLE_CLASS_VIEW)
self.date_label = Label(10, 3)
fontdesc = pango.FontDescription(graphics._font_desc)
fontdesc.set_weight(pango.Weight.BOLD)
self.date_label.layout.set_alignment(pango.Alignment.RIGHT)
self.date_label.layout.set_width(80 * pango.SCALE)
self.date_label.layout.set_font_description(fontdesc)
self.fact_row = FactRow()
self.action_row = ActionRow()
# self.add_child(self.action_row)
self.row_positions = []
self.row_heights = []
self.y = 0
self.day_padding = 20
self.hover_day = None
self.hover_fact = None
self.current_fact = None
self.style = self._style
self.visible_range = None
self.set_size_request(500, 400)
self.connect("on-mouse-scroll", self.on_scroll)
self.connect("on-mouse-move", self.on_mouse_move)
self.connect("on-mouse-down", self.on_mouse_down)
self.connect("on-resize", self.on_resize)
self.connect("on-key-press", self.on_key_press)
self.connect("notify::vadjustment", self._on_vadjustment_change)
self.connect("on-enter-frame", self.on_enter_frame)
self.connect("on-double-click", self.on_double_click)
self.clipboard = gtk.Clipboard.get(gdk.SELECTION_CLIPBOARD)
@property
def current_fact_index(self):
"""Current fact index in the self.facts list."""
facts_ids = [fact.id for fact in self.facts]
return facts_ids.index(self.current_fact.id)
def on_mouse_down(self, scene, event):
self.on_mouse_move(None, event)
self.grab_focus()
if self.hover_fact:
# match either content or id
if (self.hover_fact == self.current_fact
or (self.hover_fact
and self.current_fact
and self.hover_fact.id == self.current_fact.id)
):
self.unset_current_fact()
# Totals can't be selected
elif not isinstance(self.hover_fact, TotalFact):
self.set_current_fact(self.hover_fact)
def activate_row(self, day, fact):
self.emit("on-activate-row", day, fact)
def toggle_exported_row(self, day, fact):
self.emit("on-toggle-exported-row", fact)
def delete_row(self, fact):
self.emit("on-delete-called", fact)
def copy_fact(self, fact):
self.clipboard.set_text(fact.serialized(), -1)
def on_double_click(self, scene, event):
if self.hover_fact and not isinstance(self.hover_fact, TotalFact):
self.activate_row(self.hover_day, self.hover_fact)
def on_key_press(self, scene, event):
# all keys should appear also in the Overview.on_key_press
# to be forwarded here even without focus.
if event.keyval == gdk.KEY_Up:
if self.facts:
if self.current_fact:
idx = max(0, self.current_fact_index - 1)
else:
# enter from below
idx = len(self.facts) - 1
self.set_current_fact(self.facts[idx])
elif event.keyval == gdk.KEY_Down:
if self.facts:
if self.current_fact:
idx = min(len(self.facts) - 1, self.current_fact_index + 1)
else:
# enter from top
idx = 0
self.set_current_fact(self.facts[idx])
elif event.keyval == gdk.KEY_Home:
if self.facts:
self.set_current_fact(self.facts[0])
elif event.keyval == gdk.KEY_End:
if self.facts:
self.set_current_fact(self.facts[-1])
elif event.keyval == gdk.KEY_Page_Down:
self.y += self.height * 0.8
self.on_scroll()
elif event.keyval == gdk.KEY_Page_Up:
self.y -= self.height * 0.8
self.on_scroll()
elif event.keyval == gdk.KEY_x:
if self.current_fact:
self.toggle_exported_row(self.hover_day, self.current_fact)
elif event.keyval in (gdk.KEY_Return, gdk.KEY_e):
if self.current_fact:
self.activate_row(self.hover_day, self.current_fact)
elif event.keyval == gdk.KEY_Delete:
if self.current_fact:
self.delete_row(self.current_fact)
elif event.state & gdk.ModifierType.CONTROL_MASK and event.keyval == gdk.KEY_c:
if self.current_fact:
self.copy_fact(self.current_fact)
def set_current_fact(self, fact):
self.current_fact = fact
if fact.y < self.y:
self.y = fact.y
if (fact.y + fact.height) > (self.y + self.height):
self.y = fact.y + fact.height - self.height
self.on_scroll()
def unset_current_fact(self):
"""Deselect fact."""
self.current_fact = None
self.on_scroll()
def get_visible_range(self):
start, end = (bisect.bisect(self.row_positions, self.y) - 1,
bisect.bisect(self.row_positions, self.y + self.height))
y = self.y
return [{"i": start + i, "y": pos - y, "h": height, "day": day, "facts": facts}
for i, (pos, height, (day, facts)) in enumerate(zip(self.row_positions[start:end],
self.row_heights[start:end],
self.days[start:end]))]
def on_mouse_move(self, tree, event):
hover_day, hover_fact = None, None
for rec in self.visible_range:
if rec['y'] <= event.y <= (rec['y'] + rec['h']):
hover_day = rec
break
if hover_day != self.hover_day:
# Facts are considered equal if their content is the same,
# even if their id is different.
# redraw only cares about content, not id.
self.redraw()
# make sure it is always fully updated, including facts ids.
self.hover_day = hover_day
if self.hover_day:
for fact in self.hover_day.get('facts', []):
if (fact.y - self.y) <= event.y <= (fact.y - self.y + fact.height):
hover_fact = fact
break
if (hover_fact
and self.hover_fact
and hover_fact.id != self.hover_fact.id
):
self.move_actions()
# idem, always update hover_fact, not just if they appear different
self.hover_fact = hover_fact
def move_actions(self):
if self.hover_fact:
self.action_row.visible = True
self.action_row.x = self.width - 80 - self.action_row.width
self.action_row.y = self.hover_fact.y - self.y
else:
self.action_row.visible = False
def _on_vadjustment_change(self, scene, vadjustment):
if not self.vadjustment:
return
self.vadjustment.connect("value_changed", self.on_scroll_value_changed)
self.set_size_request(500, 300)
def set_facts(self, facts, scroll_to_top=False):
# FactTree adds attributes to its facts. isolate these side effects
# copy the id too; most of the checks are based on id here.
self.facts = [fact.copy(id=fact.id) for fact in facts]
        del facts # make sure facts is not used inadvertently below.
# If we get an entirely new set of facts, scroll back to the top
if scroll_to_top:
self.y = 0
self.hover_fact = None
if self.vadjustment:
self.vadjustment.set_value(self.y)
if self.facts:
start = self.facts[0].date
end = self.facts[-1].date
else:
start = end = dt.hday.today()
by_date = defaultdict(list)
delta_by_date = defaultdict(dt.timedelta)
for fact in self.facts:
by_date[fact.date].append(fact)
delta_by_date[fact.date] += fact.delta
# Add a TotalFact at the end of each day if we are
# displaying more than one day.
if len(by_date) > 1:
for key in by_date:
total_by_date = TotalFact(_("Total"), delta_by_date[key])
by_date[key].append(total_by_date)
days = []
for i in range((end - start).days + 1):
current_date = start + dt.timedelta(days=i)
if current_date in by_date:
days.append((current_date, by_date[current_date]))
self.days = days
self.set_row_heights()
if (self.current_fact
and self.current_fact.id in (fact.id for fact in self.facts)
):
self.on_scroll()
else:
# will also trigger an on_scroll
self.unset_current_fact()
def set_row_heights(self):
"""
        the row height is defined by the following factors:
        * how many facts there are in the day
        * whether the fact has a description / tags
This func creates a list of row start positions to be able to
quickly determine what to display
"""
if not self.height:
return
y, pos, heights = 0, [], []
for date, facts in self.days:
height = 0
for fact in facts:
self.fact_row.set_fact(fact)
fact_height = self.fact_row.height
fact.y = y + height
fact.height = fact_height
height += fact.height
height += self.day_padding
height = max(height, 60)
pos.append(y)
heights.append(height)
y += height
self.row_positions, self.row_heights = pos, heights
maxy = max(y, 1)
if self.vadjustment:
self.vadjustment.set_lower(0)
self.vadjustment.set_upper(max(maxy, self.height))
self.vadjustment.set_page_size(self.height)
def on_resize(self, scene, event):
self.set_row_heights()
self.fact_row.width = self.width - 105
self.on_scroll()
def on_scroll_value_changed(self, scroll):
self.y = int(scroll.get_value())
self.on_scroll()
def on_scroll(self, scene=None, event=None):
if not self.height:
return
y_pos = self.y
direction = 0
if event and event.direction == gdk.ScrollDirection.UP:
direction = -1
elif event and event.direction == gdk.ScrollDirection.DOWN:
direction = 1
y_pos += 15 * direction
if self.vadjustment:
y_pos = max(0, min(self.vadjustment.get_upper() - self.height, y_pos))
self.vadjustment.set_value(y_pos)
self.y = y_pos
self.move_actions()
self.redraw()
self.visible_range = self.get_visible_range()
def on_enter_frame(self, scene, context):
has_focus = self.get_toplevel().has_toplevel_focus()
if has_focus:
colors = {
"normal": self.style.get_color(gtk.StateFlags.NORMAL),
"normal_bg": self.style.get_background_color(gtk.StateFlags.NORMAL),
"selected": self.style.get_color(gtk.StateFlags.SELECTED),
"selected_bg": self.style.get_background_color(gtk.StateFlags.SELECTED),
}
else:
colors = {
"normal": self.style.get_color(gtk.StateFlags.BACKDROP),
"normal_bg": self.style.get_background_color(gtk.StateFlags.BACKDROP),
"selected": self.style.get_color(gtk.StateFlags.BACKDROP),
"selected_bg": self.style.get_background_color(gtk.StateFlags.BACKDROP),
}
if not self.height:
return
g = graphics.Graphics(context)
g.set_line_style(1)
g.translate(0.5, 0.5)
date_bg_color = self.colors.mix(colors["normal_bg"], colors["normal"], 0.15)
g.fill_area(0, 0, 105, self.height, date_bg_color)
y = int(self.y)
for rec in self.visible_range:
g.save_context()
g.translate(0, rec['y'])
g.set_color(colors["normal"])
self.date_label.show(g, rec['day'].strftime("%A\n%b %d"))
g.translate(105, 0)
for fact in rec['facts']:
is_selected = (self.current_fact is not None
and fact.id == self.current_fact.id)
self.fact_row.set_fact(fact)
self.fact_row.show(g, colors, is_selected=is_selected)
g.translate(0, self.fact_row.height)
g.restore_context()
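# Illustrative sketch (not in the original module): creating a FactTree,
# feeding it facts and listening for row activation; the handler below is
# only an example.
def _example_fact_tree(facts):
    tree = FactTree()
    tree.set_facts(facts, scroll_to_top=True)
    tree.connect("on-activate-row",
                 lambda scene, day, fact: print(day, fact.activity))
    return tree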
| gpl-3.0 | -825,646,525,799,796,400 | 33.916168 | 171 | 0.572543 | false |
Answeror/aip | aip/imfs/baidupcs.py | 1 | 3088 | from .base import NameMixin, guarded
from .error import ImfsError, NotFoundError
from .utils import thumbnail
from pprint import pformat
from .. import img
from datetime import datetime
BASE = '/apps/aip/cache/'
class PCSError(ImfsError):
pass
class BadResponse(PCSError):
def __init__(self, r):
self.status_code = r.status_code
try:
self.content = r.json()
except:
self.content = r.content
def __str__(self):
return pformat({
'status_code': self.status_code,
'content': self.content
})
def wrap(name):
return BASE + name
def error_code(r):
try:
d = r.json()
code = d.get('error_code', None)
if code is None:
code = d.get('content', {}).get('error_code', None)
return code
except:
return None
class BaiduPCS(NameMixin):
def __init__(self, access_token):
self.access_token = access_token
@guarded
def _load(self, name):
r = self.pcs.download(wrap(name))
if r.status_code == 404:
return None
if not r.ok:
raise BadResponse(r)
return r.content
@guarded
def _save(self, name, data):
r = self.pcs.upload(wrap(name), data, ondup='overwrite')
if not r.ok:
if r.status_code == 400 and error_code(r) == 31061:
pass
else:
raise BadResponse(r)
def _thumbnail(self, name, width, height):
data = self.load(name)
if data is None:
return None
kind = img.kind(data=data)
if kind is None:
raise PCSError('cannot detect image type')
return thumbnail(data, kind, width, height)
@guarded
def _has(self, name):
r = self.pcs.meta(wrap(name))
if r.status_code == 404:
return False
if not r.ok:
raise BadResponse(r)
return True
@guarded
def _remove(self, name):
r = self.pcs.delete(wrap(name))
if not r.ok and r.status_code not in (404,):
raise BadResponse(r)
@guarded
def _mtime(self, name):
r = self.pcs.meta(wrap(name))
if not r.ok:
if r.status_code == 404:
raise NotFoundError(name)
raise BadResponse(r)
return datetime.fromtimestamp(r.json()['list'][0]['mtime'])
def _cache_timeout(self, name):
return None
@property
def pcs(self):
if not hasattr(self, '_pcs'):
from baidupcs import PCS
self._pcs = PCS(self.access_token)
ensure_base(self._pcs, BASE)
return self._pcs
@guarded
def ensure_base(pcs, base):
r = pcs.mkdir(base)
if not r.ok:
if r.status_code == 400 and error_code(r) == 31061:
r = pcs.meta(base)
if not r.ok:
raise BadResponse(r)
if not r.json()['list'][0]['isdir']:
raise PCSError('%s is not dir' % base)
else:
raise BadResponse(r)
| mit | 4,479,358,954,816,668,000 | 23.507937 | 67 | 0.53886 | false |
dwavesystems/dimod | dimod/reference/composites/roofduality.py | 1 | 2936 | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A composite that uses the roof duality algorithm [#bht]_ [#bh]_ to fix some
variables in the binary quadratic model before passing it on to its child
sampler.
.. [#bht] Boros, E., P.L. Hammer, G. Tavares. Preprocessing of Unconstrained
Quadratic Binary Optimization. Rutcor Research Report 10-2006, April, 2006.
.. [#bh] Boros, E., P.L. Hammer. Pseudo-Boolean optimization. Discrete Applied
Mathematics 123, (2002), pp. 155-225
"""
from dimod.reference.composites.fixedvariable import FixedVariableComposite
from dimod.roof_duality import fix_variables
__all__ = ['RoofDualityComposite']
class RoofDualityComposite(FixedVariableComposite):
"""Uses roof duality to assign some variables before invoking child sampler.
Uses the :func:`~dimod.roof_duality.fix_variables` function to determine
variable assignments, then fixes them before calling the child sampler.
Returned samples include the fixed variables.
Args:
child (:obj:`dimod.Sampler`):
A dimod sampler. Used to sample the binary quadratic model after
variables have been fixed.
"""
@property
def parameters(self):
params = self.child.parameters.copy()
params['sampling_mode'] = []
return params
def sample(self, bqm, sampling_mode=True, **parameters):
"""Sample from the provided binary quadratic model.
Uses the :func:`~dimod.roof_duality.fix_variables` function to determine
which variables to fix.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
sampling_mode (bool, optional, default=True):
In sampling mode, only roof-duality is used. When
`sampling_mode` is false, strongly connected components are used
to fix more variables, but in some optimal solutions these
variables may take different values.
**parameters:
Parameters for the child sampler.
Returns:
:obj:`dimod.SampleSet`
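        Examples:
            A small usage sketch (added for illustration; it assumes
            :class:`dimod.ExactSolver` is used as the child sampler):
            >>> sampler = RoofDualityComposite(dimod.ExactSolver())
            >>> bqm = dimod.BinaryQuadraticModel.from_ising({'a': 10}, {('a', 'b'): -1})
            >>> sampleset = sampler.sample(bqm)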
"""
# use roof-duality to decide which variables to fix
parameters['fixed_variables'] = fix_variables(bqm, sampling_mode=sampling_mode)
return super(RoofDualityComposite, self).sample(bqm, **parameters)
| apache-2.0 | -527,168,984,225,831,500 | 36.641026 | 87 | 0.680858 | false |
aewallin/openvoronoi | src/test/pytest_ttt_alphabet/ttt_alphabet.py | 1 | 3568 | import truetypetracer as ttt
import openvoronoi as ovd
import time
import sys
def translate(segs,x,y):
out = []
for seg in segs:
seg2 = []
for p in seg:
p2 = []
p2.append(p[0] + x)
p2.append(p[1] + y)
seg2.append(p2)
#seg2.append(seg[3] + y)
out.append(seg2)
return out
def insert_polygon_points(vd, polygon):
pts=[]
for p in polygon:
pts.append( ovd.Point( p[0], p[1] ) )
id_list = []
print "inserting ",len(pts)," point-sites:"
m=0
for p in pts:
id_list.append( vd.addVertexSite( p ) )
print " ",m," added vertex ", id_list[ len(id_list) -1 ]
m=m+1
return id_list
def insert_polygon_segments(vd,id_list):
j=0
print "inserting ",len(id_list)," line-segments:"
for n in range(len(id_list)):
n_nxt = n+1
if n==(len(id_list)-1):
n_nxt=0
print " ",j,"inserting segment ",id_list[n]," - ",id_list[n_nxt]
vd.addLineSite( id_list[n], id_list[n_nxt])
j=j+1
def modify_segments(segs):
segs_mod =[]
for seg in segs:
first = seg[0]
last = seg[ len(seg)-1 ]
assert( first[0]==last[0] and first[1]==last[1] )
seg.pop()
seg.reverse()
segs_mod.append(seg)
#drawSegment(myscreen, seg)
return segs_mod
def insert_many_polygons(vd,segs):
polygon_ids =[]
t_before = time.time()
for poly in segs:
poly_id = insert_polygon_points(vd,poly)
polygon_ids.append(poly_id)
t_after = time.time()
pt_time = t_after-t_before
t_before = time.time()
for ids in polygon_ids:
insert_polygon_segments(vd,ids)
t_after = time.time()
seg_time = t_after-t_before
return [pt_time, seg_time]
def ttt_segments(text,scale,conic_subdiv):
wr = ttt.SEG_Writer()
# wr.scale = 3
wr.arc = False
wr.conic = False
wr.cubic = False
wr.conic_biarc_subdivision = 10 # this has no effect?
wr.conic_line_subdivision = conic_subdiv # this increases nr of points
wr.cubic_biarc_subdivision = 10 # no effect?
wr.cubic_line_subdivision = 10 # no effect?
wr.setFont(3)
wr.scale = float(1)/float(scale)
s3 = ttt.ttt(text,wr)
segs = wr.get_segments()
return segs
if __name__ == "__main__":
conic_subdiv = 200
if len(sys.argv) == 2:
conic_subdiv = int(sys.argv[1])
scale = 25000
segs = ttt_segments( "ABCDEFGHIJKLM", scale, conic_subdiv)
segs2 = ttt_segments( "NOPQRSTUVWXYZ", scale, conic_subdiv)
segs3 = ttt_segments( "abcdefghijklm", scale, conic_subdiv)
#segs3 = ttt_segments( "m", 6400)
segs4 = ttt_segments( "nopqrstuvwxyz", scale, conic_subdiv) # NOPQRSTUVWXYZ", 64000)
segs5 = ttt_segments( "0123456789+-*/", scale, conic_subdiv)
dx = float(50000)/float(scale)
xt=-0.3
segs = translate(segs, xt*dx, 0.05*dx)
segs = modify_segments(segs)
segs2 = translate(segs2, xt*dx, -0.05*dx)
segs2 = modify_segments(segs2)
segs3 = translate(segs3, xt*dx, -0.15*dx)
segs3 = modify_segments(segs3)
segs4 = translate(segs4, xt*dx, -0.22*dx)
segs4 = modify_segments(segs4)
segs5 = translate(segs5, xt*dx, -0.32*dx)
segs5 = modify_segments(segs5)
vd = ovd.VoronoiDiagram(1,120)
all_segs=segs+segs2 +segs3 +segs4+segs5
insert_many_polygons(vd,all_segs)
c = vd.check()
print " VD check: ", c
if c:
exit(0)
else:
exit(-1)
| lgpl-2.1 | -4,697,356,656,404,354,000 | 26.658915 | 88 | 0.571749 | false |
gannetson/sportschooldeopenlucht | apps/cowry/migrations/0001_initial.py | 1 | 3506 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Payment'
db.create_table(u'cowry_payment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('polymorphic_ctype', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'polymorphic_cowry.payment_set', null=True, to=orm['contenttypes.ContentType'])),
('amount', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('currency', self.gf('django.db.models.fields.CharField')(default='', max_length=3)),
('fee', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('payment_method_id', self.gf('django.db.models.fields.CharField')(default='', max_length=20, blank=True)),
('payment_submethod_id', self.gf('django.db.models.fields.CharField')(default='', max_length=20, blank=True)),
('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=15, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
))
db.send_create_signal(u'cowry', ['Payment'])
def backwards(self, orm):
# Deleting model 'Payment'
db.delete_table(u'cowry_payment')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'cowry.payment': {
'Meta': {'object_name': 'Payment'},
'amount': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3'}),
'fee': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_method_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'payment_submethod_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_cowry.payment_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '15', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['cowry'] | bsd-3-clause | 2,409,316,479,135,669,000 | 62.763636 | 196 | 0.596121 | false |
gangadhar-kadam/nassimapp | stock/doctype/purchase_receipt/purchase_receipt.py | 1 | 12927 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr, flt, cint
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
import webnotes.defaults
from stock.utils import update_bin
from controllers.buying_controller import BuyingController
class DocType(BuyingController):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
self.tname = 'Purchase Receipt Item'
self.fname = 'purchase_receipt_details'
self.count = 0
self.status_updater = [{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'prevdoc_detail_docname',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'prevdoc_docname',
}]
def onload(self):
billed_qty = webnotes.conn.sql("""select sum(ifnull(qty, 0)) from `tabPurchase Invoice Item`
where purchase_receipt=%s""", self.doc.name)
if billed_qty:
total_qty = sum((item.qty for item in self.doclist.get({"parentfield": "purchase_receipt_details"})))
self.doc.fields["__billing_complete"] = billed_qty[0][0] == total_qty
def validate(self):
super(DocType, self).validate()
self.po_required()
if not self.doc.status:
self.doc.status = "Draft"
import utilities
utilities.validate_status(self.doc.status, ["Draft", "Submitted", "Cancelled"])
self.validate_with_previous_doc()
self.validate_rejected_warehouse()
self.validate_accepted_rejected_qty()
self.validate_inspection()
self.validate_uom_is_integer("uom", ["qty", "received_qty"])
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.validate_challan_no()
pc_obj = get_obj(dt='Purchase Common')
pc_obj.validate_for_items(self)
self.check_for_stopped_status(pc_obj)
# sub-contracting
self.validate_for_subcontracting()
self.create_raw_materials_supplied("pr_raw_material_details")
self.update_valuation_rate("purchase_receipt_details")
def validate_rejected_warehouse(self):
for d in self.doclist.get({"parentfield": "purchase_receipt_details"}):
if flt(d.rejected_qty) and not d.rejected_warehouse:
d.rejected_warehouse = self.doc.rejected_warehouse
if not d.rejected_warehouse:
webnotes.throw(_("Rejected Warehouse is mandatory against regected item"))
# validate accepted and rejected qty
def validate_accepted_rejected_qty(self):
for d in getlist(self.doclist, "purchase_receipt_details"):
if not flt(d.received_qty) and flt(d.qty):
d.received_qty = flt(d.qty) - flt(d.rejected_qty)
elif not flt(d.qty) and flt(d.rejected_qty):
d.qty = flt(d.received_qty) - flt(d.rejected_qty)
elif not flt(d.rejected_qty):
d.rejected_qty = flt(d.received_qty) - flt(d.qty)
# Check Received Qty = Accepted Qty + Rejected Qty
if ((flt(d.qty) + flt(d.rejected_qty)) != flt(d.received_qty)):
msgprint("Sum of Accepted Qty and Rejected Qty must be equal to Received quantity. Error for Item: " + cstr(d.item_code))
raise Exception
def validate_challan_no(self):
"Validate if same challan no exists for same supplier in a submitted purchase receipt"
if self.doc.challan_no:
exists = webnotes.conn.sql("""select name from `tabPurchase Receipt`
where docstatus=1 and name!=%s and supplier=%s and challan_no=%s
and fiscal_year=%s""", (self.doc.name, self.doc.supplier,
self.doc.challan_no, self.doc.fiscal_year))
if exists:
webnotes.throw(_("Supplier delivery number duplicate in {0}").format(exists))
def validate_with_previous_doc(self):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Purchase Order": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["project_name", "="], ["uom", "="], ["item_code", "="]],
"is_child_table": True
}
})
if cint(webnotes.defaults.get_global_default('maintain_same_rate')):
super(DocType, self).validate_with_previous_doc(self.tname, {
"Purchase Order Item": {
"ref_dn_field": "prevdoc_detail_docname",
"compare_fields": [["import_rate", "="]],
"is_child_table": True
}
})
def po_required(self):
if webnotes.conn.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in getlist(self.doclist,'purchase_receipt_details'):
if not d.prevdoc_docname:
msgprint("Purchse Order No. required against item %s"%d.item_code)
raise Exception
def update_stock(self):
sl_entries = []
stock_items = self.get_stock_items()
for d in getlist(self.doclist, 'purchase_receipt_details'):
if d.item_code in stock_items and d.warehouse:
pr_qty = flt(d.qty) * flt(d.conversion_factor)
if pr_qty:
sl_entries.append(self.get_sl_entries(d, {
"actual_qty": flt(pr_qty),
"serial_no": cstr(d.serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
if flt(d.rejected_qty) > 0:
sl_entries.append(self.get_sl_entries(d, {
"warehouse": d.rejected_warehouse,
"actual_qty": flt(d.rejected_qty) * flt(d.conversion_factor),
"serial_no": cstr(d.rejected_serial_no).strip(),
"incoming_rate": d.valuation_rate
}))
self.bk_flush_supp_wh(sl_entries)
self.make_sl_entries(sl_entries)
def update_ordered_qty(self):
stock_items = self.get_stock_items()
for d in self.doclist.get({"parentfield": "purchase_receipt_details"}):
if d.item_code in stock_items and d.warehouse \
and cstr(d.prevdoc_doctype) == 'Purchase Order':
already_received_qty = self.get_already_received_qty(d.prevdoc_docname,
d.prevdoc_detail_docname)
po_qty, ordered_warehouse = self.get_po_qty_and_warehouse(d.prevdoc_detail_docname)
if not ordered_warehouse:
webnotes.throw(_("Warehouse is missing in Purchase Order"))
if already_received_qty + d.qty > po_qty:
ordered_qty = - (po_qty - already_received_qty) * flt(d.conversion_factor)
else:
ordered_qty = - flt(d.qty) * flt(d.conversion_factor)
update_bin({
"item_code": d.item_code,
"warehouse": ordered_warehouse,
"posting_date": self.doc.posting_date,
"ordered_qty": flt(ordered_qty) if self.doc.docstatus==1 else -flt(ordered_qty)
})
def get_already_received_qty(self, po, po_detail):
qty = webnotes.conn.sql("""select sum(qty) from `tabPurchase Receipt Item`
where prevdoc_detail_docname = %s and docstatus = 1
and prevdoc_doctype='Purchase Order' and prevdoc_docname=%s
and parent != %s""", (po_detail, po, self.doc.name))
return qty and flt(qty[0][0]) or 0.0
def get_po_qty_and_warehouse(self, po_detail):
po_qty, po_warehouse = webnotes.conn.get_value("Purchase Order Item", po_detail,
["qty", "warehouse"])
return po_qty, po_warehouse
def bk_flush_supp_wh(self, sl_entries):
for d in getlist(self.doclist, 'pr_raw_material_details'):
# negative quantity is passed as raw material qty has to be decreased
# when PR is submitted and it has to be increased when PR is cancelled
sl_entries.append(self.get_sl_entries(d, {
"item_code": d.rm_item_code,
"warehouse": self.doc.supplier_warehouse,
"actual_qty": -1*flt(d.consumed_qty),
"incoming_rate": 0
}))
def validate_inspection(self):
for d in getlist(self.doclist, 'purchase_receipt_details'): #Enter inspection date for all items that require inspection
ins_reqd = webnotes.conn.sql("select inspection_required from `tabItem` where name = %s",
(d.item_code,), as_dict = 1)
ins_reqd = ins_reqd and ins_reqd[0]['inspection_required'] or 'No'
if ins_reqd == 'Yes' and not d.qa_no:
msgprint("Item: " + d.item_code + " requires QA Inspection. Please enter QA No or report to authorized person to create Quality Inspection")
# Check for Stopped status
def check_for_stopped_status(self, pc_obj):
check_list =[]
for d in getlist(self.doclist, 'purchase_receipt_details'):
if d.fields.has_key('prevdoc_docname') and d.prevdoc_docname and d.prevdoc_docname not in check_list:
check_list.append(d.prevdoc_docname)
pc_obj.check_for_stopped_status( d.prevdoc_doctype, d.prevdoc_docname)
# on submit
def on_submit(self):
purchase_controller = webnotes.get_obj("Purchase Common")
# Check for Approving Authority
get_obj('Authorization Control').validate_approving_authority(self.doc.doctype, self.doc.company, self.doc.grand_total)
# Set status as Submitted
webnotes.conn.set(self.doc, 'status', 'Submitted')
self.update_prevdoc_status()
self.update_ordered_qty()
self.update_stock()
from stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "purchase_receipt_details")
purchase_controller.update_last_purchase_rate(self, 1)
self.make_gl_entries()
def check_next_docstatus(self):
submit_rv = webnotes.conn.sql("select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_receipt = '%s' and t1.docstatus = 1" % (self.doc.name))
if submit_rv:
msgprint("Purchase Invoice : " + cstr(self.submit_rv[0][0]) + " has already been submitted !")
raise Exception , "Validation Error."
def on_cancel(self):
pc_obj = get_obj('Purchase Common')
self.check_for_stopped_status(pc_obj)
# Check if Purchase Invoice has been submitted against current Purchase Order
# pc_obj.check_docstatus(check = 'Next', doctype = 'Purchase Invoice', docname = self.doc.name, detail_doctype = 'Purchase Invoice Item')
submitted = webnotes.conn.sql("select t1.name from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2 where t1.name = t2.parent and t2.purchase_receipt = '%s' and t1.docstatus = 1" % self.doc.name)
if submitted:
msgprint("Purchase Invoice : " + cstr(submitted[0][0]) + " has already been submitted !")
raise Exception
webnotes.conn.set(self.doc,'status','Cancelled')
self.update_ordered_qty()
self.update_stock()
self.update_prevdoc_status()
pc_obj.update_last_purchase_rate(self, 0)
self.make_cancel_gl_entries()
def get_current_stock(self):
for d in getlist(self.doclist, 'pr_raw_material_details'):
if self.doc.supplier_warehouse:
bin = webnotes.conn.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.rm_item_code, self.doc.supplier_warehouse), as_dict = 1)
d.current_stock = bin and flt(bin[0]['actual_qty']) or 0
def get_rate(self,arg):
return get_obj('Purchase Common').get_rate(arg,self)
def get_gl_entries(self, warehouse_account=None):
against_stock_account = self.get_company_default("stock_received_but_not_billed")
gl_entries = super(DocType, self).get_gl_entries(warehouse_account, against_stock_account)
return gl_entries
def get_invoiced_qty_map(purchase_receipt):
"""returns a map: {pr_detail: invoiced_qty}"""
invoiced_qty_map = {}
for pr_detail, qty in webnotes.conn.sql("""select pr_detail, qty from `tabPurchase Invoice Item`
where purchase_receipt=%s and docstatus=1""", purchase_receipt):
if not invoiced_qty_map.get(pr_detail):
invoiced_qty_map[pr_detail] = 0
invoiced_qty_map[pr_detail] += qty
return invoiced_qty_map
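# Illustrative sketch (not part of the original module): the map returned above
# can be used to work out how much of a Purchase Receipt Item row is still left
# to invoice; `pr_item` is assumed to be a row object with `name` and `qty`.
def _pending_invoice_qty(pr_item, invoiced_qty_map):
    return flt(pr_item.qty) - flt(invoiced_qty_map.get(pr_item.name, 0))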
@webnotes.whitelist()
def make_purchase_invoice(source_name, target_doclist=None):
from webnotes.model.mapper import get_mapped_doclist
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
pi = webnotes.bean(target)
pi.run_method("set_missing_values")
pi.run_method("set_supplier_defaults")
pi.set_doclist(pi.doclist.get({"parentfield": ["!=", "entries"]}) +
pi.doclist.get({"parentfield": "entries", "qty": [">", 0]}))
if len(pi.doclist.get({"parentfield": "entries"})) == 0:
webnotes.msgprint(_("All items have already been invoiced."), raise_exception=True)
return pi.doclist
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)
doclist = get_mapped_doclist("Purchase Receipt", source_name, {
"Purchase Receipt": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
}
},
"Purchase Receipt Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "pr_detail",
"parent": "purchase_receipt",
"prevdoc_detail_docname": "po_detail",
"prevdoc_docname": "purchase_order",
"purchase_rate": "rate"
},
"postprocess": update_item
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doclist, set_missing_values)
return [d.fields for d in doclist]
| agpl-3.0 | 7,571,372,898,239,984,000 | 35.724432 | 205 | 0.691034 | false |
zvoase/django-relax | relax/templatetags/couchdb.py | 1 | 2778 | # -*- coding: utf-8 -*-
import re
from django import template
from django.template.defaultfilters import stringfilter
from relax import json, settings
register = template.Library()
class SettingNode(template.Node):
def __init__(self, setting_name, var_name=None, default_value=None):
# The variable name will be stored no matter what.
self.var_name = var_name
# If there is a default value, it will be added to the args for the
# relax.settings._ function; otherwise it will just be the setting
# name.
self.setting_args = ((setting_name, default_value) if default_value
else (setting_name,))
def render(self, context):
# We pre-stored these arguments in __init__, remember?
value = settings._(*self.setting_args)
# If a variable name was provided, use it.
if self.var_name:
context[self.var_name] = value
return ''
# Otherwise, render the setting as a string in the template.
else:
return str(value)
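# Illustrative usage (not part of the original module): the tag registered at
# the bottom of this file accepts the forms parsed below, e.g.
#   {% setting COUCHDB_SERVER %}
#   {% setting COUCHDB_SERVER "http://localhost:5984/" %}
#   {% setting COUCHDB_SERVER "http://localhost:5984/" as couchdb_server %}
# The optional default value is written in JSON and the optional "as name"
# form stores the value in the template context; the setting name and URL
# above are only examples.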
def get_setting(parser, token):
try:
tag_name, arg = token.contents.split(None, 1)
except ValueError:
raise template.TemplateSyntaxError(
'%r tag requires arguments' % (token.contents.split()[0],))
    # Here we match 4 different regexes. This deals with the optional presence
# of both a default value and a variable name.
match = re.search(r'^([A-Za-z0-9_-]+)$', arg)
if not match:
match = re.search(r'^([A-Za-z0-9_-]+) (.*?)$', arg)
if not match:
match = re.search(r'^([A-Za-z0-9_-]+) (.*?) as ([A-Za-z0-9_]+)$', arg)
if not match:
match = re.search(r'^([A-Za-z0-9_-]+) as ([A-Za-z0-9_]+)$', arg)
if not match:
# If all else fails, just raise an error.
raise template.TemplateSyntaxError('Invalid arguments for %r tag' %
(tag_name,))
setting_name, var_name = match.groups()
return SettingNode(setting_name, var_name=var_name)
setting_name, default_value, var_name = match.groups()
# The default value should be specified in JSON format. This makes
# things considerably more secure than just using eval().
default_value = json.loads(default_value)
return SettingNode(setting_name, var_name=var_name,
default_value=default_value)
setting_name, default_value = match.groups()
default_value = json.loads(default_value)
return SettingNode(setting_name, default_value=default_value)
setting_name = match.groups()[0]
return SettingNode(setting_name)
register.tag('setting', get_setting) | mit | -2,214,768,308,070,269,200 | 39.275362 | 87 | 0.598632 | false |
MissionCriticalCloud/marvin | marvin/cloudstackAPI/prepareTemplate.py | 1 | 6722 | """load template into primary storage"""
from baseCmd import *
from baseResponse import *
class prepareTemplateCmd (baseCmd):
typeInfo = {}
def __init__(self):
self.isAsync = "false"
"""template ID of the template to be prepared in primary storage(s)."""
"""Required"""
self.templateid = None
self.typeInfo['templateid'] = 'uuid'
"""zone ID of the template to be prepared in primary storage(s)."""
"""Required"""
self.zoneid = None
self.typeInfo['zoneid'] = 'uuid'
"""storage pool ID of the primary storage pool to which the template should be prepared. If it is not provided the template is prepared on all the available primary storage pools."""
self.storageid = None
self.typeInfo['storageid'] = 'uuid'
self.required = ["templateid", "zoneid", ]
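# Illustrative sketch (not part of the generated module): typical Marvin usage
# fills the required fields on the command object and hands it to the API
# client; `apiclient` is assumed to be a CloudStackAPIClient instance.
def _example_prepare_template(apiclient, templateid, zoneid):
    cmd = prepareTemplateCmd()
    cmd.templateid = templateid
    cmd.zoneid = zoneid
    return apiclient.prepareTemplate(cmd)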
class prepareTemplateResponse (baseResponse):
typeInfo = {}
def __init__(self):
"""the template ID"""
self.id = None
self.typeInfo['id'] = 'string'
"""the account name to which the template belongs"""
self.account = None
self.typeInfo['account'] = 'string'
"""the account id to which the template belongs"""
self.accountid = None
self.typeInfo['accountid'] = 'string'
"""true if the ISO is bootable, false otherwise"""
self.bootable = None
self.typeInfo['bootable'] = 'boolean'
"""checksum of the template"""
self.checksum = None
self.typeInfo['checksum'] = 'string'
"""the date this template was created"""
self.created = None
self.typeInfo['created'] = 'date'
"""true if the template is managed across all Zones, false otherwise"""
self.crossZones = None
self.typeInfo['crossZones'] = 'boolean'
"""additional key/value details tied with template"""
self.details = None
self.typeInfo['details'] = 'map'
"""the template display text"""
self.displaytext = None
self.typeInfo['displaytext'] = 'string'
"""the name of the domain to which the template belongs"""
self.domain = None
self.typeInfo['domain'] = 'string'
"""the ID of the domain to which the template belongs"""
self.domainid = None
self.typeInfo['domainid'] = 'string'
"""the format of the template."""
self.format = None
self.typeInfo['format'] = 'imageformat'
"""the ID of the secondary storage host for the template"""
self.hostid = None
self.typeInfo['hostid'] = 'string'
"""the name of the secondary storage host for the template"""
self.hostname = None
self.typeInfo['hostname'] = 'string'
"""the hypervisor on which the template runs"""
self.hypervisor = None
self.typeInfo['hypervisor'] = 'string'
"""true if template contains XS tools inorder to support dynamic scaling of VM cpu/memory"""
self.isdynamicallyscalable = None
self.typeInfo['isdynamicallyscalable'] = 'boolean'
"""true if the template is extractable, false otherwise"""
self.isextractable = None
self.typeInfo['isextractable'] = 'boolean'
"""true if this template is a featured template, false otherwise"""
self.isfeatured = None
self.typeInfo['isfeatured'] = 'boolean'
"""true if this template is a public template, false otherwise"""
self.ispublic = None
self.typeInfo['ispublic'] = 'boolean'
"""true if the template is ready to be deployed from, false otherwise."""
self.isready = None
self.typeInfo['isready'] = 'boolean'
"""the template name"""
self.name = None
self.typeInfo['name'] = 'string'
"""the ID of the OS type for this template."""
self.ostypeid = None
self.typeInfo['ostypeid'] = 'string'
"""the name of the OS type for this template."""
self.ostypename = None
self.typeInfo['ostypename'] = 'string'
"""true if the reset password feature is enabled, false otherwise"""
self.passwordenabled = None
self.typeInfo['passwordenabled'] = 'boolean'
"""the project name of the template"""
self.project = None
self.typeInfo['project'] = 'string'
"""the project id of the template"""
self.projectid = None
self.typeInfo['projectid'] = 'string'
"""the date this template was removed"""
self.removed = None
self.typeInfo['removed'] = 'date'
"""the size of the template"""
self.size = None
self.typeInfo['size'] = 'long'
"""the template ID of the parent template if present"""
self.sourcetemplateid = None
self.typeInfo['sourcetemplateid'] = 'string'
"""true if template is sshkey enabled, false otherwise"""
self.sshkeyenabled = None
self.typeInfo['sshkeyenabled'] = 'boolean'
"""the status of the template"""
self.status = None
self.typeInfo['status'] = 'string'
"""the tag of this template"""
self.templatetag = None
self.typeInfo['templatetag'] = 'string'
"""the type of the template"""
self.templatetype = None
self.typeInfo['templatetype'] = 'string'
"""the ID of the zone for this template"""
self.zoneid = None
self.typeInfo['zoneid'] = 'string'
"""the name of the zone for this template"""
self.zonename = None
self.typeInfo['zonename'] = 'string'
"""the list of resource tags associated with tempate"""
self.tags = []
"""the ID of the latest async job acting on this object"""
self.jobid = None
self.typeInfo['jobid'] = ''
"""the current status of the latest async job acting on this object"""
self.jobstatus = None
self.typeInfo['jobstatus'] = ''
class tags:
def __init__(self):
""""the account associated with the tag"""
self.account = None
""""customer associated with the tag"""
self.customer = None
""""the domain associated with the tag"""
self.domain = None
""""the ID of the domain associated with the tag"""
self.domainid = None
""""tag key name"""
self.key = None
""""the project name where tag belongs to"""
self.project = None
""""the project id the tag belongs to"""
self.projectid = None
""""id of the resource"""
self.resourceid = None
""""resource type"""
self.resourcetype = None
""""tag value"""
self.value = None
| apache-2.0 | 6,533,930,235,692,661,000 | 39.739394 | 190 | 0.591937 | false |
shinose/qplaybox | packages/x11/driver/xf86-video-nvidia/scripts/make_nvidia_udev.py | 1 | 2169 | #!/usr/bin/env python
import os
import requests
from lxml import html
__cwd__ = os.path.dirname(os.path.realpath(__file__))
__rules__ = __cwd__ + '/../udev.d/96-nvidia.rules'
__package__ = __cwd__ + '/../package.mk'
# Get the Nvidia driver version currently being used
for line in open(__package__, 'r'):
if "PKG_VERSION" in line:
__version__ = line.split('=')[1].replace('"','').strip()
break
url = 'http://us.download.nvidia.com/XFree86/Linux-x86_64/' + __version__ + '/README/supportedchips.html'
page = requests.get(url)
tree = html.fromstring(page.content)
# These are the tables we want to use (gpu's supported by the current driver)
# NVIDIA GeForce GPUs = 1
# NVIDIA Quadro GPUs = 2
# NVIDIA NVS GPUs = 3
# NVIDIA Tesla GPUs = 4
# NVIDIA GRID GPUs = 5
ids = []
for table in range(1, 6):
ids = ids + tree.xpath('//html/body/div[@class="appendix"]/div[@class="informaltable"][' + str(table) + ']/table/tbody/tr[starts-with(@id, "devid")]/td[2]//text()')
# If three IDs are listed, the first is the PCI Device ID, the second is the PCI Subsystem Vendor ID, and the third is the PCI Subsystem Device ID.
# We only want the PCI Device ID (the first value)
unique_ids = []
for id in ids:
unique_ids.append(id.split()[0].lower())
# Sort and remove duplicate ID's
unique_ids = sorted(set(unique_ids))
# Write the rules to the file
with open(__rules__, 'w') as f:
f.write('ACTION!="add|change", GOTO="end_video"\n')
f.write('SUBSYSTEM=="pci", ATTR{class}!="0x030000", GOTO="end_video"\n\n')
for id in unique_ids:
f.write('ATTRS{vendor}=="0x10de", ATTRS{device}=="0x' + str(id) + '", GOTO="configure_nvidia"\n')
f.write('ATTRS{vendor}=="0x10de", GOTO="configure_nvidia-legacy"\n')
f.write('GOTO="end_video"\n\n')
f.write('LABEL="configure_nvidia"\n')
f.write('ENV{xorg_driver}="nvidia", TAG+="systemd", ENV{SYSTEMD_WANTS}+="[email protected]"\n')
f.write('GOTO="end_video"\n\n')
f.write('LABEL="configure_nvidia-legacy"\n')
f.write('ENV{xorg_driver}="nvidia", TAG+="systemd", ENV{SYSTEMD_WANTS}+="[email protected]"\n')
f.write('GOTO="end_video"\n\n')
f.write('LABEL="end_video"\n')
| gpl-2.0 | -1,896,295,597,228,511,500 | 38.436364 | 166 | 0.659751 | false |
stivalet/PHP-Vuln-test-suite-generator | bin/core.py | 1 | 3062 | from Classes.Manifest import *
from Flaws_generators.Generator_factory import *
from Flaws_generators.Generation_functions import *
import global_variables as g
def main(argv):
# List of flaws
flaws = ["XSS", "IDOR", "Injection", "URF", "SM", "SDE"]
flaw_list = []
#Gets options & arguments
try:
opts, args = getopt.getopt(argv, "c:f:h", ["cwe=", "flaws=", "help"])
except getopt.GetoptError:
print('Invalid argument')
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-f", "--flaws"): #Select flaws
flaw_list = arg.split(',')
elif opt in ("-c", "--cwe"): #Select CWEs
g.cwe_list = arg.split(',')
elif opt in ("-h", "--help"): #Show usage
usage()
return 0
else: #Default
usage()
return 0
for flaw in flaw_list:
if flaw not in flaws:
usage()
return 0
date = time.strftime("%m-%d-%Y_%Hh%Mm%S")
root = ET.parse('output.xml').getroot()
if len(flaw_list) == 0 or len(g.cwe_list) > 0: #Select all flaws
flaw_list=flaws
for flaw in flaw_list:
if flaw == "XSS":
initialization(Generator_factory.makeXSS_Generator(date), root)
if flaw == "Injection":
initialization(Generator_factory.makeInjection_Generator(date), root)
if flaw == "IDOR":
initialization(Generator_factory.makeIDOR_Generator(date), root)
if flaw == "URF":
initialization(Generator_factory.makeURF_Generator(date), root)
if flaw == "SM":
for input in root.findall('input'):
root.remove(input)
initialization(Generator_factory.makeSM_Generator(date), root)
if flaw == "SDE":
for input in root.findall('input'):
root.remove(input)
initialization(Generator_factory.makeSDE_Generator(date), root)
def usage():
flaw = "-f flaws to generate (flaw1,flaw2,flaw3,...):\n\tIDOR :\tInsecure Direct Object Reference\n\tInjection :\tInjection (SQL,LDAP,XPATH)\n\tSDE :\tSensitive Data Exposure\n\tSM :\tSecurity Misconfiguration\n\tURF :\tURL Redirects and Forwards\n\tXSS :\tCross-site Scripting"
cweparam = "-c generate particular CWE:\n\t78 :\tCommand OS Injection\n\t79 :\tXSS\n\t89 :\tSQL Injection\n\t90 :\tLDAP Injection\n\t91 :\tXPath Injection\n\t95 :\tCode Injection\n\t98 :\tFile Injection\n\t209 :\tInformation Exposure Through an Error Message\n\t311 :\tMissing Encryption of Sensitive Data\n\t327 :\tUse of a Broken or Risky Cryptographic Algorithm\n\t601 :\tURL Redirection to Untrusted Site\n\t862 :\tInsecure Direct Object References"
example = "$py core.py -f Injection \t// generate test cases with Injection flaws\n $py core.py -c 79 \t\t// generate test cases with cross site scripting."
print("usage: [-f flaw | -c cwe ] [arg]\nOptions and arguments:\n", flaw, "\n", cweparam,"\n",example )
if __name__ == "__main__":
main(sys.argv[1:])
| mit | 526,869,589,661,282,600 | 43.376812 | 457 | 0.612998 | false |
pjdufour/geonode | geonode/base/forms.py | 1 | 13646 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import autocomplete_light
from fields import MultiThesauriField
from widgets import MultiThesauriWidget
from autocomplete_light.contrib.taggit_field import TaggitField, TaggitWidget
from django import forms
from django.forms import models
from django.forms.fields import ChoiceField
from django.forms.utils import flatatt
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Q
from django.utils.encoding import (
force_text,
)
from bootstrap3_datetime.widgets import DateTimePicker
from modeltranslation.forms import TranslationModelForm
from geonode.base.models import TopicCategory, Region
from geonode.people.models import Profile
def get_tree_data():
def rectree(parent, path):
children_list_of_tuples = list()
c = Region.objects.filter(parent=parent)
for child in c:
children_list_of_tuples.append(
tuple((path + parent.name, tuple((child.id, child.name))))
)
childrens = rectree(child, parent.name + '/')
if childrens:
children_list_of_tuples.extend(childrens)
return children_list_of_tuples
data = list()
try:
t = Region.objects.filter(Q(level=0) | Q(parent=None))
for toplevel in t:
data.append(
tuple((toplevel.id, toplevel.name))
)
childrens = rectree(toplevel, '')
if childrens:
data.append(
tuple((toplevel.name, childrens))
)
except:
pass
return tuple(data)
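# Illustrative sketch of the structure returned by get_tree_data(), suitable for a
# grouped Django "choices" argument (region ids/names below are hypothetical):
#   (
#       (1, 'Africa'),                             # top-level (id, name)
#       ('Africa', [('Africa', (2, 'Kenya'))]),    # (group name, [(section path, (id, name)), ...])
#   )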
class AdvancedModelChoiceIterator(models.ModelChoiceIterator):
def choice(self, obj):
return (self.field.prepare_value(obj), self.field.label_from_instance(obj), obj)
class CategoryChoiceField(forms.ModelChoiceField):
def _get_choices(self):
if hasattr(self, '_choices'):
return self._choices
return AdvancedModelChoiceIterator(self)
choices = property(_get_choices, ChoiceField._set_choices)
def label_from_instance(self, obj):
return '<i class="fa '+obj.fa_class+' fa-2x unchecked"></i>' \
'<i class="fa '+obj.fa_class+' fa-2x checked"></i>' \
'<span class="has-popover" data-container="body" data-toggle="popover" data-placement="top" ' \
'data-content="' + obj.description + '" trigger="hover">' \
'<br/><strong>' + obj.gn_description + '</strong></span>'
class TreeWidget(forms.TextInput):
input_type = 'text'
def __init__(self, attrs=None):
super(TreeWidget, self).__init__(attrs)
def render(self, name, values, attrs=None):
if isinstance(values, basestring):
vals = values
else:
vals = ','.join([str(i.tag.name) for i in values])
output = ["""<input class='form-control' id='id_resource-keywords' name='resource-keywords'
value='%s'><br/>""" % (vals)]
output.append('<div id="treeview" class=""></div>')
return mark_safe(u'\n'.join(output))
class RegionsMultipleChoiceField(forms.MultipleChoiceField):
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise forms.ValidationError(self.error_messages['required'], code='required')
class RegionsSelect(forms.Select):
allow_multiple_selected = True
def render(self, name, value, attrs=None):
if value is None:
value = []
final_attrs = self.build_attrs(attrs, name=name)
output = [format_html('<select multiple="multiple"{}>', flatatt(final_attrs))]
options = self.render_options(value)
if options:
output.append(options)
output.append('</select>')
return mark_safe('\n'.join(output))
def value_from_datadict(self, data, files, name):
try:
getter = data.getlist
except AttributeError:
getter = data.get
return getter(name)
def render_option_value(self, selected_choices, option_value, option_label, data_section=None):
if option_value is None:
option_value = ''
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected')
if not self.allow_multiple_selected:
# Only allow for a single selection.
selected_choices.remove(option_value)
else:
selected_html = ''
label = force_text(option_label)
if data_section is None:
data_section = ''
else:
data_section = force_text(data_section)
if '/' in data_section:
label = format_html('{} [{}]', label, data_section.rsplit('/', 1)[1])
return format_html('<option data-section="{}" value="{}"{}>{}</option>',
data_section,
option_value,
selected_html,
label)
def render_options(self, selected_choices):
# Normalize to strings.
selected_choices = set(force_text(v) for v in selected_choices)
output = []
output.append(format_html('<optgroup label="{}">', 'Global'))
for option_value, option_label in self.choices:
if not isinstance(option_label, (list, tuple)) and isinstance(option_label, basestring):
output.append(self.render_option_value(selected_choices, option_value, option_label))
output.append('</optgroup>')
for option_value, option_label in self.choices:
if isinstance(option_label, (list, tuple)) and not isinstance(option_label, basestring):
output.append(format_html('<optgroup label="{}">', force_text(option_value)))
for option in option_label:
if isinstance(option, (list, tuple)) and not isinstance(option, basestring):
if isinstance(option[1][0], (list, tuple)) and not isinstance(option[1][0], basestring):
for option_child in option[1][0]:
output.append(self.render_option_value(selected_choices,
*option_child,
data_section=force_text(option[1][0][0])))
else:
output.append(self.render_option_value(selected_choices,
*option[1],
data_section=force_text(option[0])))
else:
output.append(self.render_option_value(selected_choices,
*option,
data_section=force_text(option_value)))
output.append('</optgroup>')
return '\n'.join(output)
class CategoryForm(forms.Form):
category_choice_field = CategoryChoiceField(required=False,
label='*' + _('Category'),
empty_label=None,
queryset=TopicCategory.objects.filter(is_choice=True)
.extra(order_by=['description']))
def clean(self):
cleaned_data = self.data
ccf_data = cleaned_data.get("category_choice_field")
if not ccf_data:
msg = _("Category is required.")
self._errors = self.error_class([msg])
# Always return the full collection of cleaned data.
return cleaned_data
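# Minimal usage sketch (the submitted pk below is hypothetical); the form only checks
# that a category was submitted and reports "Category is required." otherwise:
#   form = CategoryForm(data={'category_choice_field': '1'})
#   form.is_valid()   # populates form.errors when no category was chosen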
class TKeywordForm(forms.Form):
tkeywords = MultiThesauriField(
label=_("Keywords from Thesauri"),
required=False,
help_text=_("List of keywords from Thesauri"),
widget=MultiThesauriWidget())
def clean(self):
cleaned_data = None
if self.data:
try:
cleaned_data = [{key: self.data.getlist(key)} for key, value
in self.data.items()
if 'tkeywords-tkeywords' in key.lower() and 'autocomplete' not in key.lower()]
except:
pass
return cleaned_data
class ResourceBaseForm(TranslationModelForm):
"""Base form for metadata, should be inherited by childres classes of ResourceBase"""
owner = forms.ModelChoiceField(
empty_label="Owner",
label=_("Owner"),
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
_date_widget_options = {
"icon_attrs": {"class": "fa fa-calendar"},
"attrs": {"class": "form-control input-sm"},
# "format": "%Y-%m-%d %I:%M %p",
"format": "%Y-%m-%d",
# Options for the datetimepickers are not set here on purpose.
# They are set in the metadata_form_js.html template because
# bootstrap-datetimepicker uses jquery for its initialization
# and we need to ensure it is available before trying to
# instantiate a new datetimepicker. This could probably be improved.
"options": False,
}
date = forms.DateTimeField(
label=_("Date"),
localize=True,
input_formats=['%Y-%m-%d'],
widget=DateTimePicker(**_date_widget_options)
)
temporal_extent_start = forms.DateTimeField(
label=_("temporal extent start"),
required=False,
localize=True,
input_formats=['%Y-%m-%d'],
widget=DateTimePicker(**_date_widget_options)
)
temporal_extent_end = forms.DateTimeField(
label=_("temporal extent end"),
required=False,
localize=True,
input_formats=['%Y-%m-%d'],
widget=DateTimePicker(**_date_widget_options)
)
poc = forms.ModelChoiceField(
empty_label=_("Person outside GeoNode (fill form)"),
label=_("Point of Contact"),
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
metadata_author = forms.ModelChoiceField(
empty_label=_("Person outside GeoNode (fill form)"),
label=_("Metadata Author"),
required=False,
queryset=Profile.objects.exclude(
username='AnonymousUser'),
widget=autocomplete_light.ChoiceWidget('ProfileAutocomplete'))
keywords = TaggitField(
label=_("Free-text Keywords"),
required=False,
help_text=_("A space or comma-separated list of keywords. Use the widget to select from Hierarchical tree."),
widget=TaggitWidget('HierarchicalKeywordAutocomplete'))
"""
regions = TreeNodeMultipleChoiceField(
label=_("Regions"),
required=False,
queryset=Region.objects.all(),
level_indicator=u'___')
"""
regions = RegionsMultipleChoiceField(
label=_("Regions"),
required=False,
choices=get_tree_data(),
widget=RegionsSelect)
regions.widget.attrs = {"size": 20}
def __init__(self, *args, **kwargs):
super(ResourceBaseForm, self).__init__(*args, **kwargs)
for field in self.fields:
help_text = self.fields[field].help_text
self.fields[field].help_text = None
if help_text != '':
self.fields[field].widget.attrs.update(
{
'class': 'has-popover',
'data-content': help_text,
'data-placement': 'right',
'data-container': 'body',
'data-html': 'true'})
class Meta:
exclude = (
'contacts',
'name',
'uuid',
'bbox_x0',
'bbox_x1',
'bbox_y0',
'bbox_y1',
'srid',
'category',
'csw_typename',
'csw_schema',
'csw_mdsource',
'csw_type',
'csw_wkt_geometry',
'metadata_uploaded',
'metadata_xml',
'csw_anytext',
'popular_count',
'share_count',
'thumbnail',
'charset',
'rating',
'detail_url'
)
| gpl-3.0 | -3,462,400,340,119,322,600 | 36.081522 | 117 | 0.555767 | false |
whitews/FlowIO | examples/print_channels.py | 1 | 1358 | import flowio
import os
import sys
if len(sys.argv) > 1:
flow_dir = sys.argv[1]
else:
flow_dir = os.getcwd()
files = os.listdir(flow_dir)
for file in files:
try:
flow_data = flowio.FlowData("/".join([flow_dir,file]))
except:
continue
print file + ':'
for key in sorted(flow_data.channels.keys()):
line = key + '\t' + \
flow_data.channels[key]['PnN'] + '\t'
if 'PnS' in flow_data.channels[key]:
line += flow_data.channels[key]['PnS']
print '\t' + line
if 'creator' in flow_data.text:
print '\t' + 'Creator: ' + flow_data.text['creator']
if 'export time' in flow_data.text:
print '\t' + 'Export time: ' + flow_data.text['export time']
if 'experiment name' in flow_data.text:
print '\t' + 'Experiment name: ' + flow_data.text['experiment name']
if 'patient id' in flow_data.text:
print '\t' + 'Patient ID: ' + flow_data.text['patient id']
if 'tube name' in flow_data.text:
print '\t' + 'Tube name: ' + flow_data.text['tube name']
if 'src' in flow_data.text:
print '\t' + 'Source: ' + flow_data.text['src']
if 'sample id' in flow_data.text:
print '\t' + 'Sample ID: ' + flow_data.text['sample id']
if 'tot' in flow_data.text:
print '\t' + 'Total: ' + flow_data.text['tot'] | bsd-3-clause | -1,600,365,581,725,694,500 | 31.357143 | 76 | 0.562592 | false |
scalyr/scalyr-agent-2 | scalyr_agent/compat.py | 1 | 7997 | # Copyright 2014-2020 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
if False: # NOSONAR
from typing import Union, Tuple, Any, Generator, Iterable, Optional
import sys
import struct
import os
import subprocess
import six
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY26 = sys.version_info[0] == 2 and sys.version_info[1] == 6
PY27 = sys.version_info[0] == 2 and sys.version_info[1] == 7
PY2_pre_279 = PY2 and sys.version_info < (2, 7, 9)
PY_post_equal_279 = sys.version_info >= (2, 7, 9)
PY3_pre_32 = PY3 and sys.version_info < (3, 2)
PY3_post_equal_37 = PY3 and sys.version_info >= (3, 7)
# NOTE: ssl.match_hostname was added in Python 2.7.9 so for earlier versions, we need to use
# version from backports package
if PY2_pre_279 or PY3_pre_32:
try:
from backports.ssl_match_hostname import (
match_hostname as ssl_match_hostname,
) # NOQA
from backports.ssl_match_hostname import CertificateError # NOQA
except ImportError:
# NOTE: We should never come here in real life. If we do, it indicates we messed up package
# creation and / or path mangling in scalyr_init().
raise Exception(
"Missing backports.ssl_match_hostname module, hostname verification can't "
"be performed"
)
else:
# ssl module in Python 2 >= 2.7.9 and Python 3 >= 3.2 includes match hostname function
from ssl import match_hostname as ssl_match_hostname # NOQA
from ssl import CertificateError # type: ignore # NOQA
def custom_any(iterable):
if sys.version_info[:2] > (2, 4):
return any(iterable)
else:
for element in iterable:
if element:
return True
return False
def custom_all(iterable):
if sys.version_info[:2] > (2, 4):
return all(iterable)
else:
for element in iterable:
if not element:
return False
return True
def custom_defaultdict(default_type):
if sys.version_info[:2] > (2, 4):
from collections import defaultdict
return defaultdict(default_type)
else:
class DefaultDict(dict):
def __getitem__(self, key):
if key not in self:
dict.__setitem__(self, key, default_type())
return dict.__getitem__(self, key)
return DefaultDict()
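# Illustrative use of the fallback-aware defaultdict (behaves the same on Python >= 2.5
# and on the hand-rolled DefaultDict used for older interpreters):
#   buckets = custom_defaultdict(list)
#   buckets["errors"].append("timeout")   # missing keys are created with list()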
if six.PY2:
class EnvironUnicode(object):
"""Just a wrapper for os.environ, to convert its items to unicode in python2."""
def __getitem__(self, item):
value = os.environ[item]
return six.ensure_text(value)
def get(self, item, default=None):
value = os.environ.get(item, default)
if value is not None:
value = six.ensure_text(value)
return value
def pop(self, item, default=None):
value = os.environ.pop(item, default)
if value is not None:
value = six.ensure_text(value)
return value
def __setitem__(self, key, value):
key = six.ensure_text(key)
value = six.ensure_text(value)
os.environ[key] = value
@staticmethod
def _iterable_elements_to_unicode_generator(iterable):
# type: (Iterable) -> Generator[Union[Tuple, Any]]
"""Generator that gets values from original iterable and converts its 'str' values to 'unicode'"""
for element in iterable:
if type(element) is tuple:
yield tuple(
v.decode("utf-8", "replace")
if type(v) is six.binary_type
else v
for v in element
)
else:
yield six.ensure_text(element)
def iteritems(self):
return self._iterable_elements_to_unicode_generator(
six.iteritems(os.environ)
)
def items(self):
return list(
self._iterable_elements_to_unicode_generator(os.environ.items())
)
def iterkeys(self):
return self._iterable_elements_to_unicode_generator(
six.iterkeys(os.environ)
)
def keys(self):
return list(self._iterable_elements_to_unicode_generator(os.environ.keys()))
def itervalues(self):
return self._iterable_elements_to_unicode_generator(
six.itervalues(os.environ)
)
def values(self):
return list(
self._iterable_elements_to_unicode_generator(os.environ.values())
)
def copy(self):
return dict(self.items())
def __iter__(self):
return self.iterkeys()
def os_getenv_unicode(name, default=None):
"""The same logic as in os.environ, but with None check."""
result = os.getenv(name, default)
if result is not None:
result = six.ensure_text(result)
return result
os_environ_unicode = EnvironUnicode()
else:
os_environ_unicode = os.environ
os_getenv_unicode = os.getenv
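# Illustrative usage (variable names are examples only): both helpers behave like
# os.environ / os.getenv but guarantee text (unicode) values on Python 2 as well.
#   shell = os_getenv_unicode("SHELL", default=None)
#   path_value = os_environ_unicode.get("PATH", "")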
# 2->TODO struct.pack|unpack, does not accept unicode as format string.
# see more: https://python-future.org/stdlib_incompatibilities.html#struct-pack
# to avoid conversion of format string on every struct.pack call, we can monkey patch it here.
if sys.version_info[:3] < (2, 7, 7):
def python_unicode_pack_unpack_wrapper(f):
def _pack_unpack(format_str, *args):
"""wrapper for struct.pack function that converts unicode format string to 'str'"""
binary_format_str = six.ensure_binary(format_str)
return f(binary_format_str, *args)
return _pack_unpack
struct_pack_unicode = python_unicode_pack_unpack_wrapper(struct.pack)
struct_unpack_unicode = python_unicode_pack_unpack_wrapper(struct.unpack)
else:
struct_pack_unicode = struct.pack
struct_unpack_unicode = struct.unpack
def which(executable):
# type: (str) -> Optional[str]
"""
Search for the provided executable in PATH and return path to it if found.
"""
paths = os.environ["PATH"].split(os.pathsep)
for path in paths:
full_path = os.path.join(path, executable)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
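# Illustrative call (the resulting path depends on the host system):
#   python_path = which("python")   # e.g. "/usr/bin/python", or None if not on PATH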
def find_executable(executable):
# type: (str) -> Optional[str]
"""
Wrapper around distutils.spawn.find_executable which is not available in some default Python 3
installations where full blown python3-distutils package is not installed.
"""
try:
from distutils.spawn import find_executable as distutils_find_executable
except ImportError:
# Likely Ubuntu 18.04 where python3-distutils package is not present (default behavior)
return which(executable)
return distutils_find_executable(executable)
def subprocess_check_output(cmd, *args, **kwargs):
"""
Wrapper around subprocess.check_output which is not available under Python 2.6.
"""
if sys.version_info < (2, 7, 0):
output = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *args, **kwargs
).communicate()[0]
else:
output = subprocess.check_output(cmd, *args, **kwargs)
return output
| apache-2.0 | 6,758,009,555,331,464,000 | 31.909465 | 110 | 0.615481 | false |
wiki2014/Learning-Summary | alps/cts/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py | 1 | 3382 | # Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.device
import its.objects
import its.caps
import os.path
import numpy
import pylab
import matplotlib
import matplotlib.pyplot
def main():
"""Take long bursts of images and check that they're all identical.
    Assumes a static scene. Can be used to identify if there are sporadic
frames that are processed differently or have artifacts, or if 3A isn't
stable, since this test converges 3A at the start but doesn't lock 3A
throughout capture.
"""
NAME = os.path.basename(__file__).split(".")[0]
BURST_LEN = 6
BURSTS = 2
FRAMES = BURST_LEN * BURSTS
DELTA_THRESH = 0.1
with its.device.ItsSession() as cam:
# Capture at full resolution.
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.manual_sensor(props) and
its.caps.awb_lock(props))
w,h = its.objects.get_available_output_sizes("yuv", props)[0]
# Converge 3A prior to capture.
cam.do_3a(lock_ae=True, lock_awb=True)
# After 3A has converged, lock AE+AWB for the duration of the test.
req = its.objects.fastest_auto_capture_request(props)
req["android.blackLevel.lock"] = True
req["android.control.awbLock"] = True
req["android.control.aeLock"] = True
# Capture bursts of YUV shots.
# Build a 4D array, which is an array of all RGB images after down-
# scaling them by a factor of 4x4.
imgs = numpy.empty([FRAMES,h/4,w/4,3])
for j in range(BURSTS):
caps = cam.do_capture([req]*BURST_LEN)
for i,cap in enumerate(caps):
n = j*BURST_LEN + i
imgs[n] = its.image.downscale_image(
its.image.convert_capture_to_rgb_image(cap), 4)
# Dump all images.
print "Dumping images"
for i in range(FRAMES):
its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
# The mean image.
img_mean = imgs.mean(0)
its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
# Compute the deltas of each image from the mean image; this test
# passes if none of the deltas are large.
print "Computing frame differences"
delta_maxes = []
for i in range(FRAMES):
deltas = (imgs[i] - img_mean).reshape(h*w*3/16)
delta_max_pos = numpy.max(deltas)
delta_max_neg = numpy.min(deltas)
delta_maxes.append(max(abs(delta_max_pos), abs(delta_max_neg)))
max_delta_max = max(delta_maxes)
print "Frame %d has largest diff %f" % (
delta_maxes.index(max_delta_max), max_delta_max)
assert(max_delta_max < DELTA_THRESH)
if __name__ == '__main__':
main()
| gpl-3.0 | 4,365,159,712,139,811,000 | 34.978723 | 75 | 0.630692 | false |
shayn1234/apps-catalog | deployment/catalog-ci-jenkins/modules/catalog_ci/files/scripts/generate_names.py | 1 | 1579 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import re
from sys import argv
import yaml
def yaml_to_dict(infile, k):
stream = open(infile, 'r')
rdict = yaml.load(stream)[k]
return rdict
def diff_images_config(images1, images2):
if images1 == images2:
return ''
intersec = [item for item in images1 if item in images2]
sym_diff = [item for item in itertools.chain(
images1, images2) if item not in intersec]
name = ''
d_size = len(sym_diff)
if d_size <= 2:
i = d_size - 1
else:
return ''
if 'name' in sym_diff[i].keys() and 'format' in sym_diff[i].keys():
i_name = re.sub('[(){}<>]', '', sym_diff[i]['name'])
i_type = sym_diff[i]['format']
name = i_name + '.' + i_type
name = name.lower().replace(" ", "_")
return name
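# Illustrative example (image entries are hypothetical): the single differing image is
# turned into a lower-cased file name built from its 'name' and 'format' fields.
#   diff_images_config([], [{'name': 'Ubuntu 14.04', 'format': 'qcow2'}])
#   # -> 'ubuntu_14.04.qcow2'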
if __name__ == '__main__':
if argv[1] == 'glance':
images1 = yaml_to_dict(argv[2], 'images')
images2 = yaml_to_dict(argv[3], 'images')
print(diff_images_config(images1, images2))
| apache-2.0 | -7,187,242,229,561,227,000 | 30.58 | 75 | 0.631412 | false |
aeppert/py-cifsdk | test/email/t1.py | 1 | 2367 | # -*- coding: utf-8 -*-
msg = """
Delivered-To: [email protected]
Received: by 10.112.40.50 with SMTP id u18csp916705lbk;
Sun, 19 Apr 2015 05:50:04 -0700 (PDT)
X-Received: by 10.42.151.4 with SMTP id c4mr13784232icw.77.1429447803846;
Sun, 19 Apr 2015 05:50:03 -0700 (PDT)
Return-Path: <[email protected]>
Received: from gmail.com ([61.72.137.254])
by mx.google.com with SMTP id s93si13575887ioe.52.2015.04.19.05.50.00
for <[email protected]>;
Sun, 19 Apr 2015 05:50:03 -0700 (PDT)
Received-SPF: softfail (google.com: domain of transitioning [email protected] does not designate 61.72.137.254 as permitted sender) client-ip=61.72.137.254;
Authentication-Results: mx.google.com;
spf=softfail (google.com: domain of transitioning [email protected] does not designate 61.72.137.254 as permitted sender) [email protected];
dmarc=fail (p=NONE dis=NONE) header.from=gmail.com
Message-ID: <[email protected]>
Date: Sun, 19 Apr 2015 05:24:33 -0700
Reply-To: "HENRY" <[email protected]>
From: "HENRY" <[email protected]>
User-Agent: Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.19) Gecko/20081209 Thunderbird/2.0.0.19
MIME-Version: 1.0
To: <[email protected]>
Subject: Boost Social Presence with FB posts likes
Content-Type: text/plain;
charset="us-ascii"
Content-Transfer-Encoding: 7bit
Hello,
Boost your Facebook posts with a massive promotion
and gain over 10.000 likes in total towards all your posts.
We can promote up to 20 posts links at a time.
Increase exposure with guaranteed promotion service.
Use this coupon and get another 10% discount on your purchase
==================
10% Coupon = EB2CA
==================
Order today, cheap and guaranteed service:
http://www.socialservices.cn/detail.php?id=9
Regards
HENRY
Â
Unsubscribe option is available on the footer of our website
"""
from cifsdk.email import parse_message
from cifsdk.urls import extract_urls
from pprint import pprint
def test_parse_message():
body = parse_message(msg)
assert type(body) is list
assert body[0].startswith(b'Delivered-To: [email protected]')
def test_email_urls():
body = parse_message(msg)
urls = extract_urls(body[0])
assert 'http://www.socialservices.cn/detail.php?id=9' in urls | lgpl-3.0 | -4,987,111,927,963,269,000 | 30.546667 | 173 | 0.724736 | false |
lrocheWB/navitia | source/jormungandr/jormungandr/scenarios/tests/qualifier_tests.py | 1 | 13540 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from nose.tools import eq_
from jormungandr.scenarios import qualifier
import navitiacommon.response_pb2 as response_pb2
from jormungandr.utils import str_to_time_stamp
#journey.arrival_date_time
#journey.duration
#journey.nb_transfers
#journeys.sections[i].type
#journeys.sections[i].duration
#journey.sections
def qualifier_one_direct_test():
journeys = []
journey_direct = response_pb2.Journey()
journey_direct.arrival_date_time = str_to_time_stamp("20131107T151200")
journey_direct.duration = 25 * 60
journey_direct.nb_transfers = 0
journey_direct.sections.add()
journey_direct.sections.add()
journey_direct.sections[0].type = response_pb2.STREET_NETWORK
journey_direct.sections[0].duration = 2 * 60
journey_direct.sections[1].type = response_pb2.STREET_NETWORK
journey_direct.sections[1].duration = 4 * 60
journeys.append(journey_direct)
qualifier.qualifier_one(journeys, "departure")
assert("non_pt" in journey_direct.type)
# Test with 5 journeys
# a standard with 3 tranfers and 60mn trip
# the fastest with 2 transfers and 62 mn trip
# the healthiest with 1 transfert, 65 mn trip and more walk
# the most confortable with 1 transfert and 80mn trip and less walk
def qualifier_two_test():
journeys = []
journey_standard = response_pb2.Journey()
journey_standard.type = "none"
journey_standard.arrival_date_time = str_to_time_stamp("20131107T150000")
journey_standard.duration = 60 * 60
journey_standard.nb_transfers = 3
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections[0].type = response_pb2.STREET_NETWORK
journey_standard.sections[0].duration = 10 * 60
journey_standard.sections[-1].type = response_pb2.STREET_NETWORK
journey_standard.sections[-1].duration = 10 * 60
journeys.append(journey_standard)
journey_rapid = response_pb2.Journey()
journey_rapid.arrival_date_time = str_to_time_stamp("20131107T150500")
journey_rapid.duration = 62 * 60
journey_rapid.nb_transfers = 2
journey_rapid.sections.add()
journey_rapid.sections.add()
journey_rapid.sections.add()
journey_rapid.sections[0].type = response_pb2.STREET_NETWORK
journey_rapid.sections[0].duration = 10 * 60
journey_rapid.sections[-1].type = response_pb2.STREET_NETWORK
journey_rapid.sections[-1].duration = 10 * 60
journeys.append(journey_rapid)
journey_health = response_pb2.Journey()
journey_health.arrival_date_time = str_to_time_stamp("20131107T151000")
journey_health.duration = 70 * 60
journey_health.nb_transfers = 1
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections[0].type = response_pb2.STREET_NETWORK
journey_health.sections[0].duration = 15 * 60
journey_health.sections[1].type = response_pb2.TRANSFER
journey_health.sections[1].duration = 10 * 60
journey_health.sections[-1].type = response_pb2.STREET_NETWORK
journey_health.sections[-1].duration = 10 * 60
journeys.append(journey_health)
journey_confort = response_pb2.Journey()
journey_confort.arrival_date_time = str_to_time_stamp("20131107T152000")
journey_confort.duration = 80 * 60
journey_confort.nb_transfers = 1
journey_confort.sections.add()
journey_confort.sections.add()
journey_confort.sections.add()
journey_confort.sections[0].type = response_pb2.STREET_NETWORK
journey_confort.sections[0].duration = 5 * 60
journey_confort.sections[-1].type = response_pb2.STREET_NETWORK
journey_confort.sections[-1].duration = 5 * 60
journeys.append(journey_confort)
qualifier.qualifier_one(journeys, "departure")
eq_(journey_standard.type, "fastest") # the standard should be the fastest
eq_(journey_rapid.type, "rapid")
#TODO! refacto this test with custom rules not to depends on changing business rules
# eq_(journey_confort.type, "comfort")
# eq_(journey_health.type, "healthy")
def has_car_test():
journey = response_pb2.Journey()
journey.sections.add()
section = journey.sections[0]
section.type = response_pb2.STREET_NETWORK
section.street_network.mode = response_pb2.Car
assert(qualifier.has_car(journey))
foot_journey = response_pb2.Journey()
foot_journey.sections.add()
foot_journey.sections.add()
foot_journey.sections.add()
foot_journey.sections[0].street_network.mode = response_pb2.Walking
foot_journey.sections[1].street_network.mode = response_pb2.Bike
foot_journey.sections[2].street_network.mode = response_pb2.Bss
assert(not qualifier.has_car(foot_journey))
foot_journey.sections.add()
foot_journey.sections[3].type = response_pb2.STREET_NETWORK
foot_journey.sections[3].street_network.mode = response_pb2.Car
assert(qualifier.has_car(foot_journey))
def standard_choice_test():
journeys = []
#the first is the worst one
journey_worst = response_pb2.Journey()
journey_worst.arrival_date_time = str_to_time_stamp("20131107T161200")
journey_worst.sections.add()
journey_worst.sections[0].type = response_pb2.STREET_NETWORK
journey_worst.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_worst)
# arrive later but no car
journey_not_good = response_pb2.Journey()
journey_not_good.arrival_date_time = str_to_time_stamp("20131107T171200")
journey_not_good.sections.add()
journey_not_good.sections[0].type = response_pb2.STREET_NETWORK
journey_not_good.sections[0].street_network.mode = response_pb2.Bike
journeys.append(journey_not_good)
#this is the standard
journey_1 = response_pb2.Journey()
journey_1.arrival_date_time = str_to_time_stamp("20131107T151200")
journey_1.sections.add()
journey_1.sections[0].type = response_pb2.STREET_NETWORK
journey_1.sections[0].street_network.mode = response_pb2.Bike
journeys.append(journey_1)
# a better journey, but using car
journey_2 = response_pb2.Journey()
journey_2.arrival_date_time = str_to_time_stamp("20131107T151000")
journey_2.sections.add()
journey_2.sections[0].type = response_pb2.STREET_NETWORK
journey_2.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_2)
standard = qualifier.choose_standard(journeys, qualifier.arrival_crit)
print(qualifier.has_car(standard))
print("standard ", standard.arrival_date_time)
eq_(standard, journey_1)
def standard_choice_with_pt_test():
journeys = []
#the first is the worst one
journey_worst = response_pb2.Journey()
journey_worst.arrival_date_time = str_to_time_stamp("20131107T161200")
journey_worst.sections.add()
journey_worst.sections[0].type = response_pb2.STREET_NETWORK
journey_worst.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_worst)
# arrive later but no car
journey_not_good = response_pb2.Journey()
journey_not_good.arrival_date_time = str_to_time_stamp("20131107T171200")
journey_not_good.sections.add()
journey_not_good.sections[0].type = response_pb2.STREET_NETWORK
journey_not_good.sections[0].street_network.mode = response_pb2.Bike
journeys.append(journey_not_good)
#this is the standard
journey_1 = response_pb2.Journey()
journey_1.arrival_date_time = str_to_time_stamp("20131107T201200")
journey_1.sections.add()
journey_1.sections[0].type = response_pb2.PUBLIC_TRANSPORT
journeys.append(journey_1)
# a better journey, but using car
journey_2 = response_pb2.Journey()
journey_2.arrival_date_time = str_to_time_stamp("20131107T151000")
journey_2.sections.add()
journey_2.sections[0].type = response_pb2.STREET_NETWORK
journey_2.sections[0].street_network.mode = response_pb2.Car
journeys.append(journey_2)
standard = qualifier.choose_standard(journeys, qualifier.arrival_crit)
print(qualifier.has_car(standard))
print("standard ", standard.arrival_date_time)
eq_(standard, journey_1)
def choose_standard_pt_car():
journeys = []
journey1 = response_pb2.Journey()
journey1.arrival_date_time = str_to_time_stamp("20141120T170000")
journey1.sections.add()
journey1.sections[0].type = response_pb2.STREET_NETWORK
    journey1.sections[0].street_network.mode = response_pb2.Car
def tranfers_cri_test():
journeys = []
dates = ["20131107T100000", "20131107T150000", "20131107T050000",
"20131107T100000", "20131107T150000", "20131107T050000"]
transfers = [4, 3, 8, 1, 1, 2]
for i in range(6):
journey = response_pb2.Journey()
journey.nb_transfers = transfers[i]
journey.arrival_date_time = str_to_time_stamp(dates[i])
journeys.append(journey)
best = qualifier.min_from_criteria(journeys, [qualifier.transfers_crit,
qualifier.arrival_crit])
#the transfert criterion is first, and then if 2 journeys have
#the same nb_transfers, we compare the dates
eq_(best.nb_transfers, 1)
eq_(best.arrival_date_time, str_to_time_stamp("20131107T100000"))
def qualifier_crowfly_test():
journeys = []
journey_standard = response_pb2.Journey()
journey_standard.arrival_date_time = str_to_time_stamp('20140825T113224')
journey_standard.duration = 2620
journey_standard.nb_transfers = 1
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections.add()
journey_standard.sections[0].type = response_pb2.CROW_FLY
journey_standard.sections[0].duration = 796
journey_standard.sections[-1].type = response_pb2.CROW_FLY
journey_standard.sections[-1].duration = 864
journeys.append(journey_standard)
journey_health = response_pb2.Journey()
journey_health.arrival_date_time = str_to_time_stamp('20140825T114000')
journey_health.duration = 3076
journey_health.nb_transfers = 1
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections.add()
journey_health.sections[0].type = response_pb2.CROW_FLY
journey_health.sections[0].duration = 796
journey_health.sections[-1].type = response_pb2.CROW_FLY
journey_health.sections[-1].duration = 0
journeys.append(journey_health)
qualifier.qualifier_one(journeys, "departure")
eq_(journey_standard.type, "rapid") # the standard should be the fastest
eq_(journey_health.type, "less_fallback_walk")
def qualifier_nontransport_duration_only_walk_test():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp('20140825T113224')
journey.duration = 2620
journey.nb_transfers = 1
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections[0].type = response_pb2.CROW_FLY
journey.sections[0].duration = 796
journey.sections[1].type = response_pb2.TRANSFER
journey.sections[1].duration = 328
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].duration = 864
eq_(qualifier.get_nontransport_duration(journey), 1988)
def qualifier_nontransport_duration_with_tc_test():
journey = response_pb2.Journey()
journey.arrival_date_time = str_to_time_stamp('20140825T113224')
journey.duration = 2620
journey.nb_transfers = 2
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections.add()
journey.sections[0].type = response_pb2.CROW_FLY
journey.sections[0].duration = 796
journey.sections[1].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[1].duration = 328
journey.sections[2].type = response_pb2.TRANSFER
journey.sections[2].duration = 418
journey.sections[3].type = response_pb2.WAITING
journey.sections[3].duration = 719
journey.sections[4].type = response_pb2.PUBLIC_TRANSPORT
journey.sections[4].duration = 175
journey.sections[-1].type = response_pb2.STREET_NETWORK
journey.sections[-1].duration = 864
eq_(qualifier.get_nontransport_duration(journey), 2797)
| agpl-3.0 | -3,661,965,739,037,925,000 | 34.07772 | 88 | 0.713885 | false |
lmorillas/catalogoprogramaseducativos | lib/extrae_info_wikispaces.py | 1 | 4799 | # -*- coding: utf-8 -*-
import urlparse
import re
import json
from amara.bindery import html
from amara.lib import U
import shelve
catsh = shelve.open('catalogo.dat')
def quita_punto(texto):
if texto.endswith('.'):
return texto[:-1]
else: return texto
def dame_texto(texto, inicio, fin):
_ini = texto.find(inicio)
if _ini != -1:
i = _ini +len(inicio)
f = texto[i:].find(fin)
f1 = texto[i:].find('\n\n')
_fin = min(f, f1)
if _fin == -1:
_fin = max(f, f1)
texto = texto[i:i+_fin]
texto = texto.replace (u'\u200d', '').strip()
return texto
def parse_proyecto(url):
item = {}
item['url'] = url
#doc = html.parse(url)
#texto = U(doc.xml_select(u'//div[@class="ws-theme-content-inner"]'))
#catsh[url.encode('utf-8')] = texto
texto = catsh.get(url.encode('utf-8'))
#texto = texto.decode('utf-8')
texto = texto[texto.find("NOMBRE DE LA"):]
nombre = dame_texto(texto, u'NOMBRE DE LA ACTUACIÓN', u'ÓRGANO GESTOR')
item ['label'] = nombre
gestor = dame_texto(texto, u'ÓRGANO GESTOR', u'DESCRIPCIÓN')
if gestor:
gestor = quita_punto(gestor)
if '\n' in gestor:
gestor = gestor.split('\n')
gestor = map(quita_punto, gestor)
item['gestor'] = gestor
descripcion = dame_texto(texto, u'DESCRIPCIÓN', 'DESTINATARIOS')
if descripcion:
item['descripcion'] = descripcion
destinatarios = dame_texto(texto, 'DESTINATARIOS', 'SOLICITUD')
item['destinatarios'] = destinatarios
solicitud = dame_texto(texto, 'SOLICITUD', 'FECHAS' )
item['solicitud'] = solicitud
fechas = dame_texto(texto, 'FECHAS' , u'FINANCIACIÓN')
item['fechas'] = fechas
financiacion = dame_texto(texto, u'FINANCIACIÓN', '\n\n')
if financiacion:
item['financiacion'] = financiacion
masinfo = dame_texto(texto, u'MÁS INFORMACIÓN', '\n\n')
if masinfo:
mas_url = re.search("(?P<url>https?://[^\s]+)", masinfo)
if mas_url:
url = mas_url.group("url")
masinfo = masinfo.replace(url, '<a href="{}" target="_blank">{}</a>'.format(url, url) )
item['masinfo'] = masinfo
return item
f = json.load(open('catacata.json'))
items = f.get('items')
nitems = []
errores = []
for it in items:
url = it.get('url')
if url:
#try:
print '-->', url
res = parse_proyecto(url)
nitems.append(res)
#except:
# print '***', url
# errores.append(url)
#catsh.sync()
catsh.close()
import json
cat = json.load(open('catalogoprog.json'))
items = cat.get('items')
ld = {}
for n in nitems:
ld[n.get('url')] = n
for it in items:
if it.get('type') == 'ta':
n = ld.get(it.get('url'))
if not n:
print '***', it
else:
for k in 'destinatarios fechas gestor solicitud descripcion masinfo'.split():
it[k] = n.get(k)
json.dump(cat, open('catalogoprog.json', 'w'))
'''
gestor = re.compile('RGANO GESTOR</h2>\W*([^<]*)', re.DOTALL)
item['nombre'] = sel.xpath('//h1[@class="pageTitle"]//text()').extract()
item['gestor'] = sel.re(gestor)
item['programa'] = sel.xpath('//h1[@id="toc0"]//text()').extract()
item['gestor'] = sel.xpath()
item['name'] = site.xpath('a/text()').extract()
item['url'] = site.xpath('a/@href').extract()
item['description'] = site.xpath('text()').re('-\s([^\n]*?)\\n')
items.append(item)
yield item
nombre = scrapy.Field()
gestor = scrapy.Field()
descripcion = scrapy.Field()
destinatarios = scrapy.Field()
solicitud = scrapy.Field()
fechas = scrapy.Field()
financiacion = scrapy.Field()
masinfo = scrapy.Field()
apartado = scrapy.Field()
return items
def parse(self, response):
for h3 in response.xpath('//h3').extract():
yield MyItem(title=h3)
for url in response.xpath('//a/@href').extract():
yield scrapy.Request(url, callback=self.parse)
http://catalogo00.wikispaces.com/Reconocimiento+de+buenas+pr%C3%A1cticas+de+educaci%C3%B3n+inclusiva+y+de+convivencia.+Centros+p%C3%BAblicos+-+concertados
http://catalogo00.wikispaces.com/Reconocimiento+de+buenas+pr%C3%A1cticas+de+educaci%C3%B3n+inclusiva+y+de+convivencia.+Centros+p%C3%BAblicos+-+concertados
"http://catalogo1.wikispaces.com/Indice+Educaci%C3%B3n+Inclusiva",
"http://catalogo2.wikispaces.com/Indice+aprender+a+aprender",
"http://catalogo3.wikispaces.com/Indice+Convive+y+Concilia",
"http://catalogo4.wikispaces.com/Indice+excelencia+acad%C3%A9mica",
"http://catalogo5.wikispaces.com/Indice+actuaciones+otros+departamentos",
"http://catalogo6.wikispaces.com/Indice+entidades+privadas",
]
'''
| apache-2.0 | 9,024,883,634,799,330,000 | 27.855422 | 154 | 0.603549 | false |
tzuhsienli/resume-site | resume/settings.py | 1 | 6235 | # Django settings for resume project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Administrator', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'resumedb', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': 'ligang',
'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '3306', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '8ybb^f*xwt6s=5=q5$b$qlrw=yg2y=i)3o4yexun&(&k2jv)*5'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'resume.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'resume.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
# RESTful service framework supports
'rest_framework',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# A RESTful API configuration
# Any global settings for a REST framework API are kept in a
# single configuration dictionary named REST_FRAMEWORK.
# Start off by adding the following to your settings.py module:
REST_FRAMEWORK = {
# Use hyperlinked styles by default.
# Only used if the `serializer_class` attribute is not set on a view.
'DEFAULT_MODEL_SERIALIZER_CLASS':
'rest_framework.serializers.HyperlinkedModelSerializer',
# Use Django's standard `django.contrib.auth` permissions,
# or allow read-only access for unauthenticated users.
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly',
'rest_framework.permissions.IsAdminUser'
],
'PAGINATE_BY': 10
}
| mit | 4,027,110,781,063,915,000 | 33.832402 | 136 | 0.691259 | false |
outkaj/xpython | list-ops/list_ops_test.py | 1 | 3318 | import unittest
import operator
from list_ops import *
class ListOpsTest(unittest.TestCase):
# tests for map
def test_map_square(self):
self.assertEqual(
(1, 4, 9, 16, 25, 36, 49, 64, 81, 100),
tuple(map_clone(
lambda x: x**2, (1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
)
)
def test_map_cube(self):
self.assertEqual(
(-1, 8, -27, 64, -125, 216, -343, 512, -729, 1000),
tuple(map_clone(
lambda x: x**3, (-1, 2, -3, 4, -5, 6, -7, 8, -9, 10))
)
)
def test_map_absolute(self):
self.assertEqual(
(1, 2, 3, 4, 5, 6, 7, 8, 9, 10),
tuple(map_clone(
lambda x: abs(x), (-1, 2, -3, 4, -5, 6, -7, 8, -9, 10))
)
)
# tests for length
def test_pos_leng(self):
self.assertEqual(10, length((-1, 2, -3, 4, -5, 6, -7, 8, -9, 10)))
def test_empty_len(self):
self.assertEqual(0, length([]))
# tests for filter
def test_filter_odd(self):
self.assertEqual(
(1, 3, 5),
tuple(filter_clone(lambda x: x % 2 != 0, [1, 2, 3, 4, 5, 6]))
)
def test_filter_even(self):
self.assertEqual(
(2, 4, 6),
tuple(filter_clone(lambda x: x % 2 == 0, [1, 2, 3, 4, 5, 6]))
)
# tests for reverse
def test_reverse_small(self):
self.assertEqual([1, 2, 3], reverse([3, 2, 1]))
def test_reverse_mixed_types(self):
self.assertEqual(
(1, "cat", 4.0, "xyz"),
reverse(("xyz", 4.0, "cat", 1))
)
def test_reverse_empty(self):
self.assertEqual([], reverse(()))
# tests for append
def test_append_tuple(self):
self.assertEqual(
["10", "python", "hello"],
append(["10", "python"], "hello")
)
def test_append_range(self):
self.assertEqual([100, range(1000)], append([100], range(1000)))
# tests for foldl
def test_foldl_sum(self):
self.assertEqual(21, foldl(operator.add, [1, 2, 3, 4, 5, 6], 0))
def test_foldl_product(self):
self.assertEqual(720, foldl(operator.mul, [1, 2, 3, 4, 5, 6], 1))
def test_foldl_minus(self):
self.assertEqual(-15, foldl(operator.sub, [1, 2, 3, 4, 5], 0))
# tests for foldr
def test_foldr_quotient(self):
try:
self.assertEqual(0, foldr(operator.floordiv, [1, 2, 3, 4, 5], 1))
except ZeroDivisionError as e:
pass
def test_foldr_minus(self):
self.assertEqual(
3, foldr((lambda x, y: operator.sub(x, y)), (1, 2, 3, 4, 5), 0)
)
# tests for flatten
def test_flatten_nested(self):
self.assertEqual([1, 2, 3, 4], flat([[[1, 2], [3]], [[4]]]))
def test_flatten_once(self):
self.assertEqual(["x", "y", "z"], flat([["x", "y", "z"]]))
# tests for concat
def test_concat_two(self):
self.assertEqual(
[1, 3, 5, 8, 9, 4, 5, 6],
concat([1, 3, 5, 8], [9, 4, 5, 6])
)
def test_concat_nothing(self):
self.assertEqual(
["orange", "apple", "banana"],
concat(['orange', 'apple', 'banana'], None)
)
if __name__ == '__main__':
unittest.main()
| mit | 6,382,588,370,581,128,000 | 26.65 | 77 | 0.484027 | false |
nehudesi/MSim | module/tsutil.py | 1 | 32434 | '''
Version: MRT v3.0
Type: Library
Location: C:\MRT3.0\module
Author: Chintan Patel
Email: [email protected]
'''
import math
import datetime as dt
#import numpy as np
import module.qsdateutil as qsdateutil
from math import sqrt
import pandas as pd
from copy import deepcopy
import matplotlib.pyplot as plt
#import random as rand
#import module.DataAccess as da
import module.qsdateutil as du
import numpy as np
def daily(lfFunds):
"""
@summary Computes daily returns centered around 0
    @param lfFunds: A time series containing daily fund values
@return an array of daily returns
"""
if type(lfFunds) == type(pd.Series()):
ldt_timestamps = du.getNYSEdays(lfFunds.index[0], lfFunds.index[-1], dt.timedelta(hours=16))
lfFunds = lfFunds.reindex(index=ldt_timestamps, method='ffill')
nds = np.asarray(deepcopy(lfFunds))
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
returnize0(nds)
return(nds)
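# Illustrative example (prices are hypothetical): a plain list or 1-D array of fund
# values is converted to a column of daily returns centered on 0.
#   daily([100.0, 110.0, 99.0])   # -> array([[ 0. ], [ 0.1], [-0.1]])
# A pandas Series is first reindexed to NYSE trading days before the same conversion.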
# def daily1(lfFunds):
# """
# @summary Computes daily returns centered around 1
# @param funds: A time series containing daily fund values
# @return an array of daily returns
# """
# nds = np.asarray(deepcopy(lfFunds))
# s= np.shape(nds)
# if len(s)==1:
# nds=np.expand_dims(nds,1)
# returnize1(nds)
# return(nds)
# def monthly(funds):
# """
# @summary Computes monthly returns centered around 0
# @param funds: A time series containing daily fund values
# @return an array of monthly returns
# """
# funds2 = []
# last_last_month = -1
# years = qsdateutil.getYears(funds)
# for year in years:
# months = qsdateutil.getMonths(funds, year)
# for month in months:
# last_this_month = qsdateutil.getLastDay(funds, year, month)
# if last_last_month == -1 :
# last_last_month=qsdateutil.getFirstDay(funds, year, month)
# if type(funds).__name__=='TimeSeries':
# funds2.append(funds[last_this_month]/funds[last_last_month]-1)
# else:
# funds2.append(funds.xs(last_this_month)/funds.xs(last_last_month)-1)
# last_last_month = last_this_month
# return(funds2)
def average_monthly(funds):
"""
@summary Computes average monthly returns centered around 0
@param funds: A time series containing daily fund values
@return an array of average monthly returns
"""
rets = daily(funds)
ret_i = 0
years = qsdateutil.getYears(funds)
averages = []
for year in years:
months = qsdateutil.getMonths(funds, year)
for month in months:
avg = 0
count = 0
days = qsdateutil.getDays(funds, year, month)
for day in days:
avg += rets[ret_i]
ret_i += 1
count += 1
averages.append(float(avg) / count)
return(averages)
def fillforward(nds):
"""
@summary Removes NaNs from a 2D array by scanning forward in the
1st dimension. If a cell is NaN, the value above it is carried forward.
@param nds: the array to fill forward
@return the array is revised in place
"""
for col in range(nds.shape[1]):
for row in range(1, nds.shape[0]):
if math.isnan(nds[row, col]):
nds[row, col] = nds[row-1, col]
def fillbackward(nds):
"""
@summary Removes NaNs from a 2D array by scanning backward in the
    1st dimension. If a cell is NaN, the value below it is carried backward.
@param nds: the array to fill backward
@return the array is revised in place
"""
for col in range(nds.shape[1]):
for row in range(nds.shape[0] - 2, -1, -1):
if math.isnan(nds[row, col]):
nds[row, col] = nds[row+1, col]
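# Illustrative example of the two NaN-filling helpers (arrays are hypothetical):
#   a = np.array([[1.0], [np.nan], [3.0]]); fillforward(a)    # a -> [[1.], [1.], [3.]]
#   b = np.array([[np.nan], [2.0], [3.0]]); fillbackward(b)   # b -> [[2.], [2.], [3.]]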
def returnize0(nds):
"""
@summary Computes stepwise (usually daily) returns relative to 0, where
0 implies no change in value.
    @return the array of returns; an ndarray input is also revised in place
"""
if type(nds) == type(pd.DataFrame()):
nds = (nds / nds.shift(1)) - 1.0
nds = nds.fillna(0.0)
return nds
s= np.shape(nds)
if len(s)==1:
nds=np.expand_dims(nds,1)
nds[1:, :] = (nds[1:, :] / nds[0:-1]) - 1
nds[0, :] = np.zeros(nds.shape[1])
return nds
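# Illustrative example (prices are hypothetical):
#   returnize0(np.array([[100.0], [110.0], [99.0]]))   # -> [[ 0. ], [ 0.1], [-0.1]]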
# def returnize1(nds):
# """
# @summary Computes stepwise (usually daily) returns relative to 1, where
# 1 implies no change in value.
# @param nds: the array to fill backward
# @return the array is revised in place
# """
# if type(nds) == type(pd.DataFrame()):
# nds = nds / nds.shift(1)
# nds = nds.fillna(1.0)
# return nds
#
# s= np.shape(nds)
# if len(s)==1:
# nds=np.expand_dims(nds,1)
# nds[1:, :] = (nds[1:, :]/nds[0:-1])
# nds[0, :] = np.ones(nds.shape[1])
# return nds
# def priceize1(nds):
# """
# @summary Computes stepwise (usually daily) returns relative to 1, where
# 1 implies no change in value.
# @param nds: the array to fill backward
# @return the array is revised in place
# """
#
# nds[0, :] = 100
# for i in range(1, nds.shape[0]):
# nds[i, :] = nds[i-1, :] * nds[i, :]
#
# def logreturnize(nds):
# """
# @summary Computes stepwise (usually daily) logarithmic returns.
# @param nds: the array to fill backward
# @return the array is revised in place
# """
# returnize1(nds)
# nds = np.log(nds)
# return nds
# def get_winning_days( rets):
# """
# @summary Returns the percentage of winning days of the returns.
# @param rets: 1d numpy array or fund list of daily returns (centered on 0)
# @return Percentage of winning days
# """
# negative_rets = []
# for i in rets:
# if(i<0):
# negative_rets.append(i)
# return 100 * (1 - float(len(negative_rets)) / float(len(rets)))
# def get_max_draw_down(ts_vals):
# """
# @summary Returns the max draw down of the returns.
# @param ts_vals: 1d numpy array or fund list
# @return Max draw down
# """
# MDD = 0
# DD = 0
# peak = -99999
# for value in ts_vals:
# if (value > peak):
# peak = value
# else:
# DD = (peak - value) / peak
# if (DD > MDD):
# MDD = DD
# return -1*MDD
def get_sortino_ratio( rets, risk_free=0.00 ):
"""
@summary Returns the daily Sortino ratio of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@param risk_free: risk free return, default is 0%
@return Sortino Ratio, computed off daily returns
"""
rets = np.asarray(rets)
f_mean = np.mean( rets, axis=0 )
negative_rets = rets[rets < 0]
f_dev = np.std( negative_rets, axis=0 )
f_sortino = (f_mean*252 - risk_free) / (f_dev * np.sqrt(252))
return f_sortino
def get_sharpe_ratio( rets, risk_free=0.00 ):
"""
@summary Returns the daily Sharpe ratio of the returns.
@param rets: 1d numpy array or fund list of daily returns (centered on 0)
@param risk_free: risk free returns, default is 0%
    @return Annualized Sharpe ratio, computed from daily returns
"""
f_dev = np.std( rets, axis=0 )
f_mean = np.mean( rets, axis=0 )
f_sharpe = (f_mean *252 - risk_free) / ( f_dev * np.sqrt(252) )
return f_sharpe
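# Illustrative usage of the two risk-adjusted ratios (prices below are hypothetical):
#   rets = daily([100.0, 101.0, 100.5, 101.5, 100.2])
#   get_sharpe_ratio(rets)    # annualized Sharpe ratio (assumes 252 trading days)
#   get_sortino_ratio(rets)   # same idea, but penalizes only downside deviation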
# def get_ror_annual( rets ):
# """
# @summary Returns the rate of return annualized. Assumes len(rets) is number of days.
# @param rets: 1d numpy array or list of daily returns
# @return Annualized rate of return, not converted to percent
# """
#
# f_inv = 1.0
# for f_ret in rets:
# f_inv = f_inv * f_ret
#
# f_ror_ytd = f_inv - 1.0
#
# #print ' RorYTD =', f_inv, 'Over days:', len(rets)
#
# return ( (1.0 + f_ror_ytd)**( 1.0/(len(rets)/252.0) ) ) - 1.0
# def getPeriodicRets( dmPrice, sOffset ):
# """
# @summary Reindexes a DataMatrix price array and returns the new periodic returns.
# @param dmPrice: DataMatrix of stock prices
# @param sOffset: Offset string to use, choose from _offsetMap in pandas/core/datetools.py
# e.g. 'EOM', 'WEEKDAY', 'W@FRI', 'A@JAN'. Or use a pandas DateOffset.
# """
#
# # Could possibly use DataMatrix.asfreq here """
# # Use pandas DateRange to create the dates we want, use 4:00 """
# drNewRange = DateRange(dmPrice.index[0], dmPrice.index[-1], timeRule=sOffset)
# drNewRange += DateOffset(hours=16)
#
# dmPrice = dmPrice.reindex( drNewRange, method='ffill' )
#
# returnize1( dmPrice.values )
#
# # Do not leave return of 1.0 for first time period: not accurate """
# return dmPrice[1:]
def getReindexedRets( rets, l_period ):
"""
@summary Reindexes returns using the cumulative product. E.g. if returns are 1.5 and 1.5, a period of 2 will
produce a 2-day return of 2.25. Note, these must be returns centered around 1.
@param rets: Daily returns of the various stocks (using returnize1)
@param l_period: New target period.
@note: Note that this function does not track actual weeks or months, it only approximates with trading days.
You can use 5 for week, or 21 for month, etc.
"""
    naCumData = np.cumprod(rets, axis=0)
    lNewRows = (rets.shape[0] - 1) / l_period
    # We compress data into height / l_period + 1 new rows
    for i in range( lNewRows ):
        lCurInd = -1 - i*l_period
        # Just hold new data in same array;
        # new return is cumprod on day x / cumprod on day x - l_period
        start = naCumData[lCurInd - l_period, :]
        naCumData[-1 - i, :] = naCumData[lCurInd, :] / start
    # Select new returns from end of cumulative array
return naCumData[-lNewRows:, ]
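# Illustrative usage (hypothetical data): compress daily returns centered on 1
# (1.01 == +1%) into approximate weekly returns by cumulating over 5 trading
# days.
#
#   na_daily = np.ones((21, 3)) * 1.01
#   na_weekly = getReindexedRets(na_daily, 5)   # each new row is ~1.01 ** 5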
def getOptPort(rets, f_target, l_period=1, naLower=None, naUpper=None, lNagDebug=0):
"""
@summary Returns the Markowitz optimum portfolio for a specific return.
@param rets: Daily returns of the various stocks (using returnize1)
@param f_target: Target return, i.e. 0.04 = 4% per period
@param l_period: Period to compress the returns to, e.g. 7 = weekly
@param naLower: List of floats which corresponds to lower portfolio% for each stock
@param naUpper: List of floats which corresponds to upper portfolio% for each stock
@return tuple: (weights of portfolio, min possible return, max possible return)
"""
    # Attempt to import library
    try:
        import nagint as nag
    except ImportError:
        print 'Could not import NAG library'
        print 'make sure nagint.so is in your python path'
        return ([], 0, 0)
    # Get number of stocks
    lStocks = rets.shape[1]
    # If period != 1 we need to restructure the data
    if( l_period != 1 ):
        rets = getReindexedRets( rets, l_period)
    # Calculate means and covariance
    naAvgRets = np.average( rets, axis=0 )
    naCov = np.cov( rets, rowvar=False )
    # Special case for None == f_target:
    # simply return average returns and cov
    if( f_target is None ):
        return naAvgRets, np.std(rets, axis=0)
    # Calculate upper and lower limits of variables as well as constraints
if( naUpper is None ):
naUpper = np.ones( lStocks ) # max portfolio % is 1
if( naLower is None ):
naLower = np.zeros( lStocks ) # min is 0, set negative for shorting
    # Two extra constraints for linear conditions:
    # result = desired return, and sum of weights = 1
    naUpper = np.append( naUpper, [f_target, 1.0] )
    naLower = np.append( naLower, [f_target, 1.0] )
    # Initial estimate of portfolio
    naInitial = np.array([1.0/lStocks]*lStocks)
    # Set up constraints matrix,
    # composed of expected returns in row one, unity row in row two
    naConstraints = np.vstack( (naAvgRets, np.ones(lStocks)) )
    # Get portfolio weights, last entry in array is actually variance
try:
naReturn = nag.optPort( naConstraints, naLower, naUpper, \
naCov, naInitial, lNagDebug )
except RuntimeError:
print 'NAG Runtime error with target: %.02lf'%(f_target)
return ( naInitial, sqrt( naCov[0][0] ) )
#return semi-junk to not mess up the rest of the plot
    # Calculate stdev of entire portfolio to return;
    # what NAG returns is slightly different
    fPortDev = np.std( np.dot(rets, naReturn[0,0:-1]) )
    # Show difference between above stdev and sqrt NAG covariance,
    # possibly not taking correlation into account
    #print fPortDev / sqrt(naReturn[0, -1])
    # Return weights and stdDev of portfolio;
    # note again the last value of naReturn is NAG's reported variance
return (naReturn[0, 0:-1], fPortDev)
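# Illustrative usage (hypothetical names; only works when the optional NAG
# bindings imported above are available):
#
#   # na_rets: daily returns of the stocks (see the docstring above)
#   (na_weights, f_std) = getOptPort(na_rets, f_target=0.04, l_period=1)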
def OptPort( naData, fTarget, naLower=None, naUpper=None, naExpected=None, s_type = "long"):
"""
@summary Returns the Markowitz optimum portfolio for a specific return.
@param naData: Daily returns of the various stocks (using returnize1)
@param fTarget: Target return, i.e. 0.04 = 4% per period
@param lPeriod: Period to compress the returns to, e.g. 7 = weekly
@param naLower: List of floats which corresponds to lower portfolio% for each stock
@param naUpper: List of floats which corresponds to upper portfolio% for each stock
@return tuple: (weights of portfolio, min possible return, max possible return)
"""
''' Attempt to import library '''
try:
from cvxopt import matrix
from cvxopt.blas import dot
from cvxopt.solvers import qp, options
except ImportError:
print 'Could not import CVX library'
raise
''' Get number of stocks '''
length = naData.shape[1]
b_error = False
naLower = deepcopy(naLower)
naUpper = deepcopy(naUpper)
naExpected = deepcopy(naExpected)
# Assuming AvgReturns as the expected returns if parameter is not specified
    if naExpected is None:
naExpected = np.average( naData, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
naData = na_signs*naData
naExpected = na_signs*naExpected
# Covariance matrix of the Data Set
naCov=np.cov(naData, rowvar=False)
# If length is one, just return 100% single symbol
if length == 1:
return (list(na_signs), np.std(naData, axis=0)[0], False)
if length == 0:
return ([], [0], False)
# If we have 0/1 "free" equity we can't optimize
# We just use limits since we are stuck with 0 degrees of freedom
''' Special case for None == fTarget, simply return average returns and cov '''
if( fTarget is None ):
return (naExpected, np.std(naData, axis=0), b_error)
    # Upper bound of the weights of an equity; if not specified, assumed to be 1.
if(naUpper is None):
naUpper= np.ones(length)
    # Lower bound of the weights of an equity; if not specified, assumed to be 0 (no shorting case).
if(naLower is None):
naLower= np.zeros(length)
if sum(naLower) == 1:
fPortDev = np.std(np.dot(naData, naLower))
return (naLower, fPortDev, False)
if sum(naUpper) == 1:
fPortDev = np.std(np.dot(naData, naUpper))
return (naUpper, fPortDev, False)
naFree = naUpper != naLower
if naFree.sum() <= 1:
lnaPortfolios = naUpper.copy()
# If there is 1 free we need to modify it to make the total
# Add up to 1
if naFree.sum() == 1:
f_rest = naUpper[~naFree].sum()
lnaPortfolios[naFree] = 1.0 - f_rest
lnaPortfolios = na_signs * lnaPortfolios
fPortDev = np.std(np.dot(naData, lnaPortfolios))
return (lnaPortfolios, fPortDev, False)
    # Double the diagonal elements of the covariance matrix when calculating risk.
for i in range(length):
naCov[i][i]=2*naCov[i][i]
# Note, returns are modified to all be long from here on out
(fMin, fMax) = getRetRange(False, naLower, naUpper, naExpected, "long")
#print (fTarget, fMin, fMax)
if fTarget<fMin or fTarget>fMax:
print "<<<(i) Target not achievable..", fTarget, fMin, fMax
b_error = True
naLower = naLower*(-1)
    # Setting up the parameters for the CVXOPT library; it takes inputs in matrix format.
'''
The Risk minimization problem is a standard Quadratic Programming problem according to the Markowitz Theory.
'''
S=matrix(naCov)
#pbar=matrix(naExpected)
naLower.shape=(length,1)
naUpper.shape=(length,1)
naExpected.shape = (1,length)
zeo=matrix(0.0,(length,1))
I = np.eye(length)
minusI=-1*I
G=matrix(np.vstack((I, minusI)))
h=matrix(np.vstack((naUpper, naLower)))
ones=matrix(1.0,(1,length))
A=matrix(np.vstack((naExpected, ones)))
b=matrix([float(fTarget),1.0])
# Optional Settings for CVXOPT
options['show_progress'] = False
options['abstol']=1e-25
options['reltol']=1e-24
options['feastol']=1e-25
# Optimization Calls
# Optimal Portfolio
try:
lnaPortfolios = qp(S, -zeo, G, h, A, b)['x']
except:
b_error = True
if b_error == True:
print "<<<(i) Optimization not Possible"
na_port = naLower*-1
if sum(na_port) < 1:
if sum(naUpper) == 1:
na_port = naUpper
else:
i=0
while(sum(na_port)<1 and i<25):
naOrder = naUpper - na_port
i = i+1
indices = np.where(naOrder > 0)
na_port[indices]= na_port[indices] + (1-sum(na_port))/len(indices[0])
naOrder = naUpper - na_port
indices = np.where(naOrder < 0)
na_port[indices]= naUpper[indices]
lnaPortfolios = matrix(na_port)
lnaPortfolios = (na_signs.reshape(-1,1) * lnaPortfolios).reshape(-1)
# Expected Return of the Portfolio
# lfReturn = dot(pbar, lnaPortfolios)
# Risk of the portfolio
fPortDev = np.std(np.dot(naData, lnaPortfolios))
return (lnaPortfolios, fPortDev, b_error)
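# Illustrative usage (hypothetical bounds; requires cvxopt):
#
#   na_lower = np.zeros(na_rets.shape[1])
#   na_upper = np.ones(na_rets.shape[1])
#   (na_weights, f_dev, b_error) = OptPort(na_rets, 0.04, na_lower, na_upper)
#
# b_error is True when the target lies outside the achievable [fMin, fMax]
# range or the QP solver fails; the returned weights are then derived from the
# bounds instead of the optimization.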
def getRetRange( rets, naLower, naUpper, naExpected = "False", s_type = "long"):
"""
@summary Returns the range of possible returns with upper and lower bounds on the portfolio participation
@param rets: Expected returns
@param naLower: List of lower percentages by stock
@param naUpper: List of upper percentages by stock
@return tuple containing (fMin, fMax)
"""
    # Calculate theoretical minimum and maximum returns
fMin = 0
fMax = 0
rets = deepcopy(rets)
if naExpected == "False":
naExpected = np.average( rets, axis=0 )
na_signs = np.sign(naExpected)
indices, = np.where(na_signs == 0)
na_signs[indices] = 1
if s_type == "long":
na_signs = np.ones(len(na_signs))
elif s_type == "short":
na_signs = np.ones(len(na_signs))*(-1)
rets = na_signs*rets
naExpected = na_signs*naExpected
naSortInd = naExpected.argsort()
    # First add the lower bounds on portfolio participation
for i, fRet in enumerate(naExpected):
fMin = fMin + fRet*naLower[i]
fMax = fMax + fRet*naLower[i]
    # Now calculate minimum returns:
    # allocate the max possible in worst performing equities.
    # Subtract min since we have already counted it.
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMin = fMin + fRetAdd
        # Check if this additional percent puts us over the limit
if fTotalPercent > 1.0:
fMin = fMin - naExpected[lInd] * (fTotalPercent - 1.0)
break
    # Repeat for max, just reverse the sort, i.e. high to low
naUpperAdd = naUpper - naLower
fTotalPercent = np.sum(naLower[:])
for i, lInd in enumerate(naSortInd[::-1]):
fRetAdd = naUpperAdd[lInd] * naExpected[lInd]
fTotalPercent = fTotalPercent + naUpperAdd[lInd]
fMax = fMax + fRetAdd
        # Check if this additional percent puts us over the limit
if fTotalPercent > 1.0:
fMax = fMax - naExpected[lInd] * (fTotalPercent - 1.0)
break
return (fMin, fMax)
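# Worked example (hypothetical numbers): with naExpected = [0.02, 0.01],
# naLower = [0, 0] and naUpper = [1, 1], the minimum is achieved by allocating
# 100% to the 0.01 stock (fMin = 0.01) and the maximum by allocating 100% to
# the 0.02 stock (fMax = 0.02).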
def _create_dict(df_rets, lnaPortfolios):
allocations = {}
for i, sym in enumerate(df_rets.columns):
allocations[sym] = lnaPortfolios[i]
return allocations
def optimizePortfolio(df_rets, list_min, list_max, list_price_target,
target_risk, direction="long"):
naLower = np.array(list_min)
naUpper = np.array(list_max)
naExpected = np.array(list_price_target)
b_same_flag = np.all( naExpected == naExpected[0])
if b_same_flag and (naExpected[0] == 0):
naExpected = naExpected + 0.1
if b_same_flag:
na_randomness = np.ones(naExpected.shape)
target_risk = 0
for i in range(len(na_randomness)):
if i%2 ==0:
na_randomness[i] = -1
naExpected = naExpected + naExpected*0.0000001*na_randomness
(fMin, fMax) = getRetRange( df_rets.values, naLower, naUpper,
naExpected, direction)
    # Try to avoid intractable endpoints due to rounding errors
fMin += abs(fMin) * 0.00000000001
fMax -= abs(fMax) * 0.00000000001
if target_risk == 1:
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fMax, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': b_error}
fStep = (fMax - fMin) / 50.0
lfReturn = [fMin + x * fStep for x in range(51)]
lfStd = []
lnaPortfolios = []
for fTarget in lfReturn:
(naWeights, fStd, b_error) = OptPort( df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
if b_error == False:
lfStd.append(fStd)
lnaPortfolios.append( naWeights )
else:
# Return error on ANY failed optimization
allocations = _create_dict(df_rets, np.zeros(df_rets.shape[1]))
return {'allocations': allocations, 'std_dev': 0.0,
'expected_return': fMax, 'error': True}
if len(lfStd) == 0:
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fMax, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fMax, 'error': True}
f_return = lfReturn[lfStd.index(min(lfStd))]
if target_risk == 0:
naPortWeights=lnaPortfolios[lfStd.index(min(lfStd))]
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': min(lfStd), 'expected_return': f_return, 'error': False}
# If target_risk = 0.5, then return the one with maximum sharpe
if target_risk == 0.5:
lf_return_new = np.array(lfReturn)
lf_std_new = np.array(lfStd)
lf_std_new = lf_std_new[lf_return_new >= f_return]
lf_return_new = lf_return_new[lf_return_new >= f_return]
na_sharpe = lf_return_new / lf_std_new
i_index_max_sharpe, = np.where(na_sharpe == max(na_sharpe))
i_index_max_sharpe = i_index_max_sharpe[0]
fTarget = lf_return_new[i_index_max_sharpe]
(naPortWeights, fPortDev, b_error) = OptPort(df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fTarget, 'error': b_error}
# Otherwise try to hit custom target between 0-1 min-max return
fTarget = f_return + ((fMax - f_return) * target_risk)
(naPortWeights, fPortDev, b_error) = OptPort( df_rets.values, fTarget, naLower, naUpper, naExpected, direction)
allocations = _create_dict(df_rets, naPortWeights)
return {'allocations': allocations, 'std_dev': fPortDev, 'expected_return': fTarget, 'error': b_error}
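# Illustrative usage (hypothetical inputs; df_rets is a pandas DataFrame of
# daily returns, one column per symbol):
#
#   n = len(df_rets.columns)
#   d_result = optimizePortfolio(df_rets,
#                                list_min=[0.0] * n,
#                                list_max=[1.0] * n,
#                                list_price_target=list(df_rets.mean().values),
#                                target_risk=0.5)
#
# target_risk=0 picks the minimum-variance portfolio, 1 the maximum-return one,
# 0.5 the maximum-Sharpe point in between; other values interpolate the target
# return between those extremes. Note that if every price target is identical
# the function falls back to the minimum-variance solution.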
def getFrontier( rets, lRes=100, fUpper=0.2, fLower=0.00):
"""
@summary Generates an efficient frontier based on average returns.
@param rets: Array of returns to use
@param lRes: Resolution of the curve, default=100
@param fUpper: Upper bound on portfolio percentage
@param fLower: Lower bound on portfolio percentage
@return tuple containing (lf_ret, lfStd, lnaPortfolios)
lf_ret: List of returns provided by each point
lfStd: list of standard deviations provided by each point
lnaPortfolios: list of numpy arrays containing weights for each portfolio
"""
    # Limit/enforce percent participation
naUpper = np.ones(rets.shape[1]) * fUpper
naLower = np.ones(rets.shape[1]) * fLower
(fMin, fMax) = getRetRange( rets, naLower, naUpper )
    # Try to avoid intractable endpoints due to rounding errors
fMin *= 1.0000001
fMax *= 0.9999999
    # Calculate target returns from min and max
lf_ret = []
for i in range(lRes):
lf_ret.append( (fMax - fMin) * i / (lRes - 1) + fMin )
lfStd = []
lnaPortfolios = []
    # Call the function lRes times for the given range, use 1 for period
for f_target in lf_ret:
(naWeights, fStd) = getOptPort( rets, f_target, 1, \
naUpper=naUpper, naLower=naLower )
lfStd.append(fStd)
lnaPortfolios.append( naWeights )
    # plot frontier
    #plt.plot( lfStd, lf_ret )
    plt.plot( np.std( rets, axis=0 ), np.average( rets, axis=0 ), \
               'g+', markersize=10 )
    #plt.show()
return (lf_ret, lfStd, lnaPortfolios)
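# Illustrative usage (hypothetical data; assumes matplotlib's pyplot is
# available as plt, as the plotting call above implies):
#
#   (lf_ret, lf_std, lna_ports) = getFrontier(na_rets, lRes=50, fUpper=0.25)
#   # plt.plot(lf_std, lf_ret) would then draw the frontier curve itself.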
# def stockFilter( dmPrice, dmVolume, fNonNan=0.95, fPriceVolume=100*1000 ):
# """
# @summary Returns the list of stocks filtered based on various criteria.
# @param dmPrice: DataMatrix of stock prices
# @param dmVolume: DataMatrix of stock volumes
# @param fNonNan: Optional non-nan percent, default is .95
# @param fPriceVolume: Optional price*volume, default is 100,000
# @return list of stocks which meet the criteria
# """
#
# lsRetStocks = list( dmPrice.columns )
#
# for sStock in dmPrice.columns:
# fValid = 0.0
# print sStock
# # loop through all dates """
# for dtDate in dmPrice.index:
# # Count null (nan/inf/etc) values """
# fPrice = dmPrice[sStock][dtDate]
# if( not isnull(fPrice) ):
# fValid = fValid + 1
# # else test price volume """
# fVol = dmVolume[sStock][dtDate]
# if( not isnull(fVol) and fVol * fPrice < fPriceVolume ):
# lsRetStocks.remove( sStock )
# break
#
# # Remove if too many nan values """
# if( fValid / len(dmPrice.index) < fNonNan and sStock in lsRetStocks ):
# lsRetStocks.remove( sStock )
#
# return lsRetStocks
#
#
# def getRandPort( lNum, dtStart=None, dtEnd=None, lsStocks=None,\
# dmPrice=None, dmVolume=None, bFilter=True, fNonNan=0.95,\
# fPriceVolume=100*1000, lSeed=None ):
# """
# @summary Returns a random portfolio based on certain criteria.
# @param lNum: Number of stocks to be included
# @param dtStart: Start date for portfolio
# @param dtEnd: End date for portfolio
# @param lsStocks: Optional list of ticker symbols, if not provided all symbols will be used
# @param bFilter: If False, stocks are not filtered by price or volume data, simply return random Portfolio.
# @param dmPrice: Optional price data, if not provided, data access will be queried
# @param dmVolume: Optional volume data, if not provided, data access will be queried
# @param fNonNan: Optional non-nan percent for filter, default is .95
# @param fPriceVolume: Optional price*volume for filter, default is 100,000
# @warning: Does not work for all sets of optional inputs, e.g. if you don't include dtStart, dtEnd, you need
# to include dmPrice/dmVolume
# @return list of stocks which meet the criteria
# """
#
# if( lsStocks is None ):
# if( dmPrice is None and dmVolume is None ):
# norObj = da.DataAccess('Norgate')
# lsStocks = norObj.get_all_symbols()
# elif( not dmPrice is None ):
# lsStocks = list(dmPrice.columns)
# else:
# lsStocks = list(dmVolume.columns)
#
# if( dmPrice is None and dmVolume is None and bFilter == True ):
# norObj = da.DataAccess('Norgate')
# ldtTimestamps = du.getNYSEdays( dtStart, dtEnd, dt.timedelta(hours=16) )
#
# # if dmPrice and dmVol are provided then we don't query it every time """
# bPullPrice = False
# bPullVol = False
# if( dmPrice is None ):
# bPullPrice = True
# if( dmVolume is None ):
# bPullVol = True
#
# # Default seed (none) uses system clock """
# rand.seed(lSeed)
# lsRetStocks = []
#
# # Loop until we have enough randomly selected stocks """
# llRemainingIndexes = range(0,len(lsStocks))
# lsValid = None
# while( len(lsRetStocks) != lNum ):
#
# lsCheckStocks = []
# for i in range( lNum - len(lsRetStocks) ):
# lRemaining = len(llRemainingIndexes)
# if( lRemaining == 0 ):
# print 'Error in getRandPort: ran out of stocks'
# return lsRetStocks
#
# # Pick a stock and remove it from the list of remaining stocks """
# lPicked = rand.randint(0, lRemaining-1)
# lsCheckStocks.append( lsStocks[ llRemainingIndexes.pop(lPicked) ] )
#
# # If bFilter is false"""
# # simply return our first list of stocks, don't check prive/vol """
# if( not bFilter ):
# return sorted(lsCheckStocks)
#
#
# # Get data if needed """
# if( bPullPrice ):
# dmPrice = norObj.get_data( ldtTimestamps, lsCheckStocks, 'close' )
#
# # Get data if needed """
# if( bPullVol ):
# dmVolume = norObj.get_data(ldtTimestamps, lsCheckStocks, 'volume' )
#
# # Only query this once if data is provided"""
# # else query every time with new data """
# if( lsValid is None or bPullVol or bPullPrice ):
# lsValid = stockFilter(dmPrice, dmVolume, fNonNan, fPriceVolume)
#
# for sAdd in lsValid:
# if sAdd in lsCheckStocks:
# lsRetStocks.append( sAdd )
#
# return sorted(lsRetStocks)
| agpl-3.0 | -8,922,782,742,476,446,000 | 35.452656 | 118 | 0.588981 | false |
pipermerriam/flex | tests/loading/schema/paths/operation/responses/test_response_validation.py | 1 | 2139 | import pytest
from flex.error_messages import MESSAGES
from flex.exceptions import (
ValidationError,
)
from flex.loading.schema.paths.path_item.operation.responses import (
responses_validator,
)
def test_description_is_required(msg_assertions):
with pytest.raises(ValidationError) as err:
responses_validator({
200: {},
})
msg_assertions.assert_message_in_errors(
MESSAGES['required']['required'],
err.value.detail,
'200.required.description',
)
def test_response_as_reference_missing_description(msg_assertions):
responses = {
200: {
'$ref': '#/responses/SomeResponse'
},
}
context = {
'responses': {
'SomeResponse': {},
},
}
try:
responses_validator(responses, context=context)
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('200', errors)
def test_with_description(msg_assertions):
try:
responses_validator({
200: {'description': 'A Description'},
})
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('200', errors)
def test_with_description_in_reference(msg_assertions):
responses = {
200: {'$ref': '#/responses/SomeResponse'},
}
context = {
'responses': {
'SomeResponse': {'description': 'A Description'},
},
}
try:
responses_validator(responses, context=context)
except ValidationError as err:
errors = err.detail
else:
errors = {}
msg_assertions.assert_path_not_in_errors('200', errors)
def test_with_missing_reference(msg_assertions):
responses = {
200: {'$ref': '#/responses/UnknownReference'},
}
with pytest.raises(ValidationError) as err:
responses_validator(responses, context={})
msg_assertions.assert_message_in_errors(
MESSAGES['reference']['undefined'],
err.value.detail,
'200.$ref',
)
| mit | -4,137,606,633,608,327,700 | 23.033708 | 69 | 0.603086 | false |
klmitch/nova | nova/tests/unit/scheduler/test_utils.py | 1 | 80648 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
import os_resource_classes as orc
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.scheduler.client import report
from nova.scheduler import utils
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit.scheduler import fakes
class FakeResourceRequest(object):
"""A fake of ``nova.scheduler.utils.ResourceRequest``.
Allows us to assert that various properties of a real ResourceRequest
object are set as we'd like them to be.
"""
def __init__(self):
self._rg_by_id = {}
self._group_policy = None
self._limit = 1000
class TestUtilsBase(test.NoDBTestCase):
def setUp(self):
super(TestUtilsBase, self).setUp()
self.context = nova_context.get_admin_context()
self.mock_host_manager = mock.Mock()
def assertResourceRequestsEqual(self, expected, observed):
self.assertEqual(expected._limit, observed._limit)
self.assertEqual(expected._group_policy, observed._group_policy)
ex_by_id = expected._rg_by_id
ob_by_id = observed._rg_by_id
self.assertEqual(set(ex_by_id), set(ob_by_id))
for ident in ex_by_id:
self.assertEqual(vars(ex_by_id[ident]), vars(ob_by_id[ident]))
@ddt.ddt
class TestUtils(TestUtilsBase):
def _test_resources_from_request_spec(self, expected, flavor, image=None):
if image is None:
image = objects.ImageMeta(properties=objects.ImageMetaProps())
fake_spec = objects.RequestSpec(flavor=flavor, image=image)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
return resources
def test_resources_from_request_spec_flavor_only(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_resources_from_request_spec_flavor_req_traits(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'trait:CUSTOM_FLAVOR_TRAIT': 'required'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(['CUSTOM_FLAVOR_TRAIT'])
)
resources = self._test_resources_from_request_spec(
expected_resources, flavor)
expected_result = set(['CUSTOM_FLAVOR_TRAIT'])
self.assertEqual(expected_result, resources.all_required_traits)
def test_resources_from_request_spec_flavor_and_image_traits(self):
image = objects.ImageMeta.from_dict({
'properties': {
'trait:CUSTOM_IMAGE_TRAIT1': 'required',
'trait:CUSTOM_IMAGE_TRAIT2': 'required',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
})
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:CUSTOM_FLAVOR_TRAIT': 'required',
'trait:CUSTOM_IMAGE_TRAIT2': 'required'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
# trait:CUSTOM_IMAGE_TRAIT2 is defined in both extra_specs and
# image metadata. We get a union of both.
'CUSTOM_IMAGE_TRAIT1',
'CUSTOM_IMAGE_TRAIT2',
'CUSTOM_FLAVOR_TRAIT',
}
)
self._test_resources_from_request_spec(expected_resources, flavor,
image)
def test_resources_from_request_spec_flavor_forbidden_trait(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:CUSTOM_FLAVOR_TRAIT': 'forbidden'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
forbidden_traits={
'CUSTOM_FLAVOR_TRAIT',
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_resources_from_request_spec_with_no_disk(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=0,
ephemeral_gb=0,
swap=0)
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_custom_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 15,
"CUSTOM_TEST_CLASS": 1,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_override_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 99,
"resources:MEMORY_MB": 99,
"resources:DISK_GB": 99})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 99,
"MEMORY_MB": 99,
"DISK_GB": 99,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_remove_flavor_amounts(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:VCPU": 0,
"resources:DISK_GB": 0})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"MEMORY_MB": 1024,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_vgpu(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=0,
swap=0,
extra_specs={
"resources:VGPU": 1,
"resources:VGPU_DISPLAY_HEAD": 1})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
"VCPU": 1,
"MEMORY_MB": 1024,
"DISK_GB": 10,
"VGPU": 1,
"VGPU_DISPLAY_HEAD": 1,
}
)
self._test_resources_from_request_spec(expected_resources, flavor)
def test_get_resources_from_request_spec_bad_std_resource_class(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
"resources:DOESNT_EXIST": 0})
fake_spec = objects.RequestSpec(flavor=flavor)
with mock.patch("nova.objects.request_spec.LOG.warning") as mock_log:
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
mock_log.assert_called_once()
args = mock_log.call_args[0]
self.assertEqual(args[0], "Received an invalid ResourceClass "
"'%(key)s' in extra_specs.")
self.assertEqual(args[1], {"key": "DOESNT_EXIST"})
def test_get_resources_from_request_spec_granular(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=0, swap=0,
extra_specs={'resources1:VGPU': '1',
'resources1:VGPU_DISPLAY_HEAD': '2',
# Replace
'resources3:VCPU': '2',
# Stay separate (don't sum)
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom in the unnumbered group (merge with DISK_GB)
'resources:CUSTOM_THING': '123',
# Traits make it through
'trait3:CUSTOM_SILVER': 'required',
'trait3:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': '0',
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': '0',
'resources:CUSTOM_FOO': '0',
# Bogus values don't make it through
'resources1:MEMORY_MB': 'bogus',
'group_policy': 'none'})
expected_resources = FakeResourceRequest()
expected_resources._group_policy = 'none'
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'DISK_GB': 10,
'CUSTOM_THING': 123,
}
)
expected_resources._rg_by_id['1'] = objects.RequestGroup(
requester_id='1',
resources={
'VGPU': 1,
'VGPU_DISPLAY_HEAD': 2,
}
)
expected_resources._rg_by_id['3'] = objects.RequestGroup(
requester_id='3',
resources={
'VCPU': 2,
},
required_traits={
'CUSTOM_GOLD',
'CUSTOM_SILVER',
}
)
expected_resources._rg_by_id['24'] = objects.RequestGroup(
requester_id='24',
resources={
'SRIOV_NET_VF': 2,
},
)
expected_resources._rg_by_id['42'] = objects.RequestGroup(
requester_id='42',
resources={
'SRIOV_NET_VF': 1,
}
)
rr = self._test_resources_from_request_spec(expected_resources, flavor)
expected_querystring = (
'group_policy=none&'
'limit=1000&'
'required3=CUSTOM_GOLD%2CCUSTOM_SILVER&'
'resources=CUSTOM_THING%3A123%2CDISK_GB%3A10&'
'resources1=VGPU%3A1%2CVGPU_DISPLAY_HEAD%3A2&'
'resources24=SRIOV_NET_VF%3A2&'
'resources3=VCPU%3A2&'
'resources42=SRIOV_NET_VF%3A1'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def test_all_required_traits(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={
'trait:HW_CPU_X86_SSE': 'required',
'trait:HW_CPU_X86_AVX': 'required',
'trait:HW_CPU_X86_AVX2': 'forbidden'})
expected_resources = FakeResourceRequest()
expected_resources._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
'HW_CPU_X86_SSE',
'HW_CPU_X86_AVX'
},
forbidden_traits={
'HW_CPU_X86_AVX2'
}
)
resource = self._test_resources_from_request_spec(expected_resources,
flavor)
expected_result = {'HW_CPU_X86_SSE', 'HW_CPU_X86_AVX'}
self.assertEqual(expected_result,
resource.all_required_traits)
def test_resources_from_request_spec_aggregates(self):
destination = objects.Destination()
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor,
requested_destination=destination)
destination.require_aggregates(['foo', 'bar'])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([['foo', 'bar']],
req.get_request_group(None).aggregates)
destination.require_aggregates(['baz'])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([['foo', 'bar'], ['baz']],
req.get_request_group(None).aggregates)
def test_resources_from_request_spec_no_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination = objects.Destination()
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
reqspec.requested_destination.aggregates = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual([], req.get_request_group(None).aggregates)
def test_resources_from_request_spec_forbidden_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(
flavor=flavor,
requested_destination=objects.Destination(
forbidden_aggregates=set(['foo', 'bar'])))
req = utils.resources_from_request_spec(self.context, reqspec,
self.mock_host_manager)
self.assertEqual(set(['foo', 'bar']),
req.get_request_group(None).forbidden_aggregates)
def test_resources_from_request_spec_no_forbidden_aggregates(self):
flavor = objects.Flavor(vcpus=1, memory_mb=1024,
root_gb=1, ephemeral_gb=0,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination = objects.Destination()
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
reqspec.requested_destination.forbidden_aggregates = None
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual(set([]), req.get_request_group(None).
forbidden_aggregates)
def test_process_extra_specs_granular_called(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": 1})
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_extra_specs_granular_not_called(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_missing_extra_specs_value(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs={"resources:CUSTOM_TEST_CLASS": ""})
fake_spec = objects.RequestSpec(flavor=flavor)
# just call this to make sure things don't explode
utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
def test_process_no_force_hosts_or_force_nodes(self):
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rr = self._test_resources_from_request_spec(expected, flavor)
expected_querystring = (
'limit=1000&'
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def test_process_use_force_nodes(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='fake-host',
uuid='12345678-1234-1234-1234-123456789012',
hypervisor_hostname='test')])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_nodes=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, None, 'test', cell=None)
def test_process_use_force_hosts(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='test',
uuid='12345678-1234-1234-1234-123456789012')
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, 'test', None, cell=None)
def test_process_use_force_hosts_multinodes_found(self):
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='test',
uuid='12345678-1234-1234-1234-123456789012'),
objects.ComputeNode(host='test',
uuid='87654321-4321-4321-4321-210987654321'),
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(flavor=flavor, force_hosts=['test'])
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
# Validate that the limit is unset
expected._limit = None
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
# Validate that the limit is unset
expected_querystring = (
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(self.context, 'test', None, cell=None)
def test_process_use_requested_destination(self):
fake_cell = objects.CellMapping(uuid=uuids.cell1, name='foo')
destination = objects.Destination(
host='fake-host', node='fake-node', cell=fake_cell)
fake_nodes = objects.ComputeNodeList(objects=[
objects.ComputeNode(host='fake-host',
uuid='12345678-1234-1234-1234-123456789012',
hypervisor_hostname='fake-node')
])
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
return_value = fake_nodes
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, requested_destination=destination)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
in_tree='12345678-1234-1234-1234-123456789012',
)
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'in_tree=12345678-1234-1234-1234-123456789012&'
'limit=1000&resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1')
self.assertEqual(expected_querystring, resources.to_querystring())
self.mock_host_manager.get_compute_nodes_by_host_or_node.\
assert_called_once_with(
self.context, 'fake-host', 'fake-node', cell=fake_cell)
def test_resources_from_request_spec_having_requested_resources(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
rg1 = objects.RequestGroup(
resources={'CUSTOM_FOO': 1}, requester_id='The-first-group')
# Leave requester_id out to trigger ValueError
rg2 = objects.RequestGroup(required_traits={'CUSTOM_BAR'})
reqspec = objects.RequestSpec(flavor=flavor,
requested_resources=[rg1, rg2])
self.assertRaises(
ValueError,
utils.resources_from_request_spec,
self.context, reqspec, self.mock_host_manager)
# Set conflicting requester_id
rg2.requester_id = 'The-first-group'
self.assertRaises(
exception.RequestGroupSuffixConflict,
utils.resources_from_request_spec,
self.context, reqspec, self.mock_host_manager)
# Good path: nonempty non-conflicting requester_id
rg2.requester_id = 'The-second-group'
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertIs(rg1, req.get_request_group('The-first-group'))
self.assertIs(rg2, req.get_request_group('The-second-group'))
# Make sure those ended up as suffixes correctly
qs = req.to_querystring()
self.assertIn('resourcesThe-first-group=CUSTOM_FOO%3A1', qs)
self.assertIn('requiredThe-second-group=CUSTOM_BAR', qs)
def test_resources_from_request_spec_requested_resources_unfilled(self):
flavor = objects.Flavor(
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0)
reqspec = objects.RequestSpec(flavor=flavor)
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertEqual(1, len(list(req._rg_by_id)))
reqspec = objects.RequestSpec(flavor=flavor, requested_resources=[])
req = utils.resources_from_request_spec(
self.context, reqspec, self.mock_host_manager)
self.assertEqual({'MEMORY_MB': 1024, 'DISK_GB': 15, 'VCPU': 1},
req.get_request_group(None).resources)
self.assertEqual(1, len(list(req._rg_by_id)))
@ddt.data(
# Test single hint that we are checking for.
{'group': [uuids.fake]},
# Test hint we care about and some other random hint.
{'same_host': [uuids.fake], 'fake-hint': ['fake-value']},
# Test multiple hints we are checking for.
{'same_host': [uuids.server1], 'different_host': [uuids.server2]})
def test_resources_from_request_spec_no_limit_based_on_hint(self, hints):
"""Tests that there is no limit applied to the
GET /allocation_candidates query string if a given scheduler hint
is in the request spec.
"""
flavor = objects.Flavor(vcpus=1,
memory_mb=1024,
root_gb=15,
ephemeral_gb=0,
swap=0)
fake_spec = objects.RequestSpec(
flavor=flavor, scheduler_hints=hints)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
expected._limit = None
resources = utils.resources_from_request_spec(
self.context, fake_spec, self.mock_host_manager)
self.assertResourceRequestsEqual(expected, resources)
expected_querystring = (
'resources=DISK_GB%3A15%2CMEMORY_MB%3A1024%2CVCPU%3A1'
)
self.assertEqual(expected_querystring, resources.to_querystring())
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
def test_resources_from_flavor_no_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 16,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=True)
def test_resources_from_flavor_bfv(self, mock_is_bfv):
flavor = objects.Flavor(vcpus=1, memory_mb=1024, root_gb=10,
ephemeral_gb=5, swap=1024,
extra_specs={})
instance = objects.Instance()
expected = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 6, # No root disk...
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_resources_from_flavor_with_override(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1024,
extra_specs={
# Replace
'resources:VCPU': '2',
# Sum up
'resources42:SRIOV_NET_VF': '1',
'resources24:SRIOV_NET_VF': '2',
# Ignore
'some:bogus': 'value',
# Custom
'resources:CUSTOM_THING': '123',
# Ignore
'trait:CUSTOM_GOLD': 'required',
# Delete standard
'resources86:MEMORY_MB': 0,
# Standard and custom zeroes don't make it through
'resources:IPV4_ADDRESS': 0,
'resources:CUSTOM_FOO': 0,
'group_policy': 'none'})
instance = objects.Instance()
expected = {
'VCPU': 2,
'DISK_GB': 16,
'CUSTOM_THING': 123,
'SRIOV_NET_VF': 3,
}
actual = utils.resources_from_flavor(instance, flavor)
self.assertEqual(expected, actual)
def test_resource_request_init(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_extra_specs(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources:VCPU': '2',
'resources:MEMORY_MB': '2048',
'trait:HW_CPU_X86_AVX': 'required',
# Key skipped because no colons
'nocolons': '42',
'trait:CUSTOM_MAGIC': 'required',
'trait:CUSTOM_BRONZE': 'forbidden',
# Resource skipped because invalid resource class name
'resources86:CUTSOM_MISSPELLED': '86',
'resources1:SRIOV_NET_VF': '1',
# Resource skipped because non-int-able value
'resources86:CUSTOM_FOO': 'seven',
# Resource skipped because negative value
'resources86:CUSTOM_NEGATIVE': '-7',
'resources1:IPV4_ADDRESS': '1',
# Trait skipped because unsupported value
'trait86:CUSTOM_GOLD': 'preferred',
'trait1:CUSTOM_PHYSNET_NET1': 'required',
'trait1:CUSTOM_PHYSNET_NET2': 'forbidden',
'resources2:SRIOV_NET_VF': '1',
'resources2:IPV4_ADDRESS': '2',
'trait2:CUSTOM_PHYSNET_NET2': 'required',
'trait2:HW_NIC_ACCEL_SSL': 'required',
# Groupings that don't quite match the patterns are ignored
'resources_*5:SRIOV_NET_VF': '7',
'traitFoo$:HW_NIC_ACCEL_SSL': 'required',
# Solo resource, no corresponding traits
'resources3:DISK_GB': '5',
'group_policy': 'isolate',
})
expected = FakeResourceRequest()
expected._group_policy = 'isolate'
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 2,
'MEMORY_MB': 2048,
},
required_traits={
'HW_CPU_X86_AVX',
'CUSTOM_MAGIC',
},
forbidden_traits={
'CUSTOM_BRONZE',
},
)
expected._rg_by_id['1'] = objects.RequestGroup(
requester_id='1',
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 1,
},
required_traits={
'CUSTOM_PHYSNET_NET1',
},
forbidden_traits={
'CUSTOM_PHYSNET_NET2',
},
)
expected._rg_by_id['2'] = objects.RequestGroup(
requester_id='2',
resources={
'SRIOV_NET_VF': 1,
'IPV4_ADDRESS': 2,
},
required_traits={
'CUSTOM_PHYSNET_NET2',
'HW_NIC_ACCEL_SSL',
}
)
expected._rg_by_id['3'] = objects.RequestGroup(
requester_id='3',
resources={
'DISK_GB': 5,
}
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
expected_querystring = (
'group_policy=isolate&'
'limit=1000&'
'required=CUSTOM_MAGIC%2CHW_CPU_X86_AVX%2C%21CUSTOM_BRONZE&'
'required1=CUSTOM_PHYSNET_NET1%2C%21CUSTOM_PHYSNET_NET2&'
'required2=CUSTOM_PHYSNET_NET2%2CHW_NIC_ACCEL_SSL&'
'resources=MEMORY_MB%3A2048%2CVCPU%3A2&'
'resources1=IPV4_ADDRESS%3A1%2CSRIOV_NET_VF%3A1&'
'resources2=IPV4_ADDRESS%3A2%2CSRIOV_NET_VF%3A1&'
'resources3=DISK_GB%3A5'
)
self.assertEqual(expected_querystring, rr.to_querystring())
def _test_resource_request_init_with_legacy_extra_specs(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'hw:cpu_policy': 'dedicated',
'hw:cpu_thread_policy': 'isolate',
'hw:emulator_threads_policy': 'isolate',
})
return objects.RequestSpec(flavor=flavor, is_bfv=False)
def test_resource_request_init_with_legacy_extra_specs(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have two PCPUs, one due to hw:cpu_policy and the
# other due to hw:cpu_thread_policy
'PCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
forbidden_traits={
# we should forbid hyperthreading due to hw:cpu_thread_policy
'HW_CPU_HYPERTHREADING',
},
)
rs = self._test_resource_request_init_with_legacy_extra_specs()
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
self.assertTrue(rr.cpu_pinning_requested)
def test_resource_request_init_with_legacy_extra_specs_no_translate(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a VCPU despite hw:cpu_policy because
# enable_pinning_translate=False
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
            # we should not forbid hyperthreading despite hw:cpu_thread_policy
# because enable_pinning_translate=False
forbidden_traits=set(),
)
rs = self._test_resource_request_init_with_legacy_extra_specs()
rr = utils.ResourceRequest(rs, enable_pinning_translate=False)
self.assertResourceRequestsEqual(expected, rr)
self.assertFalse(rr.cpu_pinning_requested)
def test_resource_request_init_with_image_props(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
image = objects.ImageMeta.from_dict({
'properties': {
'trait:CUSTOM_TRUSTED': 'required',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd'
})
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
'CUSTOM_TRUSTED',
}
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def _test_resource_request_init_with_legacy_image_props(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
image = objects.ImageMeta.from_dict({
'properties': {
'hw_cpu_policy': 'dedicated',
'hw_cpu_thread_policy': 'require',
},
'id': 'c8b1790e-a07d-4971-b137-44f2432936cd',
})
return objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
def test_resource_request_init_with_legacy_image_props(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a PCPU due to hw_cpu_policy
'PCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits={
# we should require hyperthreading due to hw_cpu_thread_policy
'HW_CPU_HYPERTHREADING',
},
)
rs = self._test_resource_request_init_with_legacy_image_props()
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
self.assertTrue(rr.cpu_pinning_requested)
def test_resource_request_init_with_legacy_image_props_no_translate(self):
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# we should have a VCPU despite hw_cpu_policy because
# enable_pinning_translate=False
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
# we should not require hyperthreading despite hw_cpu_thread_policy
# because enable_pinning_translate=False
required_traits=set(),
)
rs = self._test_resource_request_init_with_legacy_image_props()
rr = utils.ResourceRequest(rs, enable_pinning_translate=False)
self.assertResourceRequestsEqual(expected, rr)
self.assertFalse(rr.cpu_pinning_requested)
def _test_resource_request_init_with_mixed_cpus(self, extra_specs):
flavor = objects.Flavor(
vcpus=4, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs=extra_specs)
rs = objects.RequestSpec(flavor=flavor)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'PCPU': 2,
'VCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(),
)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_mixed_cpus_dedicated(self):
"""Ensure the mixed instance, which is generated through
'hw:cpu_dedicated_mask' extra spec, properly requests the PCPU, VCPU,
MEMORY_MB and DISK_GB resources.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
'hw:cpu_dedicated_mask': '2,3'
}
self._test_resource_request_init_with_mixed_cpus(extra_specs)
def test_resource_request_init_with_mixed_cpus_realtime(self):
"""Ensure the mixed instance, which is generated through real-time CPU
interface, properly requests the PCPU, VCPU, MEMORY_BM and DISK_GB
resources.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
"hw:cpu_realtime": "yes",
"hw:cpu_realtime_mask": '2,3'
}
self._test_resource_request_init_with_mixed_cpus(extra_specs)
def _test_resource_request_init_with_mixed_cpus_iso_emu(self, extra_specs):
flavor = objects.Flavor(
vcpus=4, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs=extra_specs)
rs = objects.RequestSpec(flavor=flavor)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
# An extra PCPU resource is requested due to 'ISOLATE' emulator
# thread policy.
'PCPU': 3,
'VCPU': 2,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
required_traits=set(),
)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_init_with_mixed_cpus_iso_emu_realtime(self):
"""Ensure the mixed instance, which is generated through the
'hw:cpu_dedicated_mask' extra spec, specs, properly requests the PCPU,
VCPU, MEMORY_MB, DISK_GB resources, ensure an extra PCPU resource is
requested due to a ISOLATE emulator thread policy.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
'hw:cpu_dedicated_mask': '2,3',
'hw:emulator_threads_policy': 'isolate',
}
self._test_resource_request_init_with_mixed_cpus_iso_emu(extra_specs)
def test_resource_request_init_with_mixed_cpus_iso_emu_dedicated(self):
"""Ensure the mixed instance, which is generated through realtime extra
specs, properly requests the PCPU, VCPU, MEMORY_MB, DISK_GB resources,
ensure an extra PCPU resource is requested due to a ISOLATE emulator
thread policy.
"""
extra_specs = {
'hw:cpu_policy': 'mixed',
"hw:cpu_realtime": "yes",
"hw:cpu_realtime_mask": '2,3',
'hw:emulator_threads_policy': 'isolate',
}
self._test_resource_request_init_with_mixed_cpus_iso_emu(extra_specs)
def test_resource_request_init_is_bfv(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=1555)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
# this should only include the ephemeral and swap disk, and the
# latter should be converted from MB to GB and rounded up
'DISK_GB': 7,
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=True)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vpmems(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:pmem': '4GB, 4GB,SMALL'})
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
'CUSTOM_PMEM_NAMESPACE_4GB': 2,
'CUSTOM_PMEM_NAMESPACE_SMALL': 1
},
)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vtpm_1_2(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:tpm_version': '1.2', 'hw:tpm_model': 'tpm-tis'},
)
image = objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_tpm_version='1.2',
hw_tpm_model='tpm-tis',
)
)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
required_traits={'COMPUTE_SECURITY_TPM_1_2'},
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_with_vtpm_2_0(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={'hw:tpm_version': '2.0', 'hw:tpm_model': 'tpm-crb'},
)
image = objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_tpm_version='2.0',
hw_tpm_model='tpm-crb',
)
)
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
required_traits={'COMPUTE_SECURITY_TPM_2_0'},
resources={
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
},
)
rs = objects.RequestSpec(flavor=flavor, image=image, is_bfv=False)
rr = utils.ResourceRequest(rs)
self.assertResourceRequestsEqual(expected, rr)
def test_resource_request_add_group_inserts_the_group(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
req = utils.ResourceRequest(rs)
rg1 = objects.RequestGroup(requester_id='foo',
required_traits={'CUSTOM_FOO'})
req._add_request_group(rg1)
rg2 = objects.RequestGroup(requester_id='bar',
forbidden_traits={'CUSTOM_BAR'})
req._add_request_group(rg2)
self.assertIs(rg1, req.get_request_group('foo'))
self.assertIs(rg2, req.get_request_group('bar'))
def test_empty_groups_forbidden(self):
"""Not allowed to add premade RequestGroup without resources/traits/
aggregates.
"""
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0)
rs = objects.RequestSpec(flavor=flavor, is_bfv=False)
req = utils.ResourceRequest(rs)
rg = objects.RequestGroup(requester_id='foo')
self.assertRaises(ValueError, req._add_request_group, rg)
def test_claim_resources_on_destination_no_source_allocations(self):
"""Tests the negative scenario where the instance does not have
allocations in Placement on the source compute node so no claim is
attempted on the destination compute node.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
@mock.patch.object(reportclient,
'get_allocs_for_consumer',
return_value={})
@mock.patch.object(reportclient,
'claim_resources',
new_callable=mock.NonCallableMock)
def test(mock_claim, mock_get_allocs):
ex = self.assertRaises(
exception.ConsumerAllocationRetrievalFailed,
utils.claim_resources_on_destination,
self.context, reportclient, instance, source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, instance.uuid)
self.assertIn(
'Expected to find allocations for source node resource '
'provider %s' % source_node.uuid, str(ex))
test()
def test_claim_resources_on_destination_claim_fails(self):
"""Tests the negative scenario where the resource allocation claim
on the destination compute node fails, resulting in an error.
"""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
'allocations': {
uuids.source_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but
# we're lazy.
'DISK_GB': instance.root_gb
}
}
},
'consumer_generation': 1,
'project_id': uuids.project_id,
'user_id': uuids.user_id
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
'DISK_GB': instance.root_gb
}
}
},
}
@mock.patch.object(reportclient,
'get_allocs_for_consumer',
return_value=source_res_allocs)
@mock.patch.object(reportclient,
'claim_resources', return_value=False)
def test(mock_claim, mock_get_allocs):
# NOTE(danms): Don't pass source_node_allocations here to test
# that they are fetched if needed.
self.assertRaises(exception.NoValidHost,
utils.claim_resources_on_destination,
self.context, reportclient, instance,
source_node, dest_node)
mock_get_allocs.assert_called_once_with(
self.context, instance.uuid)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.28', consumer_generation=1)
test()
def test_claim_resources_on_destination(self):
"""Happy path test where everything is successful."""
reportclient = report.SchedulerReportClient()
instance = fake_instance.fake_instance_obj(self.context)
source_node = objects.ComputeNode(
uuid=uuids.source_node, host=instance.host)
dest_node = objects.ComputeNode(uuid=uuids.dest_node, host='dest-host')
source_res_allocs = {
uuids.source_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
# This would really include ephemeral and swap too but
# we're lazy.
'DISK_GB': instance.root_gb
}
}
}
dest_alloc_request = {
'allocations': {
uuids.dest_node: {
'resources': {
'VCPU': instance.vcpus,
'MEMORY_MB': instance.memory_mb,
'DISK_GB': instance.root_gb
}
}
},
}
@mock.patch.object(reportclient,
'get_allocs_for_consumer')
@mock.patch.object(reportclient,
'claim_resources', return_value=True)
def test(mock_claim, mock_get_allocs):
utils.claim_resources_on_destination(
self.context, reportclient, instance, source_node, dest_node,
source_res_allocs, consumer_generation=None)
self.assertFalse(mock_get_allocs.called)
mock_claim.assert_called_once_with(
self.context, instance.uuid, dest_alloc_request,
instance.project_id, instance.user_id,
allocation_request_version='1.28', consumer_generation=None)
test()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources(self, mock_is_rebuild, mock_client):
"""Tests that when claim_resources() is called, that we appropriately
call the placement client to claim resources for the instance.
"""
mock_is_rebuild.return_value = False
ctx = nova_context.RequestContext(user_id=uuids.user_id)
spec_obj = objects.RequestSpec(project_id=uuids.project_id)
instance_uuid = uuids.instance
alloc_req = mock.sentinel.alloc_req
mock_client.claim_resources.return_value = True
res = utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.user_id, allocation_request_version=None,
consumer_generation=None)
self.assertTrue(res)
# Now do it again but with RequestSpec.user_id set.
spec_obj.user_id = uuids.spec_user_id
mock_client.reset_mock()
utils.claim_resources(ctx, mock_client, spec_obj, instance_uuid,
alloc_req)
mock_client.claim_resources.assert_called_once_with(
ctx, uuids.instance, mock.sentinel.alloc_req, uuids.project_id,
uuids.spec_user_id, allocation_request_version=None,
consumer_generation=None)
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch('nova.scheduler.utils.request_is_rebuild')
def test_claim_resources_for_policy_check(self, mock_is_rebuild,
mock_client):
mock_is_rebuild.return_value = True
ctx = mock.Mock(user_id=uuids.user_id)
res = utils.claim_resources(ctx, None, mock.sentinel.spec_obj,
mock.sentinel.instance_uuid, [])
self.assertTrue(res)
mock_is_rebuild.assert_called_once_with(mock.sentinel.spec_obj)
self.assertFalse(mock_client.claim_resources.called)
def test_get_weight_multiplier(self):
host_attr = {'vcpus_total': 4, 'vcpus_used': 6,
'cpu_allocation_ratio': 1.0}
host1 = fakes.FakeHostState('fake-host', 'node', host_attr)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': 'invalid'},
)]
        # Fall back to the given default value if the agg meta is invalid.
self.assertEqual(
1.0,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.9'},
)]
# Get value from aggregate metadata
self.assertEqual(
1.9,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
host1.aggregates = [
objects.Aggregate(
id=1,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.9'}),
objects.Aggregate(
id=2,
name='foo',
hosts=['fake-host'],
metadata={'cpu_weight_multiplier': '1.8'}),
]
# Get min value from aggregate metadata
self.assertEqual(
1.8,
utils.get_weight_multiplier(host1, 'cpu_weight_multiplier', 1.0)
)
def _set_up_and_fill_provider_mapping(self, requested_resources):
request_spec = objects.RequestSpec()
request_spec.requested_resources = requested_resources
allocs = {
uuids.rp_uuid1: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
},
uuids.rp_uuid2: {
'resources': {
'NET_BW_INGR_KILOBIT_PER_SEC': 1,
}
}
}
mappings = {
uuids.port_id1: [uuids.rp_uuid2],
uuids.port_id2: [uuids.rp_uuid1],
}
allocation_req = {'allocations': allocs, 'mappings': mappings}
selection = objects.Selection(
allocation_request=jsonutils.dumps(allocation_req))
# Unmapped initially
for rg in requested_resources:
self.assertEqual([], rg.provider_uuids)
utils.fill_provider_mapping(request_spec, selection)
def test_fill_provider_mapping(self):
rg1 = objects.RequestGroup(requester_id=uuids.port_id1)
rg2 = objects.RequestGroup(requester_id=uuids.port_id2)
self._set_up_and_fill_provider_mapping([rg1, rg2])
# Validate the mappings
self.assertEqual([uuids.rp_uuid2], rg1.provider_uuids)
self.assertEqual([uuids.rp_uuid1], rg2.provider_uuids)
def test_fill_provider_mapping_no_op(self):
# This just proves that having 'mappings' in the allocation request
# doesn't break anything.
self._set_up_and_fill_provider_mapping([])
@mock.patch.object(objects.RequestSpec,
'map_requested_resources_to_providers')
def test_fill_provider_mapping_based_on_allocation_returns_early(
self, mock_map):
context = nova_context.RequestContext()
request_spec = objects.RequestSpec()
        # set up the request so that there is nothing to do
request_spec.requested_resources = []
report_client = mock.sentinel.report_client
allocation = mock.sentinel.allocation
utils.fill_provider_mapping_based_on_allocation(
context, report_client, request_spec, allocation)
mock_map.assert_not_called()
@mock.patch('nova.scheduler.client.report.SchedulerReportClient')
@mock.patch.object(objects.RequestSpec,
'map_requested_resources_to_providers')
def test_fill_provider_mapping_based_on_allocation(
self, mock_map, mock_report_client):
context = nova_context.RequestContext()
request_spec = objects.RequestSpec()
        # set up the request with a single requested resource group
request_spec.requested_resources = [objects.RequestGroup()]
allocation = {
uuids.rp_uuid: {
'resources': {
'NET_BW_EGR_KILOBIT_PER_SEC': 1,
}
}
}
traits = ['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL']
mock_report_client.get_provider_traits.return_value = report.TraitInfo(
traits=['CUSTOM_PHYSNET1', 'CUSTOM_VNIC_TYPE_NORMAL'],
generation=0)
utils.fill_provider_mapping_based_on_allocation(
context, mock_report_client, request_spec, allocation)
mock_map.assert_called_once_with(allocation, {uuids.rp_uuid: traits})
class TestEncryptedMemoryTranslation(TestUtilsBase):
flavor_name = 'm1.test'
image_name = 'cirros'
def _get_request_spec(self, extra_specs, image):
flavor = objects.Flavor(name=self.flavor_name,
vcpus=1,
memory_mb=1024,
root_gb=10,
ephemeral_gb=5,
swap=0,
extra_specs=extra_specs)
# NOTE(aspiers): RequestSpec.flavor is not nullable, but
# RequestSpec.image is.
reqspec = objects.RequestSpec(flavor=flavor)
if image:
reqspec.image = image
return reqspec
def _get_resource_request(self, extra_specs, image):
reqspec = self._get_request_spec(extra_specs, image)
return utils.ResourceRequest(reqspec)
def _get_expected_resource_request(self, mem_encryption_context):
expected_resources = {
'VCPU': 1,
'MEMORY_MB': 1024,
'DISK_GB': 15,
}
if mem_encryption_context:
expected_resources[orc.MEM_ENCRYPTION_CONTEXT] = 1
expected = FakeResourceRequest()
expected._rg_by_id[None] = objects.RequestGroup(
use_same_provider=False,
resources=expected_resources)
return expected
def _test_encrypted_memory_support_not_required(self, extra_specs,
image=None):
resreq = self._get_resource_request(extra_specs, image)
expected = self._get_expected_resource_request(False)
self.assertResourceRequestsEqual(expected, resreq)
def test_encrypted_memory_support_empty_extra_specs(self):
self._test_encrypted_memory_support_not_required(extra_specs={})
def test_encrypted_memory_support_false_extra_spec(self):
for extra_spec in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={'hw:mem_encryption': extra_spec})
def test_encrypted_memory_support_empty_image_props(self):
self._test_encrypted_memory_support_not_required(
extra_specs={},
image=objects.ImageMeta(properties=objects.ImageMetaProps()))
def test_encrypted_memory_support_false_image_prop(self):
for image_prop in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={},
image=objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop))
)
def test_encrypted_memory_support_both_false(self):
for extra_spec in ('0', 'false', 'False'):
for image_prop in ('0', 'false', 'False'):
self._test_encrypted_memory_support_not_required(
extra_specs={'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop))
)
def _test_encrypted_memory_support_conflict(self, extra_spec,
image_prop_in,
image_prop_out):
# NOTE(aspiers): hw_mem_encryption image property is a
# FlexibleBooleanField, so the result should always be coerced
# to a boolean.
self.assertIsInstance(image_prop_out, bool)
image = objects.ImageMeta(
name=self.image_name,
properties=objects.ImageMetaProps(
hw_mem_encryption=image_prop_in)
)
reqspec = self._get_request_spec(
extra_specs={'hw:mem_encryption': extra_spec},
image=image)
# Sanity check that our test request spec has an extra_specs
# dict, which is needed in order for there to be a conflict.
self.assertIn('flavor', reqspec)
self.assertIn('extra_specs', reqspec.flavor)
error = (
"Flavor %(flavor_name)s has hw:mem_encryption extra spec "
"explicitly set to %(flavor_val)s, conflicting with "
"image %(image_name)s which has hw_mem_encryption property "
"explicitly set to %(image_val)s"
)
exc = self.assertRaises(
exception.FlavorImageConflict,
utils.ResourceRequest, reqspec
)
error_data = {
'flavor_name': self.flavor_name,
'flavor_val': extra_spec,
'image_name': self.image_name,
'image_val': image_prop_out,
}
self.assertEqual(error % error_data, str(exc))
def test_encrypted_memory_support_conflict1(self):
for extra_spec in ('0', 'false', 'False'):
for image_prop_in in ('1', 'true', 'True'):
self._test_encrypted_memory_support_conflict(
extra_spec, image_prop_in, True
)
def test_encrypted_memory_support_conflict2(self):
for extra_spec in ('1', 'true', 'True'):
for image_prop_in in ('0', 'false', 'False'):
self._test_encrypted_memory_support_conflict(
extra_spec, image_prop_in, False
)
@mock.patch.object(utils, 'LOG')
def _test_encrypted_memory_support_required(self, requesters, extra_specs,
mock_log, image=None):
resreq = self._get_resource_request(extra_specs, image)
expected = self._get_expected_resource_request(True)
self.assertResourceRequestsEqual(expected, resreq)
mock_log.debug.assert_has_calls([
mock.call('Added %s=1 to requested resources',
orc.MEM_ENCRYPTION_CONTEXT)
])
def test_encrypted_memory_support_extra_spec(self):
for extra_spec in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw:mem_encryption extra spec',
{'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi'))
)
def test_encrypted_memory_support_image_prop(self):
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw_mem_encryption image property',
{},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
name=self.image_name,
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi',
hw_mem_encryption=image_prop))
)
def test_encrypted_memory_support_both_required(self):
for extra_spec in ('1', 'true', 'True'):
for image_prop in ('1', 'true', 'True'):
self._test_encrypted_memory_support_required(
'hw:mem_encryption extra spec and '
'hw_mem_encryption image property',
{'hw:mem_encryption': extra_spec},
image=objects.ImageMeta(
id='005249be-3c2f-4351-9df7-29bb13c21b14',
name=self.image_name,
properties=objects.ImageMetaProps(
hw_machine_type='q35',
hw_firmware_type='uefi',
hw_mem_encryption=image_prop))
)
class TestResourcesFromRequestGroupDefaultPolicy(test.NoDBTestCase):
"""These test cases assert what happens when the group policy is missing
from the flavor but more than one numbered request group is requested from
    various sources. Note that while the image can provide required traits for
    the resource request, those traits are always added to the unnumbered
    group, so the image cannot be a source of additional numbered groups.
"""
def setUp(self):
super(TestResourcesFromRequestGroupDefaultPolicy, self).setUp()
self.context = nova_context.get_admin_context()
self.port_group1 = objects.RequestGroup.from_port_request(
self.context, uuids.port1,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 1000,
"NET_BW_EGR_KILOBIT_PER_SEC": 1000},
"required": ["CUSTOM_PHYSNET_2",
"CUSTOM_VNIC_TYPE_NORMAL"]
})
self.port_group2 = objects.RequestGroup.from_port_request(
self.context, uuids.port2,
port_resource_request={
"resources": {
"NET_BW_IGR_KILOBIT_PER_SEC": 2000,
"NET_BW_EGR_KILOBIT_PER_SEC": 2000},
"required": ["CUSTOM_PHYSNET_3",
"CUSTOM_VNIC_TYPE_DIRECT"]
})
self.image = objects.ImageMeta(properties=objects.ImageMetaProps())
def test_one_group_from_flavor_dont_warn(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image, requested_resources=[])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertNotIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy=none', rr.to_querystring())
def test_one_group_from_port_dont_warn(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertNotIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy=none', rr.to_querystring())
def test_two_groups_from_flavor_only_warns(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
'resources2:CUSTOM_FOO': '1'
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image, requested_resources=[])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertNotIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertIsNone(rr.group_policy)
self.assertNotIn('group_policy', rr.to_querystring())
def test_one_group_from_flavor_one_from_port_policy_defaulted(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={
'resources1:CUSTOM_BAR': '2',
})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertEqual('none', rr.group_policy)
self.assertIn('group_policy=none', rr.to_querystring())
def test_two_groups_from_ports_policy_defaulted(self):
flavor = objects.Flavor(
vcpus=1, memory_mb=1024, root_gb=10, ephemeral_gb=5, swap=0,
extra_specs={})
request_spec = objects.RequestSpec(
flavor=flavor, image=self.image,
requested_resources=[self.port_group1, self.port_group2])
rr = utils.resources_from_request_spec(
self.context, request_spec, host_manager=mock.Mock())
log = self.stdlog.logger.output
self.assertIn(
"There is more than one numbered request group in the allocation "
"candidate query but the flavor did not specify any group policy.",
log)
self.assertIn(
"To avoid the placement failure nova defaults the group policy to "
"'none'.",
log)
self.assertEqual('none', rr.group_policy)
self.assertIn('group_policy=none', rr.to_querystring())
| apache-2.0 | -4,330,534,855,583,730,700 | 40.614035 | 79 | 0.542183 | false |
willettk/gzhubble | python/fzeta_examples.py | 1 | 5008 | from matplotlib import pyplot as plt
from astropy.table import Table
from scipy import optimize
from scipy.stats import distributions as dist
import numpy as np
# Load data
data = Table.read("../data/ferengi_debiasable_data.fits")
# Optionally keep only galaxies whose surface brightness/redshift ranges are
# considered "debiasable" (the filter below is currently commented out).
data = data  # [data['Correctable_Category']=='correctable']
# Limit to galaxies that have data at z_sim = 0.3, since that's what we're normalizing to.
unique_galaxies = set(data['sdss_id'])
z0ind = np.zeros(len(data),dtype=bool)
for ug in unique_galaxies:
ind = (data['sdss_id'] == ug)
if data[ind]['sim_redshift'].min() < 0.301:
z0ind[ind] = True
data_z0 = data[z0ind]
def fzeta_exp(p,x):
#y = p[0] * np.exp(-1 * (x-p[1])/p[2])
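    # One-parameter decay normalised to 1 at z = 0.3: y = exp(-(x - 0.3) / zeta)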
y = np.exp(-1 * (x-0.3)/p[0])
return y
def fzeta_lin(p,x):
y = p[0] + p[1] * x
return y
def fzeta(p,x):
# results are qualitatively the same for both lin and exp versions
return fzeta_exp(p,x)
def errfunc(p,x,y,s):
err = (y - fzeta(p,x))/s
return err
def errfunc_lin(p,x,y,s):
err = (y - fzeta_lin(p,x))/s
return err
def error_bars(k,n=40,c=0.683):
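    # Binomial confidence interval (flat prior): with k successes out of n the
    # posterior on the fraction is Beta(k+1, n-k+1); return half the central
    # interval of probability c as a symmetric error bar.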
f_gal_lower = dist.beta.ppf((1-c)/2.,k+1,n-k+1)
f_gal_upper = dist.beta.ppf(1-(1-c)/2.,k+1,n-k+1)
f_gal_err = (f_gal_upper - f_gal_lower) / 2.0
return f_gal_err
def common_labels(fig,xlabel=None,ylabel=None,xfontsize=16,yfontsize=40,
xlabelpad=None, ylabelpad=None):
# Set common labels
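    # Add one invisible, full-figure axes and attach the shared labels to it.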
cax = fig.add_subplot(111) # The big subplot
cax.set_axis_bgcolor('none')
cax.spines['top'].set_color('none')
cax.spines['bottom'].set_color('none')
cax.spines['left'].set_color('none')
cax.spines['right'].set_color('none')
cax.tick_params(labelcolor='none', top='off', bottom='off', left='off', right='off')
cax.set_xlabel(xlabel,fontsize=xfontsize, labelpad=xlabelpad)
cax.set_ylabel(ylabel,fontsize=yfontsize, labelpad=ylabelpad)
return cax
p_guess = np.array([0.5])
nrows = 4
ncols = 5
# Set up plot
fig,axarr = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True,figsize=(18,14))
bigax = common_labels(fig,'Redshift',r'$\frac{1-f_{{\rm features},z=0.3}}{1-f_{\rm features}}$',20,28, 12, 12)
zarr = np.linspace(0,1,50)
# For the examples, only plot galaxies with an evolution correction of zero.
evol = 0.0
e0 = data_z0[np.absolute(data_z0['sim_evolution'] - evol) < 0.001]
e0_z0 = e0[e0['sim_redshift'] < 0.35]
#e0_z0 = data_z0
unique_galaxies = []
plist = np.linspace(0.1, 1.0, nrows*ncols+1)
nchoose = 2
for p1, p2 in zip(plist[:-1], plist[1:]):
p_match = (e0_z0['p_features'] > p1) & (e0_z0['p_features'] <= p2)
if p_match.sum() > nchoose:
#print(p_match.sum(), p_match.nonzero()[0], nchoose)
p_match = np.random.choice(p_match.nonzero()[0], nchoose, replace=False)
nchoose = 1
elif p_match.sum() <= nchoose:
#print(p_match.sum(), p_match.nonzero()[0], nchoose)
nchoose = 1 + nchoose - p_match.sum()
p_match = p_match.nonzero()[0]
p_match = p_match[np.argsort(e0_z0['p_features'][p_match])]
unique_galaxies.extend(e0_z0['sdss_id'][p_match])
for ax in axarr.ravel():
if len(unique_galaxies) == 0: break
slen = 0
# Make sure there are enough points to fit a function
while slen < (len(p_guess)+1):
ind = (e0['sdss_id'] == unique_galaxies.pop())
slen = sum(ind)
galaxy1 = e0[ind]
galaxy1.sort('sim_redshift')
z_gal = galaxy1['sim_redshift']
f_gal = galaxy1['p_features']
# ADD ERROR BARS
n = 40 # assume 40 classifications per galaxy; it'd be better to use true value, though
f_gal_err = error_bars(f_gal*n,n)
    f_gal_norm = (1 - f_gal[0]) / (1 - f_gal)
f_gal_norm_err = np.sqrt((f_gal_err/f_gal)**2 + (f_gal_err[0]/f_gal[0])**2) * f_gal_norm
# Values must be explicitly cast as double-type precision for optimization to work. Incredibly frustrating.
# Fix: http://stackoverflow.com/questions/12473406/scipy-optimize-leastsq-returns-best-guess-parameters-not-new-best-fit
p, cov, infodict, mesg, ier = optimize.leastsq(errfunc,p_guess,args=(z_gal.astype(np.float64),
f_gal_norm.astype(np.float64),
f_gal_norm_err.astype(np.float64)),
full_output=1)
ax.plot(z_gal,f_gal_norm,lw=2)
ax.errorbar(z_gal,f_gal_norm, f_gal_norm_err)
ax.plot(zarr,fzeta_exp(p,zarr),'--',lw=1)
zeta = '={:.2f}'.format(p[0]) if p[0] <= 10.0 else '>10'
# ax.set_title('$f_{z=0}={:.2f}\; \zeta{:s}$'.format(f_gal[0], zeta), y=1.01, fontsize=16)
ax.set_title('$f_{features,z=0.3}=%s\; \zeta%s$'%(round(f_gal[0],2),zeta),y=1.01,fontsize=16)
ax.set_xlim(0.11,1.05)
ax.set_ylim(0,2)
fig.subplots_adjust(hspace=0.20, wspace=0.05)
fig.savefig('../writeup/figures/zeta_examples_sorted.pdf')
| mit | -8,789,516,641,017,032,000 | 31.732026 | 124 | 0.608826 | false |
mozilla/kuma | kuma/core/validators.py | 1 | 3885 | # see also: http://github.com/tav/scripts/raw/master/validate_jsonp.py
# Placed into the Public Domain by tav <[email protected]>
"""Validate Javascript Identifiers for use as JSON-P callback parameters."""
import re
from unicodedata import category
# ------------------------------------------------------------------------------
# javascript identifier unicode categories and "exceptional" chars
# ------------------------------------------------------------------------------
valid_jsid_categories_start = frozenset([
'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl'
])
valid_jsid_categories = frozenset([
'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Mn', 'Mc', 'Nd', 'Pc'
])
valid_jsid_chars = ('$', '_')
# ------------------------------------------------------------------------------
# regex to find array[index] patterns
# ------------------------------------------------------------------------------
array_index_regex = re.compile(r'\[[0-9]+\]$')
has_valid_array_index = array_index_regex.search
replace_array_index = array_index_regex.sub
# ------------------------------------------------------------------------------
# javascript reserved words -- including keywords and null/boolean literals
# ------------------------------------------------------------------------------
is_reserved_js_word = frozenset([
'abstract', 'boolean', 'break', 'byte', 'case', 'catch', 'char', 'class',
'const', 'continue', 'debugger', 'default', 'delete', 'do', 'double',
'else', 'enum', 'export', 'extends', 'false', 'final', 'finally', 'float',
'for', 'function', 'goto', 'if', 'implements', 'import', 'in', 'instanceof',
'int', 'interface', 'long', 'native', 'new', 'null', 'package', 'private',
'protected', 'public', 'return', 'short', 'static', 'super', 'switch',
'synchronized', 'this', 'throw', 'throws', 'transient', 'true', 'try',
'typeof', 'var', 'void', 'volatile', 'while', 'with',
# potentially reserved in a future version of the ES5 standard
# 'let', 'yield'
]).__contains__
# ------------------------------------------------------------------------------
# the core validation functions
# ------------------------------------------------------------------------------
def valid_javascript_identifier(identifier, escape='\\u', ucd_cat=category):
"""Return whether the given ``id`` is a valid Javascript identifier."""
if not identifier:
return False
if not isinstance(identifier, str):
try:
identifier = str(identifier, 'utf-8')
except UnicodeDecodeError:
return False
if escape in identifier:
new = []
add_char = new.append
split_id = identifier.split(escape)
add_char(split_id.pop(0))
for segment in split_id:
if len(segment) < 4:
return False
try:
add_char(chr(int('0x' + segment[:4], 16)))
except Exception:
return False
add_char(segment[4:])
identifier = ''.join(new)
if is_reserved_js_word(identifier):
return False
first_char = identifier[0]
if not ((first_char in valid_jsid_chars) or
(ucd_cat(first_char) in valid_jsid_categories_start)):
return False
for char in identifier[1:]:
if not ((char in valid_jsid_chars) or
(ucd_cat(char) in valid_jsid_categories)):
return False
return True
def valid_jsonp_callback_value(value):
"""Return whether the given ``value`` can be used as a JSON-P callback."""
for identifier in value.split('.'):
while '[' in identifier:
if not has_valid_array_index(identifier):
return False
identifier = replace_array_index('', identifier)
if not valid_javascript_identifier(identifier):
return False
return True
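# Illustrative examples (not from the original module):
#   valid_jsonp_callback_value('callback')         -> True
#   valid_jsonp_callback_value('jQuery18._cb[0]')  -> True
#   valid_jsonp_callback_value('alert(document)')  -> False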
| mpl-2.0 | 3,343,287,382,419,359,000 | 32.491379 | 80 | 0.501673 | false |
datascopeanalytics/scrubadub | tests/test_detector_skype.py | 1 | 2411 | import faker
import unittest
from scrubadub.filth import SkypeFilth
from base import BaseTestCase
import scrubadub
class SkypeTestCase(unittest.TestCase, BaseTestCase):
def setUp(self):
from scrubadub.detectors.skype import SkypeDetector
scrubadub.detectors.register_detector(SkypeDetector, autoload=True)
def test_inline_skype_name(self):
"""
BEFORE: contact me on skype (dean.malmgren) to chat
AFTER: contact me on skype ({{SKYPE}}) to chat
"""
self.compare_before_after()
def test_pre_inline_skype_name(self):
"""
BEFORE: i'm dean.malmgren on skype
AFTER: i'm {{SKYPE}} on skype
"""
self.compare_before_after()
def test_parenthetical_skype(self):
"""
BEFORE: i'm on skype (dean.malmgren) or can be reached on my cell
AFTER: i'm on skype ({{SKYPE}}) or can be reached on my cell
"""
self.compare_before_after()
def test_skype_signature(self):
"""
BEFORE: skype: dean.malmgren\nnerd
AFTER: skype: {{SKYPE}}\nnerd
"""
self.compare_before_after()
def test_skype_addition(self):
"""
BEFORE: I have added you on Skype. My ID is dean.malmgren
AFTER: I have added you on Skype. My ID is {{SKYPE}}
"""
self.compare_before_after()
def test_skype_usernames(self):
"""test different skype username formats"""
usernames = (
"joecool",
"joe,cool",
"joe.cool",
"joe-cool",
)
docstring_template ="""
BEFORE: My Skype is %s
AFTER: My Skype is {{SKYPE}}
"""
for username in usernames:
self.compare_before_after(docstring_template % username)
def test_all_caps_words_nearby(self):
"""
BEFORE: SCREAM to get my attention on Skype (dean.malmgren)
AFTER: SCREAM to get my attention on Skype ({{SKYPE}})
"""
self.compare_before_after()
def test_generate(self):
fake = faker.Faker()
faker.Faker.seed(4321)
self.assertEqual(
'rickbrown',
SkypeFilth.generate(faker=fake),
)
def tearDown(self) -> None:
from scrubadub.detectors.skype import SkypeDetector
del scrubadub.detectors.detector_configuration[SkypeDetector.name]
| mit | -5,046,148,948,238,348,000 | 28.048193 | 75 | 0.589382 | false |
synth3tk/the-blue-alliance | tests/test_fms_api_event_list_parser.py | 1 | 5508 | import datetime
import json
import unittest2
from datafeeds.parsers.fms_api.fms_api_event_list_parser import FMSAPIEventListParser
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.district_type import DistrictType
from consts.event_type import EventType
from models.event import Event
class TestFMSAPIEventListParser(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
def test_parse_event_list(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
self.assertTrue(isinstance(events, list))
# File has 5 events, but we ignore CMP divisions (only subdivisions), so only 4 are expected back
self.assertEquals(len(events), 4)
def test_parse_regional_event(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[0]
self.assertEquals(event.key_name, "2015nyny")
self.assertEquals(event.name, "New York City Regional")
self.assertEquals(event.short_name, "New York City")
self.assertEquals(event.event_short, "nyny")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=3, day=12, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=3, day=15, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Jacob K. Javits Convention Center")
self.assertEquals(event.location, "New York, NY, USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.REGIONAL)
self.assertEquals(event.event_district_enum, DistrictType.NO_DISTRICT)
def test_parse_district_event(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[1]
self.assertEquals(event.key_name, "2015cthar")
self.assertEquals(event.name, "NE District - Hartford Event")
self.assertEquals(event.short_name, "Hartford")
self.assertEquals(event.event_short, "cthar")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=3, day=27, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=3, day=29, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Hartford Public High School")
self.assertEquals(event.location, "Hartford, CT, USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.DISTRICT)
self.assertEquals(event.event_district_enum, DistrictType.NEW_ENGLAND)
def test_parse_district_cmp(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[2]
self.assertEquals(event.key_name, "2015necmp")
self.assertEquals(event.name, "NE FIRST District Championship presented by United Technologies")
self.assertEquals(event.short_name, "NE FIRST")
self.assertEquals(event.event_short, "necmp")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=4, day=8, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=4, day=11, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Sports and Recreation Center, WPI")
self.assertEquals(event.location, "Worcester, MA, USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.DISTRICT_CMP)
self.assertEquals(event.event_district_enum, DistrictType.NEW_ENGLAND)
def test_parse_cmp_subdivision(self):
with open('test_data/fms_api/2015_event_list.json', 'r') as f:
events = FMSAPIEventListParser(2015).parse(json.loads(f.read()))
event = events[3]
self.assertEquals(event.key_name, "2015tes")
self.assertEquals(event.name, "Tesla Division")
self.assertEquals(event.short_name, "Tesla")
self.assertEquals(event.event_short, "tes")
self.assertEquals(event.official, True)
self.assertEquals(event.start_date, datetime.datetime(year=2015, month=4, day=22, hour=0, minute=0, second=0))
self.assertEquals(event.end_date, datetime.datetime(year=2015, month=4, day=25, hour=23, minute=59, second=59))
self.assertEquals(event.venue, "Edward Jones Dome")
self.assertEquals(event.location, "St. Louis, MO, USA")
self.assertEquals(event.year, 2015)
self.assertEquals(event.event_type_enum, EventType.CMP_DIVISION)
self.assertEquals(event.event_district_enum, DistrictType.NO_DISTRICT)
| mit | -7,174,664,230,224,272,000 | 51.961538 | 123 | 0.662672 | false |
all-of-us/raw-data-repository | rdr_service/model/bq_workbench_workspace.py | 1 | 9460 |
from rdr_service.model.bq_base import BQTable, BQSchema, BQView, BQField, BQFieldTypeEnum, BQFieldModeEnum, \
BQRecordField
class BQWorkspaceRaceEthnicitySchema(BQSchema):
race_ethnicity = BQField('race_ethnicity', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
race_ethnicity_id = BQField('race_ethnicity_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQWorkspaceAgeSchema(BQSchema):
age = BQField('age', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
age_id = BQField('age_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQRWBWorkspaceSchema(BQSchema):
"""
Represents the workbench_workspace_snapshot table.
"""
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
workspace_source_id = BQField('workspace_source_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
name = BQField('name', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
creation_time = BQField('creation_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified_time = BQField('modified_time', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
status = BQField('status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
status_id = BQField('status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
exclude_from_public_directory = BQField('exclude_from_public_directory', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
disease_focused_research = BQField('disease_focused_research', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
disease_focused_research_name = BQField('disease_focused_research_name', BQFieldTypeEnum.STRING,
BQFieldModeEnum.NULLABLE)
other_purpose_details = BQField('other_purpose_details', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
methods_development = BQField('methods_development', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
control_set = BQField('control_set', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
ancestry = BQField('ancestry', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
social_behavioral = BQField('social_behavioral', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
population_health = BQField('population_health', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
drug_development = BQField('drug_development', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
commercial_purpose = BQField('commercial_purpose', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
educational = BQField('educational', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
other_purpose = BQField('other_purpose', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
ethical_legal_social_implications = BQField('ethical_legal_social_implications', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
scientific_approaches = BQField('scientific_approaches', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
intend_to_study = BQField('intend_to_study', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
findings_from_study = BQField('findings_from_study', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
focus_on_underrepresented_populations = BQField('focus_on_underrepresented_populations', BQFieldTypeEnum.INTEGER,
BQFieldModeEnum.NULLABLE)
race_ethnicities = BQRecordField('race_ethnicities', BQWorkspaceRaceEthnicitySchema)
ages = BQRecordField('ages', BQWorkspaceAgeSchema)
sex_at_birth = BQField('sex_at_birth', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
sex_at_birth_id = BQField('sex_at_birth_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
gender_identity = BQField('gender_identity', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
gender_identity_id = BQField('gender_identity_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
sexual_orientation = BQField('sexual_orientation', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
sexual_orientation_id = BQField('sexual_orientation_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
geography = BQField('geography', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
geography_id = BQField('geography_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
disability_status = BQField('disability_status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
disability_status_id = BQField('disability_status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
access_to_care = BQField('access_to_care', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
access_to_care_id = BQField('access_to_care_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
education_level = BQField('education_level', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
education_level_id = BQField('education_level_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
income_level = BQField('income_level', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
income_level_id = BQField('income_level_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
others = BQField('others', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
review_requested = BQField('review_requested', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
is_reviewed = BQField('is_reviewed', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
cdr_version = BQField('cdr_version', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
access_tier = BQField('access_tier', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
access_tier_id = BQField('access_tier_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQRWBWorkspace(BQTable):
""" Research Workbench Workspace BigQuery Table """
__tablename__ = 'rwb_workspace'
__schema__ = BQRWBWorkspaceSchema
class BQRWBWorkspaceView(BQView):
__viewname__ = 'v_rwb_workspace'
__viewdescr__ = 'Research Workbench Workspace View'
__pk_id__ = 'workspace_source_id'
__table__ = BQRWBWorkspace
# We need to build a SQL statement with all fields except sub-tables and remove duplicates.
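    # The window function keeps, per workspace_source_id, only the row whose
    # modified timestamp equals the partition maximum (i.e. the latest snapshot).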
__sql__ = """
SELECT
%%FIELD_LIST%%
FROM (
SELECT *, MAX(modified) OVER (PARTITION BY workspace_source_id) AS max_timestamp
FROM `{project}`.{dataset}.rwb_workspace
) t
WHERE t.modified = t.max_timestamp
""".replace('%%FIELD_LIST%%', BQRWBWorkspaceSchema.get_sql_field_names(
exclude_fields=[
'race_ethnicities',
'ages'
])
)
class BQRWBWorkspaceRaceEthnicityView(BQView):
__viewname__ = 'v_rwb_workspace_race_ethnicity'
__viewdescr__ = 'Research Workbench Workspace Race Ethnicity View'
__pk_id__ = 'workspace_source_id'
__table__ = BQRWBWorkspace
__sql__ = """
SELECT t.id, t.created, t.modified, t.workspace_source_id, nt.*
FROM (
SELECT *, MAX(modified) OVER (PARTITION BY workspace_source_id) AS max_timestamp
FROM `{project}`.{dataset}.rwb_workspace
) t cross join unnest(race_ethnicities) as nt
WHERE t.modified = t.max_timestamp
"""
class BQRWBWorkspaceAgeView(BQView):
__viewname__ = 'v_rwb_workspace_age'
__viewdescr__ = 'Research Workbench Workspace Age View'
__pk_id__ = 'workspace_source_id'
__table__ = BQRWBWorkspace
__sql__ = """
SELECT t.id, t.created, t.modified, t.workspace_source_id, nt.*
FROM (
SELECT *, MAX(modified) OVER (PARTITION BY workspace_source_id) AS max_timestamp
FROM `{project}`.{dataset}.rwb_workspace
) t cross join unnest(ages) as nt
WHERE t.modified = t.max_timestamp
"""
class BQRWBWorkspaceUsersSchema(BQSchema):
id = BQField('id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
created = BQField('created', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
modified = BQField('modified', BQFieldTypeEnum.DATETIME, BQFieldModeEnum.REQUIRED)
workspace_id = BQField('workspace_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
user_id = BQField('user_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.REQUIRED)
role = BQField('role', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
role_id = BQField('role_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
status = BQField('status', BQFieldTypeEnum.STRING, BQFieldModeEnum.NULLABLE)
status_id = BQField('status_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
researcher_id = BQField('researcher_id', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
is_creator = BQField('is_creator', BQFieldTypeEnum.INTEGER, BQFieldModeEnum.NULLABLE)
class BQRWBWorkspaceUsers(BQTable):
""" Research Workbench Workspace Users BigQuery Table """
__tablename__ = 'rwb_workspace_users'
__schema__ = BQRWBWorkspaceUsersSchema
class BQRWBWorkspaceUsersView(BQView):
__viewname__ = 'v_rwb_workspace_users'
__viewdescr__ = 'Research Workbench Workspace Users View'
__pk_id__ = 'id'
__table__ = BQRWBWorkspaceUsers
| bsd-3-clause | -1,766,450,911,606,441,000 | 54.321637 | 117 | 0.723996 | false |
Jaesin/OctoPrint | src/octoprint/vendor/sockjs/tornado/transports/jsonp.py | 1 | 3693 | # -*- coding: utf-8 -*-
"""
sockjs.tornado.transports.jsonp
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
JSONP transport implementation.
"""
import logging
from tornado.web import asynchronous
from octoprint.vendor.sockjs.tornado import proto
from octoprint.vendor.sockjs.tornado.transports import pollingbase
from octoprint.vendor.sockjs.tornado.util import bytes_to_str, unquote_plus
LOG = logging.getLogger("tornado.general")
class JSONPTransport(pollingbase.PollingTransportBase):
name = 'jsonp'
@asynchronous
def get(self, session_id):
# Start response
self.handle_session_cookie()
self.disable_cache()
# Grab callback parameter
self.callback = self.get_argument('c', None)
if not self.callback:
self.write('"callback" parameter required')
self.set_status(500)
self.finish()
return
# Get or create session without starting heartbeat
if not self._attach_session(session_id, False):
return
        # The session might already be detached because the connection was closed in on_open.
if not self.session:
return
if not self.session.send_queue:
self.session.start_heartbeat()
else:
self.session.flush()
def send_pack(self, message, binary=False):
if binary:
raise Exception('binary not supported for JSONPTransport')
self.active = False
try:
# TODO: Just escape
msg = '%s(%s);\r\n' % (self.callback, proto.json_encode(message))
self.set_header('Content-Type', 'application/javascript; charset=UTF-8')
self.set_header('Content-Length', len(msg))
# TODO: Fix me
self.set_header('Etag', 'dummy')
self.write(msg)
self.flush(callback=self.send_complete)
except IOError:
            # If the connection dropped, make sure we close the offending session
            # instead of propagating the error all the way up.
self.session.delayed_close()
class JSONPSendHandler(pollingbase.PollingTransportBase):
def post(self, session_id):
self.preflight()
self.handle_session_cookie()
self.disable_cache()
session = self._get_session(session_id)
if session is None or session.is_closed:
self.set_status(404)
return
data = bytes_to_str(self.request.body)
ctype = self.request.headers.get('Content-Type', '').lower()
if ctype == 'application/x-www-form-urlencoded':
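            # Form-encoded payloads arrive as 'd=<url-encoded JSON>'; strip the
            # prefix and unquote before JSON decoding.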
if not data.startswith('d='):
LOG.exception('jsonp_send: Invalid payload.')
self.write("Payload expected.")
self.set_status(500)
return
data = unquote_plus(data[2:])
if not data:
LOG.debug('jsonp_send: Payload expected.')
self.write("Payload expected.")
self.set_status(500)
return
try:
messages = proto.json_decode(data)
except:
# TODO: Proper error handling
LOG.debug('jsonp_send: Invalid json encoding')
self.write("Broken JSON encoding.")
self.set_status(500)
return
try:
session.on_messages(messages)
except Exception:
LOG.exception('jsonp_send: on_message() failed')
session.close()
self.write('Message handler failed.')
self.set_status(500)
return
self.write('ok')
self.set_header('Content-Type', 'text/plain; charset=UTF-8')
self.set_status(200)
| agpl-3.0 | -149,909,300,359,437,600 | 28.07874 | 84 | 0.581099 | false |
Alberto-Beralix/Beralix | i386-squashfs-root/usr/share/hplip/ui4/printernamecombobox.py | 1 | 5122 | # -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Author: Don Welch
#
# Std Lib
#import sys
# Local
from base.g import *
from ui_utils import *
from base import device
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY = 0
PRINTERNAMECOMBOBOX_TYPE_FAX_ONLY = 1
PRINTERNAMECOMBOBOX_TYPE_PRINTER_AND_FAX = 2
class PrinterNameComboBox(QWidget):
def __init__(self, parent):
QWidget.__init__(self, parent)
self.printer_name = ''
self.device_uri = ''
self.printer_index = {}
self.initial_printer = None
self.updating = False
self.typ = PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY
self.user_settings = UserSettings()
self.user_settings.load()
self.user_settings.debug()
self.initUi()
def initUi(self):
#print "PrinterNameComboBox.initUi()"
HBoxLayout = QHBoxLayout(self)
HBoxLayout.setObjectName("HBoxLayout")
self.NameLabel = QLabel(self)
self.NameLabel.setObjectName("NameLabel")
HBoxLayout.addWidget(self.NameLabel)
SpacerItem = QSpacerItem(20, 20, QSizePolicy.Minimum, QSizePolicy.Minimum)
HBoxLayout.addItem(SpacerItem)
self.ComboBox = QComboBox(self)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.ComboBox.sizePolicy().hasHeightForWidth())
self.ComboBox.setSizePolicy(sizePolicy)
self.ComboBox.setObjectName("ComboBox")
HBoxLayout.addWidget(self.ComboBox)
self.NameLabel.setText(self.__tr("Printer:"))
#self.connect(self.ComboBox, SIGNAL("currentIndexChanged(int)"),
# self.ComboBox_currentIndexChanged)
self.connect(self.ComboBox, SIGNAL("currentIndexChanged(const QString &)"),
self.ComboBox_currentIndexChanged)
def setType(self, typ):
if typ in (PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY,
PRINTERNAMECOMBOBOX_TYPE_FAX_ONLY,
PRINTERNAMECOMBOBOX_TYPE_PRINTER_AND_FAX):
self.typ = typ
def setInitialPrinter(self, printer_name):
self.initial_printer = printer_name
def updateUi(self):
#print "PrinterNameComboBox.updateUi()"
if self.typ == PRINTERNAMECOMBOBOX_TYPE_PRINTER_ONLY:
self.NameLabel.setText(self.__tr("Printer Name:"))
be_filter = ['hp']
elif self.typ == PRINTERNAMECOMBOBOX_TYPE_FAX_ONLY:
self.NameLabel.setText(self.__tr("Fax Name:"))
be_filter = ['hpfax']
else: # PRINTERNAMECOMBOBOX_TYPE_PRINTER_AND_FAX
self.NameLabel.setText(self.__tr("Printer/Fax Name:"))
be_filter = ['hp', 'hpfax']
self.printers = device.getSupportedCUPSPrinters(be_filter)
self.printer_index.clear() # = {}
if self.printers:
if self.initial_printer is None:
#user_conf.get('last_used', 'printer_name')
self.initial_printer = self.user_settings.last_used_printer
self.updating = True
try:
k = 0
for i, p in enumerate(self.printers):
self.printer_index[p.name] = p.device_uri
self.ComboBox.insertItem(i, p.name)
if self.initial_printer is not None and p.name == self.initial_printer:
self.initial_printer = None
k = i
self.ComboBox.setCurrentIndex(-1)
finally:
self.updating = False
self.ComboBox.setCurrentIndex(k)
else:
self.emit(SIGNAL("PrinterNameComboBox_noPrinters"))
def ComboBox_currentIndexChanged(self, t):
self.printer_name = unicode(t)
if self.updating:
return
self.device_uri = self.printer_index[self.printer_name]
#user_conf.set('last_used', 'printer_name', self.printer_name)
self.user_settings.last_used_printer = self.printer_name
self.user_settings.save()
self.emit(SIGNAL("PrinterNameComboBox_currentChanged"), self.device_uri, self.printer_name)
def __tr(self,s,c = None):
return qApp.translate("PrinterNameComboBox",s,c)
| gpl-3.0 | 8,314,230,958,582,028,000 | 32.045161 | 99 | 0.641156 | false |
coolblaze03/WSNNS3Port | doc/tutorial/source/conf.py | 1 | 6980 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ns-3'
copyright = u'2010, ns-3 project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'ns-3.12'
# The full version, including alpha/beta/rc tags.
release = 'ns-3.12'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ns-3-tutorial.tex', u'ns-3 Tutorial',
u'ns-3 project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-tutorial', u'ns-3 Tutorial',
[u'ns-3 project'], 1)
]
| gpl-2.0 | -6,015,266,642,801,439,000 | 31.314815 | 80 | 0.706017 | false |
IntersectAustralia/hcsvlab_robochef | hcsvlab_robochef/paradisec/rdf.py | 1 | 1904 | from hcsvlab_robochef.rdf.map import *
PARADISEC = "PARADISEC"
paradisecSpeakerMap = FieldMapper(AUSNC)
paradisecSpeakerMap.add('name', mapto=FOAF.name)
paradisecSpeakerMap.add('role', ignore=True)
paradisecMap = MetadataMapper(PARADISEC, speakerMap=paradisecSpeakerMap, documentMap = get_generic_doc_mapper())
paradisecMap.add('Box', mapto=DC.box)
paradisecMap.add('DCMIType', mapto=DC.type, ignore=True)
paradisecMap.add('ISO3166', mapto=DC.coverage)
paradisecMap.add('URI', ignore=True)
paradisecMap.add('W3CDTF', mapto=DC.created)
paradisecMap.add('accessRights', mapto=DC.accessRights)
paradisecMap.add('author', mapto=OLAC.author, ignore=True)
paradisecMap.add('bibliographicCitation', mapto=DC.bibliographicCitation)
paradisecMap.add('compiler', mapto=OLAC.compiler, ignore=True)
paradisecMap.add('consultant', mapto=OLAC.consultant, ignore=True)
paradisecMap.add('data_inputter', mapto=OLAC.data_inputter, ignore=True)
paradisecMap.add('depositor', mapto=OLAC.depositor, ignore=True)
paradisecMap.add('description', mapto=DC.description)
paradisecMap.add('discourse-type', mapto=OLAC.discourse_type)
paradisecMap.add('format', ignore=True)
paradisecMap.add('identifier', mapto=DC.identifier)
paradisecMap.add('interviewer', mapto=OLAC.interviewer, ignore=True)
paradisecMap.add('language', mapto=OLAC.language)
paradisecMap.add('linguistic-field', mapto=OLAC.linguistic_field)
paradisecMap.add('linguistic-type', mapto=OLAC.linguistic_type)
paradisecMap.add('photographer', mapto=OLAC.photographer, ignore=True)
paradisecMap.add('recorder', mapto=OLAC.recorder, ignore=True)
paradisecMap.add('researcher', mapto=OLAC.researcher, ignore=True)
paradisecMap.add('rights', mapto=DC.rights)
paradisecMap.add('speaker', mapto=OLAC.speaker, ignore=True)
paradisecMap.add('tableOfContents', ignore=True)
paradisecMap.add('title', mapto=DC.title)
paradisecMap.add('type', mapto=DC.type, ignore=True)
| gpl-3.0 | -8,536,860,154,655,141,000 | 47.820513 | 112 | 0.798319 | false |
indro/t2c | apps/external_apps/ajax_validation/views.py | 2 | 1452 | from django import forms
from django.http import HttpResponse
from django.views.decorators.http import require_POST
from ajax_validation.utils import LazyEncoder
def validate(request, *args, **kwargs):
form_class = kwargs.pop('form_class')
extra_args_func = kwargs.pop('callback', lambda request, *args, **kwargs: {})
kwargs = extra_args_func(request, *args, **kwargs)
kwargs['data'] = request.POST
form = form_class(**kwargs)
if form.is_valid():
data = {
'valid': True,
}
else:
if request.POST.getlist('fields'):
fields = request.POST.getlist('fields') + ['__all__']
errors = dict([(key, val) for key, val in form.errors.iteritems() if key in fields])
else:
errors = form.errors
final_errors = {}
for key, val in errors.iteritems():
if key == '__all__':
final_errors['__all__'] = val
if not isinstance(form.fields[key], forms.FileField):
html_id = form.fields[key].widget.attrs.get('id') or form[key].auto_id
html_id = form.fields[key].widget.id_for_label(html_id)
final_errors[html_id] = val
data = {
'valid': False,
'errors': final_errors,
}
json_serializer = LazyEncoder()
return HttpResponse(json_serializer.encode(data), mimetype='application/json')
validate = require_POST(validate)
| mit | 6,643,868,920,188,275,000 | 38.243243 | 96 | 0.588154 | false |
silentfuzzle/calibre | src/calibre/gui2/viewer/behavior_manager/behavior_manager_builder.py | 1 | 6334 |
__license__ = 'GPL v3'
__copyright__ = '2014, Emily Palmieri <[email protected]>'
from calibre.gui2.viewer.behavior.adventurous_behavior import AdventurousBehavior
from calibre.gui2.viewer.behavior.adventurous_base_behavior import BaseAdventurousBehavior
from calibre.gui2.viewer.behavior.calibre_behavior import CalibreBehavior
from calibre.gui2.viewer.toc_sections import TOCSections
from calibre.gui2.viewer.behavior_manager.behavior_manager import BehaviorManager
from calibre.gui2.viewer.behavior_manager.behavior_manager_switch import SwitchBehaviorManager
from calibre.gui2.viewer.toc_container.toc_hierarchy_container import TreeTOCContainer
from calibre.gui2.viewer.toc_container.toc_network_container import NetworkTOCContainer
# This class builds the TOC interface(s) and page numbering behavior(s) to use in the ebook viewer interface
class BehaviorManagerBuilder (object):
# Constructor
# main (EBookViewer) - the ebook viewer interface
# b1_single_document (bool) - True if the main page behavior should display all the book's text in a single document
# b1_use_hierarchy (bool) - True if the main TOC interface should display the TOC in a hierarchy
# switch (bool) - True if the user can switch between two ebook viewer behaviors
# b2_single_document (bool) - True if the second page behavior should display all the book's text in a single document
# b2_use_hierarchy (bool) - True if the second TOC interface should display the TOC in a hierarchy
def __init__(self, main, b1_single_document=True,
b1_use_hierarchy=True, switch=False, b2_single_document=False,
b2_use_hierarchy=False):
# If both interface behaviors are the same, don't create a switch between the two
if (b1_single_document == b2_single_document and
b1_use_hierarchy == b2_use_hierarchy):
switch = False
self.b1_single_document = b1_single_document
self.b1_use_hierarchy = b1_use_hierarchy
self.switch = switch
self.b2_single_document = b2_single_document
self.b2_use_hierarchy = b2_use_hierarchy
# Create a default TOC interface to use until the user selects an ebook
self.default_manager = BehaviorManager(CalibreBehavior(),
TreeTOCContainer(main))
self.network_container = NetworkTOCContainer(main)
# Return a page behavior given if the current ebook should be display in a single document
# single_document (bool) - True if the page behavior should display all the book's text in a single document
# setup_vscrollbar_method (method) - the method from EBookViewer to use when updating the scrollbar and page numbers
def get_page_behavior(self, single_document, setup_vscrollbar_method):
if (single_document):
# Display the book in a single document
page_behavior = CalibreBehavior()
else:
# Break the book into groups and display each group as a separate document
page_behavior = AdventurousBehavior(setup_vscrollbar_method)
return page_behavior
# Return a TOC interface given if it should be displayed as a network or a hierarchy
# use_hierarchy (bool) - True if the TOC interface should display the TOC in a hierarchy
# main (EBookViewer) - the ebook viewer interface
def get_toc_interface(self, use_hierarchy, main):
if (use_hierarchy):
# Display the ebook's TOC as a hierarchy of sections
toc_container = TreeTOCContainer(main)
toc_container.connect_toc_actions(main.toc_clicked)
else:
# Display the ebook's TOC as a network of sections
toc_container = self.network_container
return toc_container
# Returns a behavior manager from the given parameters
# main (EBookViewer) - the ebook viewer interface
def build_behavior_manager(self, main):
# Create the main interface behavior
b1_page_behavior = self.get_page_behavior(self.b1_single_document,
main.setup_vscrollbar)
b1_toc_interface = self.get_toc_interface(
self.b1_use_hierarchy, main)
if (self.switch):
# Create the second interface behavior if specified
b2_page_behavior = self.get_page_behavior(self.b2_single_document,
main.setup_vscrollbar)
b2_toc_interface = self.get_toc_interface(
self.b2_use_hierarchy, main)
# Create a behavior manager to switch between the main and second behavior
behavior_manager = SwitchBehaviorManager(
main, b1_page_behavior,
b1_toc_interface, b2_page_behavior,
b2_toc_interface)
else:
# Disable the behavior toggle
main.action_toggle_adventurous_mode.setVisible(False)
behavior_manager = BehaviorManager(b1_page_behavior,
b1_toc_interface)
self.behavior_manager = behavior_manager
return behavior_manager
# main (EBookViewer) - the ebook viewer interface
# title (string) - the title of the ebook
# pathtoebook (string) - the path to the ebook on the user's file system
def setup_behavior_manager(self, main, title, pathtoebook):
toc = main.iterator.toc
toc_sections = None
# If there isn't a TOC, display the ebook in a single document with a
# hierarchical TOC interface at all times
if (not toc):
main.action_toggle_adventurous_mode.setEnabled(False)
behavior_manager = self.default_manager
else:
main.action_toggle_adventurous_mode.setEnabled(True)
main.set_toc_view(self.behavior_manager.toc_interface)
behavior_manager = self.behavior_manager
toc_sections = TOCSections(toc, main.iterator.spine)
total_num_pages = sum(main.iterator.pages)
behavior_manager.setup_ebook(total_num_pages, toc_sections, main.toc_model, title,
pathtoebook)
# Return the behavior manager to use if it has changed
return behavior_manager
| gpl-3.0 | 4,841,253,666,804,379,000 | 50.495935 | 122 | 0.667193 | false |
bossiernesto/melta | test/core/test_syncronizer.py | 1 | 2704 | from unittest import TestCase, skip
from test.fixture.class_repositories import person1, house2
from melta.core.object_converter import MeltaObjectConverter
from melta.core.melta_syncronizer import MeltaSyncronizer
import copy
class TestSyncronizer(TestCase):
def setUp(self):
self.syncronizer = MeltaSyncronizer()
# house2 object
# house2 => "building_age", 34
# house2 => "material", "brick"
# house2 => "sq2mts", 453
self.house2 = house2
self.converter = MeltaObjectConverter()
self.melta_house2 = self.converter.to_melta_object(self.house2)
self.melta_house2_deepcopy = copy.deepcopy(self.melta_house2) #keep a deepcopy of the object to compare
@skip
def test_no_syncronization(self):
#syncronzation should be python_object => melta_object, with side effect True as default value,
# if default value is False a new melta object is created, but identity is lost by enabling this last value.
self.syncronizer.syncronize(self.house2, self.melta_house2, side_effect=True)
self.assertEqual(self.melta_house2, self.melta_house2_deepcopy)
@skip
def test_syncronization(self):
self.house2.building_age = 45
self.syncronizer.syncronize(self.house2, self.melta_house2)
self.assertNotEqual(self.melta_house2, self.melta_house2_deepcopy)
self.assertEqual(45, self.melta_house2.building_age)
self.assertEqual(self.house2.building_age, self.melta_house2.building_age)
self.assertLess(self.melta_house2_deepcopy.building_age, self.melta_house2.building_age)
@skip
def test_double_syncronization(self):
self.house2.building_age = 20
self.syncronizer.syncronize(self.house2, self.melta_house2)
self.assertEqual(20, self.melta_house2.building_age)
self.house2.material = "concrete"
self.assertNotEqual(self.house2.material, self.melta_house2.material)
self.assertEqual('brick', self.melta_house2.material)
self.syncronizer.syncronize(self.melta_house2, self.house2)
self.assertEqual(self.house2.material, self.melta_house2.material)
self.assertEqual('concrete', self.melta_house2.material)
@skip
def test_nosideeffect_syncronization(self):
from melta.core.basicmodel import AggregationObject
self.house2.building_age = 130
new_object = self.syncronizer.syncronize(self.melta_house2, self.house2, side_effect=False)
self.assertNotEqual(self.melta_house2, new_object)
self.assertTrue(isinstance(new_object, AggregationObject))
self.assertEqual(self.house2.__class__, new_object.get_class()) | bsd-3-clause | -3,970,391,530,764,089,300 | 44.847458 | 116 | 0.70858 | false |
xianjunzhengbackup/code | data science/machine_learning_for_the_web/chapter_8/movie_reviews_analizer_app/webmining_server/pages/models.py | 1 | 1215 | from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
class SearchTerm(models.Model):
term = models.CharField(_('search'), max_length=255)
num_reviews = models.IntegerField(null=True,default=0)
#display term on admin panel
def __unicode__(self):
return self.term
class Page(models.Model):
searchterm = models.ForeignKey(SearchTerm, related_name='pages',null=True,blank=True)
url = models.URLField(_('url'), default='', blank=True)
title = models.CharField(_('name'), max_length=255)
depth = models.IntegerField(null=True,default=-1)
html = models.TextField(_('html'),blank=True, default='')
review = models.BooleanField(default=False)
old_rank = models.FloatField(null=True,default=0)
new_rank = models.FloatField(null=True,default=1)
content = models.TextField(_('content'),blank=True, default='')
sentiment = models.IntegerField(null=True,default=100)
class Link(models.Model):
searchterm = models.ForeignKey(SearchTerm, related_name='links',null=True,blank=True)
from_id = models.IntegerField(null=True)
to_id = models.IntegerField(null=True) | mit | 3,143,095,247,785,500,000 | 42.428571 | 90 | 0.699588 | false |
ganga-devs/ganga | ganga/GangaAtlas/Lib/Athena/ganga-stagein.py | 1 | 4800 | #! /usr/bin/env python
import os, sys
from getopt import getopt,GetoptError
from threading import Thread
from commands import getstatusoutput
from lfc import *
def usage():
print 'Name:'
print ' ganga-stagein.py'
print
print 'Arguments:'
print ' logical names'
print
print 'Options:'
print ' -h, --help this prinout'
print ' -i, --input file list of logical names'
print ' -d, --directory path to stage the input files (default $PWD)'
print ' -t, --timeout seconds for the staging in (default 900)'
print ' -r, --retry number for the staging command (default 3)'
print ' -v, --verbose verbosity'
def get_guid(lfn):
'''Get guid for a lfn
'''
statg = lfc_filestatg()
rc = lfc_statg(lfn,'',statg)
if not rc: return statg.guid
def get_replicas(lfn):
'''List replicas and sort the one on close SE first
'''
replicas = []
listp = lfc_list()
res = lfc_listreplica(lfn,'',CNS_LIST_BEGIN,listp)
while res:
if res.host in closeSEs:
replicas.insert(0,res.sfn)
else:
replicas.append(res.sfn)
res = lfc_listreplica(lfn,'',CNS_LIST_CONTINUE,listp)
lfc_listreplica(lfn,'',CNS_LIST_END,listp)
return replicas
class PoolFileCatalog:
'''Helper class to create PoolFileCatalog.xml
'''
def __init__(self,name='PoolFileCatalog.xml'):
self.pfc = open(name,'w')
print >>self.pfc,'<?xml version="1.0" ?>'
print >>self.pfc,'<POOLFILECATALOG>'
def addFile(self,guid,lfn,pfn):
print >>self.pfc,' <File ID="%s">' % guid
print >>self.pfc,' <logical>'
print >>self.pfc,' <lfn name="%s"/>' % lfn
print >>self.pfc,' </logical>'
print >>self.pfc,' <physical>'
print >>self.pfc,' <pfn filetype="ROOT_All" name="%s"/>' % pfn
print >>self.pfc,' </physical>'
print >>self.pfc,' </File>'
def close(self):
print >>self.pfc,'</POOLFILECATALOG>'
class StageIn(Thread):
def __init__(self,lfn,replicas,file):
Thread.__init__(self)
self.lfn = lfn
self.replicas = replicas
self.file = file
def run(self):
for rep in self.replicas:
for r in xrange(0,retry):
if verbose: print 'INFO LFN: %s Replica: %s Retry: %d' % (lfn,rep,r)
cmd = 'lcg-cp --vo atlas -t %d %s file:%s' % (timeout,rep,self.file)
rc, out = getstatusoutput(cmd)
if not rc: return
print 'Return code %d from %s' % (rc,cmd)
print out
if __name__ == '__main__':
directory = os.getcwd()
retry = 2
timeout = 900
input = None
verbose = False
try:
opts, args = getopt(sys.argv[1:],'ht:d:r:i:v',['help','directory=','input=','timeout=','retry=','verbose'])
except GetoptError:
usage()
sys.exit(1)
for opt, val in opts:
if opt in ['-h','--help']:
usage()
sys.exit()
if opt in ['-d','--directory']:
direcory = val
if opt in ['-i','--input']:
input = val
if opt in ['-t','--timeout']:
timeout = int(val)
if opt in ['-r','--retry']:
retry = int(val)
if opt in ['-v','--verbose']:
verbose = True
if input:
lfns = [ line.strip() for line in file(input) ]
else:
lfns = args
if not len(lfns):
print 'No files requested.'
sys.exit()
# determine the closeSEs
rc, output = getstatusoutput('edg-brokerinfo getCloseSEs')
if rc:
print 'ERROR: Could not determine close SEs'
closeSEs = []
else:
closeSEs = output.split()
print 'INFO: Close SEs are ' + ', '.join(closeSEs)
pfc = PoolFileCatalog()
workers=[]
try: lfc_startsess('','')
except NameError: pass
for lfn in lfns:
if verbose: print 'LFN: %s' % lfn
guid = get_guid(lfn)
if not guid:
print 'ERROR: LFN %s not found.' % lfn
continue
if verbose: print 'GUID: %s' % guid
name = os.path.basename(lfn)
pfn = os.path.join(directory,name)
pfc.addFile(guid,name,pfn)
replicas = get_replicas(lfn)
if not replicas:
print 'ERROR: No replica found for LFN %s' % lfn
continue
if verbose:
print 'Replicas :\n %s' % '\n '.join(replicas)
s = StageIn(lfn,replicas,pfn)
s.start()
workers.append(s)
pfc.close()
try: lfc_stopsess()
except NameError: pass
for s in workers:
s.join()
| gpl-2.0 | -5,774,584,406,186,113,000 | 23.870466 | 115 | 0.527083 | false |
nrclark/grouplib | unit_tests/basic_test.py | 1 | 2441 | #!/usr/bin/env python
import shutil
import random
import md5
import unittest
import os
import subprocess
def sanitize(path):
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
path = os.path.abspath(path)
return path
def run_make(Makefile=None, targets=None, flags=None, dir='.'):
dir = sanitize(dir)
cmd = ['make']
if Makefile != None:
Makefile = sanitize(Makefile)
cmd = cmd + ['-f', Makefile]
if targets != None:
if type(targets) == str:
targets = targets.split(' ')
if type(targets) != list:
raise ValueError, 'targets could not be determined from '+str(targets)
targets = [x.strip() for x in targets]
cmd = cmd + targets
if flags != None:
if type(flags) == str:
flags = flags.split(' ')
if type(flags) != list:
raise ValueError, 'flags could not be determined from '+str(flags)
flags = [x.strip() for x in flags]
cmd = cmd + flags
p = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return p.returncode, stdout, stderr
def create_source(filename, dir='.'):
string = os.path.basename(filename)
myFile = open(os.path.join(dir, filename), 'w')
myFile.write(string + '\n')
myFile.close()
class MakeTest(unittest.TestCase):
def setUp(self, Makefile, groupLib):
self.Makefile = sanitize(Makefile)
self.groupLib = sanitize(groupLib)
self.assertTrue(os.path.isfile(self.Makefile))
self.assertTrue(os.path.isfile(self.groupLib))
self.workDir = '_tmp_' + md5.new(str(random.random())).hexdigest()
os.mkdir(self.workDir)
shutil.copy2(self.groupLib, self.workDir)
shutil.copy2(self.Makefile, os.path.join(self.workDir, 'Makefile'))
def tearDown(self):
shutil.rmtree(self.workDir)
class TestBasicOperation(MakeTest):
def setUp(self):
super(TestBasicOperation, self).setUp('case_01.mk', '../grouplib.mk')
create_source('source.1', self.workDir)
create_source('source.2', self.workDir)
def test_Simple(self):
retval, stdout, stderr = run_make(dir=self.workDir)
print retval
print stdout
print stderr
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -5,865,620,327,662,661,000 | 29.135802 | 86 | 0.611635 | false |
none-da/zeshare | debug_toolbar/panels/headers.py | 1 | 1292 | from django.template.loader import render_to_string
from debug_toolbar.panels import DebugPanel
class HeaderDebugPanel(DebugPanel):
"""
A panel to display HTTP headers.
"""
name = 'Header'
has_content = True
# List of headers we want to display
header_filter = (
'CONTENT_TYPE',
'HTTP_ACCEPT',
'HTTP_ACCEPT_CHARSET',
'HTTP_ACCEPT_ENCODING',
'HTTP_ACCEPT_LANGUAGE',
'HTTP_CACHE_CONTROL',
'HTTP_CONNECTION',
'HTTP_HOST',
'HTTP_KEEP_ALIVE',
'HTTP_REFERER',
'HTTP_USER_AGENT',
'QUERY_STRING',
'REMOTE_ADDR',
'REMOTE_HOST',
'REQUEST_METHOD',
'SCRIPT_NAME',
'SERVER_NAME',
'SERVER_PORT',
'SERVER_PROTOCOL',
'SERVER_SOFTWARE',
)
def nav_title(self):
return 'HTTP Headers'
def title(self):
return 'HTTP Headers'
def url(self):
return ''
def process_request(self, request):
self.headers = dict(
[(k, request.META[k]) for k in self.header_filter if k in request.META]
)
def content(self):
context = {
'headers': self.headers
}
return render_to_string('debug_toolbar/panels/headers.html', context) | bsd-3-clause | -9,078,421,765,086,998,000 | 23.865385 | 83 | 0.557276 | false |
elkingtonmcb/bcbio-nextgen | bcbio/variation/coverage_experimental.py | 1 | 7319 | import os
import pandas as pd
import subprocess
from collections import Counter
import numpy as np
import math
import pysam
import pybedtools
from bcbio.utils import (file_exists, tmpfile, chdir, splitext_plus,
max_command_length, robust_partition_all)
from bcbio.provenance import do
from bcbio.distributed.transaction import file_transaction
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio import broad
from bcbio.pipeline import config_utils
class cov_class:
def __init__(self, size, name, sample):
self.size = int(size)
self.name = name
self.position = ""
self.sample = sample
self.cov = {'4': 0, '10': 0, '20': 0, '50': 0}
self.total = Counter()
self.raw = 0
def update(self, size):
self.size += size
def save(self, cov, pt):
self.raw += cov
self.total[cov] = pt
for cut in [4, 10, 20, 50]:
if cov > cut:
self.cov[str(cut)] += pt
def save_coverage(self, cov, nt):
if cov > 100:
cov = 100
elif cov > 10:
cov = int(math.ceil(cov / 10.0)) * 10
# self.size += size
self.total[cov] += nt
def write_coverage(self, out_file):
# names = ["region", "size", "sample", "10", "25", "50"]
df = pd.DataFrame({'depth': self.total.keys(), 'nt': self.total.values()})
df["size"] = self.size
df["sample"] = self.sample
df.to_csv(out_file, mode='a', header=False, index=False, sep="\t")
def _noise(self):
m = np.average(map(int, self.total.keys()), weights=self.total.values())
x = []
[x.extend([k] * int(float(v) * self.size)) for k, v in self.total.items()]
sd = np.std(x)
return m, sd
def write_regions(self, out_file):
m, sd = self._noise()
with open(out_file, 'a') as out_handle:
print >>out_handle, "\t".join(map(str, [self.position, self.name, self.raw,
"+", self.size, self.sample, m, sd] + self.cov.values()))
def _get_exome_coverage_stats(fn, sample, out_file, total_cov):
tmp_region = ""
stats = ""
with open(fn) as in_handle:
for line in in_handle:
if line.startswith("all"):
continue
cols = line.strip().split()
cur_region = "_".join(cols[0:3]) if not isinstance(cols[3], str) else "_".join(cols[0:4])
if cur_region != tmp_region:
if tmp_region != "":
stats.write_regions(out_file)
stats = cov_class(cols[-2], cur_region, sample)
stats.position = "\t".join(cols[0:3])
stats.save(int(cols[-4]), float(cols[-1]))
total_cov.save_coverage(int(cols[-4]), int(cols[-3]))
tmp_region = cur_region
total_cov.update(int(cols[-2]))
stats.write_regions(out_file)
return total_cov
def _silence_run(cmd):
do._do_run(cmd, False)
def coverage(data):
AVERAGE_REGION_STRING_LENGTH = 100
bed_file = dd.get_coverage_experimental(data)
if not bed_file:
return data
work_dir = os.path.join(dd.get_work_dir(data), "report", "coverage")
batch_size = max_command_length() / AVERAGE_REGION_STRING_LENGTH
with chdir(work_dir):
in_bam = data['work_bam']
sample = dd.get_sample_name(data)
logger.debug("doing coverage for %s" % sample)
region_bed = pybedtools.BedTool(bed_file)
parse_file = os.path.join(sample + "_coverage.bed")
parse_total_file = os.path.join(sample + "_cov_total.tsv")
if not file_exists(parse_file):
total_cov = cov_class(0, None, sample)
with file_transaction(parse_file) as out_tx:
with open(out_tx, 'w') as out_handle:
HEADER = ["#chrom", "start", "end", "region", "reads",
"strand", "size", "sample", "mean", "sd", "cutoff10",
"cutoff20", "cutoff4", "cutoff50"]
out_handle.write("\t".join(HEADER) + "\n")
with tmpfile() as tx_tmp_file:
lcount = 0
for chunk in robust_partition_all(batch_size, region_bed):
coord_batch = []
line_batch = ""
for line in chunk:
lcount += 1
chrom = line.chrom
start = max(line.start, 0)
end = line.end
coords = "%s:%s-%s" % (chrom, start, end)
coord_batch.append(coords)
line_batch += str(line)
if not coord_batch:
continue
region_file = pybedtools.BedTool(line_batch,
from_string=True).saveas().fn
coord_string = " ".join(coord_batch)
cmd = ("samtools view -b {in_bam} {coord_string} | "
"bedtools coverage -a {region_file} -b - "
"-hist > {tx_tmp_file}")
_silence_run(cmd.format(**locals()))
total_cov = _get_exome_coverage_stats(os.path.abspath(tx_tmp_file), sample, out_tx, total_cov)
logger.debug("Processed %d regions." % lcount)
total_cov.write_coverage(parse_total_file)
data['coverage'] = os.path.abspath(parse_file)
return data
def variants(data):
if not "vrn_file" in data:
return data
in_vcf = data['vrn_file']
work_dir = os.path.join(dd.get_work_dir(data), "report", "variants")
with chdir(work_dir):
in_bam = data['work_bam']
ref_file = dd.get_ref_file(data)
assert ref_file, "Need the reference genome fasta file."
jvm_opts = broad.get_gatk_framework_opts(data['config'])
gatk_jar = config_utils.get_program("gatk", data['config'], "dir")
bed_file = dd.get_variant_regions(data)
sample = dd.get_sample_name(data)
in_bam = data["work_bam"]
cg_file = os.path.join(sample + "_with-gc.vcf.gz")
parse_file = os.path.join(sample + "_gc-depth-parse.tsv")
if not file_exists(cg_file):
with file_transaction(cg_file) as tx_out:
cmd = ("java -jar {gatk_jar}/GenomeAnalysisTK.jar -T VariantAnnotator -R {ref_file} "
"-L {bed_file} -I {in_bam} "
"-A GCContent --variant {in_vcf} --out {tx_out}")
do.run(cmd.format(**locals()), " GC bias for %s" % in_vcf)
if not file_exists(parse_file):
with file_transaction(parse_file) as out_tx:
with open(out_tx, 'w') as out_handle:
print >>out_handle, "CG\tdepth\tsample"
cmd = ("bcftools query -f '[%GC][\\t%DP][\\t%SAMPLE]\\n' -R {bed_file} {cg_file} >> {out_tx}")
do.run(cmd.format(**locals()), " query for %s" % in_vcf)
logger.debug('parsing coverage: %s' % sample)
# return df
return data
| mit | 2,294,521,246,895,444,000 | 40.350282 | 118 | 0.515781 | false |
Kagami/kisa | lib/twisted/runner/test/test_procmon.py | 1 | 18664 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.runner.procmon}.
"""
from twisted.trial import unittest
from twisted.runner.procmon import LoggingProtocol, ProcessMonitor
from twisted.internet.error import (ProcessDone, ProcessTerminated,
ProcessExitedAlready)
from twisted.internet.task import Clock
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactor
class DummyProcess(object):
"""
An incomplete and fake L{IProcessTransport} implementation for testing how
L{ProcessMonitor} behaves when its monitored processes exit.
@ivar _terminationDelay: the delay in seconds after which the DummyProcess
will appear to exit when it receives a TERM signal
"""
pid = 1
proto = None
_terminationDelay = 1
def __init__(self, reactor, executable, args, environment, path,
proto, uid=None, gid=None, usePTY=0, childFDs=None):
self.proto = proto
self._reactor = reactor
self._executable = executable
self._args = args
self._environment = environment
self._path = path
self._uid = uid
self._gid = gid
self._usePTY = usePTY
self._childFDs = childFDs
def signalProcess(self, signalID):
"""
A partial implementation of signalProcess which can only handle TERM and
KILL signals.
- When a TERM signal is given, the dummy process will appear to exit
after L{DummyProcess._terminationDelay} seconds with exit code 0
- When a KILL signal is given, the dummy process will appear to exit
immediately with exit code 1.
@param signalID: The signal name or number to be issued to the process.
@type signalID: C{str}
"""
params = {
"TERM": (self._terminationDelay, 0),
"KILL": (0, 1)
}
if self.pid is None:
raise ProcessExitedAlready()
if signalID in params:
delay, status = params[signalID]
self._signalHandler = self._reactor.callLater(
delay, self.processEnded, status)
def processEnded(self, status):
"""
Deliver the process ended event to C{self.proto}.
"""
self.pid = None
statusMap = {
0: ProcessDone,
1: ProcessTerminated,
}
self.proto.processEnded(Failure(statusMap[status](status)))
class DummyProcessReactor(MemoryReactor, Clock):
"""
@ivar spawnedProcesses: a list that keeps track of the fake process
instances built by C{spawnProcess}.
@type spawnedProcesses: C{list}
"""
def __init__(self):
MemoryReactor.__init__(self)
Clock.__init__(self)
self.spawnedProcesses = []
def spawnProcess(self, processProtocol, executable, args=(), env={},
path=None, uid=None, gid=None, usePTY=0,
childFDs=None):
"""
Fake L{reactor.spawnProcess}, that logs all the process
arguments and returns a L{DummyProcess}.
"""
proc = DummyProcess(self, executable, args, env, path,
processProtocol, uid, gid, usePTY, childFDs)
processProtocol.makeConnection(proc)
self.spawnedProcesses.append(proc)
return proc
class ProcmonTests(unittest.TestCase):
"""
Tests for L{ProcessMonitor}.
"""
def setUp(self):
"""
Create an L{ProcessMonitor} wrapped around a fake reactor.
"""
self.reactor = DummyProcessReactor()
self.pm = ProcessMonitor(reactor=self.reactor)
self.pm.minRestartDelay = 2
self.pm.maxRestartDelay = 10
self.pm.threshold = 10
def test_getStateIncludesProcesses(self):
"""
The list of monitored processes must be included in the pickle state.
"""
self.pm.addProcess("foo", ["arg1", "arg2"],
uid=1, gid=2, env={})
self.assertEqual(self.pm.__getstate__()['processes'],
{'foo': (['arg1', 'arg2'], 1, 2, {})})
def test_getStateExcludesReactor(self):
"""
The private L{ProcessMonitor._reactor} instance variable should not be
included in the pickle state.
"""
self.assertNotIn('_reactor', self.pm.__getstate__())
def test_addProcess(self):
"""
L{ProcessMonitor.addProcess} only starts the named program if
L{ProcessMonitor.startService} has been called.
"""
self.pm.addProcess("foo", ["arg1", "arg2"],
uid=1, gid=2, env={})
self.assertEqual(self.pm.protocols, {})
self.assertEqual(self.pm.processes,
{"foo": (["arg1", "arg2"], 1, 2, {})})
self.pm.startService()
self.reactor.advance(0)
self.assertEqual(self.pm.protocols.keys(), ["foo"])
def test_addProcessDuplicateKeyError(self):
"""
L{ProcessMonitor.addProcess} raises a C{KeyError} if a process with the
given name already exists.
"""
self.pm.addProcess("foo", ["arg1", "arg2"],
uid=1, gid=2, env={})
self.assertRaises(KeyError, self.pm.addProcess,
"foo", ["arg1", "arg2"], uid=1, gid=2, env={})
def test_addProcessEnv(self):
"""
L{ProcessMonitor.addProcess} takes an C{env} parameter that is passed to
L{IReactorProcess.spawnProcess}.
"""
fakeEnv = {"KEY": "value"}
self.pm.startService()
self.pm.addProcess("foo", ["foo"], uid=1, gid=2, env=fakeEnv)
self.reactor.advance(0)
self.assertEqual(
self.reactor.spawnedProcesses[0]._environment, fakeEnv)
def test_removeProcess(self):
"""
L{ProcessMonitor.removeProcess} removes the process from the public
processes list.
"""
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
self.assertEqual(len(self.pm.processes), 1)
self.pm.removeProcess("foo")
self.assertEqual(len(self.pm.processes), 0)
def test_removeProcessUnknownKeyError(self):
"""
L{ProcessMonitor.removeProcess} raises a C{KeyError} if the given
process name isn't recognised.
"""
self.pm.startService()
self.assertRaises(KeyError, self.pm.removeProcess, "foo")
def test_startProcess(self):
"""
When a process has been started, an instance of L{LoggingProtocol} will
be added to the L{ProcessMonitor.protocols} dict and the start time of
the process will be recorded in the L{ProcessMonitor.timeStarted}
dictionary.
"""
self.pm.addProcess("foo", ["foo"])
self.pm.startProcess("foo")
self.assertIsInstance(self.pm.protocols["foo"], LoggingProtocol)
self.assertIn("foo", self.pm.timeStarted.keys())
def test_startProcessAlreadyStarted(self):
"""
L{ProcessMonitor.startProcess} silently returns if the named process is
already started.
"""
self.pm.addProcess("foo", ["foo"])
self.pm.startProcess("foo")
self.assertIdentical(None, self.pm.startProcess("foo"))
def test_startProcessUnknownKeyError(self):
"""
L{ProcessMonitor.startProcess} raises a C{KeyError} if the given
process name isn't recognised.
"""
self.assertRaises(KeyError, self.pm.startProcess, "foo")
def test_stopProcessNaturalTermination(self):
"""
L{ProcessMonitor.stopProcess} immediately sends a TERM signal to the
named process.
"""
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
self.assertIn("foo", self.pm.protocols)
# Configure fake process to die 1 second after receiving term signal
timeToDie = self.pm.protocols["foo"].transport._terminationDelay = 1
# Advance the reactor to just before the short lived process threshold
# and leave enough time for the process to die
self.reactor.advance(self.pm.threshold)
# Then signal the process to stop
self.pm.stopProcess("foo")
# Advance the reactor just enough to give the process time to die and
# verify that the process restarts
self.reactor.advance(timeToDie)
# We expect it to be restarted immediately
self.assertEqual(self.reactor.seconds(),
self.pm.timeStarted["foo"])
def test_stopProcessForcedKill(self):
"""
L{ProcessMonitor.stopProcess} kills a process which fails to terminate
naturally within L{ProcessMonitor.killTime} seconds.
"""
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
self.assertIn("foo", self.pm.protocols)
self.reactor.advance(self.pm.threshold)
proc = self.pm.protocols["foo"].transport
# Arrange for the fake process to live longer than the killTime
proc._terminationDelay = self.pm.killTime + 1
self.pm.stopProcess("foo")
# If process doesn't die before the killTime, procmon should
# terminate it
self.reactor.advance(self.pm.killTime - 1)
self.assertEqual(0.0, self.pm.timeStarted["foo"])
self.reactor.advance(1)
# We expect it to be immediately restarted
self.assertEqual(self.reactor.seconds(), self.pm.timeStarted["foo"])
def test_stopProcessUnknownKeyError(self):
"""
L{ProcessMonitor.stopProcess} raises a C{KeyError} if the given process
name isn't recognised.
"""
self.assertRaises(KeyError, self.pm.stopProcess, "foo")
def test_stopProcessAlreadyStopped(self):
"""
L{ProcessMonitor.stopProcess} silently returns if the named process
is already stopped. eg Process has crashed and a restart has been
rescheduled, but in the meantime, the service is stopped.
"""
self.pm.addProcess("foo", ["foo"])
self.assertIdentical(None, self.pm.stopProcess("foo"))
def test_connectionLostLongLivedProcess(self):
"""
L{ProcessMonitor.connectionLost} should immediately restart a process
if it has been running longer than L{ProcessMonitor.threshold} seconds.
"""
self.pm.addProcess("foo", ["foo"])
# Schedule the process to start
self.pm.startService()
# advance the reactor to start the process
self.reactor.advance(0)
self.assertIn("foo", self.pm.protocols)
# Long time passes
self.reactor.advance(self.pm.threshold)
# Process dies after threshold
self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
self.assertNotIn("foo", self.pm.protocols)
# Process should be restarted immediately
self.reactor.advance(0)
self.assertIn("foo", self.pm.protocols)
def test_connectionLostMurderCancel(self):
"""
L{ProcessMonitor.connectionLost} cancels a scheduled process killer and
deletes the DelayedCall from the L{ProcessMonitor.murder} list.
"""
self.pm.addProcess("foo", ["foo"])
# Schedule the process to start
self.pm.startService()
# Advance 1s to start the process then ask ProcMon to stop it
self.reactor.advance(1)
self.pm.stopProcess("foo")
# A process killer has been scheduled, delayedCall is active
self.assertIn("foo", self.pm.murder)
delayedCall = self.pm.murder["foo"]
self.assertTrue(delayedCall.active())
# Advance to the point at which the dummy process exits
self.reactor.advance(
self.pm.protocols["foo"].transport._terminationDelay)
# Now the delayedCall has been cancelled and deleted
self.assertFalse(delayedCall.active())
self.assertNotIn("foo", self.pm.murder)
def test_connectionLostProtocolDeletion(self):
"""
L{ProcessMonitor.connectionLost} removes the corresponding
ProcessProtocol instance from the L{ProcessMonitor.protocols} list.
"""
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
self.assertIn("foo", self.pm.protocols)
self.pm.protocols["foo"].transport.signalProcess("KILL")
self.reactor.advance(
self.pm.protocols["foo"].transport._terminationDelay)
self.assertNotIn("foo", self.pm.protocols)
def test_connectionLostMinMaxRestartDelay(self):
"""
L{ProcessMonitor.connectionLost} will wait at least minRestartDelay s
and at most maxRestartDelay s
"""
self.pm.minRestartDelay = 2
self.pm.maxRestartDelay = 3
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay)
self.reactor.advance(self.pm.threshold - 1)
self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
self.assertEqual(self.pm.delay["foo"], self.pm.maxRestartDelay)
def test_connectionLostBackoffDelayDoubles(self):
"""
L{ProcessMonitor.connectionLost} doubles the restart delay each time
the process dies too quickly.
"""
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
self.reactor.advance(self.pm.threshold - 1) #9s
self.assertIn("foo", self.pm.protocols)
self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay)
# process dies within the threshold and should not restart immediately
self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay * 2)
def test_startService(self):
"""
L{ProcessMonitor.startService} starts all monitored processes.
"""
self.pm.addProcess("foo", ["foo"])
# Schedule the process to start
self.pm.startService()
# advance the reactor to start the process
self.reactor.advance(0)
self.assertTrue("foo" in self.pm.protocols)
def test_stopService(self):
"""
L{ProcessMonitor.stopService} should stop all monitored processes.
"""
self.pm.addProcess("foo", ["foo"])
self.pm.addProcess("bar", ["bar"])
# Schedule the process to start
self.pm.startService()
# advance the reactor to start the processes
self.reactor.advance(self.pm.threshold)
self.assertIn("foo", self.pm.protocols)
self.assertIn("bar", self.pm.protocols)
self.reactor.advance(1)
self.pm.stopService()
# Advance to beyond the killTime - all monitored processes
# should have exited
self.reactor.advance(self.pm.killTime + 1)
# The processes shouldn't be restarted
self.assertEqual({}, self.pm.protocols)
def test_stopServiceCancelRestarts(self):
"""
L{ProcessMonitor.stopService} should cancel any scheduled process
restarts.
"""
self.pm.addProcess("foo", ["foo"])
# Schedule the process to start
self.pm.startService()
# advance the reactor to start the processes
self.reactor.advance(self.pm.threshold)
self.assertIn("foo", self.pm.protocols)
self.reactor.advance(1)
# Kill the process early
self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
self.assertTrue(self.pm.restart['foo'].active())
self.pm.stopService()
# Scheduled restart should have been cancelled
self.assertFalse(self.pm.restart['foo'].active())
def test_stopServiceCleanupScheduledRestarts(self):
"""
L{ProcessMonitor.stopService} should cancel all scheduled process
restarts.
"""
self.pm.threshold = 5
self.pm.minRestartDelay = 5
# Start service and add a process (started immediately)
self.pm.startService()
self.pm.addProcess("foo", ["foo"])
# Stop the process after 1s
self.reactor.advance(1)
self.pm.stopProcess("foo")
# Wait 1s for it to exit it will be scheduled to restart 5s later
self.reactor.advance(1)
# Meanwhile stop the service
self.pm.stopService()
# Advance to beyond the process restart time
self.reactor.advance(6)
# The process shouldn't have restarted because stopService has cancelled
# all pending process restarts.
self.assertEqual(self.pm.protocols, {})
def test_activeAttributeEqualsRunning(self):
"""
L{ProcessMonitor.active} unneccessarily duplicates the standard
L{IService.running} flag.
"""
self.assertEqual(self.pm.active, self.pm.running)
self.pm.startService()
self.assertEqual(self.pm.active, self.pm.running)
def test_activeAttributeDeprecation(self):
"""
L{ProcessMonitor.active} unneccessarily duplicates the standard
L{IService.running} flag and is therefore deprecated.
"""
def getActive():
return self.pm.active
expectedMessage = "active is deprecated since Twisted 10.1.0. Use " \
"running instead."
self.assertWarns(DeprecationWarning,
expectedMessage, __file__, getActive)
def test_consistencyAttributeDeprecation(self):
"""
L{ProcessMonitor.consistency} is no longer needed since the removal of
the ProcessMonitor._checkConsistency function and is therefore
deprecated.
"""
def getConsistency():
return self.pm.consistency
expectedMessage = "consistency is deprecated since Twisted 10.1.0."
self.assertWarns(DeprecationWarning,
expectedMessage, __file__, getConsistency)
def test_consistencyDelayAttributeDeprecation(self):
"""
L{ProcessMonitor.consistencyDelay} is no longer needed since the
removal of the ProcessMonitor._checkConsistency function and is
therefore deprecated.
"""
def getConsistencyDelay():
return self.pm.consistencyDelay
expectedMessage = "consistencyDelay is deprecated since Twisted 10.1.0."
self.assertWarns(DeprecationWarning,
expectedMessage, __file__, getConsistencyDelay)
| cc0-1.0 | -1,454,100,166,417,864,700 | 34.348485 | 80 | 0.622857 | false |
boazmohar/pySparkUtils | test/test_change.py | 1 | 2205 | from builtins import str
from pySparkUtils.utils import change
import pytest
pytestmark = pytest.mark.usefixtures("eng")
def test_ip_wait(eng):
with pytest.raises(RuntimeError) as ex:
_ = change(sc=eng, master='local[2]', fail_on_timeout=True, wait='ips', min_ips=10, timeout=4)
assert 'Time out' in str(ex.value)
def test_cores_wait(eng):
eng.stop()
new_sc = change(sc=None, master='local[2]', fail_on_timeout=False, wait='cores', min_cores=2)
assert new_sc.defaultParallelism == 2
new_sc.stop()
def test_cores_wait2(eng):
old_default = eng.defaultParallelism
new_sc = change(sc=eng, master=None, fail_on_timeout=False, wait='cores', min_cores=None)
assert new_sc.defaultParallelism == old_default
new_sc.stop()
def test_cores_wait3(eng):
with pytest.raises(RuntimeError) as ex:
_ = change(sc=eng, master='local[2]', fail_on_timeout=True, wait='cores', min_cores=3, timeout=4)
assert 'Time out' in str(ex.value)
def test_cores_wait4(eng):
eng.stop()
new_sc = change(sc=None, master='local', fail_on_timeout=False, wait='cores', min_cores=None)
assert new_sc.defaultParallelism >= 1
new_sc.stop()
def test_no_input(eng):
with pytest.raises(ValueError) as ex:
change(sc=None, master=None)
assert 'Both master and sc are None' in str(ex.value)
with pytest.raises(ValueError) as ex:
change(sc=eng, master=None, wait='error')
assert 'wait should be' in str(ex.value)
def test_cores(eng):
n_cores = eng.defaultParallelism
new_sc = change(sc=eng)
assert new_sc.defaultParallelism == n_cores
new_sc.stop()
def test_local(eng):
eng.stop()
new_sc = change(sc=None, master='local[2]', fail_on_timeout=False)
assert new_sc.defaultParallelism == 2
new_sc.stop()
def test_args(eng):
old_conf = eng.getConf()
old_value = old_conf.get('spark.rpc.message.maxSize')
if old_value is None:
new_value = u'250'
else:
new_value = str(int(old_value+1))
new_sc = change(sc=eng, spark_rpc_message_maxSize=new_value)
new_conf = new_sc.getConf()
assert new_conf.get('spark.rpc.message.maxSize') == new_value
new_sc.stop()
| mit | 2,964,898,981,211,689,500 | 29.205479 | 105 | 0.662585 | false |
tgquintela/TimeSeriesTools | TimeSeriesTools/TS_statistics/probabilitytools.py | 1 | 2552 |
"""
This module contains functions related with probability and complements the
usual numpy or scipy tools.
"""
import numpy as np
def compute_conditional_probs(probs, marginal_vars):
"""Function which computes the conditional probability from the joint
probability. We have to inform about the dependant variables.
Parameters
----------
probs: array_like
multidimensional array which all the possible combinations of states of
all the variables and the probability of being in each of thie
combinations.
marginal_vars: list or array_like of int
the index of which variables we want to compute the marginal variables.
Returns
-------
p_y_x: array_like
the conditional probability.
"""
## Preparing needed variables
n_vars = len(probs.shape)
dependants = [i for i in range(n_vars) if i not in marginal_vars]
dependants = np.sort(dependants)[::-1]
marginal_vars = np.sort(marginal_vars)[::-1]
n_np = dependants.shape[0]
## Computing dependendants
p_x = compute_marginal_probs(probs, marginal_vars)
## Compute conditioned prob
# Compute swap
swp = np.array([[dependants[i], -i-1] for i in range(n_np)])
# Swap axis
for i in range(swp.shape[0]):
probs = np.swapaxes(probs, swp[i, 0], swp[i, 1])
# Division
p_y_x = np.divide(probs, p_x)
# Reswap axis
for i in range(swp.shape[0]):
p_y_x = np.swapaxes(p_y_x, swp[i, 1], swp[i, 0])
for i in range(swp.shape[0]):
probs = np.swapaxes(probs, swp[i, 1], swp[i, 0])
return p_y_x
def compute_marginal_probs(probs, marginal_vars):
"""Function which computes marginal probabilities given the variables we
want to marginalize.
Parameters
----------
probs: array_like
the joint probability distribution.
marginal_vars: list or array of int
the indexes of the variables to marginalize.
Returns
-------
p_x: array_like
the marginal probability distribution.
"""
## Formatting inputs
# Formatting marginal variables
marginal_vars = np.sort(marginal_vars)[::-1]
## Marginalizing
p_x = probs[:]
for d in marginal_vars:
nstates = p_x.shape[d]
p_x = np.tensordot(np.ones(nstates), p_x, axes=np.array([0, d]))
return p_x
def compute_division_probs(probs1, probs2, correspondences):
"""
"""
return probs
def compute_product_sum():
"""
TODO: Not to sum nan or inf values.
"""
return measure
| mit | 2,569,273,240,266,878,500 | 25.309278 | 79 | 0.635188 | false |
sherzberg/python-beanstalk-api | beanstalk/api/base.py | 1 | 1554 | import requests
import json
class IncorrectSetupException(Exception):
pass
class BeanstalkAuth(object):
_instance = None
def __new__(cls, domain, username, password):
if not cls._instance:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self, domain, username, password):
self.domain = domain
self.username = username
self.password = password
self.api_url = 'https://{0}.beanstalkapp.com/api/'.format(self.domain)
@staticmethod
def get_instance():
if BeanstalkAuth._instance:
return BeanstalkAuth._instance
else:
raise IncorrectSetupException("You need to run beanstalk.setup first!")
class Base():
def _do_request(self, url, method, data):
auth = BeanstalkAuth.get_instance()
request_url = auth.api_url + url
r = getattr(requests, method)(request_url,
data=json.dumps(data),
auth=(auth.username, auth.password),
headers={'content-type': 'application/json'})
r.raise_for_status()
return r.json()
def _do_get(self, url):
return self._do_request(url, 'get', None)
def _do_post(self, url, data):
return self._do_request(url, 'post', data)
def _do_put(self, url, data):
return self._do_request(url, 'put', data)
def _do_delete(self, url, data):
return self._do_request(url, 'delete', data)
| gpl-3.0 | 2,693,403,016,679,041,000 | 27.254545 | 83 | 0.570785 | false |
mehdisadeghi/saga-python | src/saga/adaptors/pbspro/pbsprojob.py | 1 | 47635 | __author__ = "Andre Merzky, Ole Weidner, Mark Santcroos"
__copyright__ = "Copyright 2012-2015, The SAGA Project"
__license__ = "MIT"
""" PBSPro job adaptor implementation
"""
import threading
import saga.url as surl
import saga.utils.pty_shell as sups
import saga.adaptors.base
import saga.adaptors.cpi.job
from saga.job.constants import *
import re
import os
import time
from cgi import parse_qs
SYNC_CALL = saga.adaptors.cpi.decorators.SYNC_CALL
ASYNC_CALL = saga.adaptors.cpi.decorators.ASYNC_CALL
SYNC_WAIT_UPDATE_INTERVAL = 1 # seconds
MONITOR_UPDATE_INTERVAL = 60 # seconds
# --------------------------------------------------------------------
#
class _job_state_monitor(threading.Thread):
""" thread that periodically monitors job states
"""
def __init__(self, job_service):
self.logger = job_service._logger
self.js = job_service
self._stop = threading.Event()
super(_job_state_monitor, self).__init__()
self.setDaemon(True)
def stop(self):
self._stop.set()
def run(self):
# we stop the monitoring thread when we see the same error 3 times in
# a row...
error_type_count = dict()
while not self._stop.is_set ():
try:
# FIXME: do bulk updates here! we don't want to pull information
# job by job. that would be too inefficient!
jobs = self.js.jobs
for job_id in jobs.keys() :
job_info = jobs[job_id]
# we only need to monitor jobs that are not in a
# terminal state, so we can skip the ones that are
# either done, failed or canceled
if job_info['state'] not in [saga.job.DONE, saga.job.FAILED, saga.job.CANCELED] :
new_job_info = self.js._job_get_info(job_id, reconnect=False)
self.logger.info ("Job monitoring thread updating Job %s (state: %s)" \
% (job_id, new_job_info['state']))
# fire job state callback if 'state' has changed
if new_job_info['state'] != job_info['state']:
job_obj = job_info['obj']
job_obj._attributes_i_set('state', new_job_info['state'], job_obj._UP, True)
# update job info
jobs[job_id] = new_job_info
except Exception as e:
import traceback
traceback.print_exc ()
self.logger.warning("Exception caught in job monitoring thread: %s" % e)
# check if we see the same error again and again
error_type = str(e)
if error_type not in error_type_count :
error_type_count = dict()
error_type_count[error_type] = 1
else :
error_type_count[error_type] += 1
if error_type_count[error_type] >= 3 :
self.logger.error("too many monitoring errors -- stopping job monitoring thread")
return
finally :
time.sleep (MONITOR_UPDATE_INTERVAL)
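    # Illustrative usage (a sketch only -- the actual wiring lives in the job
    # service class; the variable names here are assumptions for illustration):
    #
    #   monitor = _job_state_monitor(job_service=self)
    #   monitor.start()   # runs as a daemon thread
    #   ...
    #   monitor.stop()    # ask the monitoring loop to exit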
# --------------------------------------------------------------------
#
def log_error_and_raise(message, exception, logger):
""" logs an 'error' message and subsequently throws an exception
"""
logger.error(message)
raise exception(message)
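# Illustrative usage (a sketch; the exception argument is whichever saga
# exception type fits the error, e.g. saga.NoSuccess):
#
#   log_error_and_raise("qstat failed: %s" % out, saga.NoSuccess, self._logger)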
# --------------------------------------------------------------------
#
def _pbs_to_saga_jobstate(pbsjs):
""" translates a pbs one-letter state to saga
"""
if pbsjs == 'C': # Torque "Job is completed after having run."
return saga.job.DONE
elif pbsjs == 'F': # PBS Pro "Job is finished."
return saga.job.DONE
elif pbsjs == 'H': # PBS Pro and TORQUE "Job is held."
return saga.job.PENDING
    elif pbsjs == 'Q': # PBS Pro and TORQUE "Job is queued (eligible to run or routed)."
return saga.job.PENDING
elif pbsjs == 'S': # PBS Pro and TORQUE "Job is suspended."
return saga.job.PENDING
elif pbsjs == 'W': # PBS Pro and TORQUE "Job is waiting for its execution time to be reached."
return saga.job.PENDING
elif pbsjs == 'R': # PBS Pro and TORQUE "Job is running."
return saga.job.RUNNING
elif pbsjs == 'E': # PBS Pro and TORQUE "Job is exiting after having run"
return saga.job.RUNNING
elif pbsjs == 'T': # PBS Pro and TORQUE "Job is being moved to new location."
# TODO: PENDING?
return saga.job.RUNNING
elif pbsjs == 'X': # PBS Pro "Subjob has completed execution or has been deleted."
return saga.job.CANCELED
else:
return saga.job.UNKNOWN
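# Examples, following the mapping above (illustrative):
#
#   _pbs_to_saga_jobstate('Q')  ->  saga.job.PENDING
#   _pbs_to_saga_jobstate('R')  ->  saga.job.RUNNING
#   _pbs_to_saga_jobstate('C')  ->  saga.job.DONE
#   _pbs_to_saga_jobstate('?')  ->  saga.job.UNKNOWN   (any unrecognised letter)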
# --------------------------------------------------------------------
#
def _pbscript_generator(url, logger, jd, ppn, gres, pbs_version, is_cray=False, queue=None):
""" generates a PBS Pro script from a SAGA job description
"""
pbs_params = str()
exec_n_args = str()
exec_n_args += 'export SAGA_PPN=%d\n' % ppn
if jd.executable:
exec_n_args += "%s " % (jd.executable)
if jd.arguments:
for arg in jd.arguments:
exec_n_args += "%s " % (arg)
if jd.name:
pbs_params += "#PBS -N %s \n" % jd.name
if (is_cray is "") or not('Version: 4.2.7' in pbs_version):
# qsub on Cray systems complains about the -V option:
# Warning:
# Your job uses the -V option, which requests that all of your
# current shell environment settings (9913 bytes) be exported to
# it. This is not recommended, as it causes problems for the
# batch environment in some cases.
pbs_params += "#PBS -V \n"
if jd.environment:
pbs_params += "#PBS -v %s\n" % \
','.join (["%s=%s" % (k,v)
for k,v in jd.environment.iteritems()])
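        # e.g. jd.environment = {'FOO': '1', 'BAR': 'baz'} renders (in dict
        # iteration order) as "#PBS -v FOO=1,BAR=baz"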
# apparently this doesn't work with older PBS installations
# if jd.working_directory:
# pbs_params += "#PBS -d %s \n" % jd.working_directory
# a workaround is to do an explicit 'cd'
if jd.working_directory:
workdir_directives = 'export PBS_O_WORKDIR=%s \n' % jd.working_directory
workdir_directives += 'mkdir -p %s\n' % jd.working_directory
workdir_directives += 'cd %s\n' % jd.working_directory
else:
workdir_directives = ''
if jd.output:
# if working directory is set, we want stdout to end up in
        # the working directory as well, unless it contains a specific
# path name.
if jd.working_directory:
if os.path.isabs(jd.output):
pbs_params += "#PBS -o %s \n" % jd.output
else:
# user provided a relative path for STDOUT. in this case
                # we prepend the working directory path before passing
# it on to PBS
pbs_params += "#PBS -o %s/%s \n" % (jd.working_directory, jd.output)
else:
pbs_params += "#PBS -o %s \n" % jd.output
if jd.error:
# if working directory is set, we want stderr to end up in
# the working directory as well, unless it contains a specific
# path name.
if jd.working_directory:
if os.path.isabs(jd.error):
pbs_params += "#PBS -e %s \n" % jd.error
else:
                # user provided a relative path for STDERR. in this case
                # we prepend the working directory path before passing
# it on to PBS
pbs_params += "#PBS -e %s/%s \n" % (jd.working_directory, jd.error)
else:
pbs_params += "#PBS -e %s \n" % jd.error
if jd.wall_time_limit:
hours = jd.wall_time_limit / 60
minutes = jd.wall_time_limit % 60
pbs_params += "#PBS -l walltime=%s:%s:00 \n" \
% (str(hours), str(minutes))
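        # e.g. jd.wall_time_limit = 90 (minutes) renders as
        # "#PBS -l walltime=1:30:00"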
if jd.queue and queue:
pbs_params += "#PBS -q %s \n" % queue
elif jd.queue and not queue:
pbs_params += "#PBS -q %s \n" % jd.queue
elif queue and not jd.queue:
pbs_params += "#PBS -q %s \n" % queue
if jd.project:
if 'PBSPro_1' in pbs_version:
# On PBS Pro we set both -P(roject) and -A(accounting),
# as we don't know what the admins decided, and just
# pray that this doesn't create problems.
pbs_params += "#PBS -P %s \n" % str(jd.project)
pbs_params += "#PBS -A %s \n" % str(jd.project)
else:
# Torque
pbs_params += "#PBS -A %s \n" % str(jd.project)
if jd.job_contact:
pbs_params += "#PBS -m abe \n"
# if total_cpu_count is not defined, we assume 1
if not jd.total_cpu_count:
jd.total_cpu_count = 1
# Request enough nodes to cater for the number of cores requested
nnodes = jd.total_cpu_count / ppn
if jd.total_cpu_count % ppn > 0:
nnodes += 1
# We use the ncpus value for systems that need to specify ncpus as multiple of PPN
ncpus = nnodes * ppn
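    # Worked example of the round-up above: with total_cpu_count=10 and ppn=4,
    # 10 / 4 gives nnodes=2, the remainder of 2 bumps it to nnodes=3, and
    # ncpus = 3 * 4 = 12 is what gets requested on systems that expect ncpus
    # to be a multiple of PPN.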
# Node properties are appended to the nodes argument in the resource_list.
node_properties = []
# Parse candidate_hosts
#
# Currently only implemented for "bigflash" on Gordon@SDSC
# https://github.com/radical-cybertools/saga-python/issues/406
#
if jd.candidate_hosts:
if 'BIG_FLASH' in jd.candidate_hosts:
node_properties.append('bigflash')
else:
raise saga.NotImplemented("This type of 'candidate_hosts' not implemented: '%s'" % jd.candidate_hosts)
    if is_cray != "":
# Special cases for PBS/TORQUE on Cray. Different PBSes,
# different flags. A complete nightmare...
if 'PBSPro_10' in pbs_version:
logger.info("Using Cray XT (e.g. Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'PBSPro_12' in pbs_version:
logger.info("Using Cray XT (e.g. Archer) specific '#PBS -l select=xx' flags (PBSPro_12).")
pbs_params += "#PBS -l select=%d\n" % nnodes
elif '4.2.6' in pbs_version:
logger.info("Using Titan (Cray XP) specific '#PBS -l nodes=xx'")
pbs_params += "#PBS -l nodes=%d\n" % nnodes
elif '4.2.7' in pbs_version:
logger.info("Using Cray XT @ NERSC (e.g. Edison) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'Version: 5.' in pbs_version:
logger.info("Using TORQUE 5.x notation '#PBS -l procs=XX' ")
pbs_params += "#PBS -l procs=%d\n" % jd.total_cpu_count
else:
logger.info("Using Cray XT (e.g. Kraken, Jaguar) specific '#PBS -l size=xx' flags (TORQUE).")
pbs_params += "#PBS -l size=%s\n" % jd.total_cpu_count
elif 'version: 2.3.13' in pbs_version:
# e.g. Blacklight
# TODO: The more we add, the more it screams for a refactoring
pbs_params += "#PBS -l ncpus=%d\n" % ncpus
elif '4.2.7' in pbs_version:
logger.info("Using Cray XT @ NERSC (e.g. Hopper) specific '#PBS -l mppwidth=xx' flags (PBSPro_10).")
pbs_params += "#PBS -l mppwidth=%s \n" % jd.total_cpu_count
elif 'PBSPro_12' in pbs_version:
logger.info("Using PBSPro 12 notation '#PBS -l select=XX' ")
pbs_params += "#PBS -l select=%d\n" % (nnodes)
else:
# Default case, i.e, standard HPC cluster (non-Cray)
# If we want just a slice of one node
if jd.total_cpu_count < ppn:
ppn = jd.total_cpu_count
pbs_params += "#PBS -l nodes=%d:ppn=%d%s\n" % (
nnodes, ppn, ''.join([':%s' % prop for prop in node_properties]))
# Process Generic Resource specification request
if gres:
pbs_params += "#PBS -l gres=%s\n" % gres
    # escape all double quotes and dollar signs, otherwise 'echo |'
# further down won't work
# only escape '$' in args and exe. not in the params
exec_n_args = workdir_directives + exec_n_args
exec_n_args = exec_n_args.replace('$', '\\$')
pbscript = "\n#!/bin/bash \n%s%s" % (pbs_params, exec_n_args)
pbscript = pbscript.replace('"', '\\"')
return pbscript
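# Rough sketch of the script this generator emits for a simple job description
# (illustrative only -- the exact resource directives depend on the detected
# PBS/TORQUE version, and all paths, names and sizes below are made up):
#
#   #!/bin/bash
#   #PBS -N my_job
#   #PBS -V
#   #PBS -o /home/user/run/output.log
#   #PBS -e /home/user/run/error.log
#   #PBS -l walltime=1:30:00
#   #PBS -q batch
#   #PBS -l nodes=2:ppn=16
#   export PBS_O_WORKDIR=/home/user/run
#   mkdir -p /home/user/run
#   cd /home/user/run
#   export SAGA_PPN=16
#   /bin/sleep 60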
# --------------------------------------------------------------------
# some private defs
#
_PTY_TIMEOUT = 2.0
# --------------------------------------------------------------------
# the adaptor name
#
_ADAPTOR_NAME = "saga.adaptor.pbsprojob"
_ADAPTOR_SCHEMAS = ["pbspro", "pbspro+ssh", "pbspro+gsissh"]
_ADAPTOR_OPTIONS = []
# --------------------------------------------------------------------
# the adaptor capabilities & supported attributes
#
_ADAPTOR_CAPABILITIES = {
"jdes_attributes": [saga.job.NAME,
saga.job.EXECUTABLE,
saga.job.ARGUMENTS,
saga.job.CANDIDATE_HOSTS,
saga.job.ENVIRONMENT,
saga.job.INPUT,
saga.job.OUTPUT,
saga.job.ERROR,
saga.job.QUEUE,
saga.job.PROJECT,
saga.job.WALL_TIME_LIMIT,
saga.job.WORKING_DIRECTORY,
saga.job.WALL_TIME_LIMIT,
saga.job.SPMD_VARIATION, # TODO: 'hot'-fix for BigJob
saga.job.PROCESSES_PER_HOST,
saga.job.TOTAL_CPU_COUNT],
"job_attributes": [saga.job.EXIT_CODE,
saga.job.EXECUTION_HOSTS,
saga.job.CREATED,
saga.job.STARTED,
saga.job.FINISHED],
"metrics": [saga.job.STATE],
"callbacks": [saga.job.STATE],
"contexts": {"ssh": "SSH public/private keypair",
"x509": "GSISSH X509 proxy context",
"userpass": "username/password pair (ssh)"}
}
# --------------------------------------------------------------------
# the adaptor documentation
#
_ADAPTOR_DOC = {
"name": _ADAPTOR_NAME,
"cfg_options": _ADAPTOR_OPTIONS,
"capabilities": _ADAPTOR_CAPABILITIES,
"description": """
The PBSPro adaptor allows to run and manage jobs on `PBS <http://www.pbsworks.com/>`_
controlled HPC clusters.
""",
"example": "examples/jobs/pbsjob.py",
"schemas": {"pbspro": "connect to a local cluster",
"pbspro+ssh": "connect to a remote cluster via SSH",
"pbspro+gsissh": "connect to a remote cluster via GSISSH"}
}
# --------------------------------------------------------------------
# the adaptor info is used to register the adaptor with SAGA
#
_ADAPTOR_INFO = {
"name" : _ADAPTOR_NAME,
"version" : "v0.1",
"schemas" : _ADAPTOR_SCHEMAS,
"capabilities": _ADAPTOR_CAPABILITIES,
"cpis": [
{
"type": "saga.job.Service",
"class": "PBSProJobService"
},
{
"type": "saga.job.Job",
"class": "PBSProJob"
}
]
}
###############################################################################
# The adaptor class
class Adaptor (saga.adaptors.base.Base):
""" this is the actual adaptor class, which gets loaded by SAGA (i.e. by
the SAGA engine), and which registers the CPI implementation classes
which provide the adaptor's functionality.
"""
# ----------------------------------------------------------------
#
def __init__(self):
saga.adaptors.base.Base.__init__(self, _ADAPTOR_INFO, _ADAPTOR_OPTIONS)
self.id_re = re.compile('^\[(.*)\]-\[(.*?)\]$')
self.opts = self.get_config (_ADAPTOR_NAME)
# ----------------------------------------------------------------
#
def sanity_check(self):
# FIXME: also check for gsissh
pass
# ----------------------------------------------------------------
#
def parse_id(self, id):
# split the id '[rm]-[pid]' in its parts, and return them.
match = self.id_re.match(id)
if not match or len(match.groups()) != 2:
raise saga.BadParameter("Cannot parse job id '%s'" % id)
return (match.group(1), match.group(2))
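        # Example (illustrative, hostname made up): an id such as
        #   "[pbspro+ssh://cluster.example.org]-[12345]"
        # is split into
        #   ("pbspro+ssh://cluster.example.org", "12345")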
###############################################################################
#
class PBSProJobService (saga.adaptors.cpi.job.Service):
""" implements saga.adaptors.cpi.job.Service
"""
# ----------------------------------------------------------------
#
def __init__(self, api, adaptor):
self._mt = None
_cpi_base = super(PBSProJobService, self)
_cpi_base.__init__(api, adaptor)
self._adaptor = adaptor
# ----------------------------------------------------------------
#
def __del__(self):
self.close()
# ----------------------------------------------------------------
#
def close(self):
if self.mt :
self.mt.stop()
self.mt.join(10) # don't block forever on join()
self._logger.info("Job monitoring thread stopped.")
self.finalize(True)
# ----------------------------------------------------------------
#
def finalize(self, kill_shell=False):
if kill_shell :
if self.shell :
self.shell.finalize (True)
# ----------------------------------------------------------------
#
@SYNC_CALL
def init_instance(self, adaptor_state, rm_url, session):
""" service instance constructor
"""
self.rm = rm_url
self.session = session
self.ppn = None
self.is_cray = ""
self.queue = None
self.shell = None
self.jobs = dict()
self.gres = None
# the monitoring thread - one per service instance
self.mt = _job_state_monitor(job_service=self)
self.mt.start()
rm_scheme = rm_url.scheme
pty_url = surl.Url(rm_url)
# this adaptor supports options that can be passed via the
# 'query' component of the job service URL.
if rm_url.query:
for key, val in parse_qs(rm_url.query).iteritems():
if key == 'queue':
self.queue = val[0]
elif key == 'craytype':
self.is_cray = val[0]
elif key == 'ppn':
self.ppn = int(val[0])
elif key == 'gres':
self.gres = val[0]
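        # Example (illustrative, hostname made up): a job service URL like
        #   pbspro+ssh://cluster.example.org/?queue=batch&ppn=16&gres=gpu
        # sets self.queue='batch', self.ppn=16 and self.gres='gpu' here.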
# we need to extract the scheme for PTYShell. That's basically the
# job.Service Url without the pbs+ part. We use the PTYShell to execute
# pbs commands either locally or via gsissh or ssh.
if rm_scheme == "pbspro":
pty_url.scheme = "fork"
elif rm_scheme == "pbspro+ssh":
pty_url.scheme = "ssh"
elif rm_scheme == "pbspro+gsissh":
pty_url.scheme = "gsissh"
# these are the commands that we need in order to interact with PBS.
# the adaptor will try to find them during initialize(self) and bail
        # out in case they are not available.
self._commands = {'pbsnodes': None,
'qstat': None,
'qsub': None,
'qdel': None}
self.shell = sups.PTYShell(pty_url, self.session)
# self.shell.set_initialize_hook(self.initialize)
# self.shell.set_finalize_hook(self.finalize)
self.initialize()
return self.get_api()
# ----------------------------------------------------------------
#
def initialize(self):
# check if all required pbs tools are available
for cmd in self._commands.keys():
ret, out, _ = self.shell.run_sync("which %s " % cmd)
if ret != 0:
message = "Error finding PBS tools: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
path = out.strip() # strip removes newline
if cmd == 'qdel': # qdel doesn't support --version!
self._commands[cmd] = {"path": path,
"version": "?"}
elif cmd == 'qsub': # qsub doesn't always support --version!
self._commands[cmd] = {"path": path,
"version": "?"}
else:
ret, out, _ = self.shell.run_sync("%s --version" % cmd)
if ret != 0:
message = "Error finding PBS tools: %s" % out
log_error_and_raise(message, saga.NoSuccess,
self._logger)
else:
# version is reported as: "version: x.y.z"
version = out#.strip().split()[1]
# add path and version to the command dictionary
self._commands[cmd] = {"path": path,
"version": version}
self._logger.info("Found PBS tools: %s" % self._commands)
#
        # TODO: Get rid of this, as I don't think there is any justification that Crays are special
#
# let's try to figure out if we're working on a Cray machine.
# naively, we assume that if we can find the 'aprun' command in the
# path that we're logged in to a Cray machine.
if self.is_cray == "":
ret, out, _ = self.shell.run_sync('which aprun')
if ret != 0:
self.is_cray = ""
else:
self._logger.info("Host '%s' seems to be a Cray machine." \
% self.rm.host)
self.is_cray = "unknowncray"
else:
self._logger.info("Assuming host is a Cray since 'craytype' is set to: %s" % self.is_cray)
#
# Get number of processes per node
#
if self.ppn:
self._logger.debug("Using user specified 'ppn': %d" % self.ppn)
return
# TODO: this is quite a hack. however, it *seems* to work quite
# well in practice.
if 'PBSPro_12' in self._commands['qstat']['version']:
ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "resources_available.ncpus"' % \
self._commands['pbsnodes']['path'])
else:
ret, out, _ = self.shell.run_sync('unset GREP_OPTIONS; %s -a | grep -E "(np|pcpu)[[:blank:]]*=" ' % \
self._commands['pbsnodes']['path'])
if ret != 0:
message = "Error running pbsnodes: %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# this is black magic. we just assume that the highest occurrence
# of a specific np is the number of processors (cores) per compute
# node. this equals max "PPN" for job scripts
ppn_list = dict()
for line in out.split('\n'):
np = line.split(' = ')
if len(np) == 2:
np_str = np[1].strip()
if np_str == '<various>':
continue
else:
np = int(np_str)
if np in ppn_list:
ppn_list[np] += 1
else:
ppn_list[np] = 1
self.ppn = max(ppn_list, key=ppn_list.get)
self._logger.debug("Found the following 'ppn' configurations: %s. "
"Using %s as default ppn." % (ppn_list, self.ppn))
# ----------------------------------------------------------------
#
def _job_run(self, job_obj):
""" runs a job via qsub
"""
# get the job description
jd = job_obj.get_description()
# normalize working directory path
if jd.working_directory :
jd.working_directory = os.path.normpath (jd.working_directory)
# TODO: Why would one want this?
if self.queue and jd.queue:
self._logger.warning("Job service was instantiated explicitly with \
'queue=%s', but job description tries to use a different queue: '%s'. Using '%s'." %
(self.queue, jd.queue, self.queue))
try:
# create a PBS job script from SAGA job description
script = _pbscript_generator(url=self.rm, logger=self._logger,
jd=jd, ppn=self.ppn, gres=self.gres,
pbs_version=self._commands['qstat']['version'],
is_cray=self.is_cray, queue=self.queue,
)
self._logger.info("Generated PBS script: %s" % script)
except Exception, ex:
log_error_and_raise(str(ex), saga.BadParameter, self._logger)
# try to create the working directory (if defined)
# WARNING: this assumes a shared filesystem between login node and
# compute nodes.
if jd.working_directory:
self._logger.info("Creating working directory %s" % jd.working_directory)
ret, out, _ = self.shell.run_sync("mkdir -p %s" % (jd.working_directory))
if ret != 0:
# something went wrong
message = "Couldn't create working directory - %s" % (out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
# Now we want to execute the script. This process consists of two steps:
# (1) we create a temporary file with 'mktemp' and write the contents of
# the generated PBS script into it
# (2) we call 'qsub <tmpfile>' to submit the script to the queueing system
cmdline = """SCRIPTFILE=`mktemp -t SAGA-Python-PBSProJobScript.XXXXXX` && echo "%s" > $SCRIPTFILE && %s $SCRIPTFILE && rm -f $SCRIPTFILE""" % (script, self._commands['qsub']['path'])
ret, out, _ = self.shell.run_sync(cmdline)
if ret != 0:
# something went wrong
message = "Error running job via 'qsub': %s. Commandline was: %s" \
% (out, cmdline)
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# parse the job id. qsub usually returns just the job id, but
# sometimes there are a couple of lines of warnings before.
# if that's the case, we log those as 'warnings'
lines = out.split('\n')
            lines = [line for line in lines if line != '']  # remove empty lines
if len(lines) > 1:
self._logger.warning('qsub: %s' % ''.join(lines[:-2]))
            # we assume the job id is in the last line
#print cmdline
#print out
job_id = "[%s]-[%s]" % (self.rm, lines[-1].strip().split('.')[0])
self._logger.info("Submitted PBS job with id: %s" % job_id)
state = saga.job.PENDING
# populate job info dict
self.jobs[job_id] = {'obj' : job_obj,
'job_id' : job_id,
'state' : state,
'exec_hosts' : None,
'returncode' : None,
'create_time' : None,
'start_time' : None,
'end_time' : None,
'gone' : False
}
self._logger.info ("assign job id %s / %s / %s to watch list (%s)" \
% (None, job_id, job_obj, self.jobs.keys()))
# set status to 'pending' and manually trigger callback
job_obj._attributes_i_set('state', state, job_obj._UP, True)
# return the job id
return job_id
# ----------------------------------------------------------------
#
def _retrieve_job(self, job_id):
""" see if we can get some info about a job that we don't
know anything about
"""
# rm, pid = self._adaptor.parse_id(job_id)
# # run the PBS 'qstat' command to get some infos about our job
# if 'PBSPro_1' in self._commands['qstat']['version']:
# qstat_flag = '-f'
# else:
# qstat_flag ='-f1'
#
# ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "\
# "grep -E -i '(job_state)|(exec_host)|(exit_status)|(ctime)|"\
# "(start_time)|(comp_time)|(stime)|(qtime)|(mtime)'" \
# % (self._commands['qstat']['path'], qstat_flag, pid))
# if ret != 0:
# message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
# log_error_and_raise(message, saga.NoSuccess, self._logger)
# else:
# # the job seems to exist on the backend. let's gather some data
# job_info = {
# 'job_id': job_id,
# 'state': saga.job.UNKNOWN,
# 'exec_hosts': None,
# 'returncode': None,
# 'create_time': None,
# 'start_time': None,
# 'end_time': None,
# 'gone': False
# }
#
# job_info = self._parse_qstat(out, job_info)
#
# return job_info
# ----------------------------------------------------------------
#
def _job_get_info(self, job_id, reconnect):
""" Get job information attributes via qstat.
"""
# If we don't have the job in our dictionary, we don't want it,
# unless we are trying to reconnect.
if not reconnect and job_id not in self.jobs:
message = "Unknown job id: %s. Can't update state." % job_id
log_error_and_raise(message, saga.NoSuccess, self._logger)
if not reconnect:
# job_info contains the info collect when _job_get_info
# was called the last time
job_info = self.jobs[job_id]
# if the 'gone' flag is set, there's no need to query the job
# state again. it's gone forever
if job_info['gone'] is True:
return job_info
else:
# Create a template data structure
job_info = {
'job_id': job_id,
'state': saga.job.UNKNOWN,
'exec_hosts': None,
'returncode': None,
'create_time': None,
'start_time': None,
'end_time': None,
'gone': False
}
rm, pid = self._adaptor.parse_id(job_id)
        # run the PBS 'qstat' command to get some info about our job
# TODO: create a PBSPRO/TORQUE flag once
if 'PBSPro_1' in self._commands['qstat']['version']:
qstat_flag = '-fx'
else:
qstat_flag ='-f1'
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s %s %s | "
"grep -E -i '(job_state)|(exec_host)|(exit_status)|"
"(ctime)|(start_time)|(stime)|(mtime)'"
% (self._commands['qstat']['path'], qstat_flag, pid))
if ret != 0:
if reconnect:
message = "Couldn't reconnect to job '%s': %s" % (job_id, out)
log_error_and_raise(message, saga.NoSuccess, self._logger)
if ("Unknown Job Id" in out):
# Let's see if the last known job state was running or pending. in
# that case, the job is gone now, which can either mean DONE,
# or FAILED. the only thing we can do is set it to 'DONE'
job_info['gone'] = True
# TODO: we can also set the end time?
self._logger.warning("Previously running job has disappeared. "
"This probably means that the backend doesn't store "
"information about finished jobs. Setting state to 'DONE'.")
if job_info['state'] in [saga.job.RUNNING, saga.job.PENDING]:
job_info['state'] = saga.job.DONE
else:
# TODO: This is an uneducated guess?
job_info['state'] = saga.job.FAILED
else:
# something went wrong
message = "Error retrieving job info via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
else:
# The job seems to exist on the backend. let's process some data.
# TODO: make the parsing "contextual", in the sense that it takes
# the state into account.
# parse the egrep result. this should look something like this:
# job_state = C
# exec_host = i72/0
# exit_status = 0
results = out.split('\n')
for line in results:
if len(line.split('=')) == 2:
key, val = line.split('=')
key = key.strip()
val = val.strip()
# The ubiquitous job state
if key in ['job_state']: # PBS Pro and TORQUE
job_info['state'] = _pbs_to_saga_jobstate(val)
# Hosts where the job ran
elif key in ['exec_host']: # PBS Pro and TORQUE
job_info['exec_hosts'] = val.split('+') # format i73/7+i73/6+...
# Exit code of the job
elif key in ['exit_status', # TORQUE
'Exit_status' # PBS Pro
]:
job_info['returncode'] = int(val)
# Time job got created in the queue
elif key in ['ctime']: # PBS Pro and TORQUE
job_info['create_time'] = val
# Time job started to run
elif key in ['start_time', # TORQUE
'stime' # PBS Pro
]:
job_info['start_time'] = val
# Time job ended.
#
# PBS Pro doesn't have an "end time" field.
                    # It has a "resources_used.walltime" though,
# which could be added up to the start time.
# We will not do that arithmetic now though.
#
# Alternatively, we can use mtime, as the latest
# modification time will generally also be the end time.
#
                    # TORQUE has a "comp_time" (completion? time) field,
# that is generally the same as mtime at the finish.
#
# For the time being we will use mtime as end time for
# both TORQUE and PBS Pro.
#
if key in ['mtime']: # PBS Pro and TORQUE
job_info['end_time'] = val
# return the updated job info
return job_info
def _parse_qstat(self, haystack, job_info):
# return the new job info dict
return job_info
# ----------------------------------------------------------------
#
def _job_get_state(self, job_id):
""" get the job's state
"""
return self.jobs[job_id]['state']
# ----------------------------------------------------------------
#
def _job_get_exit_code(self, job_id):
""" get the job's exit code
"""
ret = self.jobs[job_id]['returncode']
# FIXME: 'None' should cause an exception
if ret == None : return None
else : return int(ret)
# ----------------------------------------------------------------
#
def _job_get_execution_hosts(self, job_id):
""" get the job's exit code
"""
return self.jobs[job_id]['exec_hosts']
# ----------------------------------------------------------------
#
def _job_get_create_time(self, job_id):
""" get the job's creation time
"""
return self.jobs[job_id]['create_time']
# ----------------------------------------------------------------
#
def _job_get_start_time(self, job_id):
""" get the job's start time
"""
return self.jobs[job_id]['start_time']
# ----------------------------------------------------------------
#
def _job_get_end_time(self, job_id):
""" get the job's end time
"""
return self.jobs[job_id]['end_time']
# ----------------------------------------------------------------
#
def _job_cancel(self, job_id):
""" cancel the job via 'qdel'
"""
rm, pid = self._adaptor.parse_id(job_id)
ret, out, _ = self.shell.run_sync("%s %s\n" \
% (self._commands['qdel']['path'], pid))
if ret != 0:
message = "Error canceling job via 'qdel': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
        # assume the job was successfully canceled
self.jobs[job_id]['state'] = saga.job.CANCELED
# ----------------------------------------------------------------
#
def _job_wait(self, job_id, timeout):
""" wait for the job to finish or fail
"""
time_start = time.time()
time_now = time_start
rm, pid = self._adaptor.parse_id(job_id)
while True:
state = self.jobs[job_id]['state'] # this gets updated in the bg.
if state == saga.job.DONE or \
state == saga.job.FAILED or \
state == saga.job.CANCELED:
return True
# avoid busy poll
time.sleep(SYNC_WAIT_UPDATE_INTERVAL)
# check if we hit timeout
if timeout >= 0:
time_now = time.time()
if time_now - time_start > timeout:
return False
# ----------------------------------------------------------------
#
@SYNC_CALL
def create_job(self, jd):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
"job_description": jd,
"job_schema": self.rm.schema,
"reconnect": False
}
# create and return a new job object
return saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_job(self, job_id):
""" Implements saga.adaptors.cpi.job.Service.get_job()
Re-create job instance from a job-id.
"""
# If we already have the job info, we just pass the current info.
if job_id in self.jobs :
return self.jobs[job_id]['obj']
# Try to get some initial information about this job (again)
job_info = self._job_get_info(job_id, reconnect=True)
# this dict is passed on to the job adaptor class -- use it to pass any
# state information you need there.
adaptor_state = {"job_service": self,
# TODO: fill job description
"job_description": saga.job.Description(),
"job_schema": self.rm.schema,
"reconnect": True,
"reconnect_jobid": job_id
}
job_obj = saga.job.Job(_adaptor=self._adaptor,
_adaptor_state=adaptor_state)
# throw it into our job dictionary.
job_info['obj'] = job_obj
self.jobs[job_id] = job_info
return job_obj
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_url(self):
""" implements saga.adaptors.cpi.job.Service.get_url()
"""
return self.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def list(self):
""" implements saga.adaptors.cpi.job.Service.list()
"""
ids = []
ret, out, _ = self.shell.run_sync("unset GREP_OPTIONS; %s | grep `whoami`" %
self._commands['qstat']['path'])
if ret != 0 and len(out) > 0:
message = "failed to list jobs via 'qstat': %s" % out
log_error_and_raise(message, saga.NoSuccess, self._logger)
elif ret != 0 and len(out) == 0:
# qstat | grep `` exits with 1 if the list is empty
pass
else:
for line in out.split("\n"):
# output looks like this:
# 112059.svc.uc.futuregrid testjob oweidner 0 Q batch
# 112061.svc.uc.futuregrid testjob oweidner 0 Q batch
if len(line.split()) > 1:
job_id = "[%s]-[%s]" % (self.rm, line.split()[0].split('.')[0])
ids.append(str(job_id))
return ids
# # ----------------------------------------------------------------
# #
# def container_run (self, jobs) :
# self._logger.debug ("container run: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.run ()
#
#
# # ----------------------------------------------------------------
# #
# def container_wait (self, jobs, mode, timeout) :
# self._logger.debug ("container wait: %s" % str(jobs))
# # TODO: this is not optimized yet
# for job in jobs:
# job.wait ()
#
#
# # ----------------------------------------------------------------
# #
# def container_cancel (self, jobs) :
# self._logger.debug ("container cancel: %s" % str(jobs))
# raise saga.NoSuccess ("Not Implemented");
###############################################################################
#
class PBSProJob (saga.adaptors.cpi.job.Job):
""" implements saga.adaptors.cpi.job.Job
"""
def __init__(self, api, adaptor):
# initialize parent class
_cpi_base = super(PBSProJob, self)
_cpi_base.__init__(api, adaptor)
def _get_impl(self):
return self
@SYNC_CALL
def init_instance(self, job_info):
""" implements saga.adaptors.cpi.job.Job.init_instance()
"""
# init_instance is called for every new saga.job.Job object
# that is created
self.jd = job_info["job_description"]
self.js = job_info["job_service"]
if job_info['reconnect'] is True:
self._id = job_info['reconnect_jobid']
self._started = True
else:
self._id = None
self._started = False
return self.get_api()
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_state(self):
""" implements saga.adaptors.cpi.job.Job.get_state()
"""
if self._started is False:
return saga.job.NEW
return self.js._job_get_state(job_id=self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def wait(self, timeout):
""" implements saga.adaptors.cpi.job.Job.wait()
"""
if self._started is False:
log_error_and_raise("Can't wait for job that hasn't been started",
saga.IncorrectState, self._logger)
else:
self.js._job_wait(job_id=self._id, timeout=timeout)
# ----------------------------------------------------------------
#
@SYNC_CALL
def cancel(self, timeout):
""" implements saga.adaptors.cpi.job.Job.cancel()
"""
if self._started is False:
log_error_and_raise("Can't wait for job that hasn't been started",
saga.IncorrectState, self._logger)
else:
self.js._job_cancel(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def run(self):
""" implements saga.adaptors.cpi.job.Job.run()
"""
self._id = self.js._job_run(self._api())
self._started = True
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_service_url(self):
""" implements saga.adaptors.cpi.job.Job.get_service_url()
"""
return self.js.rm
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_id(self):
""" implements saga.adaptors.cpi.job.Job.get_id()
"""
return self._id
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_exit_code(self):
""" implements saga.adaptors.cpi.job.Job.get_exit_code()
"""
if self._started is False:
return None
else:
return self.js._job_get_exit_code(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_created(self):
""" implements saga.adaptors.cpi.job.Job.get_created()
"""
if self._started is False:
return None
else:
return self.js._job_get_create_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_started(self):
""" implements saga.adaptors.cpi.job.Job.get_started()
"""
if self._started is False:
return None
else:
return self.js._job_get_start_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_finished(self):
""" implements saga.adaptors.cpi.job.Job.get_finished()
"""
if self._started is False:
return None
else:
return self.js._job_get_end_time(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_execution_hosts(self):
""" implements saga.adaptors.cpi.job.Job.get_execution_hosts()
"""
if self._started is False:
return None
else:
return self.js._job_get_execution_hosts(self._id)
# ----------------------------------------------------------------
#
@SYNC_CALL
def get_description(self):
""" implements saga.adaptors.cpi.job.Job.get_execution_hosts()
"""
return self.jd
| mit | 4,840,097,012,143,292,000 | 36.156786 | 192 | 0.473454 | false |
sentriz/steely | steely/plugins/limp.py | 1 | 3205 | import sys
import limp
import limp.errors
import limp.environment
COMMAND = 'limp'
__author__ = 'byxor'
GLOBAL_DEFINITIONS = {}
def main(bot, author_id, source_code, thread_id, thread_type, **kwargs):
def send(message):
bot.sendMessage(str(message), thread_id=thread_id,
thread_type=thread_type)
def send_error(info, error):
full_error_message = f'\n{type(error).__name__}: {error}'
send(f'{info} {full_error_message}')
def last_message():
return bot.fetchThreadMessages(thread_id=thread_id, limit=2)[1].text
def define_global(name, variable):
send("This is a hack; enjoy.")
GLOBAL_DEFINITIONS[name] = variable
try:
environment = limp.environment.create_standard()
_define({
'send': send,
'last-message': last_message,
'define-global': define_global,
**GLOBAL_DEFINITIONS}, environment)
result = limp.evaluate(source_code, environment)
send(result)
except limp.errors.LimpError as error:
send_error('You got a limp error', error)
except Exception as error:
send_error('Something unexpected happened', error)
send("It's possible that it's your fault.")
def _define(custom_symbols, environment):
for name, value in custom_symbols.items():
try:
environment.define(name, value)
except limp.errors.RedefinedSymbol:
pass
def _generate_help():
def _help():
_FULL_COMMAND = f".{COMMAND}"
_REPOSITORY = f"https://www.github.com/byxor/limp"
_BORDER = '=' * 10
def _CREATE_CODE_EXAMPLE(code):
result = limp.evaluate(code)
message = f"User: {_FULL_COMMAND} {code}"
response = f"ChatBot: {result}"
return message + "\n" + response + "\n\n"
_CREATE_CODE_EXAMPLES = lambda input_examples: "".join(
list(map(_CREATE_CODE_EXAMPLE, input_examples))).strip()
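        # Illustrative output of _CREATE_CODE_EXAMPLE("(+ 1 2)"), assuming limp
        # evaluates that expression to 3:
        #
        #   User: .limp (+ 1 2)
        #   ChatBot: 3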
description = "Evaluate the limp programming language!"
usage = f"Usage: {_FULL_COMMAND} <source_code>"
examples = _CREATE_CODE_EXAMPLES([
"(+ 1 2)",
"(// 100 (- 5 2))",
"((x -> (* x 2)) 10)",
"((x -> (* x 2)) 50)",
"(map (name -> (concatenate \"hi, \" name)) [\"john\" \"jane\" \"bob\"])",
"(do\n (define add (a b -> (+ a b)))\n (add 30 70))",
])
source_code = f"Source code: {_REPOSITORY}"
contributing = f"Want to contribute? Awesome! Make sure you read CONTRIBUTING.md in the repository first."
return "\n\n".join([
description,
usage,
_BORDER,
examples,
_BORDER,
source_code,
contributing,
])
try:
message = _help()
except Exception as e:
global __doc__
message = "The help could not be autogenerated. It's possible that the code examples are outdated and aren't valid syntax anymore. Please inform Brandon."
message += "\n\n"
message += f"Reason: {e}"
sys.modules[__name__].__doc__ = message
_generate_help()
| gpl-3.0 | 194,483,731,133,469,540 | 28.953271 | 162 | 0.558502 | false |
realestate-com-au/harpoon | harpoon/ship/network.py | 1 | 1619 | from docker.errors import APIError as DockerAPIError
import logging
import uuid
log = logging.getLogger("harpoon.ship.network")
class NetworkManager(object):
def __init__(self, docker_api):
self.networks = {}
self.docker_api = docker_api
def register(self, conf, container_name):
if not conf.links:
return
network = self.docker_api.create_network(str(uuid.uuid1()))["Id"]
inside = self.networks[network] = set()
log.info("Created network %s\tlinks=%s", network, [l.pair for l in conf.links])
for link in conf.links:
dep_container_name, link_name = link.pair
inside.add(dep_container_name)
conf.harpoon.docker_api.connect_container_to_network(dep_container_name, network
, aliases = [link_name]
)
conf.harpoon.docker_api.connect_container_to_network(container_name, network)
inside.add(container_name)
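    # Rough usage sketch (container names are made up; the surrounding runner
    # is assumed to drive these calls):
    #
    #   manager = NetworkManager(docker_api)
    #   manager.register(conf, "my_container")   # network + link aliases
    #   ...container runs...
    #   manager.removed("my_container")          # network removed once empty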
def removed(self, container_name):
for network, containers in list(self.networks.items()):
if network not in self.networks:
continue
if container_name in containers:
containers.remove(container_name)
if not containers:
try:
log.info("Removing network %s", network)
self.docker_api.remove_network(network)
except DockerAPIError as error:
log.warning("Failed to remove network %s\terror=%s", network, error)
finally:
del self.networks[network]
| mit | -949,235,595,282,341,400 | 35.795455 | 92 | 0.593576 | false |
calispac/digicampipe | digicampipe/tests/test_simtel_event_source.py | 1 | 1938 | import os
from astropy import units as u
import pkg_resources
from digicampipe.io.simtel import simtel_event_source
from digicampipe.io.event_stream import event_stream, calibration_event_stream
example_file_path = pkg_resources.resource_filename(
'digicampipe',
os.path.join(
'tests',
'resources',
'simtel',
'1_triggered_events_10_TeV.simtel.gz'
)
)
example_file_path_1 = pkg_resources.resource_filename(
'digicampipe',
os.path.join(
'tests',
'resources',
'simtel',
'file-pedestal.simtel.gz'
)
)
EVENT_ID = 102
EVENTS_IN_EXAMPLE_FILE = 1
ENERGY = 10 * u.TeV
def test_and_benchmark_event_source(benchmark):
@benchmark
def loop():
for _ in simtel_event_source(example_file_path):
pass
def test_event_id():
for data in simtel_event_source(example_file_path):
event_id = data.r0.event_id
energy = data.mc.energy
break
assert event_id == EVENT_ID
assert energy == ENERGY
def test_event_stream():
events = event_stream([example_file_path])
for event in events:
event_id = event.r0.event_id
energy = event.mc.energy
break
assert event_id == EVENT_ID
assert energy == ENERGY
def test_event_stream_with_event_id_none():
events = event_stream([example_file_path],
event_id=None)
for _ in events:
pass
def test_event_stream_pedestal_file():
events = event_stream([example_file_path_1])
for _ in events:
pass
def test_calibration_event_stream():
events = calibration_event_stream([example_file_path])
for event in events:
event_id = event.event_id
energy = event.mc.energy
break
assert event_id == EVENT_ID
assert energy == ENERGY
if __name__ == '__main__':
test_event_id()
test_event_stream()
test_calibration_event_stream()
| gpl-3.0 | -3,126,107,879,200,445,000 | 21.022727 | 78 | 0.621775 | false |
polyaxon/polyaxon | core/polyaxon/pql/builder.py | 1 | 15832 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from collections import namedtuple
from typing import Any, Callable, Optional
from polyaxon.exceptions import PolyaxonDateTimeFormatterException, PQLException
from polyaxon.utils.bool_utils import to_bool
from polyaxon.utils.date_utils import DateTimeFormatter
from polyaxon.utils.list_utils import to_list
class QueryCondSpec(namedtuple("QueryCondSpec", "cond params")):
def items(self):
return self._asdict().items()
class QueryBuilder:
"""The `QueryBuild` adds filters to a `QuerySet` from a `params` mapping.
    Filters are a mapping of <name: Condition>, Condition being an object that updates the queryset.
"""
def __init__(self, filters):
self.filters = filters
def build(self, queryset: Any, params: Any) -> Any:
for name, condition in self.filters.items():
if name in params:
queryset = condition.apply(queryset, name, params[name])
return queryset
class BaseCondition:
"""The base condition representing a single filter to apply to a `QuerySet`"""
def apply(
self, queryset: Any, name: str, params: Any, query_backend: Any, timezone: str
):
raise NotImplementedError
class BaseOperatorCondition(BaseCondition):
def __init__(self, op: str, negation: bool = False) -> None:
if op not in self.VALUES and op not in self.REPRESENTATIONS:
raise PQLException(
"Received an invalid operator `{}`, "
"possible values `{}` or `{}`.".format(
op, self.VALUES, self.REPRESENTATIONS
)
)
self.operator = self._get_operator(op, negation)
def __eq__(self, other: "BaseOperatorCondition") -> bool:
return self.operator == other.operator
def apply(
self, queryset: Any, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return queryset.filter(
self.operator(
name=name, params=params, query_backend=query_backend, timezone=timezone
)
)
def apply_operator(
self, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return self.operator(
name=name, params=params, query_backend=query_backend, timezone=timezone
)
class CallbackCondition(BaseCondition):
"""The `CallbackCondition` represents a filter based on a callback to apply."""
def __init__(self, callback: Callable) -> None:
self.callback = callback
self.negation = False
def __call__(self, op, negation: bool = False) -> "CallbackCondition":
self.negation = negation
return self
def apply(
self, queryset: Any, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return self.callback(
queryset,
params,
self.negation,
query_backend=query_backend,
timezone=timezone,
)
def apply_operator(
self, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return self.callback(
query_backend,
params=params,
negation=self.negation,
query_backend=query_backend,
timezone=timezone,
)
class NilCondition(BaseOperatorCondition):
VALUES = {"nil"}
REPRESENTATIONS = {"nil"}
REPRESENTATION_MAPPING = (("nil", "nil"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
if negation:
return cls._not_nil_operator
return cls._nil_operator
@staticmethod
def _nil_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__isnull".format(name)
return query_backend(**{name: True})
@classmethod
def _not_nil_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
    ) -> Any:
name = "{}__isnull".format(name)
return query_backend(**{name: False})
class EqualityCondition(NilCondition):
VALUES = NilCondition.VALUES | {"eq"}
REPRESENTATIONS = NilCondition.REPRESENTATIONS | {"="}
REPRESENTATION_MAPPING = NilCondition.REPRESENTATION_MAPPING + (("=", "eq"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = NilCondition._get_operator(op, negation)
if _op:
return _op
if negation:
return cls._neq_operator
return cls._eq_operator
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
return query_backend(**{name: params})
@classmethod
def _neq_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
    ) -> Any:
return ~cls._eq_operator(name, params, query_backend, timezone)
class BoolCondition(EqualityCondition):
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
return query_backend(**{name: to_bool(params)})
class ComparisonCondition(EqualityCondition):
VALUES = EqualityCondition.VALUES | {"in", "lt", "lte", "gt", "gte"}
REPRESENTATIONS = EqualityCondition.REPRESENTATIONS | {"|", "<", "<=", ">", ">="}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (
("|", "in"),
("<", "lt"),
("<=", "lte"),
(">", "gt"),
(">=", "gte"),
)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = EqualityCondition._get_operator(op, negation)
if _op:
return _op
if op == "lt" or op == "<":
if negation:
return cls._gte_operator
return cls._lt_operator
if op == "lte" or op == "<=":
if negation:
return cls._gt_operator
return cls._lte_operator
if op == "gt" or op == ">":
if negation:
return cls._lte_operator
return cls._gt_operator
if op == "gte" or op == ">=":
if negation:
return cls._lt_operator
return cls._gte_operator
if op == "in" or op == "|":
if negation:
return cls._nin_operator
return cls._in_operator
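    # Illustrative use (a Django-style ``Q`` callable as query_backend is an
    # assumption, not something this module provides):
    #
    #   cond = ComparisonCondition("<", negation=True)  # resolves to _gte_operator
    #   q = cond.apply_operator(name="cpu", params=4,
    #                           query_backend=Q, timezone="UTC")
    #   # q is equivalent to Q(cpu__gte=4)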
@staticmethod
def _lt_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__lt".format(name)
return query_backend(**{name: params})
@staticmethod
def _gt_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__gt".format(name)
return query_backend(**{name: params})
@staticmethod
def _lte_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__lte".format(name)
return query_backend(**{name: params})
@staticmethod
def _gte_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__gte".format(name)
return query_backend(**{name: params})
@staticmethod
def _in_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
assert isinstance(params, (list, tuple))
name = "{}__in".format(name)
return query_backend(**{name: params})
@classmethod
def _nin_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._in_operator(name, params, query_backend, timezone)
class DateTimeCondition(ComparisonCondition):
VALUES = ComparisonCondition.VALUES | {"range"}
REPRESENTATIONS = ComparisonCondition.REPRESENTATIONS | {".."}
REPRESENTATION_MAPPING = ComparisonCondition.REPRESENTATION_MAPPING + (
("..", "range"),
)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
# handle eq in from current class
if op == "eq" or op == "=":
if negation:
return cls._neq_operator
return cls._eq_operator
_op = ComparisonCondition._get_operator(op, negation)
if _op:
return _op
if negation:
return cls._nrange_operator
return cls._range_operator
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
try:
# Check If params is date
DateTimeFormatter.extract_timestamp(
params,
dt_format=DateTimeFormatter.DATE_FORMAT,
timezone=timezone,
)
return query_backend(**{f"{name}__date": params})
except (TypeError, ValueError):
pass
return query_backend(**{name: params})
@staticmethod
def _range_operator(
name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
assert len(params) == 2
try:
start_date = DateTimeFormatter.extract(params[0], timezone)
end_date = DateTimeFormatter.extract(params[1], timezone)
except PolyaxonDateTimeFormatterException as e:
raise PQLException(e)
name = "{}__range".format(name)
return query_backend(**{name: (start_date, end_date)})
@classmethod
def _nrange_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._range_operator(
name, params, query_backend=query_backend, timezone=timezone
)
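    # Illustrative use (``Q`` as query_backend is an assumption):
    #
    #   cond = DateTimeCondition("..")
    #   q = cond.apply_operator(name="created_at",
    #                           params=["2021-01-01", "2021-02-01"],
    #                           query_backend=Q, timezone="UTC")
    #   # q is equivalent to Q(created_at__range=(start, end)), with both
    #   # bounds parsed by DateTimeFormatter in the given timezone.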
class ValueCondition(EqualityCondition):
VALUES = EqualityCondition.VALUES | {"in"}
REPRESENTATIONS = EqualityCondition.REPRESENTATIONS | {"|"}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (("|", "in"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Any:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = EqualityCondition._get_operator(op, negation)
if _op:
return _op
if negation:
return cls._nin_operator
return cls._in_operator
@staticmethod
def _in_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
assert isinstance(params, (list, tuple))
name = "{}__in".format(name)
return query_backend(**{name: params})
@classmethod
def _nin_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._in_operator(name, params, query_backend, timezone)
class SearchCondition(ValueCondition):
VALUES = ValueCondition.VALUES | {"icontains", "istartswith", "iendswith"}
REPRESENTATIONS = ValueCondition.REPRESENTATIONS | {"%%", "%_", "_%"}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (
("%%", "icontains"),
("_%", "istartswith"),
("%_", "iendswith"),
)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Any:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = ValueCondition._get_operator(op, negation)
if _op:
return _op
if op == "%%" or op == "icontains":
if negation:
return cls._ncontains_operator
return cls._contains_operator
if op == "_%" or op == "istartswith":
if negation:
return cls._nstartswith_operator
return cls._startswith_operator
if op == "%_" or op == "iendswith":
if negation:
return cls._nendswith_operator
return cls._endswith_operator
@staticmethod
def _contains_operator(
name: str, params: str, query_backend: Any, timezone: str
) -> Any:
assert isinstance(params, str)
name = "{}__icontains".format(name)
return query_backend(**{name: params})
@classmethod
def _ncontains_operator(
cls, name: str, params: str, query_backend: Any, timezone: str
) -> Any:
return ~cls._contains_operator(name, params, query_backend, timezone)
@staticmethod
def _startswith_operator(
name: str, params: str, query_backend: Any, timezone: str
) -> Any:
assert isinstance(params, str)
name = "{}__istartswith".format(name)
return query_backend(**{name: params})
@classmethod
def _nstartswith_operator(
cls, name: str, params: str, query_backend: Any, timezone: str
) -> Any:
return ~cls._startswith_operator(
name, params, query_backend=query_backend, timezone=timezone
)
@staticmethod
def _endswith_operator(
name: str, params: str, query_backend: Any, timezone: str
) -> Any:
assert isinstance(params, str)
name = "{}__iendswith".format(name)
return query_backend(**{name: params})
@classmethod
def _nendswith_operator(
cls, name: str, params: str, query_backend: Any, timezone: str
) -> Any:
return ~cls._endswith_operator(name, params, query_backend, timezone)
class ArrayCondition(EqualityCondition):
VALUES = EqualityCondition.VALUES | {"in"}
REPRESENTATIONS = EqualityCondition.REPRESENTATIONS | {"|"}
REPRESENTATION_MAPPING = EqualityCondition.REPRESENTATION_MAPPING + (("|", "in"),)
@classmethod
def _get_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if op not in cls.VALUES and op not in cls.REPRESENTATIONS:
return None
_op = cls._get_eq_operator(op, negation)
if _op:
return _op
if negation:
return cls._nin_operator
return cls._in_operator
@classmethod
def _get_eq_operator(cls, op: str, negation: bool = False) -> Optional[Callable]:
if (
op not in EqualityCondition.VALUES
and op not in EqualityCondition.REPRESENTATIONS
):
return None
if negation:
return cls._neq_operator
return cls._eq_operator
@staticmethod
def _eq_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
name = "{}__contains".format(name)
return query_backend(**{name: to_list(params)})
@staticmethod
def _in_operator(name: str, params: Any, query_backend: Any, timezone: str) -> Any:
assert isinstance(params, (list, tuple))
name = "{}__overlap".format(name)
return query_backend(**{name: params})
@classmethod
def _nin_operator(
cls, name: str, params: Any, query_backend: Any, timezone: str
) -> Any:
return ~cls._in_operator(name, params, query_backend, timezone)
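# Illustrative use of ArrayCondition (``Q`` as query_backend is an assumption;
# the field is expected to be an array column):
#
#   cond = ArrayCondition("=")
#   q = cond.apply_operator(name="tags", params="gpu",
#                           query_backend=Q, timezone="UTC")
#   # q is equivalent to Q(tags__contains=["gpu"])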
| apache-2.0 | -8,171,395,019,497,414,000 | 32.052192 | 99 | 0.600682 | false |
rschnapka/bank-payment | account_banking/wizard/banking_transaction_wizard.py | 1 | 19506 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# (C) 2011 Smile (<http://smile.fr>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The banking transaction wizard is linked to a button in the statement line
tree view. It allows the user to undo the duplicate flag, select between
multiple matches or select a manual match.
"""
from openerp.osv import orm, fields
from openerp.tools.translate import _
class banking_transaction_wizard(orm.TransientModel):
_name = 'banking.transaction.wizard'
_description = 'Match transaction'
def create(self, cr, uid, vals, context=None):
"""
Make sure that the statement line has an import transaction
"""
res = super(banking_transaction_wizard, self).create(
cr, uid, vals, context=context)
if res and vals.get('statement_line_id'):
line_pool = self.pool.get('account.bank.statement.line')
line_pool.create_instant_transaction(
cr, uid, vals['statement_line_id'], context=context)
return res
def create_act_window(self, cr, uid, ids, nodestroy=True, context=None):
"""
Return a popup window for this model
"""
if isinstance(ids, (int, long)):
ids = [ids]
return {
'name': self._description,
'view_type': 'form',
'view_mode': 'form',
'res_model': self._name,
'domain': [],
'context': context,
'type': 'ir.actions.act_window',
'target': 'new',
'res_id': ids[0],
'nodestroy': nodestroy,
}
def trigger_match(self, cr, uid, ids, context=None):
"""
Call the automatic matching routine for one or
more bank transactions
"""
if isinstance(ids, (int, long)):
ids = [ids]
import_transaction_obj = self.pool.get('banking.import.transaction')
trans_id = self.read(
cr, uid, ids[0], ['import_transaction_id'],
context=context)['import_transaction_id'][0] # many2one tuple
import_transaction_obj.match(cr, uid, [trans_id], context=context)
return self.create_act_window(cr, uid, ids, context=None)
def write(self, cr, uid, ids, vals, context=None):
"""
Implement a trigger to retrieve the corresponding move line
when the invoice_id changes
"""
statement_line_obj = self.pool.get('account.bank.statement.line')
transaction_obj = self.pool.get('banking.import.transaction')
if not vals or not ids:
return True
wiz = self.browse(cr, uid, ids[0], context=context)
# The following fields get never written
# they are just triggers for manual matching
# which populates regular fields on the transaction
manual_invoice_ids = vals.pop('manual_invoice_ids', [])
manual_move_line_ids = vals.pop('manual_move_line_ids', [])
res = super(banking_transaction_wizard, self).write(
cr, uid, ids, vals, context=context)
wiz.refresh()
# Process the logic of the written values
# An invoice is selected from multiple candidates
if vals and 'invoice_id' in vals:
if (wiz.import_transaction_id.match_type == 'invoice' and
wiz.import_transaction_id.invoice_id):
found = False
# the current value might apply
if (wiz.move_line_id and wiz.move_line_id.invoice and
wiz.move_line_id.invoice == wiz.invoice_id):
found = True
else:
# Otherwise, retrieve the move line for this invoice
                    # Given the arity of the relation, there are always
# multiple possibilities but the move lines here are
# prefiltered for having account_id.type payable/receivable
# and the regular invoice workflow should only come up with
# one of those only.
for move_line in wiz.import_transaction_id.move_line_ids:
if (move_line.invoice ==
wiz.import_transaction_id.invoice_id):
transaction_obj.write(
cr, uid, wiz.import_transaction_id.id,
{'move_line_id': move_line.id, },
context=context
)
statement_line_obj.write(
cr, uid,
wiz.import_transaction_id.statement_line_id.id,
{
'partner_id': (
move_line.partner_id.id or False),
'account_id': move_line.account_id.id,
}, context=context)
found = True
break
# Cannot match the invoice
if not found:
                raise orm.except_orm(
                    _("No entry found for the selected invoice"),
                    _("No entry found for the selected invoice. "
                      "Try manual reconciliation."))
if manual_move_line_ids or manual_invoice_ids:
move_line_obj = self.pool.get('account.move.line')
invoice_obj = self.pool.get('account.invoice')
statement_line_obj = self.pool.get('account.bank.statement.line')
# Rewrite *2many directive notation
if manual_invoice_ids:
manual_invoice_ids = (
[i[1] for i in manual_invoice_ids if i[0] == 4] +
[j for i in manual_invoice_ids if i[0] == 6 for j in i[2]])
if manual_move_line_ids:
manual_move_line_ids = (
[i[1] for i in manual_move_line_ids if i[0] == 4] +
[j for i in manual_move_line_ids
if i[0] == 6 for j in i[2]])
for wiz in self.browse(cr, uid, ids, context=context):
# write can be called multiple times for the same values
# that doesn't hurt above, but it does here
if wiz.match_type and (
len(manual_move_line_ids) > 1 or
len(manual_invoice_ids) > 1):
continue
todo = []
for invoice in invoice_obj.browse(
cr, uid, manual_invoice_ids, context=context):
found_move_line = False
if invoice.move_id:
for line in invoice.move_id.line_id:
if line.account_id.type in ('receivable',
'payable'):
todo.append((invoice.id, line.id))
found_move_line = True
break
if not found_move_line:
raise orm.except_orm(
_("Cannot select for reconcilion"),
_("No entry found for the selected invoice. "))
for move_line_id in manual_move_line_ids:
todo_entry = [False, move_line_id]
move_line = move_line_obj.read(
cr,
uid,
move_line_id,
['invoice'],
context=context
)
if move_line['invoice']:
todo_entry[0] = move_line['invoice'][0]
todo.append(todo_entry)
while todo:
todo_entry = todo.pop()
move_line = move_line_obj.browse(
cr, uid, todo_entry[1], context)
transaction_id = wiz.import_transaction_id.id
statement_line_id = wiz.statement_line_id.id
if len(todo) > 0:
statement_line_id = wiz.statement_line_id.split_off(
move_line.debit or -move_line.credit)[0]
transaction_id = statement_line_obj.browse(
cr,
uid,
statement_line_id,
context=context
).import_transaction_id.id
vals = {
'move_line_id': todo_entry[1],
'move_line_ids': [(6, 0, [todo_entry[1]])],
'invoice_id': todo_entry[0],
'invoice_ids': [
(6, 0, [todo_entry[0]] if todo_entry[0] else [])
],
'match_type': 'manual',
}
transaction_obj.clear_and_write(
cr, uid, transaction_id, vals, context=context)
st_line_vals = {
'account_id': move_line_obj.read(
cr, uid, todo_entry[1],
['account_id'], context=context)['account_id'][0],
}
if todo_entry[0]:
st_line_vals['partner_id'] = invoice_obj.browse(
cr, uid, todo_entry[0], context=context
).partner_id.commercial_partner_id.id
statement_line_obj.write(
cr, uid, statement_line_id,
st_line_vals, context=context)
return res
def trigger_write(self, cr, uid, ids, context=None):
"""
Just a button that triggers a write.
"""
return self.create_act_window(cr, uid, ids, context=None)
def disable_match(self, cr, uid, ids, context=None):
"""
Clear manual and automatic match information
"""
settings_pool = self.pool.get('account.banking.account.settings')
statement_pool = self.pool.get('account.bank.statement.line')
if isinstance(ids, (int, long)):
ids = [ids]
for wiz in self.browse(cr, uid, ids, context=context):
# Get the bank account setting record, to reset the account
account_id = False
journal_id = wiz.statement_line_id.statement_id.journal_id.id
setting_ids = settings_pool.find(
cr, uid, journal_id, context=context
)
# Restore partner id from the bank account or else reset
partner_id = False
if (wiz.statement_line_id.partner_bank_id and
wiz.statement_line_id.partner_bank_id.partner_id):
partner_id = (
wiz.statement_line_id.partner_bank_id.partner_id.id
)
wiz.write({'partner_id': partner_id})
bank_partner = False
if partner_id:
bank_partner = wiz.statement_line_id.partner_bank_id.partner_id
if wiz.amount < 0:
if bank_partner:
account_id = bank_partner.\
def_journal_account_bank_decr()[bank_partner.id]
elif setting_ids:
account_id = settings_pool.browse(
cr, uid, setting_ids[0],
context=context).default_credit_account_id.id
else:
if bank_partner:
account_id = bank_partner.\
def_journal_account_bank_incr()[bank_partner.id]
elif setting_ids:
account_id = settings_pool.browse(
cr, uid, setting_ids[0],
context=context).default_debit_account_id.id
if account_id:
wiz.statement_line_id.write({'account_id': account_id})
if wiz.statement_line_id:
# Delete any split lines; if this was a split transaction,
# this effectively un-splits it
statement_pool.unlink(
cr,
uid,
statement_pool.search(
cr, uid,
[('parent_id', '=', wiz.statement_line_id.id)],
context=context
),
context=context
)
if wiz.import_transaction_id:
wiz.import_transaction_id.clear_and_write()
return self.create_act_window(cr, uid, ids, context=None)
def reverse_duplicate(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
transaction_obj = self.pool.get('banking.import.transaction')
for wiz in self.read(
cr, uid, ids, ['duplicate', 'import_transaction_id'],
context=context):
transaction_obj.write(
cr, uid, wiz['import_transaction_id'][0],
{'duplicate': not wiz['duplicate']}, context=context)
return self.create_act_window(cr, uid, ids, context=None)
def button_done(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close'}
_columns = {
'name': fields.char('Name', size=64),
'statement_line_id': fields.many2one(
'account.bank.statement.line', 'Statement line'),
'amount': fields.related(
'statement_line_id', 'amount', type='float',
string="Amount", readonly=True),
'date': fields.related(
'statement_line_id', 'date', type='date',
string="Date", readonly=True),
'ref': fields.related(
'statement_line_id', 'ref', type='char', size=32,
string="Reference", readonly=True),
'message': fields.related(
'statement_line_id', 'import_transaction_id', 'message',
type='char', size=1024,
string="Message", readonly=True),
'partner_id': fields.related(
'statement_line_id', 'partner_id',
type='many2one', relation='res.partner',
string="Partner", readonly=True),
'statement_line_parent_id': fields.related(
'statement_line_id', 'parent_id', type='many2one',
relation='account.bank.statement.line', readonly=True),
'import_transaction_id': fields.related(
'statement_line_id', 'import_transaction_id',
string="Import transaction",
type='many2one', relation='banking.import.transaction'),
'residual': fields.related(
'import_transaction_id', 'residual', type='float',
string='Residual', readonly=True),
'writeoff_account_id': fields.related(
'import_transaction_id', 'writeoff_account_id',
type='many2one', relation='account.account',
string='Write-off account'),
'invoice_ids': fields.related(
'import_transaction_id', 'invoice_ids', string="Matching invoices",
type='many2many', relation='account.invoice'),
'invoice_id': fields.related(
'import_transaction_id',
'invoice_id',
string="Invoice to reconcile",
type='many2one',
relation='account.invoice',
),
'move_line_ids': fields.related(
'import_transaction_id', 'move_line_ids', string="Entry lines",
type='many2many', relation='account.move.line'),
'move_line_id': fields.related(
'import_transaction_id', 'move_line_id', string="Entry line",
type='many2one', relation='account.move.line'),
'duplicate': fields.related(
'import_transaction_id',
'duplicate',
string='Flagged as duplicate',
type='boolean',
),
'match_multi': fields.related(
'import_transaction_id', 'match_multi',
type="boolean", string='Multiple matches'),
'match_type': fields.related(
'import_transaction_id',
'match_type',
type='selection',
selection=[
('move', 'Move'),
('invoice', 'Invoice'),
('payment', 'Payment line'),
('payment_order', 'Payment order'),
('storno', 'Storno'),
('manual', 'Manual'),
('payment_manual', 'Payment line (manual)'),
('payment_order_manual', 'Payment order (manual)'),
],
string='Match type',
readonly=True,
),
'manual_invoice_ids': fields.many2many(
'account.invoice',
'banking_transaction_wizard_account_invoice_rel',
'wizard_id', 'invoice_id', string='Match one or more invoices',
domain=[('reconciled', '=', False)]),
'manual_move_line_ids': fields.many2many(
'account.move.line',
'banking_transaction_wizard_account_move_line_rel',
'wizard_id', 'move_line_id', string='Or match one or more entries',
domain=[('account_id.reconcile', '=', True),
('reconcile_id', '=', False)]),
'payment_option': fields.related(
'import_transaction_id',
'payment_option',
string='Payment Difference',
type='selection',
required=True,
selection=[
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance')
],
),
'writeoff_analytic_id': fields.related(
'import_transaction_id', 'writeoff_analytic_id',
type='many2one', relation='account.analytic.account',
string='Write-off analytic account'),
'analytic_account_id': fields.related(
'statement_line_id', 'analytic_account_id',
type='many2one', relation='account.analytic.account',
string="Analytic Account"),
'move_currency_amount': fields.related(
'import_transaction_id',
'move_currency_amount',
type='float',
string='Match Currency Amount',
readonly=True,
),
}
| agpl-3.0 | -7,091,693,435,405,808,000 | 42.346667 | 79 | 0.501384 | false |
yephper/django | tests/model_formsets/tests.py | 1 | 79070 | from __future__ import unicode_literals
import datetime
import re
from datetime import date
from decimal import Decimal
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.forms.models import (
BaseModelFormSet, _get_foreign_key, inlineformset_factory,
modelformset_factory,
)
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
AlternateBook, Author, AuthorMeeting, BetterAuthor, Book, BookWithCustomPK,
BookWithOptionalAltEditor, ClassyMexicanRestaurant, CustomPrimaryKey,
Location, Membership, MexicanRestaurant, Owner, OwnerProfile, Person,
Place, Player, Poem, Poet, Post, Price, Product, Repository, Restaurant,
Revision, Team,
)
class DeletionTests(TestCase):
def test_deletion(self):
PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
poet = Poet.objects.create(name='test')
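# Formset POST data must always include the management form values
# (TOTAL_FORMS, INITIAL_FORMS, MAX_NUM_FORMS) next to the per-form fields.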
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': str(poet.pk),
'form-0-name': 'test',
'form-0-DELETE': 'on',
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
formset.save(commit=False)
self.assertEqual(Poet.objects.count(), 1)
formset.save()
self.assertTrue(formset.is_valid())
self.assertEqual(Poet.objects.count(), 0)
def test_add_form_deletion_when_invalid(self):
"""
Make sure that an add form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
poet = Poet.objects.create(name='test')
# One existing untouched form and two new invalid forms
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': six.text_type(poet.id),
'form-0-name': 'test',
'form-1-id': '',
'form-1-name': 'x' * 1000, # Too long
'form-2-id': six.text_type(poet.id), # Violate unique constraint
'form-2-name': 'test2',
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data in new forms aren't actually valid.
data['form-0-DELETE'] = 'on'
data['form-1-DELETE'] = 'on'
data['form-2-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
def test_change_form_deletion_when_invalid(self):
"""
Make sure that a change form that is filled out, but marked for deletion
doesn't cause validation errors.
"""
PoetFormSet = modelformset_factory(Poet, fields="__all__", can_delete=True)
poet = Poet.objects.create(name='test')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '1',
'form-MAX_NUM_FORMS': '0',
'form-0-id': six.text_type(poet.id),
'form-0-name': 'x' * 1000,
}
formset = PoetFormSet(data, queryset=Poet.objects.all())
# Make sure this form doesn't pass validation.
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
# Then make sure that it *does* pass validation and delete the object,
# even though the data isn't actually valid.
data['form-0-DELETE'] = 'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
def test_outdated_deletion(self):
poet = Poet.objects.create(name='test')
poem = Poem.objects.create(name='Brevity is the soul of wit', poet=poet)
PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", can_delete=True)
# Simulate deletion of an object that doesn't exist in the database
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '2',
'form-0-id': str(poem.pk),
'form-0-name': 'foo',
'form-1-id': str(poem.pk + 1), # doesn't exist
'form-1-name': 'bar',
'form-1-DELETE': 'on',
}
formset = PoemFormSet(data, instance=poet, prefix="form")
# The formset is valid even though poem.pk + 1 doesn't exist,
# because it's marked for deletion anyway
self.assertTrue(formset.is_valid())
formset.save()
# Make sure the save went through correctly
self.assertEqual(Poem.objects.get(pk=poem.pk).name, "foo")
self.assertEqual(poet.poem_set.count(), 1)
self.assertFalse(Poem.objects.filter(pk=poem.pk + 1).exists())
class ModelFormsetTest(TestCase):
def test_modelformset_factory_without_fields(self):
""" Regression for #19733 """
message = (
"Calling modelformset_factory without defining 'fields' or 'exclude' "
"explicitly is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
modelformset_factory(Author)
def test_simple_save(self):
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" />'
'<input type="hidden" name="form-0-id" id="id_form-0-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" />'
'<input type="hidden" name="form-1-id" id="id_form-1-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label>'
' <input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" />'
'<input type="hidden" name="form-2-id" id="id_form-2-id" /></p>'
)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Charles Baudelaire',
'form-1-name': 'Arthur Rimbaud',
'form-2-name': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
author1, author2 = saved
self.assertEqual(author1, Author.objects.get(name='Charles Baudelaire'))
self.assertEqual(author2, Author.objects.get(name='Arthur Rimbaud'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1])
# Gah! We forgot Paul Verlaine. Let's create a formset to edit the
# existing authors with an extra form to add him. We *could* pass in a
# queryset to restrict the Author objects we edit, but in this case
# we'll use it to display them in alphabetical order by name.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=False)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" value="Arthur Rimbaud" maxlength="100" />'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" value="Charles Baudelaire" maxlength="100" />'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label>'
'<input id="id_form-2-name" type="text" name="form-2-name" maxlength="100" />'
'<input type="hidden" name="form-2-id" id="id_form-2-id" /></p>'
)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '2', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-name': 'Paul Verlaine',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# Only changed or new objects are returned from formset.save()
saved = formset.save()
self.assertEqual(len(saved), 1)
author3 = saved[0]
self.assertEqual(author3, Author.objects.get(name='Paul Verlaine'))
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# This probably shouldn't happen, but it will. If an add form was
# marked for deletion, make sure we don't save that form.
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=1, can_delete=True)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" '
'value="Arthur Rimbaud" maxlength="100" /></p>'
'<p><label for="id_form-0-DELETE">Delete:</label>'
'<input type="checkbox" name="form-0-DELETE" id="id_form-0-DELETE" />'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></p>' % author2.id
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" '
'value="Charles Baudelaire" maxlength="100" /></p>'
'<p><label for="id_form-1-DELETE">Delete:</label>'
'<input type="checkbox" name="form-1-DELETE" id="id_form-1-DELETE" />'
'<input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" /></p>' % author1.id
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_form-2-name">Name:</label>'
'<input id="id_form-2-name" type="text" name="form-2-name" '
'value="Paul Verlaine" maxlength="100" /></p>'
'<p><label for="id_form-2-DELETE">Delete:</label>'
'<input type="checkbox" name="form-2-DELETE" id="id_form-2-DELETE" />'
'<input type="hidden" name="form-2-id" value="%d" id="id_form-2-id" /></p>' % author3.id
)
self.assertHTMLEqual(
formset.forms[3].as_p(),
'<p><label for="id_form-3-name">Name:</label>'
'<input id="id_form-3-name" type="text" name="form-3-name" maxlength="100" /></p>'
'<p><label for="id_form-3-DELETE">Delete:</label>'
'<input type="checkbox" name="form-3-DELETE" id="id_form-3-DELETE" />'
'<input type="hidden" name="form-3-id" id="id_form-3-id" /></p>'
)
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Arthur Rimbaud',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': 'Walt Whitman',
'form-3-DELETE': 'on',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# No objects were changed or saved so nothing will come back.
self.assertEqual(formset.save(), [])
authors = list(Author.objects.order_by('name'))
self.assertEqual(authors, [author2, author1, author3])
# Let's edit a record to ensure save only returns that one record.
data = {
'form-TOTAL_FORMS': '4', # the number of forms rendered
'form-INITIAL_FORMS': '3', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(author2.id),
'form-0-name': 'Walt Whitman',
'form-1-id': str(author1.id),
'form-1-name': 'Charles Baudelaire',
'form-2-id': str(author3.id),
'form-2-name': 'Paul Verlaine',
'form-3-name': '',
'form-3-DELETE': '',
}
formset = AuthorFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
# One record has changed.
saved = formset.save()
self.assertEqual(len(saved), 1)
self.assertEqual(saved[0], Author.objects.get(name='Walt Whitman'))
def test_commit_false(self):
# Test the behavior of commit=False and save_m2m
author1 = Author.objects.create(name='Charles Baudelaire')
author2 = Author.objects.create(name='Paul Verlaine')
author3 = Author.objects.create(name='Walt Whitman')
meeting = AuthorMeeting.objects.create(created=date.today())
meeting.authors.set(Author.objects.all())
# create an Author instance to add to the meeting.
author4 = Author.objects.create(name='John Steinbeck')
AuthorMeetingFormSet = modelformset_factory(AuthorMeeting, fields="__all__", extra=1, can_delete=True)
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-id': str(meeting.id),
'form-0-name': '2nd Tuesday of the Week Meeting',
'form-0-authors': [author2.id, author1.id, author3.id, author4.id],
'form-1-name': '',
'form-1-authors': '',
'form-1-DELETE': '',
}
formset = AuthorMeetingFormSet(data=data, queryset=AuthorMeeting.objects.all())
self.assertTrue(formset.is_valid())
instances = formset.save(commit=False)
for instance in instances:
instance.created = date.today()
instance.save()
formset.save_m2m()
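# save(commit=False) does not store many-to-many data, so save_m2m()
# must be called once the instances themselves have been saved.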
self.assertQuerysetEqual(instances[0].authors.all(), [
'<Author: Charles Baudelaire>',
'<Author: John Steinbeck>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_max_num(self):
# Test the behavior of max_num with model formsets. It should allow
# all existing related objects/inlines for a given object to be
# displayed, but not allow the creation of new inlines beyond max_num.
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
Author.objects.create(name='Walt Whitman')
qs = Author.objects.order_by('name')
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 6)
self.assertEqual(len(formset.extra_forms), 3)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 4)
self.assertEqual(len(formset.extra_forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0, extra=3)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(len(formset.extra_forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=None)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=0)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
AuthorFormSet = modelformset_factory(Author, fields="__all__", max_num=4)
formset = AuthorFormSet(queryset=qs)
self.assertQuerysetEqual(formset.get_queryset(), [
'<Author: Charles Baudelaire>',
'<Author: Paul Verlaine>',
'<Author: Walt Whitman>',
])
def test_min_num(self):
# Test the behavior of min_num with model formsets. min_num forms
# should be added on top of extra.
qs = Author.objects.none()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 0)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=0)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
AuthorFormSet = modelformset_factory(Author, fields="__all__", min_num=1, extra=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 2)
def test_min_num_with_existing(self):
# Test the behavior of min_num with existing objects.
Author.objects.create(name='Charles Baudelaire')
qs = Author.objects.all()
AuthorFormSet = modelformset_factory(Author, fields="__all__", extra=0, min_num=1)
formset = AuthorFormSet(queryset=qs)
self.assertEqual(len(formset.forms), 1)
def test_custom_save_method(self):
class PoetForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Vladimir Mayakovsky" just to be a jerk.
author = super(PoetForm, self).save(commit=False)
author.name = "Vladimir Mayakovsky"
if commit:
author.save()
return author
PoetFormSet = modelformset_factory(Poet, fields="__all__", form=PoetForm)
data = {
'form-TOTAL_FORMS': '3', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-name': 'Walt Whitman',
'form-1-name': 'Charles Baudelaire',
'form-2-name': '',
}
qs = Poet.objects.all()
formset = PoetFormSet(data=data, queryset=qs)
self.assertTrue(formset.is_valid())
poets = formset.save()
self.assertEqual(len(poets), 2)
poet1, poet2 = poets
self.assertEqual(poet1.name, 'Vladimir Mayakovsky')
self.assertEqual(poet2.name, 'Vladimir Mayakovsky')
def test_custom_form(self):
""" Test that model_formset respects fields and exclude parameters of
custom form
"""
class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = ('title', 'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = ('subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertNotIn("subtitle", formset.forms[0].fields)
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertNotIn("subtitle", formset.forms[0].fields)
def test_custom_queryset_init(self):
"""
Test that a queryset can be overridden in the __init__ method.
https://docs.djangoproject.com/en/dev/topics/forms/modelforms/#changing-the-queryset
"""
Author.objects.create(name='Charles Baudelaire')
Author.objects.create(name='Paul Verlaine')
class BaseAuthorFormSet(BaseModelFormSet):
def __init__(self, *args, **kwargs):
super(BaseAuthorFormSet, self).__init__(*args, **kwargs)
self.queryset = Author.objects.filter(name__startswith='Charles')
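# Overriding self.queryset in __init__ restricts the forms the
# formset builds to the filtered authors only.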
AuthorFormSet = modelformset_factory(Author, fields='__all__', formset=BaseAuthorFormSet)
formset = AuthorFormSet()
self.assertEqual(len(formset.get_queryset()), 1)
def test_model_inheritance(self):
BetterAuthorFormSet = modelformset_factory(BetterAuthor, fields="__all__")
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" maxlength="100" /></p>'
'<p><label for="id_form-0-write_speed">Write speed:</label>'
'<input type="number" name="form-0-write_speed" id="id_form-0-write_speed" />'
'<input type="hidden" name="form-0-author_ptr" id="id_form-0-author_ptr" /></p>'
)
data = {
'form-TOTAL_FORMS': '1', # the number of forms rendered
'form-INITIAL_FORMS': '0', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': '',
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
author1, = saved
self.assertEqual(author1, BetterAuthor.objects.get(name='Ernest Hemingway'))
hemingway_id = BetterAuthor.objects.get(name="Ernest Hemingway").pk
formset = BetterAuthorFormSet()
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-name">Name:</label>'
'<input id="id_form-0-name" type="text" name="form-0-name" value="Ernest Hemingway" maxlength="100" /></p>'
'<p><label for="id_form-0-write_speed">Write speed:</label>'
'<input type="number" name="form-0-write_speed" value="10" id="id_form-0-write_speed" />'
'<input type="hidden" name="form-0-author_ptr" value="%d" id="id_form-0-author_ptr" /></p>' % hemingway_id
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_form-1-name">Name:</label>'
'<input id="id_form-1-name" type="text" name="form-1-name" maxlength="100" /></p>'
'<p><label for="id_form-1-write_speed">Write speed:</label>'
'<input type="number" name="form-1-write_speed" id="id_form-1-write_speed" />'
'<input type="hidden" name="form-1-author_ptr" id="id_form-1-author_ptr" /></p>'
)
data = {
'form-TOTAL_FORMS': '2', # the number of forms rendered
'form-INITIAL_FORMS': '1', # the number of forms with initial data
'form-MAX_NUM_FORMS': '', # the max number of forms
'form-0-author_ptr': hemingway_id,
'form-0-name': 'Ernest Hemingway',
'form-0-write_speed': '10',
'form-1-author_ptr': '',
'form-1-name': '',
'form-1-write_speed': '',
}
formset = BetterAuthorFormSet(data)
self.assertTrue(formset.is_valid())
self.assertEqual(formset.save(), [])
def test_inline_formsets(self):
# We can also create a formset that is tied to a parent model. This is
# how the admin system's edit inline functionality works.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=3, fields="__all__")
author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label> <input id="id_book_set-0-title" type="text" '
'name="book_set-0-title" maxlength="100" /><input type="hidden" name="book_set-0-author" value="%d" '
'id="id_book_set-0-author" /><input type="hidden" name="book_set-0-id" id="id_book_set-0-id" />'
'</p>' % author.id
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" />'
'<input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" />'
'<input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" />'
'<input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" />'
'<input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id
)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '0', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': '',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1, Book.objects.get(title='Les Fleurs du Mal'))
self.assertQuerysetEqual(author.book_set.all(), ['<Book: Les Fleurs du Mal>'])
# Now that we've added a book to Charles Baudelaire, let's try adding
# another one. This time though, an edit form will be available for
# every existing book.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
author = Author.objects.get(name='Charles Baudelaire')
formset = AuthorBooksFormSet(instance=author)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'value="Les Fleurs du Mal" maxlength="100" />'
'<input type="hidden" name="book_set-0-author" value="%d" id="id_book_set-0-author" />'
'<input type="hidden" name="book_set-0-id" value="%d" id="id_book_set-0-id" /></p>' % (
author.id, book1.id,
)
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" />'
'<input type="hidden" name="book_set-1-author" value="%d" id="id_book_set-1-author" />'
'<input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>' % author.id
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" />'
'<input type="hidden" name="book_set-2-author" value="%d" id="id_book_set-2-author" />'
'<input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>' % author.id
)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book2, = saved
self.assertEqual(book2, Book.objects.get(title='Les Paradis Artificiels'))
# As you can see, 'Les Paradis Artificiels' is now a book belonging to
# Charles Baudelaire.
self.assertQuerysetEqual(author.book_set.order_by('title'), [
'<Book: Les Fleurs du Mal>',
'<Book: Les Paradis Artificiels>',
])
def test_inline_formsets_save_as_new(self):
# The save_as_new parameter lets you re-associate the data to a new
# instance. This is used in the admin for save_as functionality.
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
Author.objects.create(name='Charles Baudelaire')
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '2', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': '1',
'book_set-0-title': 'Les Fleurs du Mal',
'book_set-1-id': '2',
'book_set-1-title': 'Les Paradis Artificiels',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=Author(), save_as_new=True)
self.assertTrue(formset.is_valid())
new_author = Author.objects.create(name='Charles Baudelaire')
formset = AuthorBooksFormSet(data, instance=new_author, save_as_new=True)
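# With save_as_new=True the submitted rows are treated as brand new
# objects for new_author, so both books are created rather than updated.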
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.title, 'Les Paradis Artificiels')
# Test using a custom prefix on an inline formset.
formset = AuthorBooksFormSet(prefix="test")
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_test-0-title">Title:</label>'
'<input id="id_test-0-title" type="text" name="test-0-title" maxlength="100" />'
'<input type="hidden" name="test-0-author" id="id_test-0-author" />'
'<input type="hidden" name="test-0-id" id="id_test-0-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_test-1-title">Title:</label>'
'<input id="id_test-1-title" type="text" name="test-1-title" maxlength="100" />'
'<input type="hidden" name="test-1-author" id="id_test-1-author" />'
'<input type="hidden" name="test-1-id" id="id_test-1-id" /></p>'
)
def test_inline_formsets_with_custom_pk(self):
# Test inline formsets where the inline-edited object has a custom
# primary key that is not the fk to the parent object.
self.maxDiff = 1024
AuthorBooksFormSet2 = inlineformset_factory(
Author, BookWithCustomPK, can_delete=False, extra=1, fields="__all__"
)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet2(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_bookwithcustompk_set-0-my_pk">My pk:</label>'
'<input id="id_bookwithcustompk_set-0-my_pk" type="number" '
'name="bookwithcustompk_set-0-my_pk" step="1" /></p>'
'<p><label for="id_bookwithcustompk_set-0-title">Title:</label>'
'<input id="id_bookwithcustompk_set-0-title" type="text" '
'name="bookwithcustompk_set-0-title" maxlength="100" />'
'<input type="hidden" name="bookwithcustompk_set-0-author" '
'value="1" id="id_bookwithcustompk_set-0-author" /></p>'
)
data = {
'bookwithcustompk_set-TOTAL_FORMS': '1', # the number of forms rendered
'bookwithcustompk_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithcustompk_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithcustompk_set-0-my_pk': '77777',
'bookwithcustompk_set-0-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet2(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.pk, 77777)
book1 = author.bookwithcustompk_set.get()
self.assertEqual(book1.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_multi_table_inheritance(self):
# Test inline formsets where the inline-edited object uses multi-table
# inheritance, thus has a non AutoField yet auto-created primary key.
AuthorBooksFormSet3 = inlineformset_factory(Author, AlternateBook, can_delete=False, extra=1, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
formset = AuthorBooksFormSet3(instance=author)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_alternatebook_set-0-title">Title:</label>'
'<input id="id_alternatebook_set-0-title" type="text" '
'name="alternatebook_set-0-title" maxlength="100" /></p>'
'<p><label for="id_alternatebook_set-0-notes">Notes:</label>'
'<input id="id_alternatebook_set-0-notes" type="text" '
'name="alternatebook_set-0-notes" maxlength="100" />'
'<input type="hidden" name="alternatebook_set-0-author" value="1" '
'id="id_alternatebook_set-0-author" />'
'<input type="hidden" name="alternatebook_set-0-book_ptr" '
'id="id_alternatebook_set-0-book_ptr" /></p>'
)
data = {
'alternatebook_set-TOTAL_FORMS': '1', # the number of forms rendered
'alternatebook_set-INITIAL_FORMS': '0', # the number of forms with initial data
'alternatebook_set-MAX_NUM_FORMS': '', # the max number of forms
'alternatebook_set-0-title': 'Flowers of Evil',
'alternatebook_set-0-notes': 'English translation of Les Fleurs du Mal'
}
formset = AuthorBooksFormSet3(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
book1, = saved
self.assertEqual(book1.title, 'Flowers of Evil')
self.assertEqual(book1.notes, 'English translation of Les Fleurs du Mal')
@skipUnlessDBFeature('supports_partially_nullable_unique_constraints')
def test_inline_formsets_with_nullable_unique_together(self):
# Test inline formsets where the inline-edited object has a
# unique_together constraint with a nullable member
AuthorBooksFormSet4 = inlineformset_factory(
Author, BookWithOptionalAltEditor, can_delete=False, extra=2, fields="__all__"
)
author = Author.objects.create(pk=1, name='Charles Baudelaire')
data = {
'bookwithoptionalalteditor_set-TOTAL_FORMS': '2', # the number of forms rendered
'bookwithoptionalalteditor_set-INITIAL_FORMS': '0', # the number of forms with initial data
'bookwithoptionalalteditor_set-MAX_NUM_FORMS': '', # the max number of forms
'bookwithoptionalalteditor_set-0-author': '1',
'bookwithoptionalalteditor_set-0-title': 'Les Fleurs du Mal',
'bookwithoptionalalteditor_set-1-author': '1',
'bookwithoptionalalteditor_set-1-title': 'Les Fleurs du Mal',
}
formset = AuthorBooksFormSet4(data, instance=author)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
book1, book2 = saved
self.assertEqual(book1.author_id, 1)
self.assertEqual(book1.title, 'Les Fleurs du Mal')
self.assertEqual(book2.author_id, 1)
self.assertEqual(book2.title, 'Les Fleurs du Mal')
def test_inline_formsets_with_custom_save_method(self):
AuthorBooksFormSet = inlineformset_factory(Author, Book, can_delete=False, extra=2, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
book1 = Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
book2 = Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
book3 = Book.objects.create(pk=3, author=author, title='Flowers of Evil')
class PoemForm(forms.ModelForm):
def save(self, commit=True):
# change the name to "Brooklyn Bridge" just to be a jerk.
poem = super(PoemForm, self).save(commit=False)
poem.name = "Brooklyn Bridge"
if commit:
poem.save()
return poem
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm, fields="__all__")
data = {
'poem_set-TOTAL_FORMS': '3', # the number of forms rendered
'poem_set-INITIAL_FORMS': '0', # the number of forms with initial data
'poem_set-MAX_NUM_FORMS': '', # the max number of forms
'poem_set-0-name': 'The Cloud in Trousers',
'poem_set-1-name': 'I',
'poem_set-2-name': '',
}
poet = Poet.objects.create(name='Vladimir Mayakovsky')
formset = PoemFormSet(data=data, instance=poet)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 2)
poem1, poem2 = saved
self.assertEqual(poem1.name, 'Brooklyn Bridge')
self.assertEqual(poem2.name, 'Brooklyn Bridge')
# We can provide a custom queryset to our InlineFormSet:
custom_qs = Book.objects.order_by('-title')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'value="Les Paradis Artificiels" maxlength="100" />'
'<input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" />'
'<input type="hidden" name="book_set-0-id" value="1" id="id_book_set-0-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" '
'value="Les Fleurs du Mal" maxlength="100" />'
'<input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" />'
'<input type="hidden" name="book_set-1-id" value="2" id="id_book_set-1-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" '
'value="Flowers of Evil" maxlength="100" />'
'<input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" />'
'<input type="hidden" name="book_set-2-id" value="3" id="id_book_set-2-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[3].as_p(),
'<p><label for="id_book_set-3-title">Title:</label>'
'<input id="id_book_set-3-title" type="text" name="book_set-3-title" maxlength="100" />'
'<input type="hidden" name="book_set-3-author" value="1" id="id_book_set-3-author" />'
'<input type="hidden" name="book_set-3-id" id="id_book_set-3-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[4].as_p(),
'<p><label for="id_book_set-4-title">Title:</label>'
'<input id="id_book_set-4-title" type="text" name="book_set-4-title" maxlength="100" />'
'<input type="hidden" name="book_set-4-author" value="1" id="id_book_set-4-author" />'
'<input type="hidden" name="book_set-4-id" id="id_book_set-4-id" /></p>'
)
data = {
'book_set-TOTAL_FORMS': '5', # the number of forms rendered
'book_set-INITIAL_FORMS': '3', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book1.id),
'book_set-0-title': 'Les Paradis Artificiels',
'book_set-1-id': str(book2.id),
'book_set-1-title': 'Les Fleurs du Mal',
'book_set-2-id': str(book3.id),
'book_set-2-title': 'Flowers of Evil',
'book_set-3-title': 'Revue des deux mondes',
'book_set-4-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
custom_qs = Book.objects.filter(title__startswith='F')
formset = AuthorBooksFormSet(instance=author, queryset=custom_qs)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_book_set-0-title">Title:</label>'
'<input id="id_book_set-0-title" type="text" name="book_set-0-title" '
'value="Flowers of Evil" maxlength="100" />'
'<input type="hidden" name="book_set-0-author" value="1" id="id_book_set-0-author" />'
'<input type="hidden" name="book_set-0-id" value="3" id="id_book_set-0-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_book_set-1-title">Title:</label>'
'<input id="id_book_set-1-title" type="text" name="book_set-1-title" maxlength="100" />'
'<input type="hidden" name="book_set-1-author" value="1" id="id_book_set-1-author" />'
'<input type="hidden" name="book_set-1-id" id="id_book_set-1-id" /></p>'
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_book_set-2-title">Title:</label>'
'<input id="id_book_set-2-title" type="text" name="book_set-2-title" maxlength="100" />'
'<input type="hidden" name="book_set-2-author" value="1" id="id_book_set-2-author" />'
'<input type="hidden" name="book_set-2-id" id="id_book_set-2-id" /></p>'
)
data = {
'book_set-TOTAL_FORMS': '3', # the number of forms rendered
'book_set-INITIAL_FORMS': '1', # the number of forms with initial data
'book_set-MAX_NUM_FORMS': '', # the max number of forms
'book_set-0-id': str(book3.id),
'book_set-0-title': 'Flowers of Evil',
'book_set-1-title': 'Revue des deux mondes',
'book_set-2-title': '',
}
formset = AuthorBooksFormSet(data, instance=author, queryset=custom_qs)
self.assertTrue(formset.is_valid())
def test_inline_formsets_with_custom_save_method_related_instance(self):
"""
The ModelForm.save() method should be able to access the related object
if it exists in the database (#24395).
"""
class PoemForm2(forms.ModelForm):
def save(self, commit=True):
poem = super(PoemForm2, self).save(commit=False)
poem.name = "%s by %s" % (poem.name, poem.poet.name)
if commit:
poem.save()
return poem
PoemFormSet = inlineformset_factory(Poet, Poem, form=PoemForm2, fields="__all__")
data = {
'poem_set-TOTAL_FORMS': '1',
'poem_set-INITIAL_FORMS': '0',
'poem_set-MAX_NUM_FORMS': '',
'poem_set-0-name': 'Le Lac',
}
poet = Poet()
formset = PoemFormSet(data=data, instance=poet)
self.assertTrue(formset.is_valid())
# The Poet instance is saved after the formset instantiation. This
# happens in admin's changeform_view() when adding a new object and
# some inlines in the same request.
poet.name = 'Lamartine'
poet.save()
poem = formset.save()[0]
self.assertEqual(poem.name, 'Le Lac by Lamartine')
def test_inline_formsets_with_wrong_fk_name(self):
""" Regression for #23451 """
message = "fk_name 'title' is not a ForeignKey to 'model_formsets.Author'."
with self.assertRaisesMessage(ValueError, message):
inlineformset_factory(Author, Book, fields="__all__", fk_name='title')
def test_custom_pk(self):
# Ensure that the custom primary key field is displayed
CustomPrimaryKeyFormSet = modelformset_factory(CustomPrimaryKey, fields="__all__")
formset = CustomPrimaryKeyFormSet()
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-my_pk">My pk:</label> <input id="id_form-0-my_pk" type="text" '
'name="form-0-my_pk" maxlength="10" /></p>'
'<p><label for="id_form-0-some_field">Some field:</label>'
'<input id="id_form-0-some_field" type="text" name="form-0-some_field" maxlength="100" /></p>'
)
# Custom primary keys with ForeignKey, OneToOneField and AutoField ############
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Owner, extra=2, can_delete=False, fields="__all__")
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 2)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label>'
'<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" maxlength="100" />'
'<input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" />'
'<input type="hidden" name="owner_set-0-auto_id" id="id_owner_set-0-auto_id" /></p>'
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label>'
'<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" />'
'<input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" />'
'<input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>'
)
data = {
'owner_set-TOTAL_FORMS': '2',
'owner_set-INITIAL_FORMS': '0',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': '',
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner1, = saved
self.assertEqual(owner1.name, 'Joe Perry')
self.assertEqual(owner1.place.name, 'Giordanos')
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 3)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_owner_set-0-name">Name:</label>'
'<input id="id_owner_set-0-name" type="text" name="owner_set-0-name" value="Joe Perry" maxlength="100" />'
'<input type="hidden" name="owner_set-0-place" value="1" id="id_owner_set-0-place" />'
'<input type="hidden" name="owner_set-0-auto_id" value="%d" id="id_owner_set-0-auto_id" /></p>'
% owner1.auto_id
)
self.assertHTMLEqual(
formset.forms[1].as_p(),
'<p><label for="id_owner_set-1-name">Name:</label>'
'<input id="id_owner_set-1-name" type="text" name="owner_set-1-name" maxlength="100" />'
'<input type="hidden" name="owner_set-1-place" value="1" id="id_owner_set-1-place" />'
'<input type="hidden" name="owner_set-1-auto_id" id="id_owner_set-1-auto_id" /></p>'
)
self.assertHTMLEqual(
formset.forms[2].as_p(),
'<p><label for="id_owner_set-2-name">Name:</label>'
'<input id="id_owner_set-2-name" type="text" name="owner_set-2-name" maxlength="100" />'
'<input type="hidden" name="owner_set-2-place" value="1" id="id_owner_set-2-place" />'
'<input type="hidden" name="owner_set-2-auto_id" id="id_owner_set-2-auto_id" /></p>'
)
data = {
'owner_set-TOTAL_FORMS': '3',
'owner_set-INITIAL_FORMS': '1',
'owner_set-MAX_NUM_FORMS': '',
'owner_set-0-auto_id': six.text_type(owner1.auto_id),
'owner_set-0-name': 'Joe Perry',
'owner_set-1-auto_id': '',
'owner_set-1-name': 'Jack Berry',
'owner_set-2-auto_id': '',
'owner_set-2-name': '',
}
formset = FormSet(data, instance=place)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
owner2, = saved
self.assertEqual(owner2.name, 'Jack Berry')
self.assertEqual(owner2.place.name, 'Giordanos')
# Ensure a custom primary key that is a ForeignKey or OneToOneField gets rendered for the user to choose.
FormSet = modelformset_factory(OwnerProfile, fields="__all__")
formset = FormSet()
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_form-0-owner">Owner:</label>'
'<select name="form-0-owner" id="id_form-0-owner">'
'<option value="" selected="selected">---------</option>'
'<option value="%d">Joe Perry at Giordanos</option>'
'<option value="%d">Jack Berry at Giordanos</option>'
'</select></p>'
'<p><label for="id_form-0-age">Age:</label>'
'<input type="number" name="form-0-age" id="id_form-0-age" min="0" /></p>'
% (owner1.auto_id, owner2.auto_id)
)
owner1 = Owner.objects.get(name='Joe Perry')
FormSet = inlineformset_factory(Owner, OwnerProfile, max_num=1, can_delete=False, fields="__all__")
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label>'
'<input type="number" name="ownerprofile-0-age" id="id_ownerprofile-0-age" min="0" />'
'<input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id
)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '0',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': '',
'ownerprofile-0-age': '54',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 54)
formset = FormSet(instance=owner1)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_ownerprofile-0-age">Age:</label>'
'<input type="number" name="ownerprofile-0-age" value="54" id="id_ownerprofile-0-age" min="0" />'
'<input type="hidden" name="ownerprofile-0-owner" value="%d" id="id_ownerprofile-0-owner" /></p>'
% owner1.auto_id
)
data = {
'ownerprofile-TOTAL_FORMS': '1',
'ownerprofile-INITIAL_FORMS': '1',
'ownerprofile-MAX_NUM_FORMS': '1',
'ownerprofile-0-owner': six.text_type(owner1.auto_id),
'ownerprofile-0-age': '55',
}
formset = FormSet(data, instance=owner1)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
profile1, = saved
self.assertEqual(profile1.owner, owner1)
self.assertEqual(profile1.age, 55)
def test_unique_true_enforces_max_num_one(self):
# ForeignKey with unique=True should enforce max_num=1
place = Place.objects.create(pk=1, name='Giordanos', city='Chicago')
FormSet = inlineformset_factory(Place, Location, can_delete=False, fields="__all__")
self.assertEqual(FormSet.max_num, 1)
formset = FormSet(instance=place)
self.assertEqual(len(formset.forms), 1)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<p><label for="id_location_set-0-lat">Lat:</label>'
'<input id="id_location_set-0-lat" type="text" name="location_set-0-lat" maxlength="100" /></p>'
'<p><label for="id_location_set-0-lon">Lon:</label> '
'<input id="id_location_set-0-lon" type="text" name="location_set-0-lon" maxlength="100" />'
'<input type="hidden" name="location_set-0-place" value="1" id="id_location_set-0-place" />'
'<input type="hidden" name="location_set-0-id" id="id_location_set-0-id" /></p>'
)
def test_foreign_keys_in_parents(self):
self.assertEqual(type(_get_foreign_key(Restaurant, Owner)), models.ForeignKey)
self.assertEqual(type(_get_foreign_key(MexicanRestaurant, Owner)), models.ForeignKey)
def test_unique_validation(self):
FormSet = modelformset_factory(Product, fields="__all__", extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
product1, = saved
self.assertEqual(product1.slug, 'car-red')
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'car-red',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'slug': ['Product with this Slug already exists.']}])
def test_modelformset_validate_max_flag(self):
# If validate_max is set and max_num is less than TOTAL_FORMS in the
# data, a non-form validation error is raised. MAX_NUM_FORMS in the data is
# irrelevant here (it's output as a hint for the client but its
# value in the returned data is not checked)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '2', # should be ignored
'form-0-price': '12.00',
'form-0-quantity': '1',
'form-1-price': '24.00',
'form-1-quantity': '2',
}
FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1, validate_max=True)
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(), ['Please submit 1 or fewer forms.'])
# Now test the same thing without the validate_max flag to ensure
# default behavior is unchanged
FormSet = modelformset_factory(Price, fields="__all__", extra=1, max_num=1)
formset = FormSet(data)
self.assertTrue(formset.is_valid())
def test_unique_together_validation(self):
FormSet = modelformset_factory(Price, fields="__all__", extra=1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
price1, = saved
self.assertEqual(price1.price, Decimal('12.00'))
self.assertEqual(price1.quantity, 1)
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '12.00',
'form-0-quantity': '1',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Price with this Price and Quantity already exists.']}])
def test_unique_together_with_inlineformset_factory(self):
# Also see bug #8882.
repository = Repository.objects.create(name='Test Repo')
FormSet = inlineformset_factory(Repository, Revision, extra=1, fields="__all__")
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertTrue(formset.is_valid())
saved = formset.save()
self.assertEqual(len(saved), 1)
revision1, = saved
self.assertEqual(revision1.repository, repository)
self.assertEqual(revision1.revision, '146239817507f148d448db38840db7c3cbf47c76')
# attempt to save the same revision against the same repo.
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
self.assertEqual(formset.errors, [{'__all__': ['Revision with this Repository and Revision already exists.']}])
# unique_together with inlineformset_factory with overridden form fields
# Also see #9494
FormSet = inlineformset_factory(Repository, Revision, fields=('revision',), extra=1)
data = {
'revision_set-TOTAL_FORMS': '1',
'revision_set-INITIAL_FORMS': '0',
'revision_set-MAX_NUM_FORMS': '',
'revision_set-0-repository': repository.pk,
'revision_set-0-revision': '146239817507f148d448db38840db7c3cbf47c76',
'revision_set-0-DELETE': '',
}
formset = FormSet(data, instance=repository)
self.assertFalse(formset.is_valid())
def test_callable_defaults(self):
# Use of callable defaults (see bug #7975).
person = Person.objects.create(name='Ringo')
FormSet = inlineformset_factory(Person, Membership, can_delete=False, extra=1, fields="__all__")
formset = FormSet(instance=person)
# Django will render a hidden field for model fields that have a callable
# default. This is required so the value can be checked for changes
# correctly when determining which extra forms have changed and need saving.
self.assertEqual(len(formset.forms), 1) # this formset only has one form
form = formset.forms[0]
now = form.fields['date_joined'].initial()
result = form.as_p()
result = re.sub(r'[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}(?:\.[0-9]+)?', '__DATETIME__', result)
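# Normalize the rendered datetime so the assertion below does not
# depend on the exact moment the form was created.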
self.assertHTMLEqual(
result,
'<p><label for="id_membership_set-0-date_joined">Date joined:</label>'
'<input type="text" name="membership_set-0-date_joined" '
'value="__DATETIME__" id="id_membership_set-0-date_joined" />'
'<input type="hidden" name="initial-membership_set-0-date_joined" value="__DATETIME__" '
'id="initial-membership_set-0-id_membership_set-0-date_joined" /></p>'
'<p><label for="id_membership_set-0-karma">Karma:</label>'
'<input type="number" name="membership_set-0-karma" id="id_membership_set-0-karma" />'
'<input type="hidden" name="membership_set-0-person" value="%d" id="id_membership_set-0-person" />'
'<input type="hidden" name="membership_set-0-id" id="id_membership_set-0-id" /></p>'
% person.id)
# Test validation with callable defaults. Validation relies on the hidden initial fields.
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
# now test for when the data changes
one_day_later = now + datetime.timedelta(days=1)
filled_data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined': six.text_type(one_day_later.strftime('%Y-%m-%d %H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(filled_data, instance=person)
self.assertFalse(formset.is_valid())
# now test with split datetime fields
class MembershipForm(forms.ModelForm):
date_joined = forms.SplitDateTimeField(initial=now)
class Meta:
model = Membership
fields = "__all__"
def __init__(self, **kwargs):
super(MembershipForm, self).__init__(**kwargs)
self.fields['date_joined'].widget = forms.SplitDateTimeWidget()
FormSet = inlineformset_factory(
Person,
Membership,
form=MembershipForm,
can_delete=False,
extra=1,
fields="__all__",
)
data = {
'membership_set-TOTAL_FORMS': '1',
'membership_set-INITIAL_FORMS': '0',
'membership_set-MAX_NUM_FORMS': '',
'membership_set-0-date_joined_0': six.text_type(now.strftime('%Y-%m-%d')),
'membership_set-0-date_joined_1': six.text_type(now.strftime('%H:%M:%S')),
'initial-membership_set-0-date_joined': six.text_type(now.strftime('%Y-%m-%d %H:%M:%S')),
'membership_set-0-karma': '',
}
formset = FormSet(data, instance=person)
self.assertTrue(formset.is_valid())
def test_inlineformset_factory_with_null_fk(self):
# inlineformset_factory tests with fk having null=True. see #9462.
# create some data that will exhibit the issue
team = Team.objects.create(name="Red Vipers")
Player(name="Timmy").save()
Player(name="Bobby", team=team).save()
PlayerInlineFormSet = inlineformset_factory(Team, Player, fields="__all__")
formset = PlayerInlineFormSet()
self.assertQuerysetEqual(formset.get_queryset(), [])
formset = PlayerInlineFormSet(instance=team)
players = formset.get_queryset()
self.assertEqual(len(players), 1)
player1, = players
self.assertEqual(player1.team, team)
self.assertEqual(player1.name, 'Bobby')
def test_model_formset_with_custom_pk(self):
# a formset for a Model that has a custom primary key that still needs to be
# added to the formset automatically
FormSet = modelformset_factory(ClassyMexicanRestaurant, fields=["tacos_are_yummy"])
self.assertEqual(sorted(FormSet().forms[0].fields.keys()), ['restaurant', 'tacos_are_yummy'])
def test_model_formset_with_initial_model_instance(self):
# has_changed should compare model instance and primary key
# see #18898
FormSet = modelformset_factory(Poem, fields='__all__')
john_milton = Poet(name="John Milton")
john_milton.save()
data = {
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-name': '',
'form-0-poet': str(john_milton.id),
}
formset = FormSet(initial=[{'poet': john_milton}], data=data)
self.assertFalse(formset.extra_forms[0].has_changed())
def test_model_formset_with_initial_queryset(self):
# has_changed should work with queryset and list of pk's
# see #18898
FormSet = modelformset_factory(AuthorMeeting, fields='__all__')
Author.objects.create(pk=1, name='Charles Baudelaire')
data = {
'form-TOTAL_FORMS': 1,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-name': '',
'form-0-created': '',
'form-0-authors': list(Author.objects.values_list('id', flat=True)),
}
formset = FormSet(initial=[{'authors': Author.objects.all()}], data=data)
self.assertFalse(formset.extra_forms[0].has_changed())
def test_prevent_duplicates_from_with_the_same_formset(self):
FormSet = modelformset_factory(Product, fields="__all__", extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-slug': 'red_car',
'form-1-slug': 'red_car',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug.'])
FormSet = modelformset_factory(Price, fields="__all__", extra=2)
data = {
'form-TOTAL_FORMS': 2,
'form-INITIAL_FORMS': 0,
'form-MAX_NUM_FORMS': '',
'form-0-price': '25',
'form-0-quantity': '7',
'form-1-price': '25',
'form-1-quantity': '7',
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for price and quantity, which must be unique.'])
# Only the price field is specified, this should skip any unique checks since
# the unique_together is not fulfilled. This will fail with a KeyError if broken.
FormSet = modelformset_factory(Price, fields=("price",), extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-price': '24',
'form-1-price': '24',
}
formset = FormSet(data)
self.assertTrue(formset.is_valid())
FormSet = inlineformset_factory(Author, Book, extra=0, fields="__all__")
author = Author.objects.create(pk=1, name='Charles Baudelaire')
Book.objects.create(pk=1, author=author, title='Les Paradis Artificiels')
Book.objects.create(pk=2, author=author, title='Les Fleurs du Mal')
Book.objects.create(pk=3, author=author, title='Flowers of Evil')
book_ids = author.book_set.order_by('id').values_list('id', flat=True)
data = {
'book_set-TOTAL_FORMS': '2',
'book_set-INITIAL_FORMS': '2',
'book_set-MAX_NUM_FORMS': '',
'book_set-0-title': 'The 2008 Election',
'book_set-0-author': str(author.id),
'book_set-0-id': str(book_ids[0]),
'book_set-1-title': 'The 2008 Election',
'book_set-1-author': str(author.id),
'book_set-1-id': str(book_ids[1]),
}
formset = FormSet(data=data, instance=author)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
FormSet = modelformset_factory(Post, fields="__all__", extra=2)
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'blah',
'form-0-slug': 'Morning',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-01-01'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for title which must be unique for the date in posted.'])
self.assertEqual(formset.errors,
[{}, {'__all__': ['Please correct the duplicate values below.']}])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'foo',
'form-0-posted': '2009-01-01',
'form-1-title': 'blah',
'form-1-slug': 'Morning in Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for slug which must be unique for the year in posted.'])
data = {
'form-TOTAL_FORMS': '2',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
'form-0-slug': 'Morning in Prague',
'form-0-subtitle': 'rawr',
'form-0-posted': '2008-08-01',
'form-1-title': 'blah',
'form-1-slug': 'Prague',
'form-1-subtitle': 'rawr',
'form-1-posted': '2009-08-02'
}
formset = FormSet(data)
self.assertFalse(formset.is_valid())
self.assertEqual(formset._non_form_errors,
['Please correct the duplicate data for subtitle which must be unique for the month in posted.'])
class TestModelFormsetOverridesTroughFormMeta(TestCase):
def test_modelformset_factory_widgets(self):
widgets = {
'name': forms.TextInput(attrs={'class': 'poet'})
}
PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets)
form = PoetFormSet.form()
self.assertHTMLEqual(
"%s" % form['name'],
'<input id="id_name" maxlength="100" type="text" class="poet" name="name" />'
)
def test_inlineformset_factory_widgets(self):
widgets = {
'title': forms.TextInput(attrs={'class': 'book'})
}
BookFormSet = inlineformset_factory(Author, Book, widgets=widgets, fields="__all__")
form = BookFormSet.form()
self.assertHTMLEqual(
"%s" % form['title'],
'<input class="book" id="id_title" maxlength="100" name="title" type="text" />'
)
def test_modelformset_factory_labels_overrides(self):
BookFormSet = modelformset_factory(Book, fields="__all__", labels={
'title': 'Name'
})
form = BookFormSet.form()
self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')
def test_inlineformset_factory_labels_overrides(self):
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", labels={
'title': 'Name'
})
form = BookFormSet.form()
self.assertHTMLEqual(form['title'].label_tag(), '<label for="id_title">Name:</label>')
def test_modelformset_factory_help_text_overrides(self):
BookFormSet = modelformset_factory(Book, fields="__all__", help_texts={
'title': 'Choose carefully.'
})
form = BookFormSet.form()
self.assertEqual(form['title'].help_text, 'Choose carefully.')
def test_inlineformset_factory_help_text_overrides(self):
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", help_texts={
'title': 'Choose carefully.'
})
form = BookFormSet.form()
self.assertEqual(form['title'].help_text, 'Choose carefully.')
def test_modelformset_factory_error_messages_overrides(self):
author = Author.objects.create(pk=1, name='Charles Baudelaire')
BookFormSet = modelformset_factory(Book, fields="__all__", error_messages={
'title': {
'max_length': 'Title too long!!'
}
})
form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
form.full_clean()
self.assertEqual(form.errors, {'title': ['Title too long!!']})
def test_inlineformset_factory_error_messages_overrides(self):
author = Author.objects.create(pk=1, name='Charles Baudelaire')
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", error_messages={
'title': {
'max_length': 'Title too long!!'
}
})
form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
form.full_clean()
self.assertEqual(form.errors, {'title': ['Title too long!!']})
def test_modelformset_factory_field_class_overrides(self):
author = Author.objects.create(pk=1, name='Charles Baudelaire')
BookFormSet = modelformset_factory(Book, fields="__all__", field_classes={
'title': forms.SlugField,
})
form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
self.assertIs(Book._meta.get_field('title').__class__, models.CharField)
self.assertIsInstance(form.fields['title'], forms.SlugField)
def test_inlineformset_factory_field_class_overrides(self):
author = Author.objects.create(pk=1, name='Charles Baudelaire')
BookFormSet = inlineformset_factory(Author, Book, fields="__all__", field_classes={
'title': forms.SlugField,
})
form = BookFormSet.form(data={'title': 'Foo ' * 30, 'author': author.id})
self.assertIs(Book._meta.get_field('title').__class__, models.CharField)
self.assertIsInstance(form.fields['title'], forms.SlugField)
| bsd-3-clause | 6,083,113,462,328,456,000 | 44.051252 | 119 | 0.563627 | false |
gnome-prototypes-team/gnome-music | gnomemusic/query.py | 1 | 42091 | # Copyright (c) 2013 Arnel A. Borja <[email protected]>
# Copyright (c) 2013 Vadim Rutkovsky <[email protected]>
# Copyright (c) 2013 Seif Lotfy <[email protected]>
# Copyright (c) 2013 Guillaume Quintard <[email protected]>
#
# GNOME Music is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# GNOME Music is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with GNOME Music; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The GNOME Music authors hereby grant permission for non-GPL compatible
# GStreamer plugins to be used and distributed together with GStreamer
# and GNOME Music. This permission is above and beyond the permissions
# granted by the GPL license by which GNOME Music is covered. If you
# modify this code, you may extend this exception to your version of the
# code, but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version.
from gettext import gettext as _
from gi.repository import GLib, Tracker
import os
import logging
logger = logging.getLogger(__name__)
import time
sparql_midnight_dateTime_format = "%Y-%m-%dT00:00:00Z"
SECONDS_PER_DAY = 86400
class Query():
music_folder = None
MUSIC_URI = None
download_folder = None
DOWNLOAD_URI = None
try:
music_folder = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_MUSIC)
MUSIC_URI = Tracker.sparql_escape_string(GLib.filename_to_uri(music_folder))
download_folder = GLib.get_user_special_dir(GLib.UserDirectory.DIRECTORY_DOWNLOAD)
DOWNLOAD_URI = Tracker.sparql_escape_string(GLib.filename_to_uri(download_folder))
for folder in [music_folder, download_folder]:
if os.path.islink(folder):
logger.warn("%s is a symlink, this folder will be omitted" % folder)
else:
i = len(next(os.walk(folder))[2])
logger.debug("Found %d files in %s" % (i, folder))
except TypeError:
logger.warn("XDG user dirs are not set")
@staticmethod
def order_by_statement(attr):
"""Returns a SPARQL ORDER BY statement sorting by the given attribute, ignoring
articles as defined in _("the"). 'Attr' should be given without parentheses,
e.g., "attr='?author'"."""
return_statement = "fn:lower-case(%(attribute)s)" % {'attribute': attr}
# TRANSLATORS: _("the") should be a space-separated list of all-lowercase articles
# (such as 'the') that should be ignored when alphabetizing artists/albums. This
# list should include 'the' regardless of language. If some articles occur more
# frequently than others, most common should appear first, least common last.
for article in reversed(_("the a an").split(" ")):
return_statement = '''IF(fn:starts-with(fn:lower-case(%(attribute)s), "%(article)s"),
fn:substring(fn:lower-case(%(attribute)s), %(substr_start)s),
%(nested_if)s)''' % {
'attribute': attr,
'article': article + " ",
'substr_start': str(len(article) + 2),
'nested_if': return_statement}
return return_statement
@staticmethod
def all_albums():
return Query.albums('?album a nmm:MusicAlbum .')
@staticmethod
def all_artists():
return Query.artists('?album a nmm:MusicAlbum .')
@staticmethod
def all_songs():
return Query.songs('?song a nmm:MusicPiece ; a nfo:FileDataObject .')
@staticmethod
def all_playlists():
return Query.playlists('?playlist a nmm:Playlist .')
@staticmethod
def all_songs_count():
query = '''
SELECT
COUNT(?song) AS childcount
WHERE {
?song a nmm:MusicPiece ;
a nfo:FileDataObject
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
'''.replace('\n', ' ').strip() % {
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def albums(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
tracker:coalesce(
(
SELECT
GROUP_CONCAT(
nmm:artistName(?artist),
','
)
WHERE {
?album nmm:albumArtist ?artist
}
),
(
SELECT
GROUP_CONCAT(
(
SELECT
nmm:artistName(nmm:performer(?_12)) AS perf
WHERE {
?_12 nmm:musicAlbum ?album
}
GROUP BY ?perf
),
','
) AS album_performer
WHERE {
}
)
) AS author
xsd:integer(
tracker:coalesce(
nmm:albumTrackCount(?album),
(
SELECT
COUNT(?_1)
WHERE {
?_1 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_1)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_1)
)
)
FILTER (
NOT EXISTS {
?_1 a nmm:Video
} &&
NOT EXISTS {
?_1 a nmm:Playlist
}
)
}
)
)
) AS childcount
(
SELECT
fn:year-from-dateTime(?c)
WHERE {
?_2 nmm:musicAlbum ?album ;
nie:contentCreated ?c ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_2)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_2)
)
)
FILTER (
NOT EXISTS {
?_2 a nmm:Video
} &&
NOT EXISTS {
?_2 a nmm:Playlist
}
)
}
LIMIT 1
) AS creation-date
{
%(where_clause)s
FILTER (
EXISTS {
?_3 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_3)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_3)
)
)
FILTER (
NOT EXISTS {
?_3 a nmm:Video
} &&
NOT EXISTS {
?_3 a nmm:Playlist
}
)
}
)
}
ORDER BY %(album_order)s
%(artist_order)s
?albumyear
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI,
'album_order': Query.order_by_statement("?title"),
'artist_order': Query.order_by_statement("?author")
}
return query
@staticmethod
def artists(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
tracker:coalesce(
(
SELECT
GROUP_CONCAT(
nmm:artistName(?artist),
','
)
WHERE {
?album nmm:albumArtist ?artist
}
),
(
SELECT
GROUP_CONCAT(
(
SELECT
nmm:artistName(nmm:performer(?_12)) AS perf
WHERE {
?_12 nmm:musicAlbum ?album
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_12)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_12)
)
)
FILTER (
NOT EXISTS {
?_12 a nmm:Video
} &&
NOT EXISTS {
?_12 a nmm:Playlist
}
)
}
GROUP BY ?perf
),
','
) AS album_performer
WHERE {
}
)
) AS author
xsd:integer(
tracker:coalesce(
nmm:albumTrackCount(?album),
(
SELECT
COUNT(?_1)
WHERE {
?_1 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_1)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_1)
)
)
FILTER (
NOT EXISTS {
?_1 a nmm:Video
} &&
NOT EXISTS {
?_1 a nmm:Playlist
}
)
}
)
)
) AS childcount
(
SELECT
fn:year-from-dateTime(?c)
WHERE {
?_2 nmm:musicAlbum ?album ;
nie:contentCreated ?c ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_2)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_2)
)
)
FILTER (
NOT EXISTS {
?_2 a nmm:Video
} &&
NOT EXISTS {
?_2 a nmm:Playlist
}
)
}
LIMIT 1
) AS creation-date
{
%(where_clause)s
FILTER (
EXISTS {
?_3 nmm:musicAlbum ?album ;
tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?_3)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?_3)
)
)
FILTER (
NOT EXISTS {
?_3 a nmm:Video
} &&
NOT EXISTS {
?_3 a nmm:Playlist
}
)
}
)
}
ORDER BY %(artist_order)s
?albumyear
%(album_order)s
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI,
'artist_order': Query.order_by_statement("?author"),
'album_order': Query.order_by_statement("nie:title(?album)")
}
return query
@staticmethod
def songs(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?song)
tracker:id(?song) AS id
nie:url(?song) AS url
nie:title(?song) AS title
nmm:artistName(nmm:performer(?song)) AS artist
nie:title(nmm:musicAlbum(?song)) AS album
nfo:duration(?song) AS duration
IF(bound(?tag), 'truth!', '') AS lyrics
{
%(where_clause)s
OPTIONAL {
?song nao:hasTag ?tag .
FILTER( ?tag = nao:predefined-tag-favorite )
}
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
ORDER BY tracker:added(?song)
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def playlists(where_clause):
query = '''
SELECT DISTINCT
rdf:type(?playlist)
tracker:id(?playlist) AS id
nie:title(?playlist) AS title
nfo:entryCounter(?playlist) AS childcount
{
%(where_clause)s
OPTIONAL {
?playlist a nfo:FileDataObject .
FILTER (
EXISTS {
?playlist tracker:available 'true'
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?playlist)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?playlist)
)
)
}
)
}
}
ORDER BY fn:lower-case(?title)
'''.replace('\n', ' ').strip() % {
'where_clause': where_clause.replace('\n', ' ').strip(),
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def album_songs(album_id):
query = '''
SELECT DISTINCT
rdf:type(?song)
tracker:id(?song) AS id
nie:url(?song) AS url
nie:title(?song) AS title
nmm:artistName(nmm:performer(?song)) AS artist
nie:title(nmm:musicAlbum(?song)) AS album
nfo:duration(?song) AS duration
IF(bound(?tag), 'truth!', '') AS lyrics
WHERE {
?song a nmm:MusicPiece ;
a nfo:FileDataObject ;
nmm:musicAlbum ?album .
OPTIONAL {
?song nao:hasTag ?tag .
FILTER( ?tag = nao:predefined-tag-favorite )
}
FILTER (
tracker:id(?album) = %(album_id)s
)
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
ORDER BY
nmm:setNumber(nmm:musicAlbumDisc(?song))
nmm:trackNumber(?song)
tracker:added(?song)
'''.replace('\n', ' ').strip() % {
'album_id': album_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def playlist_songs(playlist_id, filter_clause=None):
query = '''
SELECT
rdf:type(?song)
tracker:id(?entry) AS id
nie:url(?song) AS url
nie:title(?song) AS title
nmm:artistName(nmm:performer(?song)) AS artist
nie:title(nmm:musicAlbum(?song)) AS album
nfo:duration(?song) AS duration
IF(bound(?tag), 'truth!', '') AS lyrics
WHERE {
?playlist a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
?entry a nfo:MediaFileListEntry ;
nfo:entryUrl ?url .
?song a nmm:MusicPiece ;
a nfo:FileDataObject ;
nie:url ?url .
OPTIONAL {
?song nao:hasTag ?tag .
FILTER( ?tag = nao:predefined-tag-favorite )
}
FILTER (
%(filter_clause)s
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
ORDER BY
nfo:listPosition(?entry)
'''.replace('\n', ' ').strip() % {
'playlist_id': playlist_id,
'filter_clause':
filter_clause or 'tracker:id(?playlist) = ' + playlist_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def get_album_for_album_id(album_id):
query = """
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
WHERE {
?album a nmm:MusicAlbum .
FILTER (
tracker:id(?album) = %(album_id)s
)
}
""".replace("\n", " ").strip() % {
'album_id': album_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def get_album_for_song_id(song_id):
query = """
SELECT DISTINCT
rdf:type(?album)
tracker:id(?album) AS id
(
SELECT
nmm:artistName(?artist)
WHERE {
?album nmm:albumArtist ?artist
}
LIMIT 1
) AS artist
nie:title(?album) AS title
nie:title(?album) AS album
WHERE {
?song a nmm:MusicPiece ;
nmm:musicAlbum ?album .
FILTER (
tracker:id(?song) = %(song_id)s
)
FILTER (
tracker:uri-is-descendant(
'%(music_dir)s', nie:url(?song)
) ||
tracker:uri-is-descendant(
'%(download_dir)s', nie:url(?song)
)
)
FILTER (
NOT EXISTS {
?song a nmm:Video
} &&
NOT EXISTS {
?song a nmm:Playlist
}
)
}
""".replace("\n", " ").strip() % {
'song_id': song_id,
'music_dir': Query.MUSIC_URI,
'download_dir': Query.DOWNLOAD_URI
}
return query
@staticmethod
def update_playcount(song_url):
query = """
INSERT OR REPLACE { ?song nie:usageCounter ?playcount . }
WHERE {
SELECT
IF(bound(?usage), (?usage + 1), 1) AS playcount
?song
WHERE {
?song a nmm:MusicPiece .
OPTIONAL { ?song nie:usageCounter ?usage . }
FILTER ( nie:url(?song) = "%(song_url)s" )
}
}
""".replace("\n", " ").strip() % {
'song_url': song_url
}
return query
@staticmethod
def update_last_played(song_url, time):
query = """
INSERT OR REPLACE { ?song nfo:fileLastAccessed '%(time)s' . }
WHERE {
SELECT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER ( nie:url(?song) = "%(song_url)s" )
}
}
""".replace("\n", " ").strip() % {
'song_url': song_url,
'time': time
}
return query
@staticmethod
def create_playlist(title):
query = """
INSERT {
_:playlist
a nmm:Playlist ;
a nfo:MediaList ;
nie:title "%(title)s" ;
nfo:entryCounter 0 .
}
""".replace("\n", " ").strip() % {
'title': title
}
return query
@staticmethod
def create_tag(tag_text):
query = """
INSERT OR REPLACE {
_:tag
a nao:Tag ;
rdfs:comment '%(tag_text)s'.
}
""".replace("\n", " ").strip() % {
'tag_text': tag_text
}
return query
@staticmethod
def create_playlist_with_tag(title, tag_text):
# TODO: make this an extension of 'create playlist' rather than its own func.?
# TODO: CREATE TAG IF IT DOESN'T EXIST!
query = """
INSERT {
_:playlist
a nmm:Playlist ;
a nfo:MediaList ;
nie:title "%(title)s" ;
nfo:entryCounter 0 ;
nao:hasTag ?tag.
}
WHERE {
SELECT ?tag
WHERE {
?tag a nao:Tag ;
rdfs:comment '%(tag_text)s'.
}
}
""".replace("\n", " ").strip() % {
'title': title,
'tag_text': tag_text
}
return query
@staticmethod
def delete_playlist(playlist_id):
query = """
DELETE {
?playlist
a rdfs:Resource .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList .
OPTIONAL {
?playlist
nfo:hasMediaFileListEntry ?entry .
}
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id
}
return query
@staticmethod
def add_song_to_playlist(playlist_id, song_uri):
query = """
INSERT OR REPLACE {
_:entry
a nfo:MediaFileListEntry ;
nfo:entryUrl "%(song_uri)s" ;
nfo:listPosition ?position .
?playlist
nfo:entryCounter ?position ;
nfo:hasMediaFileListEntry _:entry .
}
WHERE {
SELECT
?playlist
(?counter + 1) AS position
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:entryCounter ?counter .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id,
'song_uri': song_uri
}
return query
@staticmethod
def remove_song_from_playlist(playlist_id, song_id):
query = """
INSERT OR REPLACE {
?entry
nfo:listPosition ?position .
}
WHERE {
SELECT
?entry
(?old_position - 1) AS position
WHERE {
?entry
a nfo:MediaFileListEntry ;
nfo:listPosition ?old_position .
?playlist
nfo:hasMediaFileListEntry ?entry .
FILTER (?old_position > ?removed_position)
{
SELECT
?playlist
?removed_position
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?removed_entry .
?removed_entry
nfo:listPosition ?removed_position .
FILTER (
tracker:id(?playlist) = %(playlist_id)s &&
tracker:id(?removed_entry) = %(song_id)s
)
}
}
}
}
INSERT OR REPLACE {
?playlist
nfo:entryCounter ?new_counter .
}
WHERE {
SELECT
?playlist
(?counter - 1) AS new_counter
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:entryCounter ?counter .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
}
DELETE {
?playlist
nfo:hasMediaFileListEntry ?entry .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
FILTER (
tracker:id(?playlist) = %(playlist_id)s &&
tracker:id(?entry) = %(song_id)s
)
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id,
'song_id': song_id
}
return query
@staticmethod
def get_playlist_with_id(playlist_id):
query = """
?playlist a nmm:Playlist .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
""".replace('\n', ' ').strip() % {'playlist_id': playlist_id}
return Query.playlists(query)
@staticmethod
def get_playlist_with_tag(playlist_tag):
query = """
?playlist
a nmm:Playlist ;
nao:hasTag ?tag .
?tag rdfs:comment ?tag_text .
FILTER ( ?tag_text = '%(playlist_tag)s' )
""".replace('\n', ' ').strip() % {'playlist_tag': playlist_tag}
return Query.playlists(query)
@staticmethod
def get_playlist_with_urn(playlist_urn):
query = """
SELECT DISTINCT
tracker:id(<%(playlist_urn)s>) AS id
WHERE {
<%(playlist_urn)s> a nmm:Playlist
}
""".replace('\n', ' ').strip() % {'playlist_urn': playlist_urn}
return query
@staticmethod
def get_playlist_song_with_id(playlist_id, entry_id):
return Query.playlist_songs(
playlist_id, 'tracker:id(?entry) = ' + str(entry_id)
)
@staticmethod
def get_playlist_song_with_urn(entry_urn):
query = """
SELECT DISTINCT
tracker:id(<%(entry_urn)s>) AS id
WHERE {
<%(entry_urn)s> a nfo:MediaFileListEntry
}
""".replace('\n', ' ').strip() % {'entry_urn': entry_urn}
return query
@staticmethod
def clear_playlist_with_id(playlist_id):
query = """
DELETE {
?playlist
nfo:hasMediaFileListEntry ?entry .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
""".replace('\n', ' ').strip() % {'playlist_id': playlist_id}
return query
@staticmethod
def get_most_played_songs():
# TODO: set playlist size somewhere? Currently default is 50.
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:usageCounter ?count ;
nie:isStoredAs ?as .
?as nie:url ?url .
} ORDER BY DESC(?count) LIMIT 50
""".replace('\n', ' ').strip()
return query
@staticmethod
def get_never_played_songs():
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as .
?as nie:url ?url .
FILTER ( NOT EXISTS { ?song nie:usageCounter ?count .} )
} ORDER BY nfo:fileLastAccessed(?song)
""".replace('\n', ' ').strip()
return query
def get_recently_played_songs():
#TODO: or this could take comparison date as an argument so we don't need to make a date string in query.py...
#TODO: set time interval somewhere? A settings file? (Default is maybe 2 weeks...?)
days_difference = 7 # currently hardcoding time interval of 7 days
seconds_difference = days_difference * SECONDS_PER_DAY
compare_date = time.strftime(
sparql_midnight_dateTime_format, time.gmtime(time.time() - seconds_difference))
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as ;
nfo:fileLastAccessed ?last_played .
?as nie:url ?url .
FILTER ( ?last_played > '%(compare_date)s'^^xsd:dateTime )
FILTER ( EXISTS { ?song nie:usageCounter ?count .} )
} ORDER BY DESC(?last_played)
""".replace('\n', ' ').strip() % {'compare_date': compare_date}
return query
def get_recently_added_songs():
#TODO: or this could take comparison date as an argument so we don't need to make a date string in query.py...
#TODO: set time interval somewhere? A settings file? (Default is maybe 2 weeks...?)
days_difference = 7 # currently hardcoding time interval of 7 days
seconds_difference = days_difference * SECONDS_PER_DAY
compare_date = time.strftime(sparql_midnight_dateTime_format, time.gmtime(time.time()-seconds_difference))
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as ;
tracker:added ?added .
?as nie:url ?url .
FILTER ( ?added > '%(compare_date)s'^^xsd:dateTime )
} ORDER BY DESC(?added)
""".replace('\n', ' ').strip() % {'compare_date': compare_date}
return query
def get_favorite_songs():
query = """
SELECT ?url
WHERE {
?song a nmm:MusicPiece ;
nie:isStoredAs ?as ;
nao:hasTag nao:predefined-tag-favorite .
?as nie:url ?url .
} ORDER BY DESC(tracker:added(?song))
""".replace('\n', ' ').strip()
return query
# Functions for search
# TODO: make those queries actually return something
@staticmethod
def get_albums_with_any_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_albums_with_artist_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum ;
nmm:albumArtist ?artist .
FILTER (
fn:contains(tracker:case-fold(nmm:artistName(?artist)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_albums_with_album_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum .
FILTER (
fn:contains(tracker:case-fold(nie:title(?album)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_albums_with_track_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.albums(query)
@staticmethod
def get_artists_with_any_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_artists_with_artist_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum ;
nmm:albumArtist ?artist .
FILTER (
fn:contains(tracker:case-fold(nmm:artistName(?artist)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_artists_with_album_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?album
WHERE {
?album a nmm:MusicAlbum .
FILTER (
fn:contains(tracker:case-fold(nie:title(?album)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_artists_with_track_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
nmm:musicAlbum(?song) AS album
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.artists(query)
@staticmethod
def get_songs_with_any_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s") ||
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s") ||
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def get_songs_with_artist_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nmm:artistName(nmm:performer(?song))), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def get_songs_with_album_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(nmm:musicAlbum(?song))), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def get_songs_with_track_match(name):
name = Tracker.sparql_escape_string(GLib.utf8_casefold(name, -1))
query = '''
{
SELECT DISTINCT
?song
WHERE {
?song a nmm:MusicPiece .
FILTER (
fn:contains(tracker:case-fold(nie:title(?song)), "%(name)s")
)
}
}
'''.replace('\n', ' ').strip() % {'name': name}
return Query.songs(query)
@staticmethod
def clear_playlist(playlist_id):
# TODO is there a way to do this with only one FILTER statement?
query = """
DELETE {
?playlist
nfo:hasMediaFileListEntry ?entry .
?entry
a rdfs:Resource .
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList ;
nfo:hasMediaFileListEntry ?entry .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
INSERT OR REPLACE {
?playlist nfo:entryCounter '0'
}
WHERE {
?playlist
a nmm:Playlist ;
a nfo:MediaList .
FILTER (
tracker:id(?playlist) = %(playlist_id)s
)
}
""".replace("\n", " ").strip() % {
'playlist_id': playlist_id
}
return query
def add_favorite(song_url):
query = """
INSERT {
?song nao:hasTag nao:predefined-tag-favorite
}
WHERE {
?song a nmm:MusicPiece .
FILTER ( nie:url(?song) = "%(song_url)s" )
}
""".replace("\n", " ").strip() % {
'song_url': song_url
}
return query
def remove_favorite(song_url):
query = """
DELETE {
?song nao:hasTag nao:predefined-tag-favorite
}
WHERE {
?song a nmm:MusicPiece .
FILTER ( nie:url(?song) = "%(song_url)s" )
}
""".replace("\n", " ").strip() % {
'song_url': song_url
}
return query | gpl-2.0 | 4,295,170,475,370,668,500 | 30.133136 | 122 | 0.436625 | false |
Yangqing/caffe2 | caffe2/python/checkpoint.py | 1 | 29797 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package checkpoint
# Module caffe2.python.checkpoint
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import Node, Task, TaskGroup, TaskOutput, WorkspaceType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@context.define_context()
class Job(object):
"""
A Job defines three TaskGroups: the `init_group`, the `epoch_group` and the
`exit_group` which will be run by a JobRunner.
The `init_group` will be run only once at startup. Its role is to
initialize globally persistent blobs such as model weights, accumulators
and data file lists.
The `epoch_group` will be run in a loop after init_group. The loop will
exit when any of the stop signals added with `add_stop_signal` is True
at the end of an epoch.
The download_group will be run only once, after all the executions of
    epoch_group finish. Its role is to collect the distributed (scattered)
parameters back after training.
    The `exit_group` will be run only once at the very end of the job; its role
    is to save the results of training.
Jobs are context-driven, so that Tasks can be added to the active Job
without having to explicitly pass the job object around.
Example of usage:
def build_reader(partitions):
with Job.current().init_group:
reader = HiveReader(init_reader, ..., partitions)
Task(step=init_reader)
with Job.current().epoch_group:
limited_reader = ReaderWithLimit(reader, num_iter=10000)
data_queue = pipe(limited_reader, num_threads=8)
Job.current().add_stop_signal(limited_reader.data_finished())
return data_queue
def build_hogwild_trainer(reader, model):
with Job.current().init_group:
Task(step=model.param_init_net)
with Job.current().epoch_group:
pipe(reader, processor=model, num_threads=8)
with Job.current().exit_group:
Task(step=model.save_model_net)
with Job() as job:
reader = build_reader(partitions)
model = build_model(params)
build_hogwild_trainer(reader, model)
"""
def __init__(self,
init_group=None, epoch_group=None,
download_group=None, exit_group=None,
stop_signals=None, nodes_to_checkpoint=None):
self.init_group = init_group or TaskGroup(
workspace_type=WorkspaceType.GLOBAL)
self.epoch_group = epoch_group or TaskGroup()
self.download_group = download_group or TaskGroup()
self.exit_group = exit_group or TaskGroup()
self.stop_signals = stop_signals or []
self._nodes_to_checkpoint = nodes_to_checkpoint
def nodes_to_checkpoint(self):
if self._nodes_to_checkpoint:
return self._nodes_to_checkpoint
else:
return self.init_group.used_nodes()
def compile(self, session_class):
return Job(
init_group=session_class.compile(self.init_group),
epoch_group=session_class.compile(self.epoch_group),
download_group=session_class.compile(self.download_group),
exit_group=session_class.compile(self.exit_group),
stop_signals=self.stop_signals,
nodes_to_checkpoint=self.nodes_to_checkpoint())
def __enter__(self):
self.epoch_group.__enter__()
return self
def __exit__(self, *args):
self.epoch_group.__exit__()
def add_stop_signal(self, output):
if isinstance(output, core.BlobReference):
t = Task(outputs=[output], group=self.epoch_group)
output = t.outputs()[0]
assert isinstance(output, TaskOutput)
self.stop_signals.append(output)
def get_ckpt_filename(node_name, epoch):
"""Returns the checkpoint filename.
Args:
node_name: A string. The name of the node.
epoch: An integer. The checkpoint epoch.
Returns:
ckpt_filename: A string. The filename of the checkpoint.
"""
return node_name + '.' + str(epoch)
def db_name(epoch, node_name, db_prefix, path_prefix=None):
"""Returns the full db name where checkpoint files are saved.
Args:
epoch: An integer. The checkpoint epoch.
node_name: A string. The name of the node.
db_prefix: A string. The prefix used to construct full db name.
path_prefix: A string. Optional param used to construct db name or path
            where checkpoint files are stored.
Returns:
        db_name: A string. The full db name (an absolute path) where the
            checkpoint files are saved.
"""
if path_prefix:
db_name = path_prefix + get_ckpt_filename(node_name, epoch)
else:
ckpt_filename = get_ckpt_filename(node_name, epoch)
db_name = os.path.join(db_prefix, ckpt_filename)
return db_name
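# For illustration (all values below are hypothetical): with
# node_name='trainer_0' and epoch=3, get_ckpt_filename() yields 'trainer_0.3';
# db_name(3, 'trainer_0', '/tmp/checkpoints') then yields
# '/tmp/checkpoints/trainer_0.3', while passing path_prefix='hdfs://ckpt/'
# yields 'hdfs://ckpt/trainer_0.3' instead.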
class CheckpointManager(object):
"""
Controls saving and loading of workspaces on every epoch boundary of a job.
If a CheckpointManager instance is passed to JobRunner, then JobRunner will
    call `init`, `load` and `save` at different moments in between epoch runs.
Args:
db_prefix: The prefix used to construct full db name. Since `absolute_path`
is set to True, this will be used as db_name in SaveOp.
node_name: Name of the node where this checkpoint_manager is used.
db_type: Type of database to use for storing checkpoint.
metadata_handler: An optional object capable of reading/writing
checkpoint info in storage of choice.
"""
def __init__(self, db_prefix, node_name, db_type, metadata_handler=None):
self._db_prefix = db_prefix
self._node_name = node_name
self._db_type = db_type
self._metadata_handler = metadata_handler
# make sure these blobs are the first in the checkpoint file.
self._net = core.Net('!!checkpoint_mngr')
self._blob_names = self._net.AddExternalInput('blob_names')
self._names_output = None
self._path_prefix = None
self._path_type = None
"""
Initialize the checkpoint manager. Determines all blobs that need to be saved
or loads from a checkpoint.
Args:
nodes: An array of nodes where this checkpoint manager is running. Should
only contain a single node.
retrieve_from_epoch: Set to a number to load blobs from this epoch.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
def init(
self,
nodes=None,
retrieve_from_epoch=None,
path_prefix=None,
path_type=None
):
"""
Build a Task that will be run once after the job's `init_group` is run.
This task will determine which blobs need to be checkpointed.
If retrieve_from_epoch is not None, then the checkpoint metadata is
retrieved from a previously saved checkpoint.
"""
assert nodes is None or len(nodes) == 1, (
'CheckpointManager only supports single node.')
with Task(outputs=[self._blob_names]) as task:
if retrieve_from_epoch is None:
ops.GetAllBlobNames(
[],
self._blob_names,
include_shared=False)
else:
full_db_name = db_name(retrieve_from_epoch,
self._node_name, self._db_prefix, path_prefix)
db_type = path_type or self._db_type
logger.info("Initializing checkpoints from = %s"
% full_db_name)
ops.Load(
[], self._blob_names,
db=full_db_name,
db_type=db_type,
absolute_path=True)
self._names_output = task.outputs()[0]
return task
def blob_list(self):
assert self._names_output
return self._names_output.fetch().tolist()
def load(self, epoch, path_prefix=None, path_type=None):
"""
Build a Task that will be run by JobRunner when the job is to be
resumed from a given epoch. This task will run a Load op that will
load and deserialize all relevant blobs from a persistent storage.
"""
full_db_name = db_name(epoch, self._node_name, self._db_prefix, path_prefix)
db_type = path_type or self._db_type
logger.info("Loading checkpoints from = %s" % full_db_name)
with Task() as task:
ops.Load(
[],
self.blob_list(),
db=full_db_name,
db_type=db_type,
absolute_path=True)
return task
def load_blobs_from_checkpoint(self, blob_names, epoch):
"""
Builds a Task that loads only the necessary blobs from a checkpoint of
the given epoch. The necessary blobs are given in the blob_names
argument.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: The checkpoint epoch to load from.
Returns:
A Task which loads the specified blobs from the checkpoint of the
given epoch.
"""
logger.info('Load from %s' % db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
ops.Load(
[],
blob_names,
db=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type,
absolute_path=True,
allow_incomplete=True)
return task
def check_db_exists(self, epoch):
logger.info('Check existence of %s' %
db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
existence = ops.Const(False)
ops.DBExists(
[],
[existence],
db_name=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type,
absolute_path=True)
task.add_output(existence)
return task
def save(self, epoch):
"""
Build a Task that is run once after `init_group` and after each
epoch is run. This will execute a Save ops to serialize and persist
blobs present in the global workspace.
"""
logger.info('Saving to %s' % db_name(epoch, self._node_name, self._db_prefix))
with Task() as task:
ops.Save(
self.blob_list(), [],
db=db_name(epoch, self._node_name, self._db_prefix),
db_type=self._db_type, absolute_path=True)
return task
def write_checkpoint_metadata(self, epoch):
"""
Write metadata for checkpoint
Args:
epoch: An integer. The epoch-id for which checkpoint metadata is
written
"""
if self._metadata_handler is not None:
self._metadata_handler.write(epoch=epoch)
def get_resume_from_epoch_id(self, user_epoch=None):
"""
Identify the epoch-id from which Job must resume
Args:
user_epoch: An integer. Optional parameter for user to explicitly
identify the epoch-id to load checkpoint from
        Returns:
epoch: the epoch-id to load checkpoints from
or None if no checkpoints were written
"""
last_epoch = user_epoch
if self._metadata_handler is not None:
last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
"""Set parameters associated with CP manager
Args:
nodes: An array of nodes where this checkpoint manager is running.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
if path_prefix:
self._path_prefix = path_prefix
if path_type:
self._path_type = path_type
if self._metadata_handler:
self._metadata_handler.set_params(
db_prefix=self._db_prefix,
db_type=self._db_type,
node_names=[str(self._node_name)],
path_prefix=self._path_prefix,
path_type=self._path_type)
def cp_accessible(self, epoch=None):
"""Returns True if Checkpoint data is accessible
Args:
epoch: An integer. The epoch of the checkpoint. If None,
                it implies we need to check if the checkpoint directory is accessible
Returns:
is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
"""
if self._metadata_handler is not None:
return self._metadata_handler.cp_accessible(epoch)
else:
return True
class MultiNodeCheckpointManager(object):
"""
    Coordinates saving and loading of checkpoints across multiple nodes.
Each of `init`, `load` and `save` will build TaskGroups which will
trigger checkpointing on each of the nodes involved in a distributed job.
Args:
db_prefix: The prefix used to construct full db name. Since `absolute_path`
is set to True, this will be used as db_name in SaveOp.
db_type: Type of database to use for storing checkpoint.
metadata_handler: An optional object capable of reading/writing
checkpoint info in storage of choice.
"""
def __init__(self, db_prefix, db_type, metadata_handler=None):
self._node_managers = None
self._db_prefix = db_prefix
self._db_type = db_type
self._metadata_handler = metadata_handler
self._path_prefix = None
self._path_type = None
def _task_group(self, func, *args, **kw):
assert self._node_managers is not None, 'init must be called first.'
with TaskGroup(WorkspaceType.GLOBAL) as task_group:
for node, manager in self._node_managers:
with Node(node):
func(manager, *args, **kw)
return task_group
"""
Args:
nodes: An array of nodes where this checkpoint manager is running.
retrieve_from_epoch: Set to a number to load blobs from this epoch.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
def init(
self, nodes, retrieve_from_epoch=None, path_prefix=None, path_type=None
):
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
return TaskGroup(WorkspaceType.GLOBAL)
self._node_managers = []
for node in nodes:
with Node(node):
manager = CheckpointManager(
db_prefix=self._db_prefix,
node_name=str(node),
db_type=self._db_type)
self._node_managers.append((node, manager))
return self._task_group(
CheckpointManager.init,
nodes=[node],
retrieve_from_epoch=retrieve_from_epoch,
path_prefix=path_prefix,
path_type=path_type)
def load(self, epoch, path_prefix=None, path_type=None):
return self._task_group(
CheckpointManager.load,
epoch,
path_prefix=path_prefix,
path_type=path_type)
def load_blobs_locally(self, nodes, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints to the current node.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the Load ops.
"""
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
else:
self._node_managers = []
for node in nodes:
with Node(node):
manager = CheckpointManager(
db_prefix=self._db_prefix,
node_name=str(node),
db_type=self._db_type)
self._node_managers.append((node, manager))
assert self._node_managers is not None, 'must initialize node managers'
for _, manager in self._node_managers:
existence_task = manager.check_db_exists(epoch)
session.run(existence_task)
existence = existence_task.outputs()[0].fetch()
if not existence:
logger.info('DB %s does not exist!' %
db_name(epoch, manager._node_name, manager._db_prefix))
return False
load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
session.run(load_task)
logger.info('Successfully loaded from checkpoints.')
return True
def get_ckpt_db_name(self, node_name, epoch):
"""Returns the DB name of the given node and the given epoch.
The DB name is effectively the checkpoint path of the given node and
the given epoch.
Args:
node_name: A string. The node name of interest.
epoch: An integer. The epoch of the checkpoint.
Returns:
checkpoint_db_name: A string. The checkpoint path of the given
node and the given epoch.
"""
for node, manager in self._node_managers:
if str(node) == node_name:
return db_name(epoch, manager._node_name, manager._db_prefix)
def save(self, epoch):
"""
Build a Task that will execute a Save ops to serialize and persist
blobs present in the global workspace.
"""
return self._task_group(CheckpointManager.save, epoch)
def write_checkpoint_metadata(self, epoch):
"""
Write metadata for checkpoint
Args:
epoch: An integer. The epoch-id for which checkpoint metadata is
written
"""
if self._metadata_handler is not None:
self._metadata_handler.write(epoch=epoch)
def get_resume_from_epoch_id(self, user_epoch=None):
"""
Identify the epoch-id from which Job must resume
Args:
user_epoch: An integer. Optional parameter for user to explicitly
identify the epoch-id to load checkpoint from
        Returns:
epoch: the epoch-id to load checkpoints from
or None if no checkpoints were written
"""
last_epoch = user_epoch
if self._metadata_handler is not None:
last_epoch = self._metadata_handler.last_epoch(user_epoch=user_epoch)
return last_epoch
def set_params(self, nodes, path_prefix=None, path_type=None):
"""Set parameters associated with CP manager
Args:
nodes: An array of nodes where this checkpoint manager is running.
path_prefix: Used to construct db name or path where checkpoint files are
stored.
path_type: Indicate the type of path where checkpoint files are stored.
"""
self._node_names = [str(node) for node in nodes]
if path_prefix:
self._path_prefix = path_prefix
if path_type:
self._path_type = path_type
if self._metadata_handler:
self._metadata_handler.set_params(
db_prefix=self._db_prefix,
db_type=self._db_type,
node_names=self._node_names,
path_prefix=self._path_prefix,
path_type=self._path_type)
def cp_accessible(self, epoch=None):
"""Returns True if Checkpoint data is accessible
Args:
epoch: An integer. The epoch of the checkpoint. If None,
it implies we need to check if checkpoint directory is accessible
Returns:
is_cp_accessible: A boolean. Returns True if Checkpoint data is accessible
"""
if self._metadata_handler is not None:
return self._metadata_handler.cp_accessible(epoch)
else:
return True
class UploadTaskGroupBuilder(object):
"""A simple class to upload checkpoints."""
def build(self, epoch, checkpoint_manager):
"""Builds the task group to upload checkpoints.
Args:
epoch: An integer. The checkpoint epoch to be uploaded.
checkpoint_manager: Can be a CheckpointManager for single machine
or a MultiNodeCheckpointManager for multi-machine. The manager
that initializes/saves/loads checkpoints.
Raises:
NotImplementedError: This base class only has the interface,
the implementation will be in the subclasses.
"""
raise NotImplementedError()
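# A minimal subclass skeleton (hypothetical, for illustration only): build()
# must return a TaskGroup, which JobRunner below runs once after training via
# session.run(). A concrete implementation would typically look up each node's
# local checkpoint path with checkpoint_manager.get_ckpt_db_name(node, epoch)
# and add one upload Task per node, e.g.:
#
#   class MyUploadTaskGroupBuilder(UploadTaskGroupBuilder):
#       def build(self, epoch, checkpoint_manager):
#           with TaskGroup(WorkspaceType.GLOBAL) as tg:
#               ...  # one Task per node copying the db returned by
#                    # checkpoint_manager.get_ckpt_db_name(node_name, epoch)
#           return tg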
class JobRunner(object):
"""
Implement the runtime logic for jobs with checkpointing at the level of
epoch. Can be used to run either single-host or distributed jobs. Job
runner is a callable to be called once from the master, passing a session
as an argument. This call will block until the Job execution is complete.
If a checkpoint_manager is passed, checkpoints will be taken after
initialization and after each epoch execution. If, in addition,
`resume_from_epoch` is an epoch number, the corresponding checkpoint will
be loaded and job execution will continue from the given epoch. In
this case, the job's init_group will not be run.
Refer to checkpoint_test.py for an example.
"""
def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None,
upload_task_group_builder=None):
"""Initializes the JobRunner.
Args:
job: A Job object. The job to be executed.
checkpoint_manager: Can be a CheckpointManager for single machine
or a MultiNodeCheckpointManager for multi-machine. The manager
that initializes/saves/loads checkpoints.
resume_from_epoch: An integer. The epoch to resume from.
upload_task_group_builder: A subclass of the
UploadTaskGroupBuilder. Creates a task group to upload
checkpoints.
"""
self.resume_from_epoch = resume_from_epoch
self.checkpoint_manager = checkpoint_manager
self.job = job
self.upload_task_group_builder = upload_task_group_builder
def __call__(self, session):
"""Runs the training flow.
Args:
            session: A Session object. Valid choices are: LocalSession,
LocalHostScheduler, and DistributedSession. It is used to
                execute one TaskGroup at a time.
"""
# identify the epoch we must resume from
if self.checkpoint_manager:
self.checkpoint_manager.set_params(nodes=self.job.nodes_to_checkpoint())
self.resume_from_epoch = self.checkpoint_manager.\
get_resume_from_epoch_id(self.resume_from_epoch)
if self.resume_from_epoch is not None:
logger.info('Resuming from epoch {}'.format(self.resume_from_epoch))
# Initialize all the nodes.
from_scratch = self.resume_from_epoch is None
if from_scratch:
session.run(self.job.init_group)
if self.checkpoint_manager:
logger.info('Preparing checkpoints ...')
session.run(self.checkpoint_manager.init(
self.job.nodes_to_checkpoint(),
retrieve_from_epoch=self.resume_from_epoch))
# Save the first checkpoint before training starts, or resume from
# a previously saved checkpoint.
if from_scratch:
self.save_checkpoints(0, session)
else:
logger.info('Loading checkpoints for epoch {} ...'.format(
self.resume_from_epoch))
session.run(
self.checkpoint_manager.load(self.resume_from_epoch))
logger.info('Checkpoint loaded')
logger.info("Finished initializing")
# Start training.
epoch = 1 if from_scratch else self.resume_from_epoch + 1
while True:
logger.info('Starting epoch %d' % epoch)
session.run(self.job.epoch_group)
logger.info('Finished epoch %d' % epoch)
stop_signals = [o.fetch() for o in self.job.stop_signals]
if self.checkpoint_manager:
self.save_checkpoints(epoch, session)
if any(stop_signals):
logger.info('Stopping')
break
epoch += 1
logger.info('Finished training')
# Upload the checkpoints.
        if self.upload_task_group_builder:
upload_task_group = self.upload_task_group_builder.build(
epoch, self.checkpoint_manager)
session.run(upload_task_group)
logger.info('Finished uploading the checkpoints')
# Download the parameters to save
session.run(self.job.download_group)
logger.info('Finished downloading the parameters')
# Finally run the exit step to save nets
session.run(self.job.exit_group)
logger.info('Finished running the exit group')
return epoch
def load_blobs_from_checkpoints(self, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints.
Checkpoints store the snapshots of the workspace in each node.
Sometimes we only need to load a subset of the blobs from the
checkpoints. One common scenario is to load only the model blobs from
the checkpoints for evaluation purpose. Given the names of the
necessary blobs, this function goes over all the checkpoints of all the
nodes, but only loads the blobs specified in the blob_names to the
current workspace.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the load ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint_manager:
raise ValueError('Checkpoint manager is None')
logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
return self.checkpoint_manager.load_blobs_locally(
self.job.nodes_to_checkpoint(), blob_names, epoch, session)
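    # A minimal sketch of the evaluation scenario described above; ``runner`` is
    # assumed to be a JobRunner with a valid checkpoint manager, and the blob
    # names are placeholders for real model parameters:
    #
    #   runner.load_blobs_from_checkpoints(
    #       blob_names=['model/fc_w', 'model/fc_b'], epoch=5, session=session)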
def save_checkpoints(self, epoch, session):
"""Triggers operation to save checkpoints
This method will trigger the Save ops to serialize and persist the
        blobs present in the global workspace.
Args:
epoch: An integer. The checkpoint epoch-id that we are saving.
session: A Session object to execute the save ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint_manager:
raise ValueError('Checkpoint manager is None')
try:
is_accessible = self.checkpoint_manager.cp_accessible(epoch=None)
if is_accessible:
logger.info('Saving checkpoints for epoch {}'.format(epoch))
session.run(self.checkpoint_manager.save(epoch))
self.checkpoint_manager.write_checkpoint_metadata(epoch)
logger.info('Checkpoints saved')
else:
logger.warning("Checkpoint files cannot be accessed!")
except Exception as ex:
logger.warning("Unable to write checkpoint for epoch {}. Error={}".
format(epoch, ex))
def epoch_limiter(num_epochs):
"""
Creates a task that will output True when a given
number of epochs has finished.
"""
with Job.current().init_group:
init_net = core.Net('epoch_counter_init')
counter = init_net.CreateCounter([], init_count=num_epochs - 1)
Task(step=init_net)
with Job.current().epoch_group:
epoch_net = core.Net('epoch_countdown')
finished = epoch_net.CountDown(counter)
output = Task(step=epoch_net, outputs=finished).outputs()[0]
Job.current().add_stop_signal(output)
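# A rough wiring sketch, assuming the usual caffe2 pattern of building the Job
# inside its context manager before handing it to a JobRunner (the task
# definitions themselves are elided):
#
#   with Job() as job:
#       # ... define tasks in job.init_group / job.epoch_group ...
#       epoch_limiter(num_epochs=10)
#   JobRunner(job)(session)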
| apache-2.0 | 7,775,444,930,955,179,000 | 38.571049 | 86 | 0.606202 | false |
SeavantUUz/silence | tools.py | 1 | 1952 | #coding: utf-8
__all__ = ['covert','line_resize','parse','draw_line','draw_screen','move','draw_input','check_pos']
import locale
import logging
locale.setlocale(locale.LC_ALL,'')
code = locale.getpreferredencoding()
def covert(string,code):
new_string = string.decode('utf-8')
lenth = len(new_string)
return new_string.encode(code), lenth
def line_resize(lines, width, code):
count = len(lines)
index = 0
while index < count:
line = lines[index].decode('utf-8')
line_lenth = len(line)
if line_lenth > width:
s_width = 0
while s_width < line_lenth:
yield line[s_width:s_width+width].encode(code)
s_width += width
index += 1
else:
yield line.encode(code)
index += 1
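# Worked example (Python 2 byte strings, as used throughout this module):
#   list(line_resize(['abcdef'], 4, 'utf-8')) == ['abcd', 'ef']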
def combine(func):
def wrapper(*args, **kwargs):
value = "".join(reversed(list(func(*args, **kwargs))))
return value
return wrapper
@combine
def parse(value):
while value:
ch = value % 1000
value /= 1000
yield chr(ch)
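# Worked example: parse() peels off three decimal digits per character and
# @combine reverses and joins them, so (relying on Python 2 integer division)
#   parse(104105) == 'hi'   # 105 -> 'i', 104 -> 'h', reversed to 'hi'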
def draw_line(stdscr, y, x):
stdscr.hline(y,0,ord('-'),x)
def move(stdscr, y, x):
stdscr.move(y,x)
def draw_screen(stdscr, content, hight, width):
lines = list(line_resize(content, width, code))
move(stdscr,0,0)
y = 0
for line in lines[-hight:]:
stdscr.addstr(covert(line,code)[0])
y += 1
move(stdscr, y, 0)
def draw_input(stdscr, line, y, x):
logging.info(line)
move(stdscr, y,0)
stdscr.clrtoeol()
stdscr.refresh()
stdscr.addstr(line)
logging.info(line)
move(stdscr,y,x)
def check_pos(stdscr, type_, value):
y, x = stdscr.getmaxyx()
if type_ == 'x':
if value < 0:
value = 0
if value > x-1:
value = x-1
if type_ == 'y':
if value < 0:
value = 0
if value > y-1:
value = y-1
return value
| mit | 707,597,869,600,511,700 | 23.098765 | 100 | 0.553279 | false |
rvianello/rdkit | rdkit/Chem/UnitTestQED.py | 4 | 4498 | from __future__ import print_function
from collections import namedtuple
import doctest
import os.path
import unittest
from rdkit import Chem
from rdkit import RDConfig
from rdkit.Chem import QED
doLong = False
TestData = namedtuple('TestData', 'lineNo,smiles,mol,expected')
dataNCI200 = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'QED', 'NCI_200_qed.csv')
dataRegression = os.path.join(RDConfig.RDCodeDir, 'Chem', 'test_data', 'QED', 'Regression_qed.csv')
def load_tests(loader, tests, ignore):
""" Add the Doctests from the module """
tests.addTests(doctest.DocTestSuite(QED, optionflags=doctest.ELLIPSIS))
return tests
class TestCase(unittest.TestCase):
def testQED(self):
self.assertEqual(QED.qed.version, '1.1.0',
msg='QED version has changed. Update the regression tests if required.')
def testNCI200(self):
for d in readTestData(dataNCI200):
self.assertAlmostEqual(QED.qed(d.mol), d.expected,
msg='QED not equal to expected in line {}'.format(d.lineNo))
# Check that adding hydrogens will not change the result
# This is currently not the case. Hydrogens change the number of rotatable bonds and the
# number of alerts.
mol = Chem.AddHs(d.mol)
self.assertAlmostEqual(QED.qed(mol), d.expected,
msg='QED not equal to expected in line {}'.format(d.lineNo))
def testRegression(self):
if not doLong:
raise unittest.SkipTest('long test')
for d in readTestData(dataRegression):
self.assertAlmostEqual(QED.qed(d.mol), d.expected,
msg='QED not equal to expected in line {}'.format(d.lineNo))
def test_properties(self):
m = Chem.MolFromSmiles('N=C(CCSCc1csc(N=C(N)N)n1)NS(N)(=O)=O')
p = QED.properties(m)
self.assertAlmostEqual(p.MW, 337.456)
self.assertAlmostEqual(p.ALOGP, -0.55833)
self.assertAlmostEqual(p.HBA, 6)
self.assertAlmostEqual(p.HBD, 5)
self.assertAlmostEqual(p.PSA, 173.33)
self.assertAlmostEqual(p.ROTB, 7)
self.assertAlmostEqual(p.AROM, 1)
self.assertAlmostEqual(p.ALERTS, 3)
p = QED.properties(Chem.AddHs(m))
self.assertAlmostEqual(p.MW, 337.456)
self.assertAlmostEqual(p.ALOGP, -0.55833)
self.assertAlmostEqual(p.HBA, 6)
self.assertAlmostEqual(p.HBD, 5)
self.assertAlmostEqual(p.PSA, 173.33)
self.assertAlmostEqual(p.ROTB, 7)
self.assertAlmostEqual(p.AROM, 1)
self.assertAlmostEqual(p.ALERTS, 3)
def test_examples(self):
# Paroxetine 0.935
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('c1cc2OCOc2cc1OCC1CNCCC1c1ccc(F)cc1')), 0.934,
places=3)
# Leflunomide 0.929
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('C1=NOC(C)=C1C(=O)Nc1ccc(cc1)C(F)(F)F')),
0.911, places=3)
# Clomipramine 0.779
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('CN(C)CCCN1c2ccccc2CCc2ccc(Cl)cc21')),
0.818, places=3)
# Tegaserod 0.213
self.assertAlmostEqual(QED.qed(Chem.MolFromSmiles('CCCCCNC(=N)NN=CC1=CNc2ccc(CO)cc21')),
0.235, places=3)
def readTestData(filename):
""" Read test data from file """
with open(filename, 'r') as f:
for lineNo, line in enumerate(f, 1):
if line[0] == '#':
continue
smiles, expected = line.strip().split(',')
mol = Chem.MolFromSmiles(smiles)
if not mol:
raise AssertionError('molecule construction failed on line %d' % lineNo)
yield TestData(lineNo, smiles, mol, float(expected))
def updateTestData():
""" Update the test data. This should only be done if the method changes! """
for filename in (dataNCI200, dataRegression,):
data = list(readTestData(filename))
with open(filename, 'w') as f:
print('# Test data for QED descriptor', file=f)
for d in data:
expected = QED.qed(d.mol)
print('{0.smiles},{1}'.format(d, expected), file=f)
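# Command-line usage, handled by the block below:
#   python UnitTestQED.py        # run the standard tests
#   python UnitTestQED.py -l     # also run the long regression test
#   python UnitTestQED.py -u     # regenerate the CSV test data (only if QED changed)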
if __name__ == '__main__': # pragma: nocover
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-l', default=False, action='store_true', dest='doLong')
parser.add_argument('-u', default=False, action='store_true', dest='updateTestData')
args = parser.parse_args()
# Handle possible arguments
doLong = args.doLong
if args.doLong:
sys.argv.remove('-l')
if args.updateTestData:
updateTestData()
sys.argv.remove('-u')
unittest.main()
| bsd-3-clause | -2,996,229,248,481,166,000 | 34.417323 | 100 | 0.659627 | false |
fatboystring/Wagtail-MVC | wagtail_mvc/views.py | 1 | 1517 | from django.views.generic import DetailView
class WagtailMvcView(DetailView):
"""
Basic default wagtail mvc view class
"""
page = None
def dispatch(self, request, *args, **kwargs):
"""
Pops the page out of the keyword args and
stores it against the view instance
:param request: HttpRequest instance
:param args: Default positional args
:param kwargs: Default keyword args
:return: HttpResponse
"""
self.page = kwargs.pop('page')
return super(WagtailMvcView, self).dispatch(request, *args, **kwargs)
def get_template_names(self):
"""
Returns a list of potential template
names for the view
        :return: List containing the page's own template
"""
return [self.page.get_template(
self.request,
*self.args,
**self.kwargs
)]
def get_object(self):
"""
We return the page instance by default
:return: wagtail page model instance
"""
return self.page
def get_context_data(self, **kwargs):
"""
        Adds the page's own context data to the view context data
:param kwargs: Default keyword args
:return: Dict of template data
"""
ctx = super(WagtailMvcView, self).get_context_data(**kwargs)
ctx.update(self.page.get_context(
self.request,
*self.args,
**kwargs
))
return ctx
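# A minimal usage sketch (``request`` and ``some_wagtail_page`` are assumed to be
# provided by the caller, e.g. a page's serve() implementation or a custom route):
# because dispatch() pops a ``page`` keyword argument, the view must be invoked
# with the wagtail Page instance supplied explicitly:
#
#   response = WagtailMvcView.as_view()(request, page=some_wagtail_page)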
| mit | -3,658,705,902,822,287,400 | 25.155172 | 77 | 0.57416 | false |
ibagrak/algae | gae/tests/test_i18n.py | 1 | 3855 | import webapp2
import webtest
import unittest2
import copy
import urllib
import logging
from google.appengine.ext import testbed
from google.appengine.ext import db
from webapp2_extras import json
from webapp2_extras.appengine.auth import models as users
import app
import settings
import handlers
import re
from core import model
class I18NTest(unittest2.TestCase):
    # Accept-Language: header values for tests
hdr_english_accept = {'Accept-Language': 'en'}
hdr_other_accept = {'Accept-Language': 'da, fr'}
hdr_german_accept = {'Accept-Language': 'de'}
hdr_english_prefer = {'Accept-Language': 'en, de'}
hdr_german_prefer = {'Accept-Language': 'de, en'}
    # text used to check whether the response is in English or German
txt_in_english = r'was created by'
txt_in_german = r'ist ein Werk von'
def setUp(self):
# Create a WSGI application.
application = webapp2.WSGIApplication(app.routes, debug = True, config = settings.app_config)
application.error_handlers[404] = handlers.common.handle_404
application.error_handlers[500] = handlers.common.handle_500
# Wrap the app with WebTest's TestApp.
self.testapp = webtest.TestApp(application)
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
def tearDown(self):
self.testbed.deactivate()
# test with 'only english'
def test_english(self):
response = self.testapp.get('/', headers=self.hdr_english_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
# test with 'only german'
def test_german(self):
response = self.testapp.get('/', headers=self.hdr_german_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_german, response.body)
self.assertNotIn(self.txt_in_english, response.body)
# test with 'english preferred'
def test_english_preferred(self):
response = self.testapp.get('/', headers=self.hdr_english_prefer)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
# test with 'german preferred'
    def test_german_preferred(self):
response = self.testapp.get('/', headers=self.hdr_german_prefer)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_german, response.body)
self.assertNotIn(self.txt_in_english, response.body)
# test with 'other'
def test_other(self):
response = self.testapp.get('/', headers=self.hdr_other_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
# test with 'english', then request german
def test_german_explicit(self):
response = self.testapp.get('/', headers=self.hdr_english_accept)
response = self.testapp.get('/locale/de_DE', headers=self.hdr_english_accept)
self.assertEqual(response.status_int, 302)
response = self.testapp.get('/', headers=self.hdr_english_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_german, response.body)
self.assertNotIn(self.txt_in_english, response.body)
# test with 'german', then request english
def test_english_explicit(self):
response = self.testapp.get('/', headers=self.hdr_german_accept)
response = self.testapp.get('/locale/en_US', headers=self.hdr_german_accept)
self.assertEqual(response.status_int, 302)
response = self.testapp.get('/', headers=self.hdr_german_accept)
self.assertEqual(response.status_int, 200)
self.assertIn(self.txt_in_english, response.body)
self.assertNotIn(self.txt_in_german, response.body)
| mit | -3,105,536,637,220,046,300 | 35.367925 | 95 | 0.746304 | false |
shajoezhu/msprime | tskit_tests/test_vcf.py | 1 | 9222 | """
Test cases for VCF output in tskit.
"""
from __future__ import print_function
from __future__ import division
import collections
import math
import os
import tempfile
import unittest
import msprime
import vcf
import tskit
# Pysam is not available on windows, so we don't make it mandatory here.
_pysam_imported = False
try:
import pysam
_pysam_imported = True
except ImportError:
pass
test_data = []
def setUp():
Datum = collections.namedtuple(
"Datum",
["tree_sequence", "ploidy", "contig_id", "vcf_file", "sample_names"])
L = 100
for ploidy in [1, 2, 3, 5]:
for contig_id in ["1", "x" * 8]:
for n in [2, 10]:
for rho in [0, 0.5]:
for mu in [0, 1.0]:
ts = msprime.simulate(
n * ploidy, length=L, recombination_rate=rho,
mutation_rate=mu)
fd, file_name = tempfile.mkstemp(prefix="tskit_vcf_")
os.close(fd)
with open(file_name, "w") as f:
ts.write_vcf(f, ploidy, contig_id)
sample_names = ["msp_{}".format(j) for j in range(n)]
test_data.append(
Datum(ts, ploidy, contig_id, file_name, sample_names))
def tearDown():
for datum in test_data:
os.unlink(datum.vcf_file)
def write_vcf(tree_sequence, output, ploidy, contig_id):
"""
    Writes a VCF using the same algorithm as the low level code.
"""
if tree_sequence.get_sample_size() % ploidy != 0:
raise ValueError("Sample size must a multiple of ploidy")
n = tree_sequence.get_sample_size() // ploidy
sample_names = ["msp_{}".format(j) for j in range(n)]
last_pos = 0
positions = []
for variant in tree_sequence.variants():
pos = int(round(variant.position))
if pos <= last_pos:
pos = last_pos + 1
positions.append(pos)
last_pos = pos
contig_length = int(math.ceil(tree_sequence.get_sequence_length()))
if len(positions) > 0:
contig_length = max(positions[-1], contig_length)
print("##fileformat=VCFv4.2", file=output)
print("##source=tskit {}".format(tskit.__version__), file=output)
print(
'##FILTER=<ID=PASS,Description="All filters passed">',
file=output)
print("##contig=<ID={},length={}>".format(contig_id, contig_length), file=output)
print(
'##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">',
file=output)
print(
"#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO",
"FORMAT", sep="\t", end="", file=output)
for sample_name in sample_names:
print("\t", sample_name, sep="", end="", file=output)
print(file=output)
for variant in tree_sequence.variants():
pos = positions[variant.index]
print(
contig_id, pos, ".", "A", "T", ".", "PASS", ".", "GT",
sep="\t", end="", file=output)
for j in range(n):
genotype = "|".join(
str(g) for g in
variant.genotypes[j * ploidy: j * ploidy + ploidy])
print("\t", genotype, end="", sep="", file=output)
print(file=output)
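# For illustration only (not used by the tests): the reference implementation
# above can be exercised on a fresh simulation; ``sys`` would need to be
# imported for this exact snippet.
#
#   ts = msprime.simulate(6, mutation_rate=1, random_seed=1)
#   write_vcf(ts, sys.stdout, ploidy=2, contig_id="1")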
@unittest.skip("Skipping until version headers sorted out")
class TestEquality(unittest.TestCase):
"""
Tests if the VCF file produced by the low level code is the
same as one we generate here.
"""
def test_equal(self):
for datum in test_data:
with tempfile.TemporaryFile("w+") as f:
write_vcf(datum.tree_sequence, f, datum.ploidy, datum.contig_id)
f.seek(0)
vcf1 = f.read()
with open(datum.vcf_file) as f:
vcf2 = f.read()
self.assertEqual(vcf1, vcf2)
class TestHeaderParsers(unittest.TestCase):
"""
Tests if we can parse the headers with various tools.
"""
def test_pyvcf(self):
for datum in test_data:
reader = vcf.Reader(filename=datum.vcf_file)
self.assertEqual(len(reader.contigs), 1)
contig = reader.contigs[datum.contig_id]
self.assertEqual(contig.id, datum.contig_id)
self.assertGreater(contig.length, 0)
self.assertEqual(len(reader.alts), 0)
self.assertEqual(len(reader.filters), 1)
p = reader.filters["PASS"]
self.assertEqual(p.id, "PASS")
self.assertEqual(len(reader.formats), 1)
f = reader.formats["GT"]
self.assertEqual(f.id, "GT")
self.assertEqual(len(reader.infos), 0)
@unittest.skipIf(not _pysam_imported, "pysam not available")
def test_pysam(self):
for datum in test_data:
bcf_file = pysam.VariantFile(datum.vcf_file)
self.assertEqual(bcf_file.format, "VCF")
self.assertEqual(bcf_file.version, (4, 2))
header = bcf_file.header
self.assertEqual(len(header.contigs), 1)
contig = header.contigs[0]
self.assertEqual(contig.name, datum.contig_id)
self.assertGreater(contig.length, 0)
self.assertEqual(len(header.filters), 1)
p = header.filters["PASS"]
self.assertEqual(p.name, "PASS")
self.assertEqual(p.description, "All filters passed")
self.assertEqual(len(header.info), 0)
self.assertEqual(len(header.formats), 1)
fmt = header.formats["GT"]
self.assertEqual(fmt.name, "GT")
self.assertEqual(fmt.number, 1)
self.assertEqual(fmt.type, "String")
self.assertEqual(fmt.description, "Genotype")
self.assertEqual(len(header.samples), len(datum.sample_names))
for s1, s2 in zip(header.samples, datum.sample_names):
self.assertEqual(s1, s2)
bcf_file.close()
@unittest.skipIf(not _pysam_imported, "pysam not available")
class TestRecordsEqual(unittest.TestCase):
"""
Tests where we parse the input using PyVCF and Pysam
"""
def verify_records(self, datum, pyvcf_records, pysam_records):
self.assertEqual(len(pyvcf_records), len(pysam_records))
for pyvcf_record, pysam_record in zip(pyvcf_records, pysam_records):
self.assertEqual(pyvcf_record.CHROM, pysam_record.chrom)
self.assertEqual(pyvcf_record.POS, pysam_record.pos)
self.assertEqual(pyvcf_record.ID, pysam_record.id)
self.assertEqual(pyvcf_record.ALT, list(pysam_record.alts))
self.assertEqual(pyvcf_record.REF, pysam_record.ref)
self.assertEqual(pysam_record.filter[0].name, "PASS")
self.assertEqual(pyvcf_record.FORMAT, "GT")
self.assertEqual(
datum.sample_names, list(pysam_record.samples.keys()))
for value in pysam_record.samples.values():
self.assertEqual(len(value.alleles), datum.ploidy)
for j, sample in enumerate(pyvcf_record.samples):
self.assertEqual(sample.sample, datum.sample_names[j])
if datum.ploidy > 1:
self.assertTrue(sample.phased)
for call in sample.data.GT.split("|"):
self.assertIn(call, ["0", "1"])
def test_all_records(self):
for datum in test_data:
vcf_reader = vcf.Reader(filename=datum.vcf_file)
bcf_file = pysam.VariantFile(datum.vcf_file)
pyvcf_records = list(vcf_reader)
pysam_records = list(bcf_file)
self.verify_records(datum, pyvcf_records, pysam_records)
bcf_file.close()
class TestContigLengths(unittest.TestCase):
"""
Tests that we create sensible contig lengths under a variety of conditions.
"""
def setUp(self):
fd, self.temp_file = tempfile.mkstemp(prefix="msprime_vcf_")
os.close(fd)
def tearDown(self):
os.unlink(self.temp_file)
def get_contig_length(self, ts):
with open(self.temp_file, "w") as f:
ts.write_vcf(f)
reader = vcf.Reader(filename=self.temp_file)
contig = reader.contigs["1"]
return contig.length
def test_no_mutations(self):
ts = msprime.simulate(10, length=1)
self.assertEqual(ts.num_mutations, 0)
contig_length = self.get_contig_length(ts)
self.assertEqual(contig_length, 1)
def test_long_sequence(self):
# Nominal case where we expect the positions to map within the original
# sequence length
ts = msprime.simulate(10, length=100, mutation_rate=0.01, random_seed=3)
self.assertGreater(ts.num_mutations, 0)
contig_length = self.get_contig_length(ts)
self.assertEqual(contig_length, 100)
def test_short_sequence(self):
# Degenerate case where the positions cannot map into the sequence length
ts = msprime.simulate(10, length=1, mutation_rate=10)
self.assertGreater(ts.num_mutations, 1)
contig_length = self.get_contig_length(ts)
self.assertEqual(contig_length, ts.num_mutations)
| gpl-3.0 | 174,891,526,295,170,660 | 37.26556 | 85 | 0.586315 | false |
fujicoin/fujicoin | test/functional/p2p_invalid_locator.py | 1 | 2017 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test node responses to invalid locators.
"""
from test_framework.messages import msg_getheaders, msg_getblocks, MAX_LOCATOR_SZ
from test_framework.p2p import P2PInterface
from test_framework.test_framework import FujicoinTestFramework
class InvalidLocatorTest(FujicoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0] # convenience reference to the node
node.generatetoaddress(1, node.get_deterministic_priv_key().address) # Get node out of IBD
self.log.info('Test max locator size')
block_count = node.getblockcount()
for msg in [msg_getheaders(), msg_getblocks()]:
self.log.info('Wait for disconnect when sending {} hashes in locator'.format(MAX_LOCATOR_SZ + 1))
exceed_max_peer = node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ + 1), -1)]
exceed_max_peer.send_message(msg)
exceed_max_peer.wait_for_disconnect()
node.disconnect_p2ps()
self.log.info('Wait for response when sending {} hashes in locator'.format(MAX_LOCATOR_SZ))
within_max_peer = node.add_p2p_connection(P2PInterface())
msg.locator.vHave = [int(node.getblockhash(i - 1), 16) for i in range(block_count, block_count - (MAX_LOCATOR_SZ), -1)]
within_max_peer.send_message(msg)
if type(msg) == msg_getheaders:
within_max_peer.wait_for_header(node.getbestblockhash())
else:
within_max_peer.wait_for_block(int(node.getbestblockhash(), 16))
if __name__ == '__main__':
InvalidLocatorTest().main()
| mit | -2,813,032,587,056,297,500 | 45.906977 | 135 | 0.662866 | false |
Mr-meet/PythonApplets | spiders_packege/xunlei_girl/PageProcessor.py | 1 | 1671 | from bs4 import BeautifulSoup
import time
from spiders_packege.unit.ResourcesDowmloader import ResourcesDownloader
from spiders_packege.unit.ResourcesProcessor import ResourcesProcessor
class PageProcessor:
def __init__(self):
pass
def dealAllImg(basePath,list,webDriver):
baseDetailUrl='http://meitu.xunlei.com/detail.html?id='
for oneGirlDict in list:
block_detailid=oneGirlDict['block_detailid']
girlDetailUrl=baseDetailUrl+block_detailid
imgCount=0
if '花絮' not in oneGirlDict['title']:
girlPhotoList=PageProcessor.dealGirlPage(girlDetailUrl,webDriver)
for girlPhoto in girlPhotoList:
imgCount=imgCount+1
path=basePath+'\\【'+oneGirlDict['name']+'】'+'身高:'+oneGirlDict['height']
name=str(imgCount)+'.jpg'
ResourcesProcessor.saveFile(girlPhoto,path,name)
def dealGirlPage(url,webDriver):
imgResoures=list()
webDriver.get(url)
time.sleep(1.5)
        webDriver.page_source.encode('utf-8','ignore') # get the page's HTML
        #webDriver.get_screenshot_as_file("1.jpg") # take a screenshot of the page
soup = BeautifulSoup(webDriver.page_source, "html5lib")
imgItems = soup.find_all('img', class_='portrait')
imgLen=len(imgItems)/2
baseImgUrl=imgItems[0]['src'][0:-5]
i=1
while i<int(imgLen):
imgUrl=baseImgUrl+str(i)+'.jpg'
imgObj=ResourcesDownloader.downloadResource(imgUrl)
imgResoures.append(imgObj)
i=i+1
return imgResoures | mit | 5,768,753,674,798,561,000 | 36.860465 | 91 | 0.62815 | false |