repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
Turupawn/website | runners/migrations/0001_initial.py | 1 | 1722 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('platforms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Runner',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=127, verbose_name='Name')),
('slug', models.SlugField(unique=True)),
('website', models.CharField(max_length=127, verbose_name='Website', blank=True)),
('icon', models.ImageField(upload_to=b'runners/icons', blank=True)),
('platforms', models.ManyToManyField(related_name='runners', to='platforms.Platform')),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RunnerVersion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('version', models.CharField(max_length=32)),
('architecture', models.CharField(default=b'x86_64', max_length=8, choices=[(b'i386', b'32 bit'), (b'x86_64', b'64 bit'), (b'arm', b'ARM')])),
('url', models.URLField(blank=True)),
('runner', models.ForeignKey(related_name='versions', to='runners.Runner')),
],
options={
'ordering': ('version', 'architecture'),
},
bases=(models.Model,),
),
]
| agpl-3.0 | -5,153,796,980,511,601,000 | 39.046512 | 158 | 0.533101 | false |
carthach/essentia | test/src/unittests/rhythm/test_beatsloudness_streaming.py | 1 | 7504 | #!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
# NOTE: frequencyBands = [20,150] is used throughout the tests in order to
# obtain the same results as before adding frequencyBands parameter to the
# beatsloudness algorithm
from essentia_test import *
from essentia.streaming import BeatsLoudness, MonoLoader as sMonoLoader, \
RhythmExtractor
class TestBeatsLoudness(TestCase):
def computeSingleBeatLoudness(self, beat, audio, sr):
beatWindowDuration = 0.1
beatDuration = 0.05
start = int(round((beat - beatWindowDuration/2)*sr))
end = int(round((beat + beatWindowDuration/2 + beatDuration + 0.0001)*sr))
# SingleBeatLoudness will throw an exception if the audio fragment is too short;
# this will happen when the beat is so close to the beginning of the signal that
# the beat window would actually start before it
if start < 0:
# reposition the window at the start of the signal, keeping its original length
end = end - start
start = 0
return SingleBeatLoudness(frequencyBands=[20, 150])(audio[start:end])
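# (Editor's note - illustrative arithmetic only, not part of the original test.)
# For example, with sr = 44100 and beat = 1.0 the slice above becomes
# start = round((1.0 - 0.05) * 44100) = 41895 and
# end = round((1.0 + 0.05 + 0.05 + 0.0001) * 44100) = 48514,
# so roughly 0.15 s of audio around the beat is passed to SingleBeatLoudness.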
def testEmpty(self):
gen = VectorInput([])
beatsLoudness = BeatsLoudness()
p = Pool()
gen.data >> beatsLoudness.signal
beatsLoudness.loudness >> (p, 'beats.loudness')
beatsLoudness.loudnessBandRatio >> (p, 'beats.loudnessBandRatio')
run(gen)
self.assertEqual(len(p.descriptorNames()), 0)
def testRegression(self):
loader = sMonoLoader(filename=join(testdata.audio_dir, 'recorded', 'techno_loop.wav'),
sampleRate=44100)
rhythm = RhythmExtractor()
p = Pool()
loader.audio >> rhythm.signal
loader.audio >> (p, 'audio.raw')
rhythm.bpm >> None
rhythm.bpmIntervals >> None
rhythm.estimates >> None
#rhythm.rubatoStart >> None
#rhythm.rubatoStop >> None
rhythm.ticks >> (p, 'beats.locationEstimates')
run(loader)
gen = VectorInput(p['audio.raw'])
beatsLoudness = BeatsLoudness(beats=p['beats.locationEstimates'],
frequencyBands = [20,150])
gen.data >> beatsLoudness.signal
beatsLoudness.loudness >> (p, 'beats.loudness')
beatsLoudness.loudnessBandRatio >> (p, 'beats.loudnessBandRatio')
run(gen);
expectedLoudness = []
expectedLoudnessBandRatio = []
for beat in p['beats.locationEstimates']:
loudness, loudnessBandRatio = self.computeSingleBeatLoudness(beat,p['audio.raw'], 44100)
expectedLoudness.append(loudness)
expectedLoudnessBandRatio.append(loudnessBandRatio)
# The values below were extracted from running the essentia-1.0 cpp tests
# on some platform. Those results cause the test to fail, and there is no
# way to be sure they are correct. Therefore a new test has been done
# where we compare the results of the algorithm with manually passing
# the beats to the singlebeatloudness std:
# expectedLoudness = [0.428758, 0.291341, 0.633762, 0.26555, 0.425245, 0.277024, 0.495149, 0.242385, 0.357601, 0.334, 0.323821, 0.232946, 0.528381, 0.200571, 0.437708, 0.167769, 0.584228, 0.392591, 0.530719, 0.296724, 0.550218, 0.332743, 0.501887, 0.310001, 0.403775, 0.29342, 0.578137, 0.306543, 0.470718, 0.690108, 0.0089495, 0.372516, 0.180331, 0.253785, 0.298147, 0.290077, 0.447453, 0.536407, 0.257739, 0.587473, 0.526467, 0.415834, 0.259945, 0.48784, 0.440733, 0.462674, 0.279204]
# expectedLoudnessBass = [0.928696, 0.127746, 0.681139, 0.0506813, 0.947531, 0.0654974, 0.822909, 0.0516866, 0.781132, 0.134502, 0.74214, 0.0559918, 0.870337, 0.0795841, 0.825638, 0.0935618, 0.875636, 0.11054, 0.515007, 0.0459782, 0.681463, 0.0269587, 0.755229, 0.0620431, 0.711997, 0.127048, 0.713851, 0.0255558, 0.700511, 0.754544, 0.452143, 0.745394, 0.0926197, 0.113369, 0.0516325, 0.0871752, 0.00407939, 0.779901, 0.0498086, 0.677019, 0.0714908, 0.368265, 0.0453059, 0.51892, 0.0210914, 0.63086, 0.069424]
self.assertAlmostEqualVector(p['beats.loudness'], expectedLoudness)
self.assertAlmostEqualVector(p['beats.loudnessBandRatio'], expectedLoudnessBandRatio)
def testClickTrack(self):
sr = 44100
nClicks = 5
# create audio signal that represents a click track nClicks seconds
# long, with .25s clicks starting at every second
clickTrack = [1.0]*int(sr/2) + [0.0]*int(sr/2)
clickTrack *= nClicks
clickLocations = [i + 1./4. for i in range(nClicks)]
gen = VectorInput(clickTrack)
beatsLoudness = BeatsLoudness(beats=clickLocations,
frequencyBands = [20,150],
beatWindowDuration=0.5,
beatDuration=0.5)
p = Pool()
gen.data >> beatsLoudness.signal
beatsLoudness.loudness >> (p, 'beats.loudness')
beatsLoudness.loudnessBandRatio >> (p, 'beats.loudnessBandRatio')
run(gen)
# last beat gets discarded as it cannot be completely acquired, thus
# (nclicks-1)
expectedLoudness = [5.22167]*(nClicks-1)
expectedLoudnessBandRatio = [2.07204e-13]*(nClicks-1)
self.assertAlmostEqualVector(p['beats.loudness'], expectedLoudness, 1e-5)
self.assertAlmostEqualVector(p['beats.loudnessBandRatio'], expectedLoudnessBandRatio, 2e-2)
def testLastBeatTooShort(self):
beatDuration = 0.5 # in seconds
# 1 second silence, beat for beatDuration (s), 1 second silence, then last beat lasts half beatDuration
signal = [0]*44100 + [1]*int(44100*beatDuration) + [0]*44100 + [1]*int(44100*beatDuration/2)
beatPositions = [1+beatDuration/2.0, 2.05+beatDuration/4.0] # each in seconds
gen = VectorInput(signal)
beatsLoudness = BeatsLoudness(beats=beatPositions,
frequencyBands = [20,150],
beatDuration=beatDuration,
beatWindowDuration=0.5)
p = Pool()
gen.data >> beatsLoudness.signal
beatsLoudness.loudness >> (p, 'beats.loudness')
beatsLoudness.loudnessBandRatio >> (p, 'beats.loudnessBandRatio')
run(gen)
# the last beat should have been thrown away since it didn't last for a
# whole beat duration
self.assertAlmostEqualVector(p['beats.loudness'], [5.22167], 1e-5)
self.assertAlmostEqualVector(p['beats.loudnessBandRatio'], [2.07204e-13], 2e-2)
suite = allTests(TestBeatsLoudness)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
| agpl-3.0 | -526,608,728,823,635,500 | 42.627907 | 517 | 0.646588 | false |
ajmedford/catmap | catmap/parsers/table_parser.py | 1 | 17136 | import numpy as np
import catmap
from parser_base import *
string2symbols = catmap.string2symbols
Template = catmap.Template
class TableParser(ParserBase):
"""Parses attributes based on column headers and filters.
Additional functionality may be added by inheriting and defining
the parse_{header_name} function where header_name is the
column header for the additional variable to be parsed.
"""
def __init__(self,reaction_model,**kwargs):
ParserBase.__init__(self,reaction_model)
defaults = dict(
estimate_frequencies = 1, #Use frequencies from different sites
#if available (set variable to 1 or True).
#Use dissociated state frequencies for TS (set to 2)
#If no frequencies are available from other sites then
#concatenate frequencies from
#individual atoms (set to 3).
#If no frequencies can be found, use empty frequency set
#(set to >3)
frequency_surface_names = [], #Use frequencies from a specific
#surface_name only. If "None" or empty then an average of
#the frequencies from all available surfaces will be used.
required_headers = ['species_name','surface_name','site_name'
,'formation_energy','frequencies',
'reference'],
parse_headers = ['formation_energy','frequencies'],
frequency_unit_conversion = 1.239842e-4, # conversion factor to
coverage_headers = ['coverage','coadsorbate_coverage'],
#go from input units to eV
standard_coverage = 'min',
standard_coadsorbate_coverage = 'min',
#coverage to use as the "base" in coverage-dependent input file
#use "min" to take the minimum or specify explicitly
interaction_surface_names = None,
#use a different set of (more) surfaces to form interaction matrix.
#If none then only the surfaces in the model will be used.
)
self._linebreak = '\n'
self._separator = '\t'
self._rxm.update(kwargs,override=True)
self._rxm.update(defaults,override=False)
self._required = {'input_file':str,'estimate_frequencies':bool,
'required_headers':list,
'parse_headers':list,
'frequency_unit_conversion':float,
'frequency_surface_names':None}
def parse(self,**kwargs):
f = open(self.input_file)
lines = f.read().split(self._linebreak)
lines = [L for L in lines if L]
f.close()
self._baseparse()
headers = lines.pop(0).split(self._separator)
headers = [h.strip() for h in headers]
if not set(self.required_headers).issubset(set(headers)):
raise ValueError('Required headers are missing! '+\
'Please be sure that all headers '+\
'are specified: '+' '.join(self.required_headers))
linedicts = []
for L in lines:
linedict = {}
for k, v in zip(headers,
L.split(self._separator, len(headers))):
linedict[k] = v
sites = [s for s in self.species_definitions if
self.species_definitions[s].get('type',None) == 'site' and
linedict['site_name'] in
self.species_definitions[s]['site_names']
and '*' not in s]
if not sites:
sites = ['?']
adskey = [linedict['species_name']+'_'+site_i for site_i in sites]
linedict['species_keys'] = adskey
linedicts.append(linedict)
self._line_dicts = linedicts
self._headers = headers
for p in self.parse_headers:
if callable(getattr(self,'parse_'+p)):
getattr(self,'parse_'+p)()
else:
raise AttributeError('No parsing function defined for '+p)
def parse_formation_energy(self,**kwargs):
"Parse in basic info for reaction model"
self.__dict__.update(kwargs)
all_ads = [k for k in self.species_definitions.keys()
if self.species_definitions[k].get('type',None) != 'site']
for adsdef in all_ads:
ads = self.species_definitions[adsdef].get('name',None)
if ads is None:
del self.species_definitions[adsdef]
print('Warning: Species with undefined "name" was encountered ('+adsdef+'). '+\
'Ensure that all species which are explicitly set in "species_definitions" '+\
'are also defined in the reaction network ("rxn_expressions"). This definition '+\
'will be ignored.')
else:
site = self.species_definitions[adsdef]['site']
alternative_names = self.species_definitions[adsdef].get(
'alternative_names',[])
adsnames = [ads]+alternative_names
sites = self.species_definitions[site]['site_names']
infodict = {}
for linedict in self._line_dicts:
if (
linedict['species_name'] in adsnames and
linedict['site_name'] in sites and
linedict['surface_name'] in list(self.surface_names)+['None']
):
#The following clause ensures that the low-coverage limit
#is used unless otherwise specified.
#It should probably be abstracted out into something cleaner.
pass_dict = {}
surf = linedict['surface_name']
for cvg_key in ['coverage','coadsorbate_coverage']:
pass_dict[cvg_key] = True
if cvg_key in linedict:
standard_cvg = getattr(self,'standard_'+cvg_key, None)
if standard_cvg in ['min','minimum',None]:
if surf in infodict:
if linedict[cvg_key] > infodict[surf][cvg_key]:
pass_dict[cvg_key] = False
else:
if linedict[cvg_key] != standard_cvg:
pass_dict[cvg_key] = False
if False not in pass_dict.values():
infodict[surf] = linedict
paramlist = []
sources = []
if self.species_definitions[adsdef]['type'] not in ['gas']:
for surf in self.surface_names:
if surf in infodict:
E = float(infodict[surf]['formation_energy'])
paramlist.append(E)
sources.append(infodict[surf]['reference'].strip())
else:
paramlist.append(None)
self.species_definitions[adsdef]['formation_energy'] = paramlist
self.species_definitions[adsdef]['formation_energy_source'] = sources
else:
if 'None' in infodict:
E = float(infodict['None']['formation_energy'])
self.species_definitions[adsdef]['formation_energy'] = E
self.species_definitions[adsdef]['formation_energy_source'] = \
infodict['None']['reference'].strip()
else:
raise ValueError('No formation energy found for '+str(adsdef)+'. Check input file.')
def parse_frequencies(self,**kwargs):
self.__dict__.update(kwargs)
allfreqdict = {}
frequency_dict = {}
#Parse in all available frequencies
for linedict in self._line_dicts:
if eval(linedict['frequencies']):
freqs = eval(linedict['frequencies'])
freqs = [self.frequency_unit_conversion*f for f in freqs]
if linedict['species_name'] not in allfreqdict:
allfreqdict[linedict['species_name']] = \
[[linedict['surface_name'],
linedict['site_name'],
freqs]] #Store frequency info for parsing later
else:
frq = [linedict['surface_name'],
linedict['site_name'],
freqs]
if frq not in allfreqdict[linedict['species_name']]:
allfreqdict[linedict['species_name']].append(frq)
def freq_handler(freqdict_entry,site,ads):
perfect_matches = []
partial_matches = []
if self.frequency_surface_names is None:
self.frequency_surface_names = []
for entry in freqdict_entry:
masked = [entry[0] in self.frequency_surface_names,
entry[1] in self.species_definitions.get(site,{'site_names':[]})['site_names'],
entry[2]]
if not self.frequency_surface_names:
if site in self._gas_sites and entry[0] == 'None':
masked[0] = True
elif site not in self._gas_sites:
masked[0] = True
else:
if site in self._gas_sites and entry[0] == 'None':
masked[0] = True
if False not in masked:
perfect_matches.append(masked[-1])
elif masked[0] and site not in self._gas_sites: #Surface matches but site might not...
if entry[1] != 'gas': #HACK... this whole function needs to be cleaned up.
partial_matches.append(masked[-1])
def match_handler(perfect_matches):
if len(perfect_matches) == 1:
return perfect_matches[0]
elif len(perfect_matches) > 1:
if len(set([len(pm) for pm in perfect_matches]))>1:
raise ValueError('Frequency vectors have different '+\
'lengths for '+ str(ads))
matcharray = np.array(perfect_matches)
freqout = matcharray.mean(0) #average valid frequencies
return list(freqout)
else: #No valid frequencies are found...
return []
if len(perfect_matches) > 0:
return match_handler(perfect_matches)
elif self.estimate_frequencies:
return match_handler(partial_matches)
else:
return []
all_ads = [k for k in self.species_definitions.keys()
if self.species_definitions[k]['type'] != 'site']
for adsdef in all_ads+allfreqdict.keys(): #format all freqs
if '_' in adsdef:
adsname,site = adsdef.split('_')
elif adsdef in allfreqdict.keys():
adsname = adsdef
site = self._default_site
if adsname in allfreqdict:
frequency_dict[adsdef] = freq_handler(allfreqdict[adsname],site
,adsname)
elif self.estimate_frequencies > 3:
frequency_dict[adsdef] = []
for adsdef in all_ads:
adsname,site = [self.species_definitions[adsdef][k]
for k in ['name','site']]
#Use single-atom frequencies...
if (
not frequency_dict.get(adsdef,None) and
self.estimate_frequencies > 2 and
'-' not in adsname #Don't include TS's
):
symbols = string2symbols(adsname)
freqs = []
if set(symbols).issubset(set(frequency_dict.keys())):
for s in symbols:
freqs += frequency_dict[s]
frequency_dict[adsdef] = freqs
for adsdef in all_ads:
#Use dissociated TS frequencies
adsname,site = [self.species_definitions[adsdef][k]
for k in ['name','site']]
if (
not frequency_dict.get(adsdef,None) and
self.estimate_frequencies > 1 and
'-' in adsname
):
A,B = adsname.split('-')
frequency_dict[adsdef] = frequency_dict[A] + frequency_dict[B]
for key in self.species_definitions.keys():
self.species_definitions[key]['frequencies'] = frequency_dict.get(key,[])
def parse_coverage(self,**kwargs):
self.__dict__.update(kwargs)
n = len(self.adsorbate_names)
surfaces = self.surface_names
info_dict = {}
ads_names = self.adsorbate_names+self.transition_state_names
for surf in surfaces:
cvg_dict = {}
for linedict in self._line_dicts:
for skey in linedict['species_keys']:
if (skey in self.adsorbate_names+self.transition_state_names
and linedict['surface_name'] == surf):
ads = skey
if 'delta_theta' in linedict:
self.species_definitions[ads]['delta_theta'] = float(
linedict['delta_theta'])
theta_vec = [0]*len(ads_names)
idx_i = ads_names.index(ads)
theta_i = float(linedict['coverage'])
theta_vec[idx_i] += theta_i
for coads_name in ['coadsorbate','coadsorbate2']:
#could add coadsorbate3, coadsorbate4,... as needed
if coads_name+'_name' in linedict:
if linedict[coads_name+'_name'] != 'None':
coads = linedict[coads_name+'_name']
site = ads.split('_')[-1]
site = linedict.get(coads_name+'_site',site)
coads += '_'+site #assume coads on same site as ads if not specified
theta_j = float(linedict[coads_name+'_coverage'])
if coads in ads_names:
idx_j = ads_names.index(coads)
theta_vec[idx_j] += theta_j
else:
names_only = [n.split('_')[0] for n in ads_names]
coads_name = coads.split('_')[0]
if coads_name not in names_only:
print 'Warning: Could not find co-adsorbed species '\
+coads+' (adsorbate '+ads+'). Ignoring this entry.'
else:
idx_j = names_only.index(coads_name)
actual_ads = ads_names[idx_j]
print 'Warning: Could not find co-adsorbed species '\
+coads+' (adsorbate '+ads+'). Using '+actual_ads+'.'
theta_vec[idx_j] += theta_j
E_diff = float(linedict['formation_energy'])
E_int = linedict.get('integral_formation_energy',None)
if E_int:
E_int = float(E_int)
theta_E = [theta_vec,
E_diff,E_int]
if ads in cvg_dict:
cvg_dict[ads].append(theta_E)
else:
cvg_dict[ads] = [theta_E]
info_dict[surf] = cvg_dict
for i_ads,ads in enumerate(self.adsorbate_names+self.transition_state_names):
cvg_dep_E = [None]*len(surfaces)
for surf in surfaces:
cvgs = info_dict[surf].get(ads,None)
if cvgs is None:
pass
else:
cvg_dep_E[self.surface_names.index(surf)] = cvgs
self.species_definitions[ads]['coverage_dependent_energy'] = cvg_dep_E
| gpl-3.0 | 238,050,990,544,195,740 | 47.681818 | 108 | 0.474907 | false |
BiRG/Omics-Dashboard | omics/omics_dashboard/blueprints/api/analyses.py | 1 | 2789 | from flask import Blueprint, jsonify, request
from flask_login import login_required
import data_tools as dt
from helpers import get_current_user, handle_exception
analyses_api = Blueprint('analyses_api', __name__, url_prefix='/api/analyses')
@analyses_api.route('/', methods=['GET', 'POST'])
@login_required
def list_analyses():
try:
user = get_current_user()
if request.method == 'POST':
new_data = request.get_json(force=True)
if 'collection_ids' in new_data:
collections = [dt.collections.get_collection(user, collection_id)
for collection_id in new_data['collection_ids']]
else:
collections = None
analysis = dt.analyses.create_analysis(user, new_data, collections)
return jsonify(analysis.to_dict())
return jsonify([analysis.to_dict() for analysis in dt.analyses.get_analyses(user)])
except Exception as e:
return handle_exception(e)
@analyses_api.route('/attach/<analysis_id>', methods=['POST', 'DELETE'])
@login_required
def attach_collection(analysis_id=None):
try:
user = get_current_user()
analysis = dt.analyses.get_analysis(user, analysis_id)
data = request.get_json()
if 'collection_ids' in data:
collections = [dt.collections.get_collection(user, collection_id)
for collection_id in data['collection_ids']]
for collection in collections:
if request.method == 'POST':
dt.analyses.attach_collection(user, analysis, collection)
if request.method == 'DELETE':
dt.analyses.detach_collection(user, analysis, collection)
return jsonify({**analysis.to_dict(), 'is_write_permitted': dt.users.is_write_permitted(user, analysis)})
else:
raise ValueError('No collection id(s) specified')
except Exception as e:
return handle_exception(e)
@analyses_api.route('/<analysis_id>', methods=['GET', 'POST', 'DELETE'])
@login_required
def get_analysis(analysis_id=None):
try:
user = get_current_user()
if request.method == 'GET':
analysis = dt.analyses.get_analysis(user, analysis_id)
return jsonify(analysis.to_dict())
if request.method == 'POST':
analysis = dt.analyses.get_analysis(user, analysis_id)
return jsonify(dt.analyses.update_analysis(user, analysis, request.get_json(force=True)).to_dict())
if request.method == 'DELETE':
analysis = dt.analyses.get_analysis(user, analysis_id)
return jsonify(dt.analyses.delete_analysis(user, analysis))
except Exception as e:
return handle_exception(e)
| mit | -5,069,418,989,384,102,000 | 41.257576 | 117 | 0.622087 | false |
maui-packages/qt-creator | tests/system/shared/editor_utils.py | 1 | 19525 | #############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://qt.digia.com/licensing. For further information
## use the contact form at http://qt.digia.com/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
import re;
# places the cursor inside the given editor into the given line
# (leading and trailing whitespaces are ignored!)
# and goes to the end of the line
# line can be a regex - but if so, remember to set isRegex to True
# the function returns True if this went fine, False on error
def placeCursorToLine(editor, line, isRegex=False):
def getEditor():
return waitForObject(editor)
isDarwin = platform.system() == 'Darwin'
if not isinstance(editor, (str, unicode)):
editor = objectMap.realName(editor)
oldPosition = 0
if isDarwin:
type(getEditor(), "<Home>")
else:
type(getEditor(), "<Ctrl+Home>")
found = False
if isRegex:
regex = re.compile(line)
while not found:
currentLine = str(lineUnderCursor(getEditor())).strip()
found = isRegex and regex.match(currentLine) or not isRegex and currentLine == line
if not found:
type(getEditor(), "<Down>")
newPosition = getEditor().textCursor().position()
if oldPosition == newPosition:
break
oldPosition = newPosition
if not found:
test.fatal("Couldn't find line matching\n\n%s\n\nLeaving test..." % line)
return False
if isDarwin:
type(getEditor(), "<Ctrl+Right>")
else:
type(getEditor(), "<End>")
return True
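# (Editor's note, illustrative only - the identifiers below are examples.)
# Typical use of the helper above:
#
#     editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
#     if not placeCursorToLine(editor, "return app\.exec\(\);", True):
#         earlyExit("Could not place cursor")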
# this function returns True if a QMenu is
# popped up above the given editor
# param editor is the editor where the menu should appear
# param menuInList is a list containing one item. This item will be assigned the menu if there is one.
# THIS IS A HACK to get a pass-by-reference
def menuVisibleAtEditor(editor, menuInList):
menuInList[0] = None
try:
# Hack for Squish 5.0.1 handling menus of Qt5.2 on Mac (avoids crash) - remove asap
if platform.system() == 'Darwin':
for obj in object.topLevelObjects():
if className(obj) == "QMenu" and obj.visible and widgetContainsPoint(editor, obj.mapToGlobal(QPoint(0, 0))):
menuInList[0] = obj
return True
return False
menu = waitForObject("{type='QMenu' unnamed='1' visible='1'}", 500)
if platform.system() == 'Darwin':
menu.activateWindow()
success = menu.visible and widgetContainsPoint(editor, menu.mapToGlobal(QPoint(0, 0)))
if success:
menuInList[0] = menu
return success
except:
return False
# this function checks whether the given global point (QPoint)
# is contained in the given widget
def widgetContainsPoint(widget, point):
return QRect(widget.mapToGlobal(QPoint(0, 0)), widget.size).contains(point)
# this function simply opens the context menu inside the given editor
# at the same position where the text cursor is located at
def openContextMenuOnTextCursorPosition(editor):
rect = editor.cursorRect(editor.textCursor())
openContextMenu(editor, rect.x+rect.width/2, rect.y+rect.height/2, 0)
menuInList = [None]
waitFor("menuVisibleAtEditor(editor, menuInList)", 5000)
return menuInList[0]
# this function marks/selects the text inside the given editor from current cursor position
# param direction is one of "Left", "Right", "Up", "Down", but "End" and combinations work as well
# param typeCount defines how often the cursor will be moved in the given direction (while marking)
def markText(editor, direction, typeCount=1):
for i in range(typeCount):
type(editor, "<Shift+%s>" % direction)
# works for all standard editors
def replaceEditorContent(editor, newcontent):
type(editor, "<Ctrl+a>")
type(editor, "<Delete>")
type(editor, newcontent)
def typeLines(editor, lines):
if isinstance(lines, (str, unicode)):
lines = [lines]
if isinstance(lines, (list, tuple)):
for line in lines:
type(editor, line)
type(editor, "<Return>")
else:
test.warning("Illegal parameter passed to typeLines()")
# function to verify hoverings on e.g. code inside of the given editor
# param editor the editor object
# param lines a list/tuple of regex that indicates which lines should be verified
# param additionalKeyPresses an array holding the additional typings to do (special chars for cursor movement)
# to get to the location (inside line) where to trigger the hovering (must be the same for all lines)
# param expectedTypes list/tuple holding the type of the (tool)tips that should occur (for each line)
# param expectedValues list/tuple of dict or list/tuple of strings regarding the types that have been used
# if it's a dict it indicates a property value pair, if it's a string it is type specific (e.g. color value for ColorTip)
# param alternativeValues same as expectedValues, but here you can submit alternatives - this is for example
# necessary if you do not add the correct documentation (from where the tip gets its content)
def verifyHoveringOnEditor(editor, lines, additionalKeyPresses, expectedTypes, expectedValues, alternativeValues=None):
counter = 0
for line in lines:
expectedVals = expectedValues[counter]
expectedType = expectedTypes[counter]
altVal = None
if isinstance(alternativeValues, (list, tuple)):
altVal = alternativeValues[counter]
counter += 1
placeCursorToLine(editor, line, True)
for ty in additionalKeyPresses:
type(editor, ty)
rect = editor.cursorRect(editor.textCursor())
sendEvent("QMouseEvent", editor, QEvent.MouseMove, rect.x+rect.width/2, rect.y+rect.height/2, Qt.NoButton, 0)
try:
tip = waitForObject("{type='%s' visible='1'}" % expectedType)
except:
tip = None
if tip == None:
test.warning("Could not get %s for line containing pattern '%s'" % (expectedType,line))
else:
if expectedType == "ColorTip":
__handleColorTips__(tip, expectedVals, altVal)
elif expectedType == "TextTip":
__handleTextTips__(tip, expectedVals, altVal)
elif expectedType == "WidgetTip":
test.warning("Sorry - WidgetTip checks aren't implemented yet.")
sendEvent("QMouseEvent", editor, QEvent.MouseMove, 0, -50, Qt.NoButton, 0)
waitFor("isNull(tip)", 10000)
# helper function that handles verification of TextTip hoverings
# param textTip the TextTip object
# param expectedVals a dict holding property value pairs that must match
def __handleTextTips__(textTip, expectedVals, alternativeVals):
props = object.properties(textTip)
expFail = altFail = False
eResult = verifyProperties(props, expectedVals)
for val in eResult.itervalues():
if not val:
expFail = True
break
if expFail and alternativeVals != None:
aResult = verifyProperties(props, alternativeVals)
else:
altFail = True
aResult = None
if not expFail:
test.passes("TextTip verified")
else:
for key,val in eResult.iteritems():
if val == False:
if aResult and aResult.get(key):
test.passes("Property '%s' does not match expected, but alternative value" % key)
else:
aVal = None
if alternativeVals:
aVal = alternativeVals.get(key, None)
if aVal:
test.fail("Property '%s' does not match - expected '%s' or '%s', got '%s'" % (key, expectedVals.get(key), aVal, props.get(key)))
else:
test.fail("Property '%s' does not match - expected '%s', got '%s" % (key, expectedVals.get(key), props.get(key)))
else:
test.fail("Property '%s' could not be found inside properties" % key)
# helper function that handles verification of ColorTip hoverings
# param colTip the ColorTip object
# param expectedColor a single string holding the color the ColorTip should have
# Attention: because of being a non-standard Qt object it's not possible to
# verify colors which are (semi-)transparent!
def __handleColorTips__(colTip, expectedColor, alternativeColor):
def uint(value):
if value < 0:
return 0xffffffff + value + 1
return value
cmp = QColor()
cmp.setNamedColor(expectedColor)
if alternativeColor:
alt = QColor()
alt.setNamedColor(alternativeColor)
if cmp.alpha() != 255 or alternativeColor and alt.alpha() != 255:
test.warning("Cannot handle transparent colors - cancelling this verification")
return
dPM = QPixmap.grabWidget(colTip, 1, 1, colTip.width-2, colTip.height-2)
img = dPM.toImage()
rgb = img.pixel(1, 1)
rgb = QColor(rgb)
if rgb.rgba() == cmp.rgba() or alternativeColor and rgb.rgba() == alt.rgba():
test.passes("ColorTip verified")
else:
altColorText = ""
if alternativeColor:
altColorText = " or '%X'" % uint(alt.rgb())
test.fail("ColorTip does not match - expected color '%X'%s got '%X'"
% (uint(cmp.rgb()), altColorText, uint(rgb.rgb())))
# function that checks whether all expected properties (including their values)
# match the given properties
# param properties a dict holding the properties to check
# param expectedProps a dict holding the key value pairs that must be found inside properties
# this function returns a dict holding the keys of the expectedProps - the value of each key
# is a boolean that indicates whether this key could have been found inside properties and
# the values matched or None if the key could not be found
def verifyProperties(properties, expectedProps):
if not isinstance(properties, dict) or not isinstance(expectedProps, dict):
test.warning("Wrong usage - both parameter must be of type dict")
return {}
result = {}
for key,val in expectedProps.iteritems():
foundVal = properties.get(key, None)
if foundVal != None:
result[key] = val == foundVal
else:
result[key] = None
return result
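# (Editor's note, illustrative only.) For instance,
#     verifyProperties({'text': 'foo', 'visible': True}, {'text': 'foo', 'color': 'red'})
# returns {'text': True, 'color': None}: keys whose values match map to True,
# keys missing from the given properties map to None.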
def getEditorForFileSuffix(curFile, treeViewSyntax=False):
cppEditorSuffixes = ["cpp", "cc", "CC", "h", "H", "cp", "cxx", "C", "c++", "inl", "moc", "qdoc",
"tcc", "tpp", "t++", "c", "cu", "m", "mm", "hh", "hxx", "h++", "hpp", "hp"]
qmlEditorSuffixes = ["qml", "qmlproject", "js", "qs", "qtt"]
proEditorSuffixes = ["pro", "pri", "prf"]
glslEditorSuffixes= ["frag", "vert", "fsh", "vsh", "glsl", "shader", "gsh"]
pytEditorSuffixes = ["py", "pyw", "wsgi"]
suffix = __getFileSuffix__(curFile)
expected = os.path.basename(curFile)
if treeViewSyntax:
expected = simpleFileName(curFile)
mainWindow = waitForObject(":Qt Creator_Core::Internal::MainWindow")
if not waitFor("expected in str(mainWindow.windowTitle)", 5000):
test.fatal("Window title (%s) did not switch to expected file (%s)."
% (str(mainWindow.windowTitle), expected))
try:
if suffix in cppEditorSuffixes:
editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
elif suffix in qmlEditorSuffixes:
editor = waitForObject(":Qt Creator_QmlJSEditor::QmlJSTextEditorWidget")
elif suffix in proEditorSuffixes:
editor = waitForObject(":Qt Creator_ProFileEditorWidget")
elif suffix in glslEditorSuffixes:
editor = waitForObject("{type='GLSLEditor::Internal::GLSLTextEditorWidget' unnamed='1' "
"visible='1' window=':Qt Creator_Core::Internal::MainWindow'}")
elif suffix in pytEditorSuffixes:
editor = waitForObject(":Qt Creator_PythonEditor::EditorWidget")
else:
test.log("Trying PlainTextEditor (file suffix: %s)" % suffix)
try:
editor = waitForObject(":Qt Creator_TextEditor::PlainTextEditorWidget", 3000)
except:
test.fatal("Unsupported file suffix for file '%s'" % curFile)
editor = None
except:
f = str(waitForObject(":Qt Creator_Core::Internal::MainWindow").windowTitle).split(" ", 1)[0]
if os.path.basename(curFile) == f:
test.fatal("Could not find editor although expected file matches.")
else:
test.fatal("Expected (%s) and current file (%s) do not match. Failed to get editor"
% (os.path.basename(curFile), f))
editor = None
return editor
# helper that determines the file suffix of the given fileName
# (doesn't matter if fileName contains the path as well)
def __getFileSuffix__(fileName):
suffix = os.path.basename(fileName).rsplit(".", 1)
if len(suffix) == 1:
return None
else:
return suffix[1]
def maskSpecialCharsForSearchResult(filename):
filename = filename.replace("_", "\\_").replace(".","\\.")
return filename
def validateSearchResult(expectedCount):
searchResult = waitForObject(":Qt Creator_SearchResult_Core::Internal::OutputPaneToggleButton")
ensureChecked(searchResult)
resultTreeView = waitForObject(":Qt Creator_Find::Internal::SearchResultTreeView")
counterLabel = waitForObject("{type='QLabel' unnamed='1' visible='1' text?='*matches found.' "
"window=':Qt Creator_Core::Internal::MainWindow'}")
matches = cast((str(counterLabel.text)).split(" ", 1)[0], "int")
test.compare(matches, expectedCount, "Verified match count.")
model = resultTreeView.model()
for index in dumpIndices(model):
itemText = str(model.data(index).toString())
doubleClickItem(resultTreeView, maskSpecialCharsForSearchResult(itemText), 5, 5, 0, Qt.LeftButton)
test.log("%d occurrences in %s" % (model.rowCount(index), itemText))
for chIndex in dumpIndices(model, index):
resultTreeView.scrollTo(chIndex)
text = str(chIndex.data()).rstrip('\r')
rect = resultTreeView.visualRect(chIndex)
doubleClick(resultTreeView, rect.x+5, rect.y+5, 0, Qt.LeftButton)
editor = getEditorForFileSuffix(itemText)
if not waitFor("lineUnderCursor(editor) == text", 2000):
test.warning("Jumping to search result '%s' is pretty slow." % text)
waitFor("lineUnderCursor(editor) == text", 2000)
test.compare(lineUnderCursor(editor), text)
# this function invokes context menu and command from it
def invokeContextMenuItem(editorArea, command1, command2 = None):
ctxtMenu = openContextMenuOnTextCursorPosition(editorArea)
if platform.system() == 'Darwin':
activateItem(ctxtMenu, command1)
else:
activateItem(waitForObjectItem(objectMap.realName(ctxtMenu), command1, 2000))
if command2:
# Hack for Squish 5.0.1 handling menus of Qt5.2 on Mac (avoids crash) - remove asap
if platform.system() == 'Darwin':
for obj in object.topLevelObjects():
if className(obj) == 'QMenu' and obj.visible and not obj == ctxtMenu:
activateItem(obj, command2)
break
else:
activateItem(waitForObjectItem("{title='%s' type='QMenu' visible='1' window=%s}"
% (command1, objectMap.realName(ctxtMenu)), command2, 2000))
# this function invokes the "Find Usages" item from context menu
# param editor an editor object
# param line a line in editor (content of the line as a string)
# param typeOperation a key to type
# param n how often repeat the type operation?
def invokeFindUsage(editor, line, typeOperation, n=1):
if not placeCursorToLine(editor, line, True):
return False
for i in range(n):
type(editor, typeOperation)
invokeContextMenuItem(editor, "Find Usages")
return True
def addBranchWildcardToRoot(rootNode):
pos = rootNode.find(".")
if pos == -1:
return rootNode + " [[]*[]]"
return rootNode[:pos] + " [[]*[]]" + rootNode[pos:]
def openDocument(treeElement):
try:
selectFromCombo(":Qt Creator_Core::Internal::NavComboBox", "Projects")
navigator = waitForObject(":Qt Creator_Utils::NavigationTreeView")
try:
item = waitForObjectItem(navigator, treeElement, 3000)
except:
treeElement = addBranchWildcardToRoot(treeElement)
item = waitForObjectItem(navigator, treeElement)
doubleClickItem(navigator, treeElement, 5, 5, 0, Qt.LeftButton)
mainWindow = waitForObject(":Qt Creator_Core::Internal::MainWindow")
expected = str(item.text).split("/")[-1]
waitFor("expected in str(mainWindow.windowTitle)")
return True
except:
return False
def earlyExit(details="No additional information"):
test.fail("Something went wrong running this test", details)
invokeMenuItem("File", "Save All")
invokeMenuItem("File", "Exit")
def openDocumentPlaceCursor(doc, line, additionalFunction=None):
cppEditorStr = ":Qt Creator_CppEditor::Internal::CPPEditorWidget"
if openDocument(doc) and placeCursorToLine(cppEditorStr, line):
if additionalFunction:
additionalFunction()
return str(waitForObject(cppEditorStr).plainText)
else:
earlyExit("Open %s or placing cursor to line (%s) failed." % (doc, line))
return None
# Replaces a line in the editor with another
# param fileSpec a string specifying a file in Projects view
# param oldLine a string holding the line to be replaced
# param newLine a string holding the line to be inserted
def replaceLine(fileSpec, oldLine, newLine):
if openDocumentPlaceCursor(fileSpec, oldLine) == None:
return False
editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
for _ in oldLine:
type(editor, "<Backspace>")
type(editor, newLine)
return True
| lgpl-2.1 | -4,425,573,142,757,364,700 | 45.599045 | 152 | 0.651729 | false |
jaap3/django-formative | tests/test_admin.py | 1 | 5314 | from django.contrib.admin import AdminSite
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import RequestFactory, TestCase, Client
from formative.admin import FormativeBlobAdmin
from formative.forms import FormativeTypeForm
from formative.models import FormativeBlob
from tests.testproject.testapp.forms import SimpleForm
class MockUser(object):
def has_perm(self, *args, **kwargs):
return True
class TestSelectType(TestCase):
def setUp(self):
self.admin = FormativeBlobAdmin(FormativeBlob, AdminSite())
self.request = RequestFactory().get('/add/')
def test_add_view_delegates_to_select_formative_type_view(self):
response = self.admin.add_view(self.request)
self.assertEqual(
response.template_name, 'formative/admin/formative_type_form.html')
def test_add_returns_formative_type_form(self):
response = self.admin.add_view(self.request)
self.assertIsInstance(response.context_data['adminform'].form,
FormativeTypeForm)
def test_add_with_next_param_validates_requested_type(self):
response = self.admin.add_view(RequestFactory().get('/add/', {
'formative_type': 'invalid', '_next': None
}))
self.assertEqual(response.context_data['adminform'].form.errors, {
'formative_type': [
'Select a valid choice. invalid is not one of the'
' available choices.']
})
def test_add_without_next_param_and_invalid_type(self):
response = self.admin.add_view(RequestFactory().get('/add/', {
'formative_type': 'invalid',
}))
self.assertEqual(
response.template_name, 'formative/admin/formative_type_form.html')
self.assertEqual(response.context_data['adminform'].form.errors, {})
def test_add_with_valid_type_renders_add(self):
request = RequestFactory().get('/add/', {
'formative_type': 'simple',
})
request.user = MockUser()
response = self.admin.add_view(request)
self.assertIn('admin/change_form.html', response.template_name)
class TestAddAndChange(TestCase):
def setUp(self):
self.admin = FormativeBlobAdmin(FormativeBlob, AdminSite())
self.request = RequestFactory().get('/add/', {
'formative_type': 'simple',
})
self.request.user = MockUser()
self.SimpleForm = FormativeBlob.registry.get('simple').form
def test_add_gets_correct_form(self):
response = self.admin.add_view(self.request)
self.assertIsInstance(
response.context_data['adminform'].form, self.SimpleForm)
def test_change_gets_correct_form(self):
f = self.SimpleForm({
'formative_type': 'simple',
'unique_identifier': 'test-identifier',
'name': 'test-name'
})
f.full_clean()
obj = f.save()
response = self.admin.change_view(self.request, str(obj.pk))
self.assertIsInstance(response.context_data['adminform'].form,
SimpleForm)
obj.delete()
class TestAddPost(TestCase):
def setUp(self):
User.objects.create_superuser('test', '[email protected]', 'test')
c = Client()
c.login(username='test', password='test')
self.response = c.post(reverse('admin:formative_formativeblob_add'), {
'formative_type': 'simple',
'unique_identifier': 'test-identifier',
'name': 'test-name'
}, follow=True)
def test_add_creates_object(self):
obj = FormativeBlob.objects.all()[0]
self.assertEqual(
{
'formative_type': obj.formative_type.name,
'unique_identifier': obj.unique_identifier,
'name': obj.data['name']
},
{
'formative_type': 'simple',
'unique_identifier': 'test-identifier',
'name': 'test-name'
})
class TestGetFieldsets(TestCase):
def setUp(self):
self.admin = FormativeBlobAdmin(FormativeBlob, AdminSite())
def test_get_undefined_fieldsets(self):
request = RequestFactory().get('/add/', {
'formative_type': 'simple',
})
self.assertEqual(self.admin.get_fieldsets(request), [
(None, {'fields': ['unique_identifier', 'formative_type', 'name']})
])
def test_get_defined_fieldsets(self):
request = RequestFactory().get('/add/', {
'formative_type': 'fieldset-identifier',
})
self.assertEqual(self.admin.get_fieldsets(request), [
(None, {'fields': ['formative_type', 'unique_identifier']}),
('Title', {'fields': ['title']}),
('Body', {'fields': ['body']})
])
def test_get_fieldsets_adds_unique_identifier(self):
request = RequestFactory().get('/add/', {
'formative_type': 'fieldset-no-identifier',
})
self.assertEqual(self.admin.get_fieldsets(request), [
(None, {'fields': ['unique_identifier', 'formative_type']}),
('Title', {'fields': ['title']}),
('Body', {'fields': ['body']})
])
| mit | 3,614,257,185,338,273,300 | 36.422535 | 79 | 0.596349 | false |
kadrlica/ugali | ugali/candidate/associate.py | 1 | 16821 | #!/usr/bin/env python
import os,sys
from os.path import join,abspath,split
import inspect
from collections import OrderedDict as odict
import numpy as np
from numpy.lib.recfunctions import stack_arrays
import fitsio
import ugali.utils.projector
from ugali.utils.projector import gal2cel, cel2gal
import ugali.utils.idl
from ugali.utils.healpix import ang2pix
from ugali.utils.shell import get_ugali_dir
from ugali.utils.logger import logger
#class Catalog(np.recarray):
#
# DATADIR=os.path.join(os.path.split(os.path.abspath(__file__))[0],"../data/catalogs/")
#
# def __new__(cls,filename=None):
# # Need to do it this way so that array can be resized...
# dtype=[('name',object),
# ('ra',float),
# ('dec',float),
# ('glon',float),
# ('glat',float)]
# self = np.recarray(0,dtype=dtype).view(cls)
# self._load(filename)
# return self
#
# def __add__(self, other):
# return np.concatenate([self,other])
#
# def __getitem__(self, key):
# """
# Support indexing, slicing and direct access.
# """
# try:
# return np.recarray.__getitem__(key)
# except ValueError, message:
# if key in self.name:
# idx = (self.name == key)
# return np.recarray.__getitem__(idx)
# else:
# raise ValueError(message)
#
# def _load(self,filename):
# pass
#
# def match(self,lon,lat,tol=0.1,coord='gal'):
# if coord.lower == 'cel':
# glon, glat = ugali.utils.projector.celToGal(lon,lat)
# else:
# glon,glat = lon, lat
# return ugali.utils.projector.match(glon,glat,self.data['glon'],self.data['glat'],tol)
def get_cat_dir():
catdir = os.path.join(get_ugali_dir(),'catalogs')
if not os.path.exists(catdir):
msg = "Catalog directory not found:\n%s"%catdir
logger.warning(msg)
return catdir
class SourceCatalog(object):
#join(split(abspath(__file__))[0],"../data/catalogs/")
DATADIR=get_cat_dir()
def __init__(self, filename=None):
columns = [('name',object),
('ra',float),
('dec',float),
('glon',float),
('glat',float)]
self.data = np.recarray(0,dtype=columns)
self._load(filename)
if np.isnan([self.data['glon'],self.data['glat']]).any():
raise ValueError("Incompatible values")
def __getitem__(self, key):
"""
Support indexing, slicing and direct access.
"""
try:
return self.data[key]
except ValueError as message:
if key in self.data['name']:
return self.data[self.data['name'] == key]
else:
raise ValueError(message)
def __add__(self, other):
ret = SourceCatalog()
ret.data = np.concatenate([self.data,other.data])
return ret
def __len__(self):
""" Return the length of the collection.
"""
return len(self.data)
def _load(self,filename):
pass
def match(self,lon,lat,coord='gal',tol=0.1,nnearest=1):
if coord.lower() == 'cel':
glon, glat = cel2gal(lon,lat)
else:
glon,glat = lon, lat
return ugali.utils.projector.match(glon,glat,self['glon'],self['glat'],tol,nnearest)
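# (Editor's sketch, not part of the original module.) Typical use of the catalog
# classes below; the exact return value of match() depends on
# ugali.utils.projector.match, which is assumed here to give matched indices:
#
# cat = McConnachie15() + Harris96() # catalogs can be concatenated with '+'
# matches = cat.match(ra_deg, dec_deg, coord='cel', tol=0.1)
# names = cat['name'] # direct column access via __getitem__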
class McConnachie12(SourceCatalog):
"""
Catalog of nearby dwarf spheroidal galaxies.
http://arxiv.org/abs/1204.1562
https://www.astrosci.ca/users/alan/Nearby_Dwarfs_Database_files/NearbyGalaxies.dat
"""
def _load(self,filename):
if filename is None:
filename = os.path.join(self.DATADIR,"J_AJ_144_4/NearbyGalaxies2012.dat")
self.filename = filename
raw = np.genfromtxt(filename,delimiter=[19,3,3,5,3,3,3],usecols=range(7),dtype=['|S19']+6*[float],skip_header=36)
self.data.resize(len(raw))
self.data['name'] = np.char.strip(raw['f0'])
ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
dec = raw[['f4','f5','f6']].view(float).reshape(len(raw),-1)
self.data['ra'] = ugali.utils.projector.hms2dec(ra)
self.data['dec'] = ugali.utils.projector.dms2dec(dec)
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
class McConnachie15(SourceCatalog):
"""
Catalog of nearby dwarf spheroidal galaxies. Updated September 2015.
http://arxiv.org/abs/1204.1562
http://www.astro.uvic.ca/~alan/Nearby_Dwarf_Database_files/NearbyGalaxies.dat
"""
def _load(self,filename):
if filename is None:
filename = os.path.join(self.DATADIR,"J_AJ_144_4/NearbyGalaxies.dat")
self.filename = filename
raw = np.genfromtxt(filename,delimiter=[19,3,3,5,3,3,3],usecols=list(range(7)),dtype=['|S19']+6*[float],skip_header=36)
self.data.resize(len(raw))
self.data['name'] = np.char.lstrip(np.char.strip(raw['f0']),'*')
ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
dec = raw[['f4','f5','f6']].view(float).reshape(len(raw),-1)
self.data['ra'] = ugali.utils.projector.hms2dec(ra)
self.data['dec'] = ugali.utils.projector.dms2dec(dec)
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
class Rykoff14(SourceCatalog):
"""
Catalog of red-sequence galaxy clusters.
http://arxiv.org/abs/1303.3562
"""
def _load(self, filename):
if filename is None:
filename = os.path.join(self.DATADIR,"redmapper/dr8_run_redmapper_v5.10_lgt20_catalog.fit")
self.filename = filename
raw = fitsio.read(filename,lower=True)
self.data.resize(len(raw))
self.data['name'] = np.char.mod("RedMaPPer %d",raw['mem_match_id'])
self.data['ra'] = raw['ra']
self.data['dec'] = raw['dec']
glon,glat = cel2gal(raw['ra'],raw['dec'])
self.data['glon'],self.data['glat'] = glon, glat
class Harris96(SourceCatalog):
"""
Catalog of Milky Way globular clusters.
Harris, W.E. 1996, AJ, 112, 1487
http://physwww.physics.mcmaster.ca/~harris/mwgc.dat
NOTE: There is some inconsistency between Equatorial and
Galactic coordinates in the catalog. Equatorial seems more
reliable.
"""
def _load(self,filename):
if filename is None:
filename = os.path.join(self.DATADIR,"VII_202/mwgc.dat")
self.filename = filename
kwargs = dict(delimiter=[12,12,3,3,6,5,3,6,8,8,6],dtype=2*['S12']+7*[float],skip_header=72,skip_footer=363)
raw = np.genfromtxt(filename,**kwargs)
self.data.resize(len(raw))
self.data['name'] = np.char.strip(raw['f0'])
ra = raw[['f2','f3','f4']].view(float).reshape(len(raw),-1)
dec = raw[['f5','f6','f7']].view(float).reshape(len(raw),-1)
self.data['ra'] = ugali.utils.projector.hms2dec(ra)
self.data['dec'] = ugali.utils.projector.dms2dec(dec)
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
class Corwen04(SourceCatalog):
"""
Modern compilation of the New General Catalogue and IC
"""
def _load(self,filename):
kwargs = dict(delimiter=[1,1,4,15,3,3,8,3,3,7],usecols=[1,2]+list(range(4,10)),dtype=['S1']+[int]+6*[float])
if filename is None:
raw = []
for basename in ['VII_239A/ngcpos.dat','VII_239A/icpos.dat']:
filename = os.path.join(self.DATADIR,basename)
raw.append(np.genfromtxt(filename,**kwargs))
raw = np.concatenate(raw)
else:
raw = np.genfromtxt(filename,**kwargs)
self.filename = filename
# Some entries are missing...
raw['f4'] = np.where(np.isnan(raw['f4']),0,raw['f4'])
raw['f7'] = np.where(np.isnan(raw['f7']),0,raw['f7'])
self.data.resize(len(raw))
names = np.where(raw['f0'] == 'N', 'NGC %04i', 'IC %04i')
self.data['name'] = np.char.mod(names,raw['f1'])
ra = raw[['f2','f3','f4']].view(float).reshape(len(raw),-1)
dec = raw[['f5','f6','f7']].view(float).reshape(len(raw),-1)
self.data['ra'] = ugali.utils.projector.hms2dec(ra)
self.data['dec'] = ugali.utils.projector.dms2dec(dec)
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
#class Steinicke10(SourceCatalog):
# """
# Another modern compilation of the New General Catalogue
# (people still don't agree on the composition of NGC...)
# """
# def _load(self,filename):
# if filename is None:
# filename = os.path.join(self.DATADIR,"NI2013.csv")
#
# raw = np.genfromtxt(filename,delimiter=',',usecols=[5,6]+range(13,20),dtype=['S1',int]+3*[float]+['S1']+3*[float])
#
# self.data.resize(len(raw))
# names = np.where(raw['f0'] == 'N', 'NGC %04i', 'IC %04i')
# self.data['name'] = np.char.mod(names,raw['f1'])
#
# sign = np.where(raw['f5'] == '-',-1,1)
# ra = raw[['f2','f3','f4']].view(float).reshape(len(raw),-1)
# dec = raw[['f6','f7','f8']].view(float).reshape(len(raw),-1)
# dec[:,0] = np.copysign(dec[:,0], sign)
#
# self.data['ra'] = ugali.utils.projector.hms2dec(ra)
# self.data['dec'] = ugali.utils.projector.dms2dec(dec)
#
# glon,glat = ugali.utils.projector.celToGal(self.data['ra'],self.data['dec'])
# self.data['glon'],self.data['glat'] = glon,glat
class Nilson73(SourceCatalog):
"""
Modern compilation of the Uppsala General Catalog
http://vizier.cfa.harvard.edu/viz-bin/Cat?VII/26D
"""
def _load(self,filename):
if filename is None:
filename = os.path.join(self.DATADIR,"VII_26D/catalog.dat")
self.filename = filename
raw = np.genfromtxt(filename,delimiter=[3,7,2,4,3,2],dtype=['S3']+['S7']+4*[float])
self.data.resize(len(raw))
self.data['name'] = np.char.mod('UGC %s',np.char.strip(raw['f1']))
ra = raw[['f2','f3']].view(float).reshape(len(raw),-1)
ra = np.vstack([ra.T,np.zeros(len(raw))]).T
dec = raw[['f4','f5']].view(float).reshape(len(raw),-1)
dec = np.vstack([dec.T,np.zeros(len(raw))]).T
ra1950 = ugali.utils.projector.hms2dec(ra)
dec1950 = ugali.utils.projector.dms2dec(dec)
ra2000,dec2000 = ugali.utils.idl.jprecess(ra1950,dec1950)
self.data['ra'] = ra2000
self.data['dec'] = dec2000
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
class Webbink85(SourceCatalog):
"""
Structure parameters of Galactic globular clusters
http://vizier.cfa.harvard.edu/viz-bin/Cat?VII/151
NOTE: Includes Reticulum and some open clusters
http://spider.seds.org/spider/MWGC/mwgc.html
"""
def _load(self,filename):
kwargs = dict(delimiter=[8,15,9,4,3,3,5,5],usecols=[1]+list(range(3,8)),dtype=['S13']+5*[float])
if filename is None:
raw = []
for basename in ['VII_151/table1a.dat','VII_151/table1c.dat']:
filename = os.path.join(self.DATADIR,basename)
raw.append(np.genfromtxt(filename,**kwargs))
raw = np.concatenate(raw)
else:
raw = np.genfromtxt(filename,**kwargs)
self.filename = filename
self.data.resize(len(raw))
#self.data['name'] = np.char.strip(raw['f0'])
self.data['name'] = np.char.join(' ',np.char.split(raw['f0']))
ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
dec = raw[['f4','f5']].view(float).reshape(len(raw),-1)
dec = np.vstack([dec.T,np.zeros(len(raw))]).T
ra1950 = ugali.utils.projector.hms2dec(ra)
dec1950 = ugali.utils.projector.dms2dec(dec)
ra2000,dec2000 = ugali.utils.idl.jprecess(ra1950,dec1950)
self.data['ra'] = ra2000
self.data['dec'] = dec2000
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
class Kharchenko13(SourceCatalog):
"""
Global survey of star clusters in the Milky Way
http://vizier.cfa.harvard.edu/viz-bin/Cat?J/A%2bA/558/A53
NOTE: CEL and GAL coordinates are consistent to < 0.01 deg.
"""
def _load(self,filename):
kwargs = dict(delimiter=[4,18,20,8,8],usecols=[1,3,4],dtype=['S18',float,float])
if filename is None:
filename = os.path.join(self.DATADIR,"J_AA_558_A53/catalog.dat")
self.filename = filename
raw = np.genfromtxt(filename,**kwargs)
self.data.resize(len(raw))
self.data['name'] = np.char.strip(raw['f0'])
self.data['glon'] = raw['f1']
self.data['glat'] = raw['f2']
ra,dec = gal2cel(self.data['glon'],self.data['glat'])
self.data['ra'],self.data['dec'] = ra,dec
class Bica08(SourceCatalog):
"""
LMC star clusters
http://cdsarc.u-strasbg.fr/viz-bin/Cat?J/MNRAS/389/678
NOTE: CEL and GAL coordinates are consistent to < 0.01 deg.
"""
def _load(self,filename):
kwargs = dict(delimiter=[32,2,3,3,5,3,3],dtype=['S32']+6*[float])
if filename is None:
filename = os.path.join(self.DATADIR,"J_MNRAS_389_678/table3.dat")
self.filename = filename
raw = np.genfromtxt(filename,**kwargs)
self.data.resize(len(raw))
self.data['name'] = np.char.strip(raw['f0'])
ra = raw[['f1','f2','f3']].view(float).reshape(len(raw),-1)
dec = raw[['f4','f5','f6']].view(float).reshape(len(raw),-1)
self.data['ra'] = ugali.utils.projector.hms2dec(ra)
self.data['dec'] = ugali.utils.projector.dms2dec(dec)
glon,glat = cel2gal(self.data['ra'],self.data['dec'])
self.data['glon'],self.data['glat'] = glon,glat
class WEBDA14(SourceCatalog):
"""
Open cluster database.
http://www.univie.ac.at/webda/cgi-bin/selname.cgi?auth=
"""
def _load(self,filename):
kwargs = dict(delimiter='\t',usecols=[0,1,2],dtype=['S18',float,float])
if filename is None:
filename = os.path.join(self.DATADIR,"WEBDA/webda.tsv")
self.filename = filename
raw = np.genfromtxt(filename,**kwargs)
self.data.resize(len(raw))
self.data['name'] = np.char.strip(raw['f0'])
self.data['glon'] = raw['f1']
self.data['glat'] = raw['f2']
ra,dec = gal2cel(self.data['glon'],self.data['glat'])
self.data['ra'],self.data['dec'] = ra,dec
class ExtraDwarfs(SourceCatalog):
"""
Collection of dwarf galaxy candidates discovered in 2015
"""
def _load(self,filename):
kwargs = dict(delimiter=',')
if filename is None:
filename = os.path.join(self.DATADIR,"extras/extra_dwarfs.csv")
self.filename = filename
raw = np.recfromcsv(filename,**kwargs)
self.data.resize(len(raw))
self.data['name'] = raw['name']
self.data['ra'] = raw['ra']
self.data['dec'] = raw['dec']
self.data['glon'],self.data['glat'] = cel2gal(raw['ra'],raw['dec'])
class ExtraClusters(SourceCatalog):
"""
Collection of recently discovered star clusters
"""
def _load(self,filename):
kwargs = dict(delimiter=',')
if filename is None:
filename = os.path.join(self.DATADIR,"extras/extra_clusters.csv")
self.filename = filename
raw = np.recfromcsv(filename,**kwargs)
self.data.resize(len(raw))
self.data['name'] = raw['name']
self.data['ra'] = raw['ra']
self.data['dec'] = raw['dec']
self.data['glon'],self.data['glat'] = cel2gal(raw['ra'],raw['dec'])
def catalogFactory(name, **kwargs):
"""
Factory for various catalogs.
"""
fn = lambda member: inspect.isclass(member) and member.__module__==__name__
catalogs = odict(inspect.getmembers(sys.modules[__name__], fn))
if name not in list(catalogs.keys()):
        msg = "%s not found in catalogs:\n %s"%(name,list(catalogs.keys()))
logger.error(msg)
msg = "Unrecognized catalog: %s"%name
raise Exception(msg)
return catalogs[name](**kwargs)
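# Minimal usage sketch (hypothetical; assumes the corresponding catalog data files
# exist under DATADIR for the chosen class):
#   catalog = catalogFactory('Bica08')
#   print(catalog.data['name'], catalog.data['glon'], catalog.data['glat'])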
if __name__ == "__main__":
import argparse
description = "python script"
parser = argparse.ArgumentParser(description=description)
parser.add_argument('args',nargs=argparse.REMAINDER)
opts = parser.parse_args(); args = opts.args
| mit | 6,533,313,649,244,551,000 | 34.562368 | 127 | 0.580584 | false |
SalesforceFoundation/CumulusCI | cumulusci/tasks/metadata_etl/permissions.py | 1 | 4361 | from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.metadata_etl import MetadataSingleEntityTransformTask
from cumulusci.utils.xml.metadata_tree import MetadataElement
class AddPermissionSetPermissions(MetadataSingleEntityTransformTask):
entity = "PermissionSet"
task_options = {
"field_permissions": {
"description": "Array of fieldPermissions objects to upsert into permission_set. Each fieldPermission requires the following attributes: 'field': API Name of the field including namespace; 'readable': boolean if field can be read; 'editable': boolean if field can be edited",
"required": False,
},
"class_accesses": {
"description": "Array of classAccesses objects to upsert into permission_set. Each classAccess requires the following attributes: 'apexClass': Name of Apex Class. If namespaced, make sure to use the form \"namespace__ApexClass\"; 'enabled': boolean if the Apex Class can be accessed.",
"required": False,
},
**MetadataSingleEntityTransformTask.task_options,
}
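    # Hypothetical example of the option payloads this task expects (illustrative
    # values only, not taken from the project documentation):
    #   field_permissions:
    #     - field: "My_Object__c.My_Field__c"
    #       readable: True
    #       editable: False
    #   class_accesses:
    #     - apexClass: "namespace__MyClass"
    #       enabled: True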
def _transform_entity(
self, metadata: MetadataElement, api_name: str
) -> MetadataElement:
self._upsert_class_accesses(metadata, api_name)
self._upsert_field_permissions(metadata, api_name)
return metadata
def _upsert_class_accesses(self, metadata, api_name):
class_accesses = self.options.get("class_accesses")
if not class_accesses:
return
self.logger.info(f"Upserting class accesses for {api_name}")
for class_access in class_accesses:
if "apexClass" not in class_access:
raise TaskOptionsError(
"class_access entries must contain the 'apexClass' key."
)
class_access["apexClass"] = self._inject_namespace(
class_access["apexClass"]
)
existing_permissions = metadata.findall(
"classAccesses", apexClass=class_access["apexClass"]
)
if len(existing_permissions):
# Permission exists: update
for elem in existing_permissions:
elem.find("enabled").text = str(
class_access.get("enabled", True)
).lower()
else:
# Permission doesn't exist: insert
elem = metadata.append("classAccesses")
elem.append("apexClass", text=class_access.get("apexClass"))
elem.append(
"enabled", text=str(class_access.get("enabled", True)).lower()
)
def _upsert_field_permissions(self, metadata, api_name):
field_permissions = self.options.get("field_permissions")
if not field_permissions:
return
self.logger.info(f"Upserting Field Level Security for {api_name}")
for field_permission in field_permissions:
if "field" not in field_permission:
raise TaskOptionsError(
"field_permissions entries must include the 'field' key."
)
field_permission["field"] = self._inject_namespace(
field_permission["field"]
)
existing_permissions = metadata.findall(
"fieldPermissions", field=field_permission["field"]
)
if len(existing_permissions):
# Permission exists: update
for elem in existing_permissions:
elem.find("readable").text = str(
field_permission.get("readable", True)
).lower()
elem.find("editable").text = str(
field_permission.get("editable", True)
).lower()
else:
# Permission doesn't exist: insert
element = metadata.append("fieldPermissions")
element.append("field", text=field_permission.get("field"))
element.append(
"editable", text=str(field_permission.get("editable", True)).lower()
)
element.append(
"readable", text=str(field_permission.get("readable", True)).lower()
)
| bsd-3-clause | 2,393,319,820,197,800,000 | 41.339806 | 299 | 0.579454 | false |
ArchanjoJr/solid-robot | app.py | 1 | 2056 | import requests as re
from delete import *
import pprint
import json
import sys
langs = ['en-US', 'de-DE', 'es-ES', 'es-MX', 'fr-FR', 'it-IT', 'ja-JP', 'pl-PL', 'pt-BR', 'ru-RU']
index = 0
if len(sys.argv) > 1:  # sys.argv[0] is the script name, so look for an extra argument
    if sys.argv[1] in langs:
        index = langs.index(sys.argv[1])
print("Getting a JSON for the " + langs[index] + " language")
url = 'https://api.gwentapi.com/v0/cards'
print('Getting the number of cards in the database')
pag = re.get(url)
if pag.status_code != 200:
print(pag.status_code)
else:
limit = pag.json()['count']
url += "?limit=" + str(limit) + '&index=' + langs[index]
pag = re.get(url)
if pag.status_code != 200:
print(pag.status_code)
else:
with open('aux.json', 'w') as arquivo:
arquivo.write(json.dumps(pag.json()["results"]))
size = limit
aux = json.load(open('aux.json', 'r'))
file = open('cards.json', 'w')
file.write('['), file.close()
for i in range(size):
print("Making the request for : " + aux[i]['name'])
cart = re.get(aux[i]['href'])
with open("cards.json", 'a')as file:
if i < (size - 1):
print("Writing the request for : " + aux[i]['name'] + " in file.")
file.write(json.dumps(cart.json()) + ",")
else:
print("Writing the request for : " + aux[i]['name'] + " in file.")
file.write(json.dumps(cart.json()) + "]")
cartas = json.load(open('cards.json', 'r'))
file = open('variations.json', 'w')
file.write('['), file.close()
for i in range(size):
print('Getting Variations for: ' + cartas[i]['name'])
var = re.get(cartas[i]['variations'][0]['href'])
with open('variations.json', 'a') as file:
if i < (size - 1):
print('writing variations for :' + cartas[i]['name'])
file.write(json.dumps(var.json()) + ',')
else:
print('writing variations for :' + cartas[i]['name'])
file.write(json.dumps(var.json()) + ']')
print('CLEANING THE THE JSONS')
clean_cards('cards.json')
clean_variations('variations.json')
delete('aux.json')
print('Done!')
| mit | -6,035,645,404,594,599,000 | 31.634921 | 98 | 0.580253 | false |
simvisage/oricreate | oricreate/factories/hexagonal_cp_factory.py | 1 | 6886 | # -------------------------------------------------------------------------
#
# Copyright (c) 2009, IMB, RWTH Aachen.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in simvisage/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.simvisage.com/licenses/BSD.txt
#
# Thanks for using Simvisage open source!
#
# Created on Sep 7, 2011 by: rch
from traits.api import \
Float, Int, Property, cached_property, Callable
import numpy as np
from oricreate.crease_pattern import \
CreasePatternState
from oricreate.forming_tasks import \
FactoryTask
import sympy as sp
x_, y_ = sp.symbols('x, y')
class HexagonalCPFactory(FactoryTask):
'''Generate a Yoshimura crease pattern based
on the specification of its parameters.
.. todo::
Default parameters do not work. d_x not described.
'''
L_x = Float(4, geometry=True)
L_y = Float(4, geometry=True)
n_seg = Int(4)
def deliver(self):
return CreasePatternState(X=self.X,
L=self.L,
F=self.F)
X = Property
def _get_X(self):
return self._geometry[0]
L = Property
def _get_L(self):
return self._geometry[1]
F = Property
def _get_F(self):
return self._geometry[2]
L_rigid = Property
def _get_L_rigid(self):
return self._geometry[3]
N_x_sym = Property
def _get_N_x_sym(self):
return self._geometry[4]
N_up = Property
def _get_N_up(self):
return self._geometry[5]
N_down = Property
def _get_N_down(self):
return self._geometry[6]
geo_transform = Callable
def _geo_transform_default(self):
return lambda X_arr: X_arr
_geometry = Property(depends_on='+geometry')
@cached_property
def _get__geometry(self):
n_seg = self.n_seg
n_x = n_seg * n_seg + 1
        n2 = n_x // 2  # integer division: n2 is used as an array index below
L_x = 1.0
L_y = 1.0
# provide the base element with four by four discretization
x_e, y_e = np.mgrid[0:L_x:complex(n_x), 0:L_y:complex(n_x)]
x_m = (x_e[:-1, :-1] + x_e[1:, 1:]) / 2.0
y_m = (y_e[:-1, :-1] + y_e[1:, 1:]) / 2.0
x1 = np.c_[x_e.flatten(), y_e.flatten(),
np.zeros_like(x_e.flatten())]
x2 = np.c_[x_m.flatten(), y_m.flatten(),
np.zeros_like(x_m.flatten())]
x = np.vstack([x1, x2])
Nf1 = np.arange(n_x * n_x).reshape(n_x, n_x)
n_x2 = n_x - 1
def get_facets(N1):
f1 = np.array(
[N1[:-1, :-1].flatten(),
N1[1:, :-1].flatten(),
N1[1:, 1:].flatten()]).T
f2 = np.array(
[N1[:-1, :-1].flatten(),
N1[1:, 1:].flatten(),
N1[:-1, 1:].flatten()]).T
return np.vstack([f1, f2])
ff1 = get_facets(Nf1[:n2 + 1, :n2 + 1])
ff2 = get_facets(Nf1[n2:, n2:])
nlh = Nf1[:, n2]
nlv = Nf1[n2, ::-1]
f5 = np.array(
[nlh[:n2].flatten(),
nlv[:n2].flatten(),
nlh[1:n2 + 1].flatten()]).T
f6 = np.array(
[nlh[n2 + 1:-1].flatten(),
nlv[n2 + 1:-1].flatten(),
nlh[n2 + 2:].flatten()]).T
f7 = np.array(
[nlv[:n2 - 1].flatten(),
nlh[1:n2].flatten(),
nlv[1:n2].flatten()]).T
f8 = np.array(
[nlv[n2:- 1].flatten(),
nlh[n2 + 1:].flatten(),
nlv[n2 + 1:].flatten()]).T
nl_fixed = np.vstack([f5[:-1, (1, 2)], f6[:, (1, 2)]])
facets = np.vstack([ff1, ff2, f5, f6, f7, f8])
# identify lines
ix_arr = np.array([[0, 1], [1, 2], [2, 0]])
L_N = facets[:, ix_arr].reshape(-1, 2)
n_L = len(L_N)
n_N = len(x)
NN_L = np.zeros((n_N, n_N), dtype='int') - 1
NN_L[L_N[:, 0], L_N[:, 1]] = np.arange(n_L)
NN_L[L_N[:, 1], L_N[:, 0]] = np.arange(n_L)
i, j = np.mgrid[:n_N, :n_N]
i_arr, j_arr = np.where((i > j) & (NN_L > -1))
l_arr = NN_L[i_arr, j_arr]
lines = L_N[l_arr]
l_mapping = np.array(
np.bincount(l_arr, np.arange(len(l_arr))),
dtype=np.int_)
l_fixed = NN_L[nl_fixed[:, 0], nl_fixed[:, 1]]
l_fixed_red = l_mapping[l_fixed]
# shrink / condense
N_connected = np.where(np.sum(NN_L + 1, axis=1) > 0)[0]
N_enum = np.zeros(n_N, dtype=np.int_) - 1
N_enum[N_connected] = np.arange(len(N_connected))
Nf1_x_sym = Nf1[np.arange(len(Nf1)), np.arange(len(Nf1))]
Nf_x_sym = N_enum[np.hstack([Nf1_x_sym])]
x_red = x[N_connected, :]
l_red = N_enum[lines]
f_red = N_enum[facets]
s = Nf1.shape
        i, j = np.mgrid[:(s[0] + 1) // 2, :(s[1] + 1) // 2]
i_arr, j_arr = np.where(i[:, ::2] >= j[:, ::2])
Nuph1 = N_enum[Nf1[i_arr, j_arr * 2]]
Nuph2 = N_enum[Nf1[-i_arr - 1, -j_arr * 2 - 1]]
Nupv1 = N_enum[Nf1[j_arr * 2, i_arr]]
Nupv2 = N_enum[Nf1[-j_arr * 2 - 1, -i_arr - 1]]
print('N_uph1', Nuph1)
Nf_up = np.unique(np.hstack([Nuph1, Nuph2, Nupv1, Nupv2]))
        i, j = np.mgrid[:(s[0]) // 2, :(s[1]) // 2]
i_arr, j_arr = np.where(i[:, ::2] >= j[:, ::2])
Ndoh1 = N_enum[Nf1[i_arr + 1, (j_arr * 2) + 1]]
Ndoh2 = N_enum[Nf1[-i_arr - 2, -j_arr * 2 - 2]]
Ndov1 = N_enum[Nf1[j_arr * 2 + 1, i_arr + 1]]
Ndov2 = N_enum[Nf1[-j_arr * 2 - 2, -i_arr - 2]]
Nf_do = np.unique(np.hstack([Ndoh1, Ndoh2, Ndov1, Ndov2]))
x_red = self.geo_transform(x_red)
return (x_red, l_red, f_red, l_fixed_red,
Nf_x_sym, Nf_up, Nf_do)
if __name__ == '__main__':
def geo_transform(x_arr):
alpha = np.pi / 4.0
L_x = 6.0
L_y = 2.0
x_max = np.max(x_arr, axis=0)
x_min = np.min(x_arr, axis=0)
T = (x_max - x_min) / 2.0
x_arr -= T[np.newaxis, :]
R = np.array([[np.cos(alpha), -np.sin(alpha), 0],
[np.sin(alpha), np.cos(alpha), 0],
[0, 0, 1]], dtype=np.float_)
x_rot = np.einsum('ij,nj->ni', R, x_arr)
x_rot[:, 0] *= L_x
x_rot[:, 1] *= L_y
return x_rot
yf = HexagonalCPFactory(L_x=2,
L_y=1,
n_seg=2,
geo_transform=geo_transform
)
cp = yf.formed_object
print(yf.L_rigid)
print('N_x_sym', yf.N_x_sym)
print(yf.N_up)
print(yf.N_down)
import pylab as p
cp.plot_mpl(p.axes(), nodes=True, lines=True, facets=False)
p.show()
| gpl-3.0 | 5,428,420,417,678,322,000 | 27.221311 | 76 | 0.472117 | false |
aerler/HGS-Tools | Python/geospatial/xarray_tools.py | 1 | 40502 | '''
Created on Feb. 23, 2019
Utility functions to extract data from xarray Dataset or DataArray classes.
@author: Andre R. Erler, GPL v3
'''
from warnings import warn
from datetime import datetime
import os
import numpy as np
import xarray as xr
import netCDF4 as nc
from dask.diagnostics import ProgressBar
# internal imports
from geospatial.netcdf_tools import getNCAtts, geospatial_netcdf_version, zlib_default # this import should be fine
## an important option I am relying on!
xr.set_options(keep_attrs=True)
# names of valid geographic/projected coordinates
default_x_coords = dict(geo=('lon','long','longitude',), proj=('x','easting','west_east') )
default_y_coords = dict(geo=('lat','latitude',), proj=('y','northing','south_north'))
default_lon_coords = default_x_coords['geo']; default_lat_coords = default_y_coords['geo']
## helper functions
def getAtts(xvar, lraise=True):
''' return dictionary of attributed from netCDF4 or xarray '''
if isinstance(xvar,(xr.DataArray,xr.Variable,xr.Dataset)):
atts = xvar.attrs.copy()
elif isinstance(xvar,(nc.Variable,nc.Dataset)):
atts = getNCAtts(xvar)
elif lraise:
raise TypeError(xvar)
return atts
## functions to interface with rasterio
def getGeoDims(xvar, x_coords=None, y_coords=None, lraise=True):
''' helper function to identify geographic/projected dimensions by name '''
if x_coords is None: x_coords = default_x_coords
if y_coords is None: y_coords = default_y_coords
xlon,ylat = None,None # return None, if nothing is found
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
for name in xvar.dims.keys() if isinstance(xvar,xr.Dataset) else xvar.dims:
if name.lower() in x_coords[coord_type]:
xlon = name; break
for name in xvar.dims.keys() if isinstance(xvar,xr.Dataset) else xvar.dims:
if name.lower() in y_coords[coord_type]:
ylat = name; break
if xlon is not None and ylat is not None: break
else: xlon,ylat = None,None
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
for name in xvar.dimensions:
if name.lower() in x_coords[coord_type]:
xlon = name; break
for name in xvar.dimensions:
if name.lower() in y_coords[coord_type]:
ylat = name; break
if xlon is not None and ylat is not None: break
else: xlon,ylat = None,None
elif lraise: # optionally check input
raise TypeError("Can only infer coordinates from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
pass # return None,None
return xlon,ylat
def getGeoCoords(xvar, x_coords=None, y_coords=None, lraise=True, lvars=True):
''' helper function to extract geographic/projected coordinates from xarray'''
# find dim names
xlon_dim,ylat_dim = getGeoDims(xvar, x_coords=x_coords, y_coords=y_coords, lraise=lraise)
# find coordinates
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
if xlon_dim in xvar.coords:
xlon = xvar.coords[xlon_dim] if lvars else xlon_dim
else: xlon = None
if ylat_dim in xvar.coords:
ylat = xvar.coords[ylat_dim] if lvars else ylat_dim
else: ylat = None
elif isinstance(xvar,nc.Variable) and lraise:
raise TypeError("Cannot infer coordinates from netCDF4 Variable - only Dataset!")
elif isinstance(xvar,nc.Dataset):
if xlon_dim in xvar.variables:
xlon = xvar.variables[xlon_dim] if lvars else xlon_dim
else: xlon = None
if ylat_dim in xvar.variables:
ylat = xvar.variables[ylat_dim] if lvars else ylat_dim
else: ylat = None
# optionally raise error if no coordinates are found, otherwise just return None
if lraise and (xlon is None or ylat is None):
raise ValueError("No valid pair of geographic coodinates found:\n {}".format(xvar.dims))
# return a valid pair of geographic or projected coordinate axis
return xlon,ylat
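# Usage sketch (hypothetical file path; works for geographic or projected grids):
#   with xr.open_dataset('some_georeferenced_file.nc') as ds:
#       xlon, ylat = getGeoCoords(ds)                           # coordinate variables
#       xlon_name, ylat_name = getGeoCoords(ds, lvars=False)    # just the names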
def isGeoVar(xvar, x_coords=None, y_coords=None, lraise=True):
''' helper function to identify variables that have geospatial coordinates (geographic or
projected), based on xarray or netCDF4 dimension names '''
if x_coords is None: x_coords = default_x_coords
if y_coords is None: y_coords = default_y_coords
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
dims = xvar.coords.keys()
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
dims = xvar.dimensions
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
return None # evaluates as False, but allows checking
# test geographic grid and projected grids separately
for coord_type in x_coords.keys():
xlon,ylat = False,False
for name in dims:
if name.lower() in x_coords[coord_type]:
xlon = True; break
for name in dims:
if name.lower() in y_coords[coord_type]:
ylat = True; break
if xlon and ylat: break
# if it has a valid pair of geographic or projected coordinate axis
return ( xlon and ylat )
def isGeoCRS(xvar, lat_coords=None, lon_coords=None, lraise=True):
''' helper function to determine if we have a simple geographic lat/lon CRS (based on xarray dimension names) '''
lat,lon = False,False
if lon_coords is None: lon_coords = default_x_coords['geo']
if lat_coords is None: lat_coords = default_y_coords['geo']
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
dims = xvar.coords.keys()
elif isinstance(xvar,(nc.Dataset,nc.Variable)):
dims = xvar.dimensions
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4- not from {}".format(xvar.__class__))
else:
return None # evaluates as False, but allows checking
# check dimension names
for name in dims:
if name.lower() in lon_coords:
lon = True; break
for name in dims:
if name.lower() in lat_coords:
lat = True; break
# it is a geographic coordinate system if both, lat & lon are present
return ( lat and lon )
def getTransform(xvar=None, x=None, y=None, lcheck=True):
''' generate an affine transformation from xarray coordinate axes '''
from rasterio.transform import Affine # to generate Affine transform
if isinstance(xvar,(xr.DataArray,xr.Dataset,nc.Dataset)):
x,y = getGeoCoords(xvar, lraise=True)
elif xvar is None and isinstance(x,(xr.DataArray,nc.Variable)) and isinstance(y,(xr.DataArray,nc.Variable)):
pass # x and y axes are supplied directly
elif xvar:
raise TypeError('Can only infer GeoTransform from xarray Dataset or DataArray or netCDF4 Dataset\n - not from {}.'.format(xvar))
# check X-axis
if isinstance(x,xr.DataArray): x = x.data
elif isinstance(x,nc.Variable): x = x[:]
if not isinstance(x,np.ndarray):
raise TypeError(x)
diff_x = np.diff(x); dx = diff_x.min()
if lcheck and not np.isclose(dx, diff_x.max(), rtol=1.e-2):
raise ValueError("X-axis is not regular: {} - {}".format(dx, diff_x.max()))
# check Y-axis
if isinstance(y,xr.DataArray): y = y.data
elif isinstance(y,nc.Variable): y = y[:]
if not isinstance(y,np.ndarray):
raise TypeError(y)
diff_y = np.diff(y); dy = diff_y.min()
if lcheck and not np.isclose(dy, diff_y.max(), rtol=1.e-2):
raise ValueError("Y-axis is not regular. {} - {}".format(dy, diff_y.max()))
# generate transform
return Affine.from_gdal(x[0]-dx/2.,dx,0.,y[0]-dy/2.,0.,dy), (len(x),len(y))
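# Usage sketch (hypothetical dataset; both axes must be regularly spaced):
#   transform, (nx, ny) = getTransform(xds)
#   # 'transform' is a rasterio Affine, e.g. for rasterio.open(..., transform=transform)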
def readCFCRS(xds, grid_mapping=None, lraise=True, lproj4=False):
''' function to generate CRS from CF-Convention grid mapping variable; only works with Datasets '''
# read CF convention string
if not isinstance(xds,(nc.Dataset,xr.Dataset)):
raise TypeError("Only xarray of netCDF4 Datasets are supported.")
atts = getAtts(xds) # works for xarray or netCDF4
if 'Conventions' in atts:
cf_str = atts['Conventions']
if cf_str[:3] != 'CF-' or float(cf_str[3:]) < 1:
raise ValueError("Only CF convection version 1 or later is supported; not '{}'.".format(cf_str))
elif lraise:
raise ValueError("No CF convention attribute found; this Dataset may not adhere to CF conventions.")
else:
return None # return without CRS
# find grid mapping variable
if grid_mapping:
if grid_mapping in xds.variables:
grid_type = grid_mapping
grid_atts = getAtts(xds.variables[grid_mapping])
else:
raise ValueError("Grid mapping '{}' not found in dataset.".format(grid_mapping))
else:
grid_type = None
grid_varlist = ['Lambert_Conformal']
for grid_var in grid_varlist:
if grid_var in xds.variables:
if grid_type is None:
grid_type = grid_var
grid_atts = getAtts(xds.variables[grid_var])
else:
raise ValueError("Multiple grid_mapping variables detected:",grid_type,grid_var)
if grid_type is None:
if lraise:
raise NotImplementedError("No supported grid_mapping variable detected:\n",grid_varlist)
else:
return None # return without CRS
elif grid_type == 'Lambert_Conformal':
assert grid_atts['grid_mapping_name'] == "lambert_conformal_conic", grid_atts
proj4 = ('+proj=lcc +lat_1={lat_1} +lat_2={lat_1} '.format(lat_1=grid_atts['standard_parallel'])
+ '+lat_0={lat_0} +lon_0={lon_0} '.format(lat_0=grid_atts['latitude_of_projection_origin'],
lon_0=grid_atts['longitude_of_central_meridian'])
+ '+x_0=0 +y_0=0 +a=6371229 +b=6371229 +units=m +no_defs' )
else:
raise NotImplementedError("The grid_mapping '{}' is currently not implemented/supported.".format(grid_type))
import rasterio as rio
# return either string or CRS object
if lproj4: crs = proj4
else: crs = rio.crs.CRS.from_string(proj4) # initialize from Proj4 string
return crs
def getCRS(xvar, lraise=True):
''' infer projection from a xarray Dataset or DataArray; this function assumes that either a proj4 string or
an EPSG designation is stored in the attributes of the dataset/variable. '''
from geospatial.rasterio_tools import genCRS # used to generate CRS object
if isinstance(xvar,(xr.DataArray,xr.Dataset)):
atts = xvar.attrs
elif isinstance(xvar,(nc.Variable,nc.Dataset)):
atts = getAtts(xvar)
elif lraise:
raise TypeError("Can only infer coordinate system from xarray or netCDF4 - not from {}".format(xvar.__class__))
else:
return None # no projection
crs = None
# check CF convention
if isinstance(xvar,(xr.Dataset,nc.Dataset)):
crs = readCFCRS(xvar, lraise=False, lproj4=False)
# search for EPSG number
if crs is None:
for key,value in atts.items():
if key.upper() == 'EPSG' and value != 'n/a': crs = genCRS(value); break
# search for Proj4 string
if crs is None:
for key,value in atts.items():
if key.lower() == 'proj4' and value != 'n/a': crs = genCRS(value); break
# check for simple geographic lat/lon system
if crs is None:
if isGeoCRS(xvar, lraise=False): # error will be raised below (if desired)
crs = genCRS() # no arguments for default lat/lon
# return values
if lraise and crs is None:
raise ValueError("No projection information found in attributes.")
# return a GDAL/rasterio CRS instance
return crs
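# Usage sketch: the CRS is read from a CF grid-mapping variable, an 'EPSG'/'proj4'
# attribute, or inferred as plain lat/lon (hypothetical example):
#   crs = getCRS(xds)
#   print(crs if crs is not None else 'no CRS found')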
def inferGeoInfo(xvar, varname=None, crs=None, transform=None, size=None, lraise=True, lcheck=True):
    ''' infer geo-reference information from xarray DataArray or Dataset and netCDF4 Dataset '''
# CRS
_crs = getCRS(xvar, lraise=lraise)
if crs is None: crs = _crs
elif crs != _crs:
from geospatial.rasterio_tools import genCRS # used to generate CRS object
crs = genCRS(crs)
if crs != _crs:
raise ValueError("Prescribed CRS and inferred CRS are incompatible:\n{}\n{}".format(crs,_crs))
crs = _crs # for some reason EPSG ints also pass the equality test...
# geotransform & grid size
xlon,ylat = getGeoCoords(xvar, lraise=True, lvars=False)
_transform, _size = getTransform(xvar, lcheck=lraise)
if transform is None: transform = _transform
elif not transform is _transform:
raise ValueError("Prescribed and inferred Geotransform are incompatible:\n{}\n{}".format(transform,_transform))
if size is None: size = _size
elif not size is _size:
raise ValueError("Prescribed and inferred grid sizes are incompatible:\n{}\n{}".format(size,_size))
# do some checks
if lcheck:
if crs.is_projected and isGeoCRS(xvar):
raise ValueError(crs,xvar) # simple check
if isinstance(xvar,xr.Dataset) and varname:
xvar = xvar[varname]
shape = None; dims = None
if isinstance(xvar,xr.DataArray):
shape = xvar.data.shape; dims = xvar.dims
if xvar.attrs.get('dim_order',None) is False:
raise NotImplementedError("The x/lon and y/lat axes of this xarray have to be swapped:\n {}".format(xvar))
elif isinstance(xvar,nc.Dataset) and varname:
xvar = xvar.variables[varname]
shape = xvar.shape; dims = xvar.dimensions
if shape:
if shape[-2:] != (size[1],size[0]):
raise ValueError(xvar)
if dims:
if dims[-2] != ylat or dims[-1] != xlon:
raise ValueError(xvar)
# return verified georef info
return crs, transform, size
## functions that modify a dataset
def _inferVarmap(varmap=None, varatts=None, linvert=False):
''' simple function that infers a varmap using varatts, if necessary '''
if varmap is None:
varmap = dict()
if varatts is not None:
for varname,atts in varatts.items():
if 'name' in atts: varmap[varname] = atts['name']
elif not isinstance(varmap,dict):
raise TypeError(varmap)
if linvert:
varmap = {value:key for key,value in varmap.items()}
# return varmap (guaranteed to be a dict)
return varmap
def updateVariableAttrs(xds, varatts=None, varmap=None, varlist=None, **kwargs):
''' a helper function to update variable attributes, rename variables, and apply scaling factors '''
# update varatts
if varatts is None:
varatts = dict()
elif isinstance(varatts,dict):
varatts = varatts.copy()
else:
raise TypeError(varatts)
varatts.update(kwargs) # add kwargs
# generate varmap
varmap = _inferVarmap(varmap=varmap, varatts=varatts, linvert=False)
# drop variables
if varlist is not None:
drop_list = []
for varname in xds.data_vars.keys():
name = varmap.get(varname,varname)
if name not in varlist: drop_list.append(varname)
xds = xds.drop_vars(drop_list)
# update attributes (using old names)
date_str = datetime.today().strftime('%Y%m%d')
for varname,atts in varatts.items():
if varname in xds.variables:
if varname == 'time':
warn("The 'time' coordinate is handled automatically by xarray using numpy datetime64; "
+ "changing attributes can break this functionality when the dataset is saved to file. ")
var = xds.variables[varname]
attrs = var.attrs.copy()
if 'updated' not in attrs:
if 'units' in atts:
if 'units' not in attrs or attrs['units'] != atts['units']:
if 'scalefactor' in atts and atts['scalefactor'] != 1:
var *= atts['scalefactor'] # this should execute lazily...
if 'offset' in atts and atts['offset'] != 0:
var += atts['offset'] # this should execute lazily...
# transfer attributes
for key,value in atts.items():
if key not in ('scalefactor','offset'):
if key in attrs: attrs['old_'+key] = attrs[key]
attrs[key] = value
attrs['updated'] = date_str # indicate we have updated with date string
var.attrs = attrs
# actually rename (but only vars that are present and need to be renamed...)
xds = xds.rename({key:val for key,val in varmap.items() if key in xds.variables and key != val})
xds = xds.rename_dims({key:val for key,val in varmap.items() if key in xds.dims and key != val})
xds.attrs['updated'] = date_str
return xds
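# Usage sketch with a hypothetical attribute/rename table (keys follow the
# conventions used above: 'name', 'units', 'scalefactor', 'offset'):
#   varatts = {'prec' : {'name' : 'precip', 'units' : 'mm/day', 'scalefactor' : 86400.}}
#   ds = updateVariableAttrs(ds, varatts=varatts)   # renames 'prec' -> 'precip' and rescales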
def addGeoReference(xds, proj4_string=None, x_coords=None, y_coords=None, lcreate=False, xlon_coord=None, ylat_coord=None):
''' helper function to add GDAL/rasterio-style georeferencing information to an xarray dataset;
note that this only refers to attributed, not axes, but also includes variables '''
xlon,ylat = getGeoCoords(xds, x_coords=x_coords, y_coords=y_coords, lvars=lcreate, lraise=not lcreate)
if lcreate:
if (xlon is None and ylat is None):
assert xlon_coord is not None and ylat_coord is not None
# need to find names again...
xlon_dim,ylat_dim = getGeoDims(xds, x_coords=x_coords, y_coords=y_coords, lraise=True)
# create new xlon/ylat coordinates, based on coordinates passed down
coords = {xlon_dim:xlon_coord, ylat_dim:ylat_coord}
xds = xds.assign_coords(**coords)
elif (xlon is not None) and (ylat is not None):
xlon = xlon.name; ylat = ylat.name # from here on only need names
else:
raise ValueError("No valid pair of geographic coodinates found:\n {}".format(xds.dims))
xds.attrs['xlon'] = xlon
xds.attrs['ylat'] = ylat
if proj4_string is None:
if isGeoVar(xds, x_coords, y_coords, lraise=True):
proj4_string = '+proj=longlat +lon_0=0 +lat_0=0 +ellps=WGS84 +datum=WGS84' # default geographic, also EPSG 4326
else:
raise ValueError("Cannot infer projection - need to provide proj4 string!")
elif isinstance(proj4_string,str):
xds.attrs['proj4'] = proj4_string
else:
raise TypeError("Cannot infer projection - need to provide proj4 string!")
for xvar in list(xds.data_vars.values()):
if isGeoVar(xvar):
xvar.attrs['proj4'] = proj4_string
xvar.attrs['xlon'] = xlon
xvar.attrs['ylat'] = ylat
xvar.attrs['dim_order'] = int( xvar.dims[-2:] == (ylat, xlon) )
# N.B.: the NetCDF-4 backend does not like Python bools
return xds
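# Usage sketch (the proj4 string below is a hypothetical projected example):
#   ds = addGeoReference(ds)   # plain lat/lon grid, default geographic CRS
#   ds = addGeoReference(ds, proj4_string='+proj=utm +zone=17 +datum=NAD83 +units=m +no_defs')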
def rechunkTo2Dslices(xvar, **other_chunks):
''' convenience function to rechunk an xarray so that the horizontal dimensions are contiguous (not chunked)
N.B.: rechunking in a way that does not simply combine existing chunks seems to cause all chunks/data
to be loaded into memory (we want to avoid that); also, chunks are defined by their size, not by
their number, i.e. the definition for one large 2D chunk is (len(y),len(x)) and *not* (1,1) '''
if not isinstance(xvar,(xr.DataArray,xr.Dataset)):
raise TypeError(xvar)
# old chunk sizes
if 'chunksizes' in xvar.encoding:
chunks = {dim:cs for dim,cs in zip(xvar.sizes,xvar.encoding['chunksizes'])}
else: chunks = dict()
chunks.update(other_chunks)
# find horizontal/map dimensions
xlon = xvar.attrs['xlon']; ylat = xvar.attrs['ylat']
chunks[xlon] = xvar.sizes[xlon]; chunks[ylat] = xvar.sizes[ylat]
return xvar.chunk(chunks=chunks) # rechunk x/lon and y/lat
def autoChunkXArray(xds, chunks=None, dims=None, **kwargs):
''' apply auto-chunking to an xarray object, like a Dataset or DataArray (chunks kw arg can override) '''
from geospatial.netcdf_tools import autoChunk
if dims is None:
xlon,ylat = getGeoCoords(xds)
dims = ('time', ylat.name, xlon.name)
dims = [dim for dim in dims if dim in xds.sizes]
shape = [xds.sizes[dim] for dim in dims]
cks = autoChunk(shape, **kwargs)
cks = {dim:c for dim,c in zip(dims,cks)}
if chunks: cks.update(chunks) # manually/explicitly specified chunks override
return xds.chunk(chunks=cks)
def getCommonChunks(xds, method='min'):
''' get smallest/largest/mean common denominator for chunks in dataset '''
chunk_list = dict()
# collect chunks
if isinstance(xds,xr.Dataset):
for xvar in xds.data_vars.values():
if 'chunksizes' in xvar.encoding:
for dim,cks in zip(xvar.dims,xvar.encoding['chunksizes']):
if dim in chunk_list: chunk_list[dim].append(cks)
else: chunk_list[dim] = [cks]
elif isinstance(xds,nc.Dataset):
for ncvar in xds.variables.values():
if ncvar.chunking():
for dim,cks in zip(ncvar.dimensions,ncvar.chunking()):
if dim in chunk_list: chunk_list[dim].append(cks)
else: chunk_list[dim] = [cks]
else:
raise TypeError(xds)
# reduce chunks
chunks = dict()
for dim,cks in list(chunk_list.items()):
chunks[dim] = getattr(np,method)(cks)
# return dict with chunksize for each dimension
return chunks
def computeNormals(xds, aggregation='month', time_stamp='time_stamp', lresample=False, time_name='time'):
''' function invoking lazy groupby() call and replacing the resulting time axis with a new time axis '''
lts = time_stamp and time_stamp in xds
# time stamp variable for meta data
if lts:
import pandas as pd
ts_var = xds[time_stamp].load()
period = (pd.to_datetime(ts_var.data[0]).year, (pd.to_datetime(ts_var.data[-1])+pd.Timedelta(31, unit='D')).year)
prdstr = '{:04d}-{:04d}'.format(*period)
# resample data to aggregation interval
if lresample:
if aggregation.lower() == 'month': rsi = 'MS'
else:
raise NotImplementedError(aggregation)
xds = xds.resample(time=rsi,skipna=True,).mean()
# N.B.: I am not sure to which extent resampling is necessary
# compute monthly normals
cds = xds.groupby('time.'+aggregation).mean('time')
assert len(cds['month']) == 12, cds
# convert time axis
cds = cds.rename({aggregation:time_name}) # the new time axis is named 'month'
tm = cds.coords[time_name]
tm.attrs['name'] = time_name
tm.attrs['long_name'] = 'Calendar '+aggregation.title()
tm.attrs['units'] = aggregation
# add period info for quick identification
if lts:
tm.attrs['start_date'] = str(ts_var.data[0])
tm.attrs['end_date'] = str(ts_var.data[-1])
tm.attrs['period'] = prdstr
# add attributes to dataset
cds.attrs['start_date'] = str(ts_var.data[0])
cds.attrs['end_date'] = str(ts_var.data[-1])
cds.attrs['period'] = prdstr
# return formatted climatology dataset
return cds
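# Usage sketch (assumes a monthly-or-finer 'time' dimension with datetime values):
#   clim = computeNormals(ds, aggregation='month', time_stamp='time_stamp')
#   # 'clim' has a 12-entry 'time' axis of calendar months and 'period' metadata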
## function to load a dataset
def _multichunkPresets(multi_chunks):
''' translate string identifiers into valid multichunk dicts, based on presets '''
if isinstance(multi_chunks,str):
if multi_chunks.lower() == 'regular': # 256 MB
multi_chunks = {dim:16 for dim in ('lat','lon','latitude','longitude','x','y',)}
multi_chunks['time'] = 8
elif multi_chunks.lower() == 'small': # 64 MB
multi_chunks = {dim:8 for dim in ('lat','lon','latitude','longitude','x','y','time')}
elif multi_chunks.lower() == 'time': # 184 MB
multi_chunks = {dim:4 for dim in ('lat','lon','latitude','longitude','x','y')}
multi_chunks['time'] = 92 # for reductions along time, we can use a higher value (8 days * 92 ~ 2 years)
else:
raise NotImplementedError(multi_chunks)
elif ( multi_chunks is not None ) and not isinstance(multi_chunks, dict):
raise TypeError(multi_chunks)
# return valid multi_chunks (dict)
return multi_chunks
def loadXArray(varname=None, varlist=None, folder=None, varatts=None, filename_pattern=None, filelist=None, default_varlist=None,
varmap=None, mask_and_scale=True, grid=None, lgeoref=True, geoargs=None, chunks=True, multi_chunks=None,
ldropAtts=False, lskip=False, filetypes=None,
compat='override', join='inner', fill_value=np.NaN, combine_attrs='no_conflicts', **kwargs):
''' function to open a dataset in one of two modes: 1) variables are stored in separate files, but in the same folder (this mainly
applies to high-resolution, high-frequency (daily) observations, e.g. SnoDAS) or 2) multiple variables are stored in different
filetypes and each is opened and then merged (usually model output); datasets are opened using xarray '''
# load variables
if filetypes is None:
lopt1 = True
# option 1: one variable per file
if varname and varlist:
raise ValueError(varname,varlist)
elif varname:
varlist = [varname] # load a single variable
elif varlist is None:
varlist = default_varlist
# add variable filetypes
# if there is a (implied) varmap, we need to apply that to variable-filetypes
ravmap = _inferVarmap(varmap=varmap, varatts=varatts, linvert=True)
filetypes = [ravmap.get(varname,varname) for varname in varlist]
# now also transform varatts and varmap
varmap_single = None if varmap is None else varmap.copy()
varatts_single = None if varatts is None else varatts.copy()
varatts = {filetype:varatts_single for filetype in filetypes}
varmap = {filetype:varmap_single for filetype in filetypes}
else:
lopt1 = False # just to remember when using option 2
## now use option 2: multiple variables per file
# expand varmap to filetypes
if varmap is None:
varmap = {filetype:None for filetype in filetypes} # no varmap
elif isinstance(varmap,dict):
filetypes_set = set(filetypes); varmap_set = set(varmap.keys())
if varmap_set.issubset(filetypes_set) or filetypes_set.issubset(varmap_set): # expand to filetypes using None
for filetype in filetypes:
if filetype in varmap_set:
if not isinstance(varmap[filetype],dict) and varmap[filetype] is not None:
raise TypeError(filetype,varmap[filetype])
else:
varmap[filetype] = None
elif any([key in filetypes for key in varmap.keys()]):
raise ValueError("It is unclear if varmap is a dict containing varmap dicts for each filetype or just one varmap dict.",varmap.keys())
if all([key in filetypes for key in varmap.keys()]): # one varmap per filetype
if not all([isinstance(value,dict) or value is None for value in varmap.values()]):
raise TypeError(varmap)
elif any([key in filetypes for key in varmap.keys()]):
raise ValueError(varmap.keys())
else:
varmap = {filetype:varmap for filetype in filetypes} # same varmap for all
else:
raise TypeError(varmap)
# expand varatts to filetypes
if varatts is None:
varatts = {filetype:None for filetype in filetypes} # no varatts
elif isinstance(varatts,dict):
filetypes_set = set(filetypes); varatts_set = set(varatts.keys())
if varatts_set.issubset(filetypes_set) or filetypes_set.issubset(varatts_set): # expand to filetypes using None
for filetype in filetypes:
if filetype in varatts_set:
if not isinstance(varatts[filetype],dict) and varatts[filetype] is not None:
raise TypeError(filetype,varatts[filetype])
else:
varatts[filetype] = None
elif any([key in filetypes for key in varatts.keys()]):
raise ValueError("It is unclear if varatts is a dict containing varatts dicts for each filetype or just one varatts dict.",varatts.keys())
else:
varatts = {filetype:varatts for filetype in filetypes} # same varatts for all
else:
raise TypeError(varatts)
# expand filename/pattern to filetypes
if filename_pattern and not filelist:
filelist = filename_pattern
if isinstance(filelist, dict):
if len(filelist) != len(filetypes):
raise ValueError(filelist)
elif isinstance(filelist, str):
filelist = {filetype:filelist for filetype in filetypes}
else:
raise ValueError(filelist)
# just some default settings that will produce chunks larger than 100 MB on 8*64*64 float chunks
multi_chunks = _multichunkPresets(multi_chunks)
orig_chunks = chunks.copy() if isinstance(chunks, dict) else chunks # deep copy or True or None
# construct dataset
ds_list = []
for filetype in filetypes:
filename = filelist[filetype].lower().format(var=filetype.lower(), type=filetype.lower()) # all lower case
filepath = '{}/{}'.format(folder,filename)
chunks = orig_chunks # reset
# apply varmap in reverse to varlist
if os.path.exists(filepath):
# load dataset
if chunks is True:
# infer chunks from NetCDF-4 file (not sure why xarray doesn't do this automatically...)
with nc.Dataset(filepath, 'r') as ncds : # open in read-only using NetCDF4 module
chunks = dict()
                    for varname,ncvar in ncds.variables.items():
                        sizes = ncvar.chunking()
                        if not isinstance(sizes, (list, tuple)): continue # 'contiguous', i.e. unchunked
                        for dim,size in zip(ncvar.dimensions, sizes):
                            if dim in chunks and chunks[dim] != size:
                                print("WARNING: Chunks for dimension '{}' not coherent in file:\n '{}'".format(dim, filepath))
                            chunks[dim] = size # keeps the last value encountered for each dimension
if multi_chunks: # enlarge chunks with multiplier
chunks = {dim:(val*multi_chunks.get(dim,1)) for dim,val in chunks.items()}
# open dataset with xarray
#print(varname,chunks)
ds = xr.open_dataset(filepath, chunks=chunks, mask_and_scale=mask_and_scale, **kwargs)
# N.B.: the use of open_mfdataset is problematic, because it does not play nicely with chunking -
# by default it loads everything as one chunk, and it only respects chunking, if chunks are
# specified explicitly at the initial load time (later chunking seems to have no effect!)
# That being said, I don't know if this is still the case...
# rename, prune/drop vars and apply attributes
if ldropAtts: ds.attrs = dict() # drop original attributes from NC file (still add georef etc.)
if varatts or varmap:
ds = updateVariableAttrs(ds, varatts=varatts[filetype], varmap=varmap[filetype],
varlist=None if lopt1 else varlist)
ds_list.append(ds)
else:
if lskip:
print("Skipping missing dataset file '{}' ('{}')".format(filename,folder))
else:
raise IOError("The dataset file '{}' was not found in folder:\n '{}'".format(filename,folder))
# merge into new dataset
if len(ds_list) == 0:
raise ValueError("Dataset is empty - aborting! Folder: \n '{}'".format(folder))
# resolve a very common conflict caused by NCO logging
if np.sum(['history' in ds.attrs for ds in ds_list]) > 1:
for ds in ds_list:
if 'history' in ds.attrs: ds.attrs['history'] = 'conflicting sources'
xds = xr.merge(ds_list, compat=compat, join=join, fill_value=fill_value, combine_attrs=combine_attrs)
# add projection info
if lgeoref:
if geoargs is not None:
# check
if 'proj4' in xds.attrs and 'proj4_string' in geoargs:
if xds.attrs['proj4'] != geoargs['proj4_string']:
raise ValueError(xds.attrs['proj4'])
# custom options
xds = addGeoReference(xds, **geoargs)
# default options
elif 'proj4' in xds.attrs:
# read projection string
xds = addGeoReference(xds, proj4_string=xds.attrs['proj4'])
elif grid:
# load griddef from pickle
from geodata.gdal import loadPickledGridDef
griddef = loadPickledGridDef(grid=grid)
xds = addGeoReference(xds, proj4_string=griddef.projection.ExportToProj4(),)
else:
# use default lat/lon, if it works...
xds = addGeoReference(xds,)
return xds
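# Usage sketches (folders and file patterns are hypothetical):
#   # mode 1 -- one variable per file:
#   ds = loadXArray(varname='T2M', folder='/data/merra2/daily',
#                   filename_pattern='merra2_{var}_1980-2015.nc')
#   # mode 2 -- several multi-variable filetypes merged into one dataset:
#   ds = loadXArray(varlist=['U', 'V'], folder='/data/merra2/daily', filetypes=['flx', 'slv'],
#                   filelist={'flx' : 'merra2_flx_1980.nc', 'slv' : 'merra2_slv_1980.nc'})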
def saveXArray(xds, filename=None, folder=None, mode='overwrite', varlist=None, chunks=None, encoding=None, laddTime=None,
time_dim='time', time_agg=None, ltmpfile=True, lcompute=True, lprogress=True, lfeedback=True, **kwargs):
    ''' function to save an xarray dataset to disk, with options to add/overwrite variables, choose smart encoding,
        add timestamps, use a temp file, and handle dask functionality '''
from geospatial.netcdf_tools import addTimeStamps, addNameLengthMonth
# file path and tmp file
if folder:
filepath = '{}/{}'.format(folder,filename)
# if file exists, get varlist and chunks
if not os.path.exists(filepath) or mode.lower() in ('overwrite','write'):
# create a new file
nc_mode = 'w'
if lfeedback: print("\nExporting to new NetCDF-4 file:")
else:
# if file exists and we are appending...
nc_mode = 'a' # in most cases
ltmpfile = not lcompute # only works with new file (or dummy...)
if mode.lower() in ('add_new',):
if lfeedback: print("\nAppending to existing NetCDF-4 file (only adding new variables):")
elif mode.lower() in ('add_all',):
if lfeedback: print("\nAppending to existing NetCDF-4 file (overwriting existing variables):")
else:
raise ValueError(mode)
# determine tmp file
if ltmpfile:
tmp_filepath = filepath + ( '.tmp' if lcompute else '.test' ) # use temporary file during creation
else:
tmp_filepath = filepath
if lfeedback: print(" '{}'".format(tmp_filepath))
## handle varlist and existing variables in file
# user-supplied varlist
if varlist:
drop_vars = [xvar for xvar in xds.data_vars.keys() if xvar not in varlist]
xds = xds.drop_vars(drop_vars) # returns a shallow copy with vars removed
# handle existing
if nc_mode == 'a':
# open existing file and get encoding
with nc.Dataset(filepath, 'r') as ncds:
if chunks is None: chunks = getCommonChunks(ncds)
if mode.lower() == 'add_new':
nc_varlist = [var for var in ncds.variables.keys() if var not in ncds.dimensions]
drop_vars = [xvar for xvar in xds.data_vars.keys() if xvar in nc_varlist]
xds = xds.drop_vars(drop_vars) # returns a shallow copy with vars removed
# adding all variables and overwriting existing ones, requires no changes except nc_mode = 'a'
# setup encoding
if encoding is None:
encoding = dict(); default = None
else:
default = encoding.pop('DEFAULT',None)
for varname,xvar in xds.data_vars.items():
tmp = zlib_default.copy()
cks = tuple(1 if dim == 'time' else chunks[dim] for dim in xvar.dims)
tmp['chunksizes'] = cks # depends on variable
# N.B.: use chunk size 1 for time and as before for space; monthly chunks make sense, since
# otherwise normals will be expensive to compute (access patterns are not sequential)
        if np.issubdtype(xvar.dtype, np.inexact): tmp['_FillValue'] = np.NaN # set on tmp; encoding[varname] is only assigned below
if default: tmp.update(default)
if varname not in encoding:
encoding[varname] = tmp
else:
tmp.update(encoding[varname])
encoding[varname] = tmp
#print(varname,cks,rvar.encoding)
# write to NetCDF
## write to file (with progress)
# write results to file (actually just create file)
task = xds.to_netcdf(tmp_filepath, mode=nc_mode, format='NETCDF4', unlimited_dims=['time'],
engine='netcdf4', encoding=encoding, compute=False)
if lcompute:
# execute with or without progress bar
if lprogress:
with ProgressBar():
task.compute()
else:
task.compute()
## add extras
with nc.Dataset(tmp_filepath, mode='a') as ncds:
if laddTime:
time_coord = ncds.variables[time_dim]
tatts = getNCAtts(time_coord)
tname = tatts.get('long_name','')
if tname.lower().startswith('calendar '):
# info on month for climatology
from geospatial.netcdf_tools import default_mon_name_atts
if default_mon_name_atts['name'] in ncds.variables:
if lfeedback: print("\nName of months variable alrady exists.")
else:
if lfeedback: print("\nAdding name and length of months.")
assert tatts.get('units','').lower().startswith('month'), tatts # this assumes monthly time aggregation
assert not time_agg or time_agg.lower().startswith('month')
addNameLengthMonth(ncds, time_dim=time_dim)
else:
# time stamps for transient
if time_dim+'_stamp' in ncds.variables:
if lfeedback: print("\nTime-stamp variable ('{}_stamp') already exists.".format(time_dim))
else:
time_agg = time_agg.lower()
if time_agg.endswith('ly'): time_agg = time_agg[:-2]
if lfeedback: print("\nAdding human-readable time-stamp variable ('_stamp').".format(time_dim))
addTimeStamps(ncds, units=time_agg) # add time-stamps
## make sure the spatial units are present!!! xarray seems to loose the spatial coordinate units
lgeo = isGeoCRS(ncds, lraise=True)
for coord in getGeoCoords(ncds, lvars=True, lraise=True):
if 'units' not in coord.ncattrs():
coord.setncattr('units','deg' if lgeo else 'm')
# store geospatial code version
ncds.setncattr('geospatial_netcdf_version',geospatial_netcdf_version)
# replace original file
if ltmpfile:
if lfeedback: print("\nMoving file to final destination (overwriting old file):\n '{}'".format(filepath))
if os.path.exists(filepath): os.remove(filepath)
os.rename(tmp_filepath, filepath)
else:
# just show some info and save task graph
if lfeedback:
print("\nEncoding info:")
print(encoding)
print(task)
print("\nSaving task graph to:\n '{}.svg'".format(filepath))
task.visualize(filename=filepath+'.svg') # This file is never produced
# return file path
return filepath
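# Usage sketch (hypothetical output path; set lcompute=False to only build the dask graph):
#   saveXArray(ds, filename='climatology.nc', folder='/tmp', mode='overwrite',
#              laddTime=True, time_agg='monthly')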
if __name__ == '__main__':
pass | gpl-3.0 | -2,811,596,414,685,821,000 | 46.989336 | 150 | 0.616463 | false |
kaushik94/sympy | sympy/physics/quantum/tests/test_boson.py | 2 | 1542 | from sympy import sqrt, exp, prod, Rational
from sympy.core.compatibility import range
from sympy.physics.quantum import Dagger, Commutator, qapply
from sympy.physics.quantum.boson import BosonOp
from sympy.physics.quantum.boson import (
BosonFockKet, BosonFockBra, BosonCoherentKet, BosonCoherentBra)
def test_bosonoperator():
a = BosonOp('a')
b = BosonOp('b')
assert isinstance(a, BosonOp)
assert isinstance(Dagger(a), BosonOp)
assert a.is_annihilation
assert not Dagger(a).is_annihilation
assert BosonOp("a") == BosonOp("a", True)
assert BosonOp("a") != BosonOp("c")
assert BosonOp("a", True) != BosonOp("a", False)
assert Commutator(a, Dagger(a)).doit() == 1
assert Commutator(a, Dagger(b)).doit() == a * Dagger(b) - Dagger(b) * a
def test_boson_states():
a = BosonOp("a")
# Fock states
n = 3
assert (BosonFockBra(0) * BosonFockKet(1)).doit() == 0
assert (BosonFockBra(1) * BosonFockKet(1)).doit() == 1
assert qapply(BosonFockBra(n) * Dagger(a)**n * BosonFockKet(0)) \
== sqrt(prod(range(1, n+1)))
# Coherent states
alpha1, alpha2 = 1.2, 4.3
assert (BosonCoherentBra(alpha1) * BosonCoherentKet(alpha1)).doit() == 1
assert (BosonCoherentBra(alpha2) * BosonCoherentKet(alpha2)).doit() == 1
assert abs((BosonCoherentBra(alpha1) * BosonCoherentKet(alpha2)).doit() -
exp((alpha1 - alpha2) ** 2 * Rational(-1, 2))) < 1e-12
assert qapply(a * BosonCoherentKet(alpha1)) == \
alpha1 * BosonCoherentKet(alpha1)
| bsd-3-clause | -4,047,575,561,741,689,000 | 33.266667 | 77 | 0.656939 | false |
jenfly/monsoon-onset | scratchpad.py | 1 | 38032 | import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import numpy as np
import xarray as xray
import pandas as pd
import matplotlib.pyplot as plt
from pydap.client import open_url
import atmos as atm
import merra
import indices
# ----------------------------------------------------------------------
url = 'http://iridl.ldeo.columbia.edu/SOURCES/.OSU/.PRISM/.monthly/dods'
ds = xray.open_dataset(url, decode_times=False)
#ds = xray.open_dataset(url, decode_times=False, engine='pydap')
tmax = ds['tmax'][:500, ::3, ::3]
tmax[0].plot()
# Use pydap's open_url
ds2 = open_url(url)
# ----------------------------------------------------------------------
authfile = atm.homedir() + '.netrc'
with open(authfile) as f:
lines = f.readlines()
username = lines[1].split()[1]
password = lines[2].split()[1]
url = ('https://%s:%s@' % (username, password) +
'goldsmr5.sci.gsfc.nasa.gov/opendap/MERRA2/M2I3NPASM.5.12.4/'
'1986/01/MERRA2_100.inst3_3d_asm_Np.19860101.nc4.nc4')
ds = open_url(url)
# ----------------------------------------------------------------------
# 8/24/2017 West African monsoon region
#filnm = '/home/jwalker/datastore/gpcp/gpcp_daily_1997-2014.nc'
year = 1997
filenm = '/home/jwalker/datastore/gpcp/gpcp_daily_%d.nc' % year
suptitle = 'GPCP %d' % year
ds = xray.open_dataset(filenm)
pcp = ds['PREC'].load()
pcp = atm.set_lon(pcp, lonmax=180)
ssns = ['JAN', 'APR', 'JUL', 'OCT']
#ssns = ['DJF', 'MAM', 'JJA', 'SON']
lat1, lat2 = 0, 20
lon1, lon2 = -20, 15
#axlims = (-60, 60, -30, 120)
axlims = (-45, 45, -45, 60)
xticks = np.arange(-30, 61, 30)
clev = np.arange(0, 10.5, 1)
climits = (0, 10)
nrow, ncol = 2, 2
fig_kw = {'figsize' : (8, 6.5), 'sharex' : True, 'sharey' : True}
gridspec_kw = {'left' : 0.07, 'right' : 0.9, 'bottom' : 0.07, 'top' : 0.9,
'wspace' : 0.3}
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, suptitle=suptitle,
gridspec_kw=gridspec_kw)
for ssn in ssns:
days = atm.season_days(ssn)
pcpbar = atm.dim_mean(pcp, 'day', min(days), max(days))
grp.next()
m = atm.init_latlon(lat1=axlims[0], lat2=axlims[1], lon1=axlims[2],
lon2=axlims[3])
atm.contourf_latlon(pcpbar, clev=clev, cmap='PuBuGn', extend='max', m=m)
atm.geobox(lat1, lat2, lon1, lon2, m=m)
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.clim(climits)
plt.title(ssn)
ts = atm.mean_over_geobox(pcp, lat1, lat2, lon1, lon2)
ts_acc = ts.cumsum(axis=0)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
ts.plot()
plt.title('Precip')
plt.subplot(1, 2, 2)
ts_acc.plot()
plt.title('Cumulative precip')
# ----------------------------------------------------------------------
# 4/26/2016 Re-calculate ubudget climatologies with corrupted data replaced
version = 'merra2'
years = np.arange(1980, 2016)
onset_nm = 'CHP_MFC'
ind_nm = 'onset'
# ind_nm = 'retreat'
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
datadir = atm.homedir() + 'datastore/%s/analysis/' % version
savedir = atm.homedir() + 'eady/datastore/%s/analysis/' % version
filestr = version + '_ubudget%d_dailyrel_'
if ind_nm == 'retreat':
filestr = filestr + 'retreat_'
filestr = filestr + onset_nm + '_ndays5_60E-100E'
datafiles = {}
for plev in plevs:
datafiles[plev] = [datadir + filestr % plev + '_%d.nc' % yr for yr in years]
# Compute climatologies and save
yearstr = '_%d-%d.nc' % (min(years), max(years))
for plev in plevs:
relfiles = datafiles[plev]
savefile = savedir + filestr % plev + yearstr
ds = atm.mean_over_files(relfiles)
ds.attrs['years'] = years
print('Saving to ' + savefile)
ds.to_netcdf(savefile)
# ----------------------------------------------------------------------
# JJAS precip and fraction of annual totals
datadir = atm.homedir() + 'datastore/merra2/figure_data/'
filenm = datadir + 'gpcp_dailyrel_1997-2015.nc'
with xray.open_dataset(filenm) as ds:
pcp_jjas = ds['PCP_JJAS'].load()
pcp_frac = ds['FRAC_JJAS'].load()
axlims = (-20, 35, 50, 115)
xticks = range(40, 121, 10)
clev = np.arange(0, 10.5, 1)
plt.figure(figsize=(8, 6))
m = atm.contourf_latlon(pcp_jjas, clev=clev, axlims=axlims, cmap='PuBuGn',
extend='max')
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
atm.contour_latlon(pcp_frac, clev=[0.5], m=m, colors='m', linewidths=1)
atm.geobox(10, 30, 60, 100, m=m, color='b')
plt.xlim(axlims[2], axlims[3])
# ----------------------------------------------------------------------
# Map of monsoon region
m = atm.init_latlon(-50, 50, 40, 120, coastlines=False)
m.shadedrelief(scale=0.3)
yticks = range(-45, 46, 15)
xticks = range(40, 121, 20)
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
atm.geobox(10, 30, 60, 100, m=m, color='k')
plt.savefig('figs/map_box.png', dpi=200)
# ----------------------------------------------------------------------
# Animation of precip and 850 mb winds
datadir = atm.homedir() + 'datastore/merra2/analysis/'
files = {'PREC' : datadir + 'gpcp_dailyrel_CHP_MFC_1997-2015.nc'}
for nm in ['U', 'V']:
files[nm] = datadir + 'merra2_%s850_dailyrel_CHP_MFC_1980-2015.nc' % nm
ndays = 10
data = {}
for nm in files:
filenm = files[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if 'year' in var:
var = var.mean(dim='year')
daydim = atm.get_coord(var, 'dayrel', 'dim')
data[nm] = atm.rolling_mean(var, ndays, axis=daydim)
def animate(data, day, axlims=(-30, 45, 40, 120), dx=5, dy=5, climits=(-5, 15),
cmap='BuPu', d0=138, clev=np.arange(5, 15.5, 1),
cticks=np.arange(5, 16, 2.5)):
lat1, lat2, lon1, lon2 = axlims
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
xticks = range(40, 121, 20)
mm, dd = atm.jday_to_mmdd(day + d0)
title = (atm.month_str(mm)).capitalize() + ' %d' % dd
u = atm.subset(data['U'].sel(dayrel=day), subset_dict)
v = atm.subset(data['V'].sel(dayrel=day), subset_dict)
u = u[::dy, ::dx]
v = v[::dy, ::dx]
#spd = np.sqrt(u**2 + v**2)
pcp = data['PREC'].sel(dayrel=day)
lat = atm.get_coord(u, 'lat')
lon = atm.get_coord(u, 'lon')
plt.clf()
m = atm.init_latlon(lat1, lat2, lon1, lon2, coastlines=False)
m.drawcoastlines(color='k', linewidth=0.5)
m.shadedrelief(scale=0.3)
atm.contourf_latlon(pcp, clev=clev, axlims=axlims, m=m, cmap=cmap,
extend='max', cb_kwargs={'ticks' : cticks})
#atm.pcolor_latlon(pcp, axlims=axlims, cmap=cmap, cb_kwargs={'extend' : 'max'})
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.clim(climits)
#plt.quiver(lon, lat, u, v, linewidths=spd.values.ravel())
plt.quiver(lon, lat, u, v)
plt.title(title)
plt.draw()
# Need to scale arrows in quiver plot so that they are consistent across
# different days
days = range(-90, 201, 1)
for i, day in enumerate(days):
animate(data, day)
filenm = 'figs/anim/frame%03d.png' % i
print('Saving to ' + filenm)
plt.savefig(filenm)
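# A way to keep arrow lengths comparable across frames is to pass a fixed 'scale'
# (and 'scale_units') to quiver instead of letting matplotlib autoscale each frame;
# sketch only -- the value 200 is arbitrary:
#   plt.quiver(lon, lat, u, v, scale=200, scale_units='width')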
# ----------------------------------------------------------------------
years = np.arange(1980, 1999)
datadir = atm.homedir() + 'datastore/merra2/dailyrad/'
files = [datadir + 'merra2_RAD_%d.nc4' % yr for yr in years]
ds = atm.mean_over_files(files)
# ----------------------------------------------------------------------
plev = 200
filestr = '/home/jennifer/datastore/merra2/analysis/merra2_H%d_dailyrel_CHP_MFC_1980-2015.nc'
filenm = filestr % plev
with xray.open_dataset(filenm) as ds:
ds.load()
lon1, lon2 = 60, 100
hgt = ds['H']
hgt = hgt - atm.dim_mean(hgt, 'lon', lon1, lon2)
if plev == 20:
climits = (-80, 80)
else:
climits = (-40, 40)
plotdays = [-30, 0, 30]
xticks = range(40, 121, 20)
axlims = (-60, 60, 40, 120)
nrow, ncol = 1, 3
fig_kw = {'figsize' : (11, 5), 'sharex' : True, 'sharey' : True}
gridspec_kw = {'left' : 0.07, 'right' : 0.9, 'bottom' : 0.07, 'top' : 0.9,
'wspace' : 0.3}
suptitle = 'H* at %d hPa' % plev
grp = atm.FigGroup(nrow, ncol, fig_kw=fig_kw, suptitle=suptitle,
gridspec_kw=gridspec_kw)
for day in plotdays:
grp.next()
var = hgt.sel(dayrel=day)
atm.pcolor_latlon(var, axlims=axlims, cb_kwargs={'extend' : 'both'})
plt.clim(climits)
plt.title('Day %d' % day)
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.axvline(lon1, color='k', dashes=[6,1])
plt.axvline(lon2, color='k', dashes=[6,1])
# ----------------------------------------------------------------------
# JJAS precip and fraction of annual totals
datadir = atm.homedir() + 'datastore/merra2/figure_data/'
filenm = datadir + 'gpcp_dailyrel_1997-2015.nc'
with xray.open_dataset(filenm) as ds:
pcp_jjas = ds['PCP_JJAS'].load()
pcp_frac = ds['FRAC_JJAS'].load()
axlims = (-20, 35, 50, 115)
xticks = range(40, 121, 10)
clev = np.arange(0, 10.5, 1)
plt.figure(figsize=(8, 6))
m = atm.contourf_latlon(pcp_jjas, clev=clev, axlims=axlims, cmap='PuBuGn',
extend='max')
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
atm.contour_latlon(pcp_frac, clev=[0.5], m=m, colors='m', linewidths=1)
atm.geobox(10, 30, 60, 100, m=m, color='b')
plt.xlim(axlims[2], axlims[3])
# ----------------------------------------------------------------------
# Map of monsoon region
m = atm.init_latlon(-50, 50, 40, 120, coastlines=False)
m.shadedrelief(scale=0.3)
yticks = range(-45, 46, 15)
xticks = range(40, 121, 20)
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.yticks(yticks, atm.latlon_labels(yticks, 'lat'))
atm.geobox(10, 30, 60, 100, m=m, color='k')
plt.savefig('figs/map_box.png', dpi=200)
# ----------------------------------------------------------------------
# Animation of precip and 850 mb winds
datadir = atm.homedir() + 'datastore/merra2/analysis/'
files = {'PREC' : datadir + 'gpcp_dailyrel_CHP_MFC_1997-2015.nc'}
for nm in ['U', 'V']:
files[nm] = datadir + 'merra2_%s850_dailyrel_CHP_MFC_1980-2015.nc' % nm
ndays = 10
data = {}
for nm in files:
filenm = files[nm]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if 'year' in var:
var = var.mean(dim='year')
daydim = atm.get_coord(var, 'dayrel', 'dim')
data[nm] = atm.rolling_mean(var, ndays, axis=daydim)
def animate(data, day, axlims=(-30, 45, 40, 120), dx=5, dy=5, climits=(-5, 15),
cmap='BuPu', d0=138, clev=np.arange(5, 15.5, 1),
cticks=np.arange(5, 16, 2.5)):
lat1, lat2, lon1, lon2 = axlims
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
xticks = range(40, 121, 20)
mm, dd = atm.jday_to_mmdd(day + d0)
title = (atm.month_str(mm)).capitalize() + ' %d' % dd
u = atm.subset(data['U'].sel(dayrel=day), subset_dict)
v = atm.subset(data['V'].sel(dayrel=day), subset_dict)
u = u[::dy, ::dx]
v = v[::dy, ::dx]
#spd = np.sqrt(u**2 + v**2)
pcp = data['PREC'].sel(dayrel=day)
lat = atm.get_coord(u, 'lat')
lon = atm.get_coord(u, 'lon')
plt.clf()
m = atm.init_latlon(lat1, lat2, lon1, lon2, coastlines=False)
m.drawcoastlines(color='k', linewidth=0.5)
m.shadedrelief(scale=0.3)
atm.contourf_latlon(pcp, clev=clev, axlims=axlims, m=m, cmap=cmap,
extend='max', cb_kwargs={'ticks' : cticks})
#atm.pcolor_latlon(pcp, axlims=axlims, cmap=cmap, cb_kwargs={'extend' : 'max'})
plt.xticks(xticks, atm.latlon_labels(xticks, 'lon'))
plt.clim(climits)
#plt.quiver(lon, lat, u, v, linewidths=spd.values.ravel())
plt.quiver(lon, lat, u, v)
plt.title(title)
plt.draw()
# Need to scale arrows in quiver plot so that they are consistent across
# different days
days = range(-90, 201, 1)
for i, day in enumerate(days):
animate(data, day)
filenm = 'figs/anim/frame%03d.png' % i
print('Saving to ' + filenm)
plt.savefig(filenm)
# ----------------------------------------------------------------------
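# Multi-year mean of MERRA2 daily radiation files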
years = np.arange(1980, 1999)
datadir = atm.homedir() + 'datastore/merra2/dailyrad/'
files = [datadir + 'merra2_RAD_%d.nc4' % yr for yr in years]
ds = atm.mean_over_files(files)
# ----------------------------------------------------------------------
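# Read OPeNDAP login credentials from a local file and open a MERRA2 granule
# with pydap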
from pydap.client import open_url
authfile = atm.homedir() + '.netrc'
with open(authfile) as f:
lines = f.readlines()
username = lines[1].split()[1]
password = lines[2].split()[1]
url = ('https://%s:%s@' % (username, password) +
'goldsmr5.sci.gsfc.nasa.gov/opendap/MERRA2/M2I3NPASM.5.12.4/'
'1986/01/MERRA2_100.inst3_3d_asm_Np.19860101.nc4.nc4')
ds = open_url(url)
# ----------------------------------------------------------------------
# 11/2/2016 Using pydap and xray to try reading MERRA2_100
from pydap_auth import install_basic_client
install_basic_client()
from pydap.client import open_url
url = ('http://goldsmr4.sci.gsfc.nasa.gov/opendap/MERRA2/M2T1NXSLV.5.12.4/' +
'2016/06/MERRA2_400.tavg1_2d_slv_Nx.20160601.nc4.nc4')
ds = open_url(url)
###################
from pydap_cas import install_cas_client
install_cas_client()
from pydap.client import open_url
import xarray
url = ('http://jenfly29:[email protected]/opendap/' +
'MERRA2/M2I3NPASM.5.12.4/1986/01/MERRA2_100.inst3_3d_asm_Np.19860101.nc4.nc4')
ds1 = open_url(url) # Works but data isn't in xarray format
ds2 = xarray.open_dataset(url, engine='pydap') # Error message, see attached
# ----------------------------------------------------------------------
# 11/1/2016 MSE budget terms from monthly data
years = range(1980, 1983)
months = range(1, 13)
lon1, lon2 = 60, 100
datadir = '/home/jwalker/datastore/merra2/monthly/'
filestr = datadir + 'MERRA2_100.tavgM_2d_rad_Nx.%d%02d.nc4'
datafiles = {yr : [filestr % (yr, m) for m in months] for yr in years}
def net_rad(rad, weights):
for i, nm in enumerate(weights):
if i == 0:
net = rad[nm] * weights[nm]
else:
net = net + rad[nm] * weights[nm]
net.attrs['long_name'] = 'net_longwave_and_shortwave_into_column'
return net
def get_year(files, year, months=range(1,13)):
weights = {'SWTNT' : 1.0, 'LWTUP' : -1.0, 'SWGNT' : -1.0, 'LWGNT' : -1.0}
nms = weights.keys()
for i, filenm in enumerate(files):
month = months[i]
print('Loading ' + filenm)
with xray.open_dataset(filenm) as ds:
rad = atm.squeeze(ds[nms])
# Change 'time' dimension to 'month' and add 'year' dimension
for nm in rad.data_vars:
rad[nm] = atm.expand_dims(rad[nm], 'month', month, axis=0)
rad[nm] = atm.expand_dims(rad[nm], 'year', year, axis=0)
rad['NETRAD'] = net_rad(rad, weights)
if i == 0:
data = rad
else:
data = xray.concat([data, rad], dim='month')
return data
for i, year in enumerate(years):
files = datafiles[year]
ds = get_year(files, year)
if i == 0:
data = ds
else:
data = xray.concat([data, ds], dim='year')
# Mean along sector and within 2 degrees of equator
latmin, latmax = -2, 2
data_eq = atm.dim_mean(data, 'lon', lon1, lon2)
data_eq = atm.dim_mean(data_eq, 'lat', latmin, latmax)
plotyear = 1980
plt.figure(figsize=(6, 8))
plt.suptitle('%d-%dE RAD (W/m2) at equator' % (lon1, lon2))
plt.subplot(2, 1, 1)
for nm in data_eq.data_vars:
plt.plot(months, data_eq[nm].sel(year=plotyear), label=nm)
plt.legend(fontsize=8)
plt.title(plotyear)
plt.subplot(2, 1, 2)
for year in years:
plt.plot(months, data_eq['NETRAD'].sel(year=year), label=year)
plt.plot(months, data_eq['NETRAD'].mean(dim='year'), 'k', linewidth=2,
label='CLIM')
plt.legend(fontsize=8)
plt.title('NETRAD')
plt.xlabel('Month')
# ----------------------------------------------------------------------
# 10/30/2016 Temporary ubudget climatologies with problem years removed
version = 'merra2'
years = range(1980, 1984) + [1985] + range(1987, 1995) + [1996]
years = years + range(1998, 2008) + range(2009, 2012) + [2014]
onset_nm = 'CHP_MFC'
plevs = [1000,925,850,775,700,600,500,400,300,250,200,150,100,70,50,30,20]
datadir = atm.homedir() + 'datastore/%s/analysis/' % version
savedir = atm.homedir() + 'eady/datastore/%s/analysis/ubudget_temp/' % version
filestr = (version + '_ubudget%d_dailyrel_' + onset_nm +
'_ndays5_60E-100E')
datafiles = {}
for plev in plevs:
datafiles[plev] = [datadir + filestr % plev + '_%d.nc' % yr for yr in years]
# Compute climatologies and save
yearstr = '_%d-%d_excl.nc' % (min(years), max(years))
for plev in plevs:
relfiles = datafiles[plev]
savefile = savedir + filestr % plev + yearstr
ds = atm.mean_over_files(relfiles)
ds.attrs['years'] = years
print('Saving to ' + savefile)
ds.to_netcdf(savefile)
#************************ TEMPORARY TROUBLESHOOTING ******************
# filestr = ('/home/jwalker/datastore/merra2/analysis/merra2_ubudget%d_' +
# 'dailyrel_CHP_MFC_ndays5_60E-100E_%d.nc')
#
# for year in years:
# with open('troubleshooting_%d.txt' % year, 'w') as f1:
# for plev in plevs:
# filenm = filestr % (plev, year)
# print(filenm)
# f1.write('------------------------------------------\n')
# f1.write(filenm + '\n')
# f1.write('Year %d, pressure level %.0f' % (year, plev))
# with xray.open_dataset(filenm) as ds:
# vals = ds.max()
# biggest = vals.to_array().values.max()
# f1.write('%.e\n' % biggest)
# if biggest > 10:
# for nm in vals.data_vars:
# f1.write('%s\t%.e\n' % (nm, vals[nm]))
# ----------------------------------------------------------------------
# 10/20/2016 Read India state boundaries from geojson file
filenm = 'data/india_state.geojson'
with open(filenm) as f:
data = json.load(f)
i_region, i_poly = 17, 44
poly = data['features'][i_region]['geometry']['coordinates'][i_poly][0]
arr = np.array(poly)
x, y = arr[:, 0], arr[:, 1]
# Cut out wonky bits
i1, i2 = 8305, 19200
x = np.concatenate((x[:i1], x[i2:]))
y = np.concatenate((y[:i1], y[i2:]))
plt.figure()
atm.init_latlon(5, 20, 70, 85, resolution='l')
plt.plot(x, y)
# ----------------------------------------------------------------------
# 7/13/2016 MERRA2 radiation data
years = 1980
months = 7
#opts = {'vertical' : 'X', 'res' : 'N', 'time_kind' : 'T', 'kind' : 'RAD'}
url_dict = merra.get_urls(years, months=months, version='merra2',
varnm='SWGNT', monthly=True)
weights = {'SWTNT' : 1.0, 'LWTUP' : -1.0, 'SWGNT' : -1.0, 'LWGNT' : -1.0}
nms = weights.keys()
def net_rad(rad, weights):
for i, nm in enumerate(weights):
if i == 0:
net = rad[nm] * weights[nm]
else:
net = net + rad[nm] * weights[nm]
return net
url = url_dict.values()[0]
with xray.open_dataset(url) as ds:
rad = atm.squeeze(ds[nms])
rad['NET'] = net_rad(rad, weights)
url_dict2 = merra.get_urls(years, months=months, version='merra2',
varnm='EFLUX', monthly=True)
url2 = url_dict2.values()[0]
with xray.open_dataset(url2) as ds:
Fnet = atm.squeeze(ds[['EFLUX', 'HFLUX']])
Fnet['RAD'] = rad['NET']
Fnet['TOT'] = Fnet['EFLUX'] + Fnet['HFLUX'] + Fnet['RAD']
plt.figure()
for i, nm in enumerate(Fnet.data_vars):
plt.subplot(2, 2, i + 1)
atm.pcolor_latlon(Fnet[nm])
plt.title(nm)
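# Total MSE flux components: add the dry enthalpy (CPT), geopotential (PHI) and
# latent heat (Lv*QV) fluxes, then take their horizontal divergence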
h_nms = ['UFLXCPT', 'UFLXPHI', 'UFLXQV', 'VFLXCPT', 'VFLXPHI', 'VFLXQV']
Lv = atm.constants.Lv.values
urls = merra.get_urls(years, months=months, version='merra2', monthly=True,
varnm='UFLXCPT')
url3 = urls.values()[0]
with xray.open_dataset(url3) as ds:
mse = atm.squeeze(ds[h_nms])
for nm in ['UFLXQV', 'VFLXQV']:
key = nm.replace('QV', 'LQV')
mse[key] = mse[nm] * Lv
mse[key].attrs['units'] = mse[nm].attrs['units'].replace('kg', 'J')
mse['UFLXTOT'] = mse['UFLXCPT'] + mse['UFLXPHI'] + mse['UFLXLQV']
mse['VFLXTOT'] = mse['VFLXCPT'] + mse['VFLXPHI'] + mse['VFLXLQV']
mse_div, mse_div_x, mse_div_y = atm.divergence_spherical_2d(mse['UFLXTOT'],
mse['VFLXTOT'])
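# Sanity check: recompute the meridional divergence term by hand at a single
# longitude and compare with mse_div_y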
var = atm.subset(mse['VFLXTOT'], {'lat' : (-80, 80)})
dvar = atm.subset(mse_div_y, {'lat' : (-80, 80)})
lon0 = 10
val, ind = atm.find_closest(var.lon, lon0)
var0, dvar0 = var[:, ind], dvar[:, ind]
lat = var0.lat.values
lat_rad = np.radians(lat)
coslat = np.cos(lat_rad)
a = atm.constants.radius_earth.values
dy = np.gradient(lat_rad)
plt.figure()
plt.subplot(2, 2, 1)
plt.plot(lat, var0)
plt.subplot(2, 2, 2)
plt.plot(lat, var0 * coslat)
plt.subplot(2, 2, 3)
plt.plot(lat, np.gradient(var0 * coslat, dy))
plt.subplot(2, 2, 4)
plt.plot(lat, np.gradient(var0 * coslat, dy) / (coslat*a))
plt.plot(lat, dvar0, 'r')
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(var.lat, var0)
plt.grid()
plt.subplot(2, 1, 2)
plt.plot(var.lat, dvar0)
plt.grid()
# v . grad(phi)
nms2 = ['U', 'V', 'H']
urls = merra.get_urls(years, months=months, version='merra2', monthly=True,
varnm='U')
url4 = urls.values()[0]
with xray.open_dataset(url4) as ds:
phi_vars = atm.squeeze(ds[nms2])
phi = phi_vars['H'] * atm.constants.g.values
# ----------------------------------------------------------------------
# GPCP daily climatology
years = range(1997, 2015)
datadir = atm.homedir() + 'datastore/gpcp/'
files = [datadir + 'gpcp_daily_%d.nc' % yr for yr in years]
savefile = datadir + 'gpcp_daily_%d-%d.nc' % (min(years), max(years))
pcp = atm.combine_daily_years('PREC', files, years, yearname='year')
pcp = pcp.mean(dim='year')
print('Saving to ' + savefile)
atm.save_nc(savefile, pcp)
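# JJAS (Jun 1 - Sep 30) precipitation total and its fraction of the annual total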
day1 = atm.mmdd_to_jday(6, 1)
day2 = atm.mmdd_to_jday(9, 30)
pcp_ssn = atm.subset(pcp, {'day' : (day1, day2)})
pcp_frac = pcp_ssn.sum(dim='day') / pcp.sum(dim='day')
# ----------------------------------------------------------------------
# Data-wrangling for ENSO indices
datadir = atm.homedir() + 'dynamics/python/data/ENSO/'
datafile = datadir + 'enso_sst_monthly.txt'
df = pd.read_table(datafile, skiprows=8, index_col=[0, 1],
delim_whitespace=True)
savestr = datadir + 'enso_sst_monthly_%s.csv'
for key in ['NINO1+2', 'NINO3', 'NINO3.4', 'NINO4']:
savefile = savestr % key.lower().replace('.', '').replace('+', '')
enso = df.unstack()[key]
enso.columns = [(atm.month_str(m)).capitalize() for m in range(1, 13)]
enso.to_csv(savefile)
# ----------------------------------------------------------------------
# x-y data
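# Regress selected daily fields onto the onset index and map the correlation
# and regression coefficients for selected days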
regdays = [-60, -30, 0, 30, 60]
plotdays = [-60, -30]
clev_r = np.arange(-1.0, 1.01, 0.05)
for nm in varnms:
print(nm)
var = data[nm].sel(dayrel=regdays)
reg_daily = atm.regress_field(var, onset, axis=0)
for day in plotdays:
reg = reg_daily.sel(dayrel=day)
title = '%s day %d vs. Onset ' % (var.name, day)
cint_m = atm.cinterval(reg.m)
clev_m = atm.clevels(reg.m, cint_m, symmetric=True)
plt.figure(figsize=(11, 8))
plt.subplot(1, 2, 1)
atm.contourf_latlon(reg['r'], clev=clev_r, cmap='RdBu_r')
plt.title(title + ' - Corr Coeff')
plt.subplot(1, 2, 2)
atm.contourf_latlon(reg['m'], clev=clev_m, cmap='RdBu_r')
plt.title(title + ' - Reg Coeff')
# ----------------------------------------------------------------------
# For later, when combining plevel data:
def func(var, pname='Height', axis=1):
pres = var.attrs[pname]
var = atm.expand_dims(var, pname, pres, axis=axis)
return var
# ----------------------------------------------------------------------
# Streamfunction and zonal wind from dailyrel climatology
datadir = atm.homedir() + 'datastore/merra/analysis/'
lon1, lon2 = 60, 100
lonstr = atm.latlon_str(lon1, lon2, 'lon')
filestr = datadir + 'merra_%s_sector_' + lonstr + '_dailyrel_CHP_MFC_%d.nc'
years = np.arange(1979, 1995)
files = {}
for nm in ['U', 'V']:
files[nm] = [filestr % (nm, yr) for yr in years]
data = xray.Dataset()
for nm in files:
data[nm] = atm.combine_daily_years(nm, files[nm], years, yearname='year')
# Climatological mean
databar = data.mean(dim='year')
# Streamfunction
if (lon2 - lon1) < 360:
sector_scale = (lon2 - lon1) / 360.
else:
sector_scale = None
databar['PSI'] = atm.streamfunction(databar['V'], sector_scale = sector_scale)
# Topography
psfile = atm.homedir() + 'dynamics/python/atmos-tools/data/topo/ncep2_ps.nc'
with xray.open_dataset(psfile) as ds:
ps = ds['ps'] / 100
if (lon2 - lon1) < 360:
ps = atm.dim_mean(ps, 'lon', lon1, lon2)
else:
ps = atm.dim_mean(ps, 'lon')
# Finding latitude of max psi
# psi = atm.subset(databar['PSI'], {'plev' : (700, 700), 'lat' : (-30, 10)},
# squeeze=True)
psi = atm.subset(databar['PSI'], {'lat' : (-30, 10), 'plev' : (100, 800)},
squeeze=True)
psi = psi.max(axis=1)
lat = atm.get_coord(psi, 'lat')
ilatmax = psi.argmax(axis=1)
latmax = lat[ilatmax]
days = atm.get_coord(psi, 'dayrel')
latmax = xray.DataArray(latmax, coords={'dayrel' : days})
plt.figure()
plt.plot(latmax['dayrel'], latmax)
# Lat-pres plots on days
clev_u, clev_psi = 5, 5
clims = [-50, 50]
omitzero = True
days = [-30]
u = databar['U'].sel(dayrel=days).mean(dim='dayrel')
psi = databar['PSI'].sel(dayrel=days).mean(dim='dayrel')
latm = latmax.sel(dayrel=days).mean(dim='dayrel')
plt.figure()
atm.contourf_latpres(u, clev=clev_u, topo=ps)
plt.clim(clims)
atm.contour_latpres(psi, clev=clev_psi, omitzero=omitzero)
plt.grid()
plt.axvline(latm, color='m', linewidth=2)
plt.title('Days ' + str(days))
# ----------------------------------------------------------------------
# Double and single westerly jets for group meeting presentation
yearstr = '1979-2015'
varnms = ['U', 'V']
datadir = atm.homedir() + 'datastore/merra/monthly/'
filestr = datadir + 'merra_%s_%s.nc'
files = {nm : filestr % (nm, yearstr) for nm in varnms}
ssns = ['DJF', 'JJA']
sector_ssn = 'JJA'
data_str = 'MERRA %s' % yearstr
data = xray.Dataset()
for nm in varnms:
with xray.open_dataset(files[nm]) as ds:
data[nm] = ds[nm].load()
data['PSI'] = atm.streamfunction((data['V']).mean(dim='XDim'), pdim=-2)
keys = data.data_vars.keys()
for ssn in ssns:
for nm in keys:
months = atm.season_months(ssn)
data[nm + '_' + ssn] = (data[nm]).sel(month=months).mean(dim='month')
lat = atm.get_coord(data, 'lat')
lon = atm.get_coord(data, 'lon')
psfile = atm.homedir() + 'dynamics/python/atmos-tools/data/topo/ncep2_ps.nc'
ps = atm.get_ps_clim(lat, lon, psfile)
ps = ps / 100
psbar = atm.dim_mean(ps, 'lon')
# Mean over sectors:
def calc_sectors(var):
sectors = xray.Dataset()
name = var.name
lon = atm.get_coord(var, 'lon')
sector_lons = {'Atlantic' : lon[(lon >= -90) & (lon <= 0)],
'Pacific' : lon[(lon >= 135) | (lon <= -100)],
'Indian' : lon[(lon >= 40) & (lon <= 120)]}
sector_lims = {'Atlantic' : (-75, 0), 'Pacific' : (135, -100),
'Indian' : (40, 120)}
for nm in sector_lons:
lon_sub = sector_lons[nm]
var_sub = atm.subset(var, {'lon' : (lon_sub, None)})
var_sub.attrs['lon_lims'] = sector_lims[nm]
sectors[nm] = atm.dim_mean(var_sub, 'lon')
return sectors
usectors = calc_sectors(data['U_' + sector_ssn])
# DJF and JJA zonal mean zonal wind and streamfunction
def plot_contours(data, varnm, ssn, psbar, row, col, xticks):
key = varnm + '_' + ssn
var = data[key]
if 'XDim' in var.dims:
var = var.mean(dim='XDim')
clev = {'U' : 5, 'PSI' : 10}[varnm]
omitzero = {'U' : False, 'PSI' : True}[varnm]
atm.contour_latpres(var, clev=clev, topo=psbar, omitzero=omitzero)
plt.xticks(xticks, [])
plt.xlabel('')
name = {'PSI' : '$\psi$', 'U' : 'U'}[varnm]
sz = {'PSI' : 16, 'U' : 14}[varnm]
wt = {'PSI' : 'bold', 'U' : 'normal'}[varnm]
atm.text(name, (0.02, 0.88), fontsize=sz, fontweight=wt)
if row == 1:
plt.title(ssn, fontsize=12, fontweight='bold')
if col > 1:
plt.ylabel('')
plt.gca().set_yticklabels([])
plot_psi = True
if plot_psi:
nr, nc, figsize = 3, 2, (11, 8)
nms = ['PSI', 'U']
suptitle = 'Zonal Mean Streamfunction and U (%s)' % data_str
else:
nr, nc, figsize = 2, 2, (11, 7)
nms = ['U']
suptitle = 'Zonal Mean U (%s)' % data_str
xticks = range(-90, 91, 30)
ylims = (-10, 45)
gridspec_kw = {'left' : 0.07, 'right' : 0.97, 'wspace' : 0.05, 'hspace' :0.08,
'top' : 0.92, 'bottom' : 0.08}
fig, axes = plt.subplots(nr, nc, figsize=figsize, gridspec_kw=gridspec_kw)
plt.suptitle(suptitle, fontsize=12)
for i, ssn in enumerate(['DJF', 'JJA']):
col = i + 1
for j, nm in enumerate(nms):
row = j + 1
plt.sca(axes[j, col - 1])
plot_contours(data, nm, ssn, psbar, row, col, xticks)
plt.sca(axes[nr - 1, col -1])
key = 'U_%s' % ssn
u850 = atm.dim_mean(data[key].sel(Height=850), 'lon')
u200 = atm.dim_mean(data[key].sel(Height=200), 'lon')
plt.plot(lat, u200, 'k', label='200mb')
plt.plot(lat, u850, 'k--', label='850mb')
plt.legend(fontsize=10)
plt.xticks(xticks)
plt.grid(True)
plt.xlim(-90, 90)
plt.ylim(ylims)
plt.xlabel('Latitude')
atm.text('U', (0.02, 0.88), fontsize=14)
if col == 1:
plt.ylabel('Zonal Wind (m/s)')
else:
plt.gca().set_yticklabels([])
# Lat-lon maps and sector line plots
ssn = sector_ssn
gridspec_kw = {'left' : 0.02, 'right' : 0.98, 'wspace' : 0.3, 'hspace' : 0.2,
'bottom' : 0.08, 'top' : 0.92, 'width_ratios' : [2, 1]}
nr, nc = 2, 2
style = {'Indian' : 'm', 'Atlantic' : 'k--', 'Pacific' : 'g'}
climits = {200 : (-50, 50), 850 : (-16, 16)}
iplot = 0
fig, axes = plt.subplots(nr, nc, figsize=(11,8), gridspec_kw=gridspec_kw)
plt.suptitle('%s Zonal Wind' % ssn, fontsize=14)
for i, plev in enumerate([200, 850]):
iplot += 1
row, col = atm.subplot_index(nr, nc, iplot)
u = atm.subset(data['U_' + ssn], {'plev' : (plev, plev)}, squeeze=True)
usec = atm.subset(usectors, {'plev' : (plev, plev)}, squeeze=True)
plt.sca(axes[row - 1, col - 1])
atm.pcolor_latlon(u)
plt.title('%d hPa' % plev, fontsize=12)
plt.xlabel('')
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
plt.clim(climits[plev])
for nm in usec.data_vars:
for lon0 in usec[nm].attrs['lon_lims']:
plt.axvline(lon0, color=style[nm][0], linewidth=2)
iplot += 1
row, col = atm.subplot_index(nr, nc, iplot)
plt.sca(axes[row - 1, col - 1])
df = usec.to_dataframe()
df.plot(ax=plt.gca(), style=style, legend=False, linewidth=1.5)
plt.legend(fontsize=10, handlelength=2.5)
plt.xticks(xticks)
plt.ylabel('U (m/s)')
if row == nr:
plt.xlabel('Latitude')
else:
plt.xlabel('')
plt.gca().set_xticklabels([])
plt.title('%d hPa' % plev, fontsize=12)
plt.grid(True)
# ----------------------------------------------------------------------
# Calculate monthly U, V climatology
years = np.arange(1979, 2016)
datadir = atm.homedir() + '/datastore/merra/monthly/'
varnms = ['U', 'V']
months = range(1, 13)
filestr = datadir + 'merra_%s_1979-2015_%02d.nc'
filestr2 = datadir + 'merra_%s_1979-2015.nc'
for nm in varnms:
files = [datadir + 'merra_%s_%d.nc' % (nm, yr) for yr in years]
for month in months:
var = atm.load_concat(files, nm, concat_dim='year',
subset_dict={'month' : (month, month)},
squeeze=False)
var = atm.dim_mean(var, 'year')
filenm = filestr % (nm, month)
print('Saving to ' + filenm)
atm.save_nc(filenm, var)
# Concatenate months together
files = [filestr % (nm, month) for month in months]
var = atm.load_concat(files, nm, concat_dim='month')
filenm = filestr2 % nm
print('Saving to ' + filenm)
atm.save_nc(filenm, var)
# ----------------------------------------------------------------------
# EMFD
datadir = atm.homedir() + 'datastore/merra/daily/'
ds = xray.open_dataset(datadir + 'merra_uv200_40E-120E_60S-60N_1979.nc')
u = atm.squeeze(ds['U'])
v = atm.squeeze(ds['V'])
nroll = 7
u_tr = u - atm.rolling_mean(u, nroll, axis=0)
v_tr = v - atm.rolling_mean(v, nroll, axis=0)
emfd_tr, emfd_tr_x, emfd_tr_y = atm.divergence_spherical_2d(u_tr * u_tr,
u_tr * v_tr)
# ----------------------------------------------------------------------
# Lat-pres streamfunction
v = merra.read_daily('V', 1979, 7, days=range(1,6),
subset_dict={'lon' : (60, 100)})
v = v.mean(dim='TIME')
psi = atm.streamfunction(v)
psibar = psi.mean(dim='XDim')
plt.figure()
atm.contourf_latpres(psibar)
# ----------------------------------------------------------------------
# 01/14/2016 Plots for Simona
# Load data from compare-indices.py
keys = ['HOWI_100', 'OCI', 'SJKE', 'TT', 'WLH_MERRA_PRECIP_nroll7']
shortkeys = ['HOWI', 'OCI', 'SJKE', 'TT', 'WLH']
#shortkeys = [short[key] for key in keys]
years = index[keys[0]].year.values
onset = np.reshape(index[keys[0]].onset.values, (len(years), 1))
for key in keys[1:]:
ind = np.reshape(index[key].onset.values, (len(years), 1))
onset = np.concatenate([onset, ind], axis=1)
onset = pd.DataFrame(onset, index=years, columns=shortkeys)
# Add monsoon strength index
ind_comp = onset.copy()
ind_comp['JJAS_MFC'] = strength['MERRA_DET']
# Box plots of onset days
plt.figure()
onset.boxplot()
plt.xlabel('Onset Index')
plt.ylabel('Day of Year')
# ----------------------------------------------------------------------
# Model level MERRA data
varnm = 'T'
xsub = '[330:2:450]'
ysub = '[60:2:301]'
tsub = '[0:1:3]'
lev = 71
zsub = '[%d:1:%d]' % (lev, lev)
def datafile(year, mon, day, varnm, xsub, ysub, zsub, tsub):
url = ('http://goldsmr3.sci.gsfc.nasa.gov:80/opendap/MERRA/MAI6NVANA.5.2.0/'
'%d/%02d/MERRA100.prod.assim.inst6_3d_ana_Nv.%d%02d%02d.hdf'
'?%s%s%s%s%s,XDim%s,YDim%s,Height%s,TIME%s') % (year, mon, year, mon,
day, varnm, tsub, zsub, ysub, xsub, xsub, ysub, zsub, tsub)
return url
year = 1979
month = 4
#jdays = atm.season_days(atm.month_str(month), atm.isleap(year))
days = range(1, atm.days_this_month(year, month) + 1)
urls = [datafile(year, month, day, varnm, xsub, ysub, zsub, tsub) for day
in days]
savedir = atm.homedir() + '/datastore/merra/daily/'
savefile = '%smerra_%s_ML%02d_40-120E_60S-60N_%d%02d.nc' % (savedir, varnm, lev,
year, month)
var = atm.load_concat(urls, varnm, 'TIME')
print('Saving to ' + savefile)
atm.save_nc(savefile, var)
# for d, day in enumerate(days):
# url = datafile(year, month, day, varnm, xsub, ysub, zsub, tsub)
# print('Reading %s' % url)
# ds = xray.open_dataset(url)
# var_in = atm.squeeze(ds[varnm])
# # Daily mean:
# var_in = var_in.mean(dim='TIME')
# var_in.coords['Day'] = day
# if d == 0:
# var = var_in
# else:
# var = xray.concat([var, var_in], dim='Day')
#
#
# T = ds['T']
# T = T[0]
# ----------------------------------------------------------------------
#datadir = '/home/jennifer/datastore/merra/daily/'
datadir = '/home/jwalker/eady/datastore/merra/daily/'
filestr = 'merra_uv200_40E-120E_60S-60N_'
pathstr = datadir + filestr
years = np.arange(1979, 2015)
lon1, lon2 = 60, 100
#lat1, lat2 = 10, 30
lat1, lat2 = 10.625, 10.625
#lat1, lat2 = 0.625, 0.625
#lat1, lat2 = -5.625, -5.625
#lat1, lat2 = -10.625, -10.625
# ----------------------------------------------------------------------
def timeseries_allyears(pathstr, years, lat1, lat2, lon1, lon2):
"""Return the mean_over_geobox of daily data from selected years."""
def get_year(ds, year, lat1, lat2, lon1, lon2):
"""Get daily data for this year, calculate mean_over_geobox,
and add NaN if applicable so that non-leap and leap years
can be concatenated together."""
dsbar = xray.Dataset()
nan = np.nan*np.ones((1,1))
days = np.arange(1,367)
for nm in ds.data_vars:
print(nm)
var = atm.mean_over_geobox(ds[nm], lat1, lat2, lon1, lon2)
vals = var.values
if not atm.isleap(year):
vals = np.concatenate([vals, nan])
coords = {'Day' : days, 'Height': var.coords['Height'], 'Year': year}
dsbar[nm] = xray.DataArray(vals, name=var.name, dims=var.dims,
attrs=var.attrs, coords=coords)
return dsbar
for i, year in enumerate(years):
filename = '%s%d.nc' % (pathstr, year)
print('Loading ' + filename)
with xray.open_dataset(filename) as ds:
data = get_year(ds, year, lat1, lat2, lon1, lon2)
if i == 0:
dsbar = data
else:
dsbar = xray.concat([dsbar, data], dim='Year')
return dsbar
def plot_timeseries_year(dsbar, year, nroll=None):
iplot = {'U' : 1, 'V' : 2, 'rel_vort' : 3, 'Ro' : 4}
plt.figure(figsize=(12, 9))
plt.suptitle(year)
for nm in dsbar.data_vars:
var = dsbar[nm].sel(Year=year)
plt.subplot(2, 2, iplot[nm])
plt.plot(var.Day, var, color='gray')
if nroll is not None:
data = pd.rolling_mean(np.squeeze(var.values), nroll)
plt.plot(var.Day, data, color='black')
plt.title(nm)
# ----------------------------------------------------------------------
dsbar = timeseries_allyears(pathstr, years, lat1, lat2, lon1, lon2)
nroll = 10
for year in [1979, 1980, 1981, 1982]:
plot_timeseries_year(dsbar, year, nroll)
# ----------------------------------------------------------------------
| mit | -1,380,522,120,908,778,200 | 32.099217 | 93 | 0.564145 | false |
jtrobec/pants | src/python/pants/bin/goal_runner.py | 1 | 14497 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import sys
import pkg_resources
from pants.base.build_environment import get_scm, pants_version
from pants.base.build_file import FilesystemBuildFile
from pants.base.cmd_line_spec_parser import CmdLineSpecParser
from pants.base.exceptions import BuildConfigurationError
from pants.base.scm_build_file import ScmBuildFile
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.bin.extension_loader import load_plugins_and_backends
from pants.bin.plugin_resolver import PluginResolver
from pants.bin.repro import Reproducer
from pants.build_graph.build_file_address_mapper import BuildFileAddressMapper
from pants.build_graph.build_file_parser import BuildFileParser
from pants.build_graph.build_graph import BuildGraph
from pants.engine.round_engine import RoundEngine
from pants.goal.context import Context
from pants.goal.goal import Goal
from pants.goal.run_tracker import RunTracker
from pants.help.help_printer import HelpPrinter
from pants.java.nailgun_executor import NailgunProcessGroup
from pants.logging.setup import setup_logging
from pants.option.custom_types import list_option
from pants.option.global_options import GlobalOptionsRegistrar
from pants.option.options_bootstrapper import OptionsBootstrapper
from pants.reporting.report import Report
from pants.reporting.reporting import Reporting
from pants.source.source_root import SourceRootConfig
from pants.subsystem.subsystem import Subsystem
from pants.task.task import QuietTaskMixin
from pants.util.filtering import create_filters, wrap_filters
logger = logging.getLogger(__name__)
class OptionsInitializer(object):
"""Initializes global options and logging."""
def __init__(self, options_bootstrapper=None, working_set=None, exiter=sys.exit):
"""
    :param OptionsBootstrapper options_bootstrapper: An options bootstrapper instance (Optional).
:param pkg_resources.WorkingSet working_set: The working set of the current run as returned by
PluginResolver.resolve() (Optional).
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).
"""
self._options_bootstrapper = options_bootstrapper or OptionsBootstrapper()
self._working_set = working_set or PluginResolver(self._options_bootstrapper).resolve()
self._exiter = exiter
def _setup_logging(self, global_options):
"""Sets global logging."""
# N.B. quiet help says 'Squelches all console output apart from errors'.
level = 'ERROR' if global_options.quiet else global_options.level.upper()
setup_logging(level, log_dir=global_options.logdir)
# This routes warnings through our loggers instead of straight to raw stderr.
logging.captureWarnings(True)
def _register_options(self, subsystems, options):
"""Registers global options."""
# Standalone global options.
GlobalOptionsRegistrar.register_options_on_scope(options)
# Options for subsystems.
for subsystem in subsystems:
subsystem.register_options_on_scope(options)
# TODO(benjy): Should Goals or the entire goal-running mechanism be a Subsystem?
for goal in Goal.all():
# Register task options.
goal.register_options(options)
def _setup_options(self, options_bootstrapper, working_set):
bootstrap_options = options_bootstrapper.get_bootstrap_options()
global_bootstrap_options = bootstrap_options.for_global_scope()
if global_bootstrap_options.pants_version != pants_version():
raise BuildConfigurationError(
'Version mismatch: Requested version was {}, our version is {}.'.format(
global_bootstrap_options.pants_version, pants_version()
)
)
# Get logging setup prior to loading backends so that they can log as needed.
self._setup_logging(global_bootstrap_options)
# Add any extra paths to python path (e.g., for loading extra source backends).
for path in global_bootstrap_options.pythonpath:
sys.path.append(path)
pkg_resources.fixup_namespace_packages(path)
# Load plugins and backends.
plugins = global_bootstrap_options.plugins
backend_packages = global_bootstrap_options.backend_packages
build_configuration = load_plugins_and_backends(plugins, working_set, backend_packages)
# Now that plugins and backends are loaded, we can gather the known scopes.
known_scope_infos = [GlobalOptionsRegistrar.get_scope_info()]
# Add scopes for all needed subsystems via a union of all known subsystem sets.
subsystems = Subsystem.closure(
GoalRunner.subsystems() | Goal.subsystems() | build_configuration.subsystems()
)
for subsystem in subsystems:
known_scope_infos.append(subsystem.get_scope_info())
# Add scopes for all tasks in all goals.
for goal in Goal.all():
known_scope_infos.extend(filter(None, goal.known_scope_infos()))
# Now that we have the known scopes we can get the full options.
options = options_bootstrapper.get_full_options(known_scope_infos)
self._register_options(subsystems, options)
# Make the options values available to all subsystems.
Subsystem.set_options(options)
return options, build_configuration
def setup(self):
return self._setup_options(self._options_bootstrapper, self._working_set)
class ReportingInitializer(object):
"""Starts and provides logged info on the RunTracker and Reporting subsystems."""
def __init__(self, run_tracker=None, reporting=None):
self._run_tracker = run_tracker or RunTracker.global_instance()
self._reporting = reporting or Reporting.global_instance()
def setup(self):
"""Start up the RunTracker and log reporting details."""
report = self._reporting.initial_reporting(self._run_tracker)
self._run_tracker.start(report)
url = self._run_tracker.run_info.get_info('report_url')
if url:
self._run_tracker.log(Report.INFO, 'See a report at: {}'.format(url))
else:
self._run_tracker.log(Report.INFO, '(To run a reporting server: ./pants server)')
return self._run_tracker, self._reporting
class GoalRunnerFactory(object):
def __init__(self, root_dir, options, build_config, run_tracker, reporting, exiter=sys.exit):
"""
:param str root_dir: The root directory of the pants workspace (aka the "build root").
:param Options options: The global, pre-initialized Options instance.
:param BuildConfiguration build_config: A pre-initialized BuildConfiguration instance.
    :param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param Reporting reporting: The global, pre-initialized Reporting instance.
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).
"""
self._root_dir = root_dir
self._options = options
self._build_config = build_config
self._run_tracker = run_tracker
self._reporting = reporting
self._exiter = exiter
self._goals = []
self._targets = []
self._requested_goals = self._options.goals
self._target_specs = self._options.target_specs
self._help_request = self._options.help_request
self._global_options = options.for_global_scope()
self._tag = self._global_options.tag
self._fail_fast = self._global_options.fail_fast
self._spec_excludes = self._global_options.spec_excludes
self._explain = self._global_options.explain
self._kill_nailguns = self._global_options.kill_nailguns
self._build_file_type = self._get_buildfile_type(self._global_options.build_file_rev)
self._build_file_parser = BuildFileParser(self._build_config, self._root_dir)
self._address_mapper = BuildFileAddressMapper(self._build_file_parser, self._build_file_type)
self._build_graph = BuildGraph(self._address_mapper)
self._spec_parser = CmdLineSpecParser(
self._root_dir,
self._address_mapper,
spec_excludes=self._spec_excludes,
exclude_target_regexps=self._global_options.exclude_target_regexp
)
def _get_buildfile_type(self, build_file_rev):
"""Selects the BuildFile type for use in a given pants run."""
if build_file_rev:
ScmBuildFile.set_rev(build_file_rev)
ScmBuildFile.set_scm(get_scm())
return ScmBuildFile
else:
return FilesystemBuildFile
def _expand_goals(self, goals):
"""Check and populate the requested goals for a given run."""
for goal in goals:
if self._address_mapper.from_cache(self._root_dir, goal, must_exist=False).file_exists():
logger.warning("Command-line argument '{0}' is ambiguous and was assumed to be "
"a goal. If this is incorrect, disambiguate it with ./{0}.".format(goal))
if self._help_request:
help_printer = HelpPrinter(self._options)
result = help_printer.print_help()
self._exiter(result)
self._goals.extend([Goal.by_name(goal) for goal in goals])
def _expand_specs(self, specs, fail_fast):
"""Populate the BuildGraph and target list from a set of input specs."""
with self._run_tracker.new_workunit(name='parse', labels=[WorkUnitLabel.SETUP]):
def filter_for_tag(tag):
return lambda target: tag in map(str, target.tags)
tag_filter = wrap_filters(create_filters(self._tag, filter_for_tag))
for spec in specs:
for address in self._spec_parser.parse_addresses(spec, fail_fast):
self._build_graph.inject_address_closure(address)
target = self._build_graph.get_target(address)
if tag_filter(target):
self._targets.append(target)
def _is_quiet(self):
return any(goal.has_task_of_type(QuietTaskMixin) for goal in self._goals) or self._explain
def _setup_context(self):
with self._run_tracker.new_workunit(name='setup', labels=[WorkUnitLabel.SETUP]):
self._expand_goals(self._requested_goals)
self._expand_specs(self._target_specs, self._fail_fast)
# Now that we've parsed the bootstrap BUILD files, and know about the SCM system.
self._run_tracker.run_info.add_scm_info()
# Update the Reporting settings now that we have options and goal info.
invalidation_report = self._reporting.update_reporting(self._global_options,
self._is_quiet(),
self._run_tracker)
context = Context(options=self._options,
run_tracker=self._run_tracker,
target_roots=self._targets,
requested_goals=self._requested_goals,
build_graph=self._build_graph,
build_file_parser=self._build_file_parser,
address_mapper=self._address_mapper,
spec_excludes=self._spec_excludes,
invalidation_report=invalidation_report)
return context, invalidation_report
def setup(self):
context, invalidation_report = self._setup_context()
return GoalRunner(context=context,
goals=self._goals,
kill_nailguns=self._kill_nailguns,
run_tracker=self._run_tracker,
invalidation_report=invalidation_report,
exiter=self._exiter)
class GoalRunner(object):
"""Lists installed goals or else executes a named goal."""
Factory = GoalRunnerFactory
def __init__(self, context, goals, run_tracker, invalidation_report, kill_nailguns,
exiter=sys.exit):
"""
:param Context context: The global, pre-initialized Context as created by GoalRunnerFactory.
:param list[Goal] goals: The list of goals to act on.
    :param RunTracker run_tracker: The global, pre-initialized/running RunTracker instance.
:param InvalidationReport invalidation_report: An InvalidationReport instance (Optional).
:param bool kill_nailguns: Whether or not to kill nailguns after the run.
:param func exiter: A function that accepts an exit code value and exits (for tests, Optional).
"""
self._context = context
self._goals = goals
self._run_tracker = run_tracker
self._invalidation_report = invalidation_report
self._kill_nailguns = kill_nailguns
self._exiter = exiter
@classmethod
def subsystems(cls):
# Subsystems used outside of any task.
return {SourceRootConfig, Reporting, Reproducer, RunTracker}
def _execute_engine(self):
workdir = self._context.options.for_global_scope().pants_workdir
if not workdir.endswith('.pants.d'):
self._context.log.error('Pants working directory should end with \'.pants.d\', currently it is {}\n'
.format(workdir))
return 1
unknown_goals = [goal.name for goal in self._goals if not goal.ordered_task_names()]
if unknown_goals:
self._context.log.error('Unknown goal(s): {}\n'.format(' '.join(unknown_goals)))
return 1
engine = RoundEngine()
result = engine.execute(self._context, self._goals)
if self._invalidation_report:
self._invalidation_report.report()
return result
def run(self):
should_kill_nailguns = self._kill_nailguns
try:
result = self._execute_engine()
if result:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
except KeyboardInterrupt:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
# On ctrl-c we always kill nailguns, otherwise they might keep running
# some heavyweight compilation and gum up the system during a subsequent run.
should_kill_nailguns = True
raise
except Exception:
self._run_tracker.set_root_outcome(WorkUnit.FAILURE)
raise
finally:
# Must kill nailguns only after run_tracker.end() is called, otherwise there may still
# be pending background work that needs a nailgun.
if should_kill_nailguns:
# TODO: This is JVM-specific and really doesn't belong here.
# TODO: Make this more selective? Only kill nailguns that affect state?
# E.g., checkstyle may not need to be killed.
NailgunProcessGroup().killall()
return result
| apache-2.0 | 1,185,414,354,773,972,200 | 41.142442 | 106 | 0.697248 | false |
editxt/editxt | editxt/command/blame.py | 1 | 2967 | # -*- coding: utf-8 -*-
# EditXT
# Copyright 2007-2016 Daniel Miller <[email protected]>
#
# This file is part of EditXT, a programmer's text editor for Mac OS X,
# which can be found at http://editxt.org/.
#
# EditXT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EditXT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EditXT. If not, see <http://www.gnu.org/licenses/>.
import logging
import subprocess
from os.path import dirname, isabs, isfile, realpath
import editxt.config as config
import editxt.constants as const
from editxt.command.base import command, CommandError
from editxt.command.parser import CommandParser, File
from editxt.command.util import has_editor, threaded_exec_shell
log = logging.getLogger(__name__)
def file_path(editor=None):
return editor.file_path if editor is not None else None
@command(
arg_parser=CommandParser(
File("path", default=file_path),
),
config={
"git_path": config.String("git"),
},
is_enabled=has_editor)
def blame(editor, args):
"""Invoke `git gui blame` on file path
Example configuration:
command:
blame:
git_path: /opt/local/bin/git
"""
if not args:
from editxt.commands import show_command_bar
return show_command_bar(editor, "blame ")
if not (args.path and isfile(args.path)):
raise CommandError("cannot blame file without path")
git_path = editor.app.config.for_command("blame")["git_path"]
index = editor.selection[0]
line = "--line={}".format(editor.line_numbers[index])
command = [git_path, "gui", "blame", line, args.path]
output = []
def got_output(text, returncode):
if returncode is None:
output.append(text)
else:
if returncode:
if git_path == "git":
try:
command[0] = subprocess.check_output(
["which", "git"], universal_newlines=True).strip()
except subprocess.CalledProcessError:
pass
output.insert(0, " ".join(command) + "\n")
output.append("\nexit code: {}".format(returncode))
view.append_message("".join(output), msg_type=const.ERROR)
view.process_completed()
view = editor.get_output_view()
view.process = threaded_exec_shell(
command,
cwd=dirname(realpath(args.path)),
got_output=got_output,
kill_on_cancel=False,
)
| gpl-3.0 | -5,294,403,790,234,940,000 | 33.103448 | 78 | 0.640377 | false |
sgkang/PhysPropIP | codes/ZarcFit2015-11-12.py | 1 | 4144 |
import numpy as np
import sys
from PyQt4 import QtGui
from PyQt4.uic import loadUiType
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt4agg import (
FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
Ui_MainWindow, QMainWindow = loadUiType('ZarcFit2015-11-12.ui')
def Zarc(Rx, Qx, Px, freq):
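    # ZARC element: resistance Rx in parallel with a constant-phase element of
    # admittance Qx*(j*2*pi*freq)**Px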
Zx = np.zeros_like(freq, dtype=np.complex128)
Zx = 1./(1./Rx + Qx*(np.pi*2*freq*1j)**Px)
return Zx
def ZarcElectrode(Re, Qe, Pef, Pei, freq):
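    # Electrode element: same form as Zarc, but with separate exponents for the
    # frequency magnitude (Pef) and the imaginary unit (Pei)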
Ze = np.zeros_like(freq, dtype=np.complex128)
Ze = 1./(1./Re + Qe*(np.pi*2*freq)**Pef*(1j)**Pei)
return Ze
def CalculateImpedance(frequency, Rinf, Rh, Qh, Ph, Rl, Ql, Pl, Re, Qe, Pef, Pei):
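    # Total impedance: series sum of Rinf, the high- and low-frequency ZARCs and
    # the electrode element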
Zh = Zarc(Rh, Qh, Ph, frequency)
Zl = Zarc(Rl, Ql, Pl, frequency)
Ze = ZarcElectrode(Re, Qe, Pef, Pei, frequency)
Z = Rinf + Zh + Zl + Ze
return Z
class Main(QMainWindow, Ui_MainWindow):
    def __init__(ZarcFitWindow):
super(Main, ZarcFitWindow).__init__()
ZarcFitWindow.setupUi(ZarcFitWindow)
ZarcFitWindow.SliderRh.valueChanged.connect(ZarcFitWindow.updateSldOutRh)
ZarcFitWindow.SliderFh.valueChanged.connect(ZarcFitWindow.updateSldOutFh)
def updateSldOutRh(ZarcFitWindow, value):
Rh = 10**(value/100.)
ZarcFitWindow.SldOutRh.setText("{:.2E}".format(Rh))
Z = CalculateImpedance(frequency, Rinf, Rh, Qh, Ph, Rl, Ql, Pl, Re, Qe, Pef, Pei)
axCole.hold (False)
axCole.plot(Z.real, Z.imag, 'ro')
axCole.grid(True)
figCole.canvas.draw()
axBodeMagn.hold (False)
axBodeMagn.loglog(frequency, abs(Z), 'ro')
axBodeMagn.grid(True)
figBodeMagn.canvas.draw()
axBodePhase.hold (False)
axBodePhase.loglog(frequency, abs(np.angle(Z, deg=True)), 'ro')
axBodePhase.grid(True)
figBodePhase.canvas.draw()
def updateSldOutFh(ZarcFitWindow, value):
ZarcFitWindow.SldOutFh.setText("{:.2E}".format(10**(value/100.)))
def addmplCole(ZarcFitWindow, fig):
ZarcFitWindow.canvas = FigureCanvas(fig)
ZarcFitWindow.mplCole.addWidget(ZarcFitWindow.canvas)
ZarcFitWindow.canvas.draw()
# ZarcFitWindow.toolbar = NavigationToolbar(ZarcFitWindow.canvas,
# ZarcFitWindow, coordinates=True)
# ZarcFitWindow.addToolBar(ZarcFitWindow.toolbar)
def addmplBodeMagn(ZarcFitWindow, fig):
ZarcFitWindow.canvas = FigureCanvas(fig)
ZarcFitWindow.mplBodeMagn.addWidget(ZarcFitWindow.canvas)
ZarcFitWindow.canvas.draw()
def addmplBodePhase(ZarcFitWindow, fig):
ZarcFitWindow.canvas = FigureCanvas(fig)
ZarcFitWindow.mplBodePhase.addWidget(ZarcFitWindow.canvas)
ZarcFitWindow.canvas.draw()
if __name__ == '__main__':
Rinf = 1.E4
Rh = 1.E5
Qh = 2.E-10
Ph = 0.8
Rl = 5.E4
Ql = 1.E-5
Pl = 0.5
Re = 1.E10
Qe = 1.E-4
Pef = 0.5
Pei = 0.1
frequency = 10.**np.arange(6,-2,-0.2)
Z = CalculateImpedance(frequency, Rinf, Rh, Qh, Ph, Rl, Ql, Pl, Re, Qe, Pef, Pei)
figCole = Figure()
axCole = figCole.add_subplot(111)
axCole.plot(Z.real, Z.imag, 'ro')
axCole.grid(True)
axCole.invert_yaxis()
axCole.set_xlabel("Real [Ohm]")
axCole.set_ylabel("Imag [Ohm]")
figBodeMagn = Figure()
axBodeMagn = figBodeMagn.add_subplot(111)
axBodeMagn.loglog(frequency, abs(Z), 'ro')
axBodeMagn.grid(True)
axBodeMagn.invert_xaxis()
axBodeMagn.set_xlabel("Frequency [Hz]")
axBodeMagn.set_ylabel("Total Impedance [Ohm]")
figBodePhase = Figure()
axBodePhase = figBodePhase.add_subplot(111)
axBodePhase.loglog(frequency, abs(np.angle(Z, deg=True)), 'ro')
axBodePhase.grid(True)
axBodePhase.invert_xaxis()
axBodePhase.set_xlabel("Frequency [Hz]")
axBodePhase.set_ylabel("Phase [deg]")
app = QtGui.QApplication(sys.argv)
main = Main()
main.addmplCole(figCole)
# main.addmplBodeMagn(figBodeMagn)
# main.addmplBodePhase(figBodePhase)
main.show()
sys.exit(app.exec_()) | mit | 4,866,987,108,303,030,000 | 31.382813 | 89 | 0.645029 | false |
FabriceSalvaire/Musica | Musica/Audio/Spectrum.py | 1 | 7542 | ####################################################################################################
#
# Musica - A Music Theory Package for Python
# Copyright (C) 2017 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################
"""This module implements spectrum analysis based on Fast Fourier Transform.
References
Efficient Pitch Detection Techniques for Interactive Music
Patricio de la Cuadra, Aaron Master, Craig Sapp
Center for Computer Research in Music and Acoustics, Stanford University
"""
####################################################################################################
import math
import numpy as np
####################################################################################################
class Spectrum:
__window_function__ = {
'hann': np.hanning,
}
##############################################
@staticmethod
def next_power_of_two(x):
return 2**math.ceil(math.log(x)/math.log(2))
##############################################
@classmethod
def sample_for_resolution(cls, sampling_frequency, frequency_resolution, power_of_two=True):
number_of_samples = int(math.ceil(sampling_frequency / frequency_resolution))
if power_of_two:
number_of_samples = cls.next_power_of_two(number_of_samples)
return number_of_samples
##############################################
def __init__(self, sampling_frequency, values, window='hann'):
# *args, **kwargs
# Fixme: better way to handle ctor !
# args expect sampling_frequency, values
# kwargs; window=hann
# clone = kwargs.get('clone', None)
# if clone is not None:
# self._sampling_frequency = clone._sampling_frequency
# self._number_of_samples = clone._number_of_samples
# self._values = np.array(clone._values)
# self._fft = np.array(clone._fft)
# self._frequencies = np.array(clone._frequencies)
# else:
# if len(args) == 2:
# sampling_frequency, values = args
# elif len(args) == 1:
# sampling_frequency = args[0]
# values = kwargs['values']
# elif len(args) == 0:
# sampling_frequency = kwargs['sampling_frequency']
# values = kwargs['values']
# else:
# raise ValueError("require sampling_frequency and values args")
# window = kwargs.get('window', 'hann')
self._sampling_frequency = sampling_frequency
self._number_of_samples = values.size
self._values = np.array(values)
if window is not None:
window = self.__window_function__[window](self._number_of_samples)
values = values*window
self._fft = np.fft.rfft(values)
        # Given a window length N and a sample spacing dt, rfftfreq returns only
        # the non-negative frequencies:
        #   f = [0, 1, ..., N/2] / (dt*N) if N is even
        #   f = [0, 1, ..., (N-1)/2] / (dt*N) if N is odd
self._frequencies = np.fft.rfftfreq(self._number_of_samples, self.sample_spacing)
##############################################
    def clone(self):
        # Note: the keyword-based "clone" constructor path above is commented out
        # and __clone__ is not defined, so rebuild a copy from the stored raw
        # samples instead (assumes the default Hann window was used).
        return self.__class__(self._sampling_frequency, self._values)
##############################################
@property
def sampling_frequency(self):
return self._sampling_frequency
@property
def sample_spacing(self):
return 1 / self._sampling_frequency
@property
def number_of_samples(self):
return self._number_of_samples
@property
def duration(self):
# inverse of frequency_resolution
return self._number_of_samples / self._sampling_frequency
@property
def frequency_resolution(self):
return self._sampling_frequency / self._number_of_samples
##############################################
# time
@property
def values(self):
return self._values
@property
def frequencies(self):
return self._frequencies
@property
def fft(self):
return self._fft
##############################################
# Coefficients:
# A0**2
# Ak**2 / 4
#
# In a two-sided spectrum, half the energy is displayed at the positive frequency, and half the
# energy is displayed at the negative frequency.
#
# single sided : * 2 and discard half
#
# amplitude = magnitude(FFT) / N = sqrt(real**2 + imag**2) / N
# phase [rad] = arctan(imag/real)
#
#
# amplitude in rms = sqrt(2) * magnitude(FFT) / N for i > 0
# = magnitude(FFT) / N for i = 0
#
# power spectrum = FFT . FFT* / N**2
#
# dB = 10 log10(P/Pref)
# dB = 20 log10(A/Aref)
@property
def magnitude(self):
return np.abs(self._fft)
@property
def power(self):
return self.magnitude**2
@property
def decibel_power(self):
return 10 * np.log10(self.power)
##############################################
def hfs(self, number_of_products):
# , rebin=False
"""Compute the Harmonic Product Spectrum.
References
Noll, M. (1969).
Pitch determination of human speech by the harmonic product spectrum, the harmonic sum
spectrum, and a maximum likelihood estimate. In Proceedings of the Symposium on Computer
Processing ing Communications, pp. 779-797. Polytechnic Institute of Brooklyn.
"""
        spectrum = self.magnitude  # Fixme: **2 ???
# Fixme: ceil ?
size = int(math.ceil(spectrum.size / number_of_products))
hfs = spectrum[:size].copy()
for i in range(2, number_of_products + 1):
# if rebin:
# rebinned_spectrum = spectrum[::i][:size].copy()
            # for j in range(1, i):
# array = spectrum[j::i][:size]
# rebinned_spectrum[:array.size] += array
# rebinned_spectrum /= i
# hfs *= rebinned_spectrum # Fixme: wrong for upper bins
# else:
hfs *= spectrum[::i][:size]
# Fixme: return class ???
return self._frequencies[:size], hfs
##############################################
def h_dome(self, height):
"""Extract h-dome from spectrum using Mathematical Morphology.
Parameters
----------
height : int
Minimal height of the peaks
"""
# Fixme: just for test ...
values = np.array(self.decibel_power, dtype=np.int)
values = np.where(values >= 0, values, 0)
from Musica.Math.Morphomath import Function
function = Function(values).h_dome(height)
return function.values
| gpl-3.0 | 2,083,419,971,255,120,400 | 30.037037 | 100 | 0.522806 | false |
acmxrds/summer-2016 | helloworld/MNIST2Conv_Stat_Collect.py | 1 | 11236 | # coding=utf-8
__author__ = 'Abhineet Saxena'
"""
The code for the ACM XRDS Hello World! column collects summary statistics for the CNN constructed
from the architecture detailed in the Google TensorFlow MNIST Expert tutorial:
https://www.tensorflow.org/versions/r0.7/tutorials/mnist/pros/index.html
Note:
The summary collection ops for most of the layers (Conv. Layer 1, Conv. Layer 2 and the Softmax layer) have
been commented out because collecting summaries for all the layers at once places a significant computational
load on the CPU and can cripplingly slow down the machine while the script is running.
If you have a much better computing architecture than the one I use, you can certainly try running all the ops at once:
My Configuration:
~~~~~~~~~~~~~~~~~~~~~~~~~~~
> Model Name: Intel(R) Core(TM) i5-4210U CPU @ 1.70GHz
> No. of Processors: 3
> No. of CPU cores: 2
> Cache Size: 3072 KB
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instructions For Running TensorFlow:
To run the TensorBoard program and visualize the statistics, any one of the following commands needs to
be entered at the terminal and run:
>> tensorboard --logdir='/path/to/mnist_logs folder'
or
>> python tensorflow/tensorboard/tensorboard.py --logdir='path/to/mnist_logs folder'
(Replace the string after the ‘=’ sign above with the actual path to the folder, without the single quotes.)
Thereafter, the TensorBoard panel can then be accessed by visiting the following URL in any of your browsers.
http://0.0.0.0:6006/
"""
# The Imports
import tensorflow as tf
# We make use of the script provided by the TensorFlow team for reading-in and processing the data.
import input_data as inpt_d
# ##Function Declarations
def weight_variable(shape, arg_name=None):
"""
    A method that returns a tf.Variable initialised with values drawn from a truncated normal distribution.
:param shape: The shape of the desired output.
:return: tf.Variable
"""
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=arg_name)
def bias_variable(shape, arg_name=None):
"""
A method that creates a constant Tensor with the specified shape and a constant value of 0.1.
The bias value must be slightly positive to prevent neurons from becoming unresponsive or dead.
:param shape: The shape of the desired output.
:return: tf.Variable
"""
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=arg_name)
def conv2d(xvar, Wt_var, stride_arg=(1, 1, 1, 1), pad_arg='SAME'):
"""
Returns the Activation Map obtained by convolving the Weight matrix with the input matrix.
:param xvar: The Neural Input Matrix.
:param Wt_var: The Weight Matrix.
:param stride_arg: The Stride value, specified as a tuple.
:param pad_arg: The Padding Value. Can either be 'VALID' (padding disabled) or 'SAME' (padding-enabled).
:return: The Activation Map or the Output Volume.
"""
return tf.nn.conv2d(xvar, Wt_var, strides=[sval for sval in stride_arg], padding=pad_arg)
def max_pool_2x2(xvar):
"""
    Performs the max-pooling operation. Here, a default window size of 2x2 and stride values of (2, 2) are assumed.
:param xvar: The Input Volume to be max-pooled.
    :return: The max-pooled output.
"""
return tf.nn.max_pool(xvar, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
# Creating a Graph
new_graph = tf.Graph()
# Setting the Graph as the default Graph.
with new_graph.as_default():
# Instantiating an Interactive Session.
sess = tf.InteractiveSession()
# Placeholder for the Input image data.
xvar = tf.placeholder("float", shape=[None, 784], name="Input_Image")
# Placeholder for the Input image label.
y_var = tf.placeholder("float", shape=[None, 10], name="Input_Image_Label")
# Setting up the variable that receives the processed MNIST dataset.
mnist_data = inpt_d.read_data_sets('MNIST_data', one_hot=True)
# ######The First Convolutional Layer #######
# The Weight Matrix for the First Conv. Layer [28x28x32]. R=5, S=1, K=32 and P=2, The Input Channels: 1.
# It has been named for use in collecting stats.
Wt_mat_layer1 = weight_variable([5, 5, 1, 32], arg_name="Weights_Conv_Layer_1")
# The Bias vector for the first Conv. Layer instantiated.
bias_vec_layer1 = bias_variable([32], arg_name="Bias_Conv_Layer_1")
    # Reshapes the Image_Input into its 28x28 matrix form. -1 implies flattening the image along the first dimension.
x_image = tf.reshape(xvar, [-1, 28, 28, 1])
# Convolution operation performed with scope as Conv_Layer_1 to aid visualization.
with tf.name_scope("Conv_Layer_1") as scope_cv1:
output_conv1 = tf.nn.relu(conv2d(x_image, Wt_mat_layer1) + bias_vec_layer1)
pool_out_conv1 = max_pool_2x2(output_conv1)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
# Uncomment the following 3 lines for logging the outputs to summary op.
# Wt_Cv1_summ = tf.histogram_summary("Conv1_Weights", Wt_mat_layer1)
# Bs_Cv1_summ = tf.histogram_summary("Conv1_Bias", bias_vec_layer1)
# Amap_Cv1_summ = tf.histogram_summary("Acivation_Max-Pooled_Output_Conv1", pool_out_conv1)
# ######The Second Convolutional Layer #######
# Instantiates the Weight Matrix defined per neuron for the second Conv. Layer [14x14x64]. R=5, K=64, S=1, P=2.
# The Input channels: 32.
Wt_mat_layer2 = weight_variable([5, 5, 32, 64], arg_name="Weights_Conv_Layer_2")
bias_vec_layer2 = bias_variable([64], arg_name="Bias_Conv_Layer_2")
# Operation of the second Conv. layer. Input has been padded (default).
with tf.name_scope("Conv_Layer_2") as scope_cv2:
output_conv2 = tf.nn.relu(conv2d(pool_out_conv1, Wt_mat_layer2) + bias_vec_layer2)
pool_out_conv2 = max_pool_2x2(output_conv2)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
# Uncomment the following 3 lines for logging the outputs to summary op.
# Wt_Cv2_summ = tf.histogram_summary("Conv2_Weights", Wt_mat_layer2)
# Bs_Cv2_summ = tf.histogram_summary("Conv2_Bias", bias_vec_layer2)
# Amap_Cv2_summ = tf.histogram_summary("Acivation_Max-Pooled_Output_Conv2", pool_out_conv2)
# ######The First Fully Connected Layer #######
# Weights initialised for the first fully connected layer. The FC layer has 1024 neurons.
# The Output Volume from the previous layer has the structure 7x7x64.
Wt_fc_layer1 = weight_variable([7 * 7 * 64, 1024], arg_name="Weights_FC_Layer")
# Bias vector for the fully connected layer.
bias_fc1 = bias_variable([1024], arg_name="Bias_FC_Layer")
# The output matrix from 2nd Conv. layer reshaped to make it conducive to matrix multiply.
# -1 implies flattening the Tensor matrix along the first dimension.
pool_out_conv2_flat = tf.reshape(pool_out_conv2, [-1, 7*7*64])
with tf.name_scope("FC_Layer") as scope_fc:
output_fc1 = tf.nn.relu(tf.matmul(pool_out_conv2_flat, Wt_fc_layer1) + bias_fc1)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
Wt_FC_summ = tf.histogram_summary("FC_Weights", Wt_fc_layer1)
Bs_FC_summ = tf.histogram_summary("FC_Bias", bias_fc1)
Amap_FC_summ = tf.histogram_summary("Acivations_FC", output_fc1)
# ##### Dropout #######
# Placeholder for the Dropout probability.
keep_prob = tf.placeholder("float", name="Dropout_Probability")
# Performs the dropout op, where certain neurons are randomly disconnected and their outputs not considered.
with tf.name_scope("CNN_Dropout_Op") as scope_dropout:
h_fc1_drop = tf.nn.dropout(output_fc1, keep_prob)
# ##### SoftMax-Regression #######
W_fc2 = weight_variable([1024, 10], arg_name="Softmax_Reg_Weights")
b_fc2 = bias_variable([10], arg_name="Softmax_Reg_Bias")
# Performs the Softmax Regression op, computes the softmax probabilities assigned to each class.
with tf.name_scope("Softmax_Regression") as scope_softmax:
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
# Setting up the summary ops to collect the Weights, Bias and pool activation outputs.
# Uncomment the following 3 lines for logging the outputs to summary op.
# Wt_softmax_summ = tf.histogram_summary("Sfmax_Weights", Wt_mat_layer2)
# Bs_softmax_summ = tf.histogram_summary("Sfmax_Bias", bias_vec_layer2)
# Amap_softmax_summ = tf.histogram_summary("Acivations_Sfmax", y_conv)
# Cross-Entropy calculated.
with tf.name_scope("X_Entropy") as scope_xentrop:
cross_entropy = -tf.reduce_sum(y_var*tf.log(y_conv))
# Adding the scalar summary operation for capturing the cross-entropy.
ce_summ = tf.scalar_summary("Cross_Entropy", cross_entropy)
# Adam Optimizer gives the best performance among Gradient Descent Optimizers.
with tf.name_scope("Train") as scope_train:
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Calculating the Correct Prediction value.
with tf.name_scope("Test") as scope_test:
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_var, 1))
# The Bool tensor is converted or type-casted into float representation (1.s and 0s) and the mean for all the
# values is calculated.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
# Adding the scalar summary operation for capturing the Accuracy.
acc_summ = tf.scalar_summary("Accuracy", accuracy)
# Adds the ops to the Graph that perform Variable initializations.
# Merge all the summaries and write them out to /tmp/mnist_logs
merged = tf.merge_all_summaries()
summ_writer = tf.train.SummaryWriter("./mnist_logs", sess.graph_def)
sess.run(tf.initialize_all_variables())
# Training for 2000 iterations or Epochs.
for i in range(2000):
if i % 100 == 0:
# Feeds the feed_dict dictionary with values from the test set.
feed = {xvar: mnist_data.test.images, y_var: mnist_data.test.labels, keep_prob: 1.0}
# The run method executes both the ops, i.e. 'merged' for merging the summaries and writing them
# and the 'accuracy' op. for calculating the accuracy over the test set. Both are executed every
# 100th iteration.
result = sess.run([merged, accuracy], feed_dict=feed)
# Summary string output obtained after the execution of 'merged' op.
summary_str = result[0]
# Accuracy value output obtained after the execution of 'accuracy' op.
acc = result[1]
# Adding the summary string and writing the output to the log-directory.
summ_writer.add_summary(summary_str, i)
print("Accuracy at step %s: %s" % (i, acc))
else:
# Returns the next 50 images and their labels from the training set.
batch = mnist_data.train.next_batch(50)
# Train the CNN with the dropout probability of neurons being 0.5 for every iteration.
train_step.run(feed_dict={xvar: batch[0], y_var: batch[1], keep_prob: 0.5})
| gpl-3.0 | 4,570,019,302,829,004,000 | 48.048035 | 118 | 0.689548 | false |
Vito2015/tcc3-portal | tcc3portal/tcc_core/middleware.py | 1 | 2480 | # coding:utf-8
"""
tcc3portal.tcc_core.middleware
~~~~~~~~~~~~~~~~~~~~~~~~~
tcc3portal tcc_core middleware module.
:copyright: (c) 2015 by Vito.
:license: GNU, see LICENSE for more details.
"""
from werkzeug.urls import url_decode
class HTTPMethodOverrideMiddleware(object):
"""The HTTPMethodOverrideMiddleware middleware implements the hidden HTTP
method technique. Not all web browsers support every HTTP method, such as
DELETE and PUT. This middleware class allows clients to provide a method
override parameter via an HTTP header value or a querystring parameter. This
middleware will look for the header parameter first followed by the
querystring. The default HTTP header name is `X-HTTP-METHOD-OVERRIDE` and
the default querystring parameter name is `__METHOD__`. These can be changed
via the constructor parameters `header_name` and `querystring_param`
respectively. Additionally, a list of allowed HTTP methods may be specified
via the `allowed_methods` constructor parameter. The default allowed methods
are GET, HEAD, POST, DELETE, PUT, PATCH, and OPTIONS.
"""
bodyless_methods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE'])
def __init__(self, app, header_name=None,
querystring_param=None, allowed_methods=None):
header_name = header_name or 'X-HTTP-METHOD-OVERRIDE'
self.app = app
self.header_name = 'HTTP_' + header_name.replace('-', '_')
self.querystring_param = querystring_param or '__METHOD__'
self.allowed_methods = frozenset(
allowed_methods or ['GET', 'HEAD', 'POST', 'DELETE', 'PUT', 'PATCH', 'OPTIONS'])
def _get_from_querystring(self, environ):
if self.querystring_param in environ.get('QUERY_STRING', ''):
args = url_decode(environ['QUERY_STRING'])
return args.get(self.querystring_param)
return None
def _get_method_override(self, environ):
return environ.get(self.header_name, None) or \
self._get_from_querystring(environ) or ''
def __call__(self, environ, start_response):
method = self._get_method_override(environ).upper()
if method in self.allowed_methods:
method = method.encode('ascii', 'replace')
environ['REQUEST_METHOD'] = method
if method in self.bodyless_methods:
environ['CONTENT_LENGTH'] = '0'
return self.app(environ, start_response)
| gpl-2.0 | -5,644,159,451,796,110,000 | 40.333333 | 92 | 0.660887 | false |
mattasmith/SCHEMA-RASPP | schemaenergy.py | 1 | 8117 | #! /usr/local/bin/python
"""Script for calculating SCHEMA energies.
******************************************************************
Copyright (C) 2005 Allan Drummond, California Institute of Technology
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*******************************************************************
SCHEMA was developed in the laboratory of Frances H. Arnold at the California Institute of Technology.
References:
Voigt, C. et al., "Protein building blocks preserved by recombination," Nature Structural Biology 9(7):553-558 (2002).
Meyer, M. et al., "Library analysis of SCHEMA-guided recombination," Protein Science 12:1686-1693 (2003).
Otey, C. et al., "Functional evolution and structural conservation in chimeric cytochromes P450: Calibrating a structure-guided approach," Chemistry & Biology 11:1-20 (2004)
Silberg, J. et al., "SCHEMA-guided protein recombination," Methods in Enzymology 388:35-42 (2004).
Endelman, J. et al., "Site-directed protein recombination as a shortest-path problem," Protein Engineering, Design & Selection 17(7):589-594 (2005).
"""
import sys, string, os
import pdb, schema
ARG_PRINT_E = 'E'
ARG_PRINT_M = 'm'
ARG_PDB_ALIGNMENT_FILE = 'pdbal'
ARG_PARENT_INDEX = 'p'
ARG_CHIMERAS = 'chim'
ARG_CROSSOVER_FILE = 'xo'
ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE = 'msa'
ARG_CONTACT_FILE = 'con'
ARG_RANDOM_SEED = 'seed'
ARG_OUTPUT_FILE = 'o'
ARG_HELP = 'help'
def parse_arguments(args):
# Turn linear arguments into a dictionary of (option, [values,...]) pairs
arg_dict = {}
key = None
for arg in args[1:]:
if arg[0] == '-':
key = arg[1:]
arg_dict[key] = None
else:
if arg_dict.has_key(key):
if arg_dict[key]:
if type(arg_dict[key]) is list:
arg_dict[key] = arg_dict[key]+[arg]
else:
arg_dict[key] = [arg_dict[key],arg]
else:
arg_dict[key] = arg
else:
arg_dict[key] = arg
return arg_dict
def print_usage(args):
print 'Usage: python', args[0].split(os.path.sep)[-1], ' [options]'
print 'Options:\n', \
'\t-%s <alignment file>\n' % ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE, \
'\t-%s <contact file>\n' % ARG_CONTACT_FILE, \
'\t-%s <crossover file>\n' % ARG_CROSSOVER_FILE, \
'\t[-%s <chimera list>]\n' % ARG_CHIMERAS, \
'\t[-%s]\n' % ARG_PRINT_E, \
'\t[-%s]\n' % ARG_PRINT_M, \
'\t[-%s <output file>]' % ARG_OUTPUT_FILE
def confirm_arguments(arg_dict):
# Are arguments okay?
res = True
arg_keys = arg_dict.keys()
try:
if len(arg_keys) == 0:
res = False
return
if not ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE in arg_keys:
print " You must provide a library file (-%s <file>)" % ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE
res = False
elif not os.path.isfile(arg_dict[ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE]):
print " Can't find library file %s" % arg_dict[ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE]
res = False
if not ARG_CROSSOVER_FILE in arg_keys:
print " You must provide a crossover file (-%s <file>)" % ARG_CROSSOVER_FILE
res = False
elif not os.path.isfile(arg_dict[ARG_CROSSOVER_FILE]):
print " Can't find crossover file %s" % arg_dict[ARG_CROSSOVER_FILE]
res = False
if not ARG_CONTACT_FILE in arg_keys:
print " You must provide a contact file (-%s <file>)" % ARG_CONTACT_FILE
res = False
elif not os.path.isfile(arg_dict[ARG_CONTACT_FILE]):
print " Can't find contact file %s" % arg_dict[ARG_CONTACT_FILE]
res = False
if not (arg_dict.has_key(ARG_PRINT_E) or arg_dict.has_key(ARG_PRINT_M)):
print " No output specified; use -E to print disruption and/or -m to print mutation"
res = False
except Exception, e:
raise e
res = False
return res
def outputEnergies(chimera_blocks, contacts, fragments, parents, output_file, output_string, print_E, print_m):
if not schema.checkChimera(chimera_blocks, fragments, parents):
output_file.write("# %s is not a valid chimera\n" % chimera_blocks)
return
output_vars = [chimera_blocks]
E = None
m = None
if print_E:
E = schema.getChimeraDisruption(chimera_blocks, contacts, fragments, parents)
output_vars = output_vars + [E]
if print_m:
m = schema.getChimeraShortestDistance(chimera_blocks, fragments, parents)
output_vars = output_vars + [m]
#print output_vars
output_file.write(output_string % tuple(output_vars))
return (E,m)
def main(args):
arg_dict = parse_arguments(args)
if not confirm_arguments(arg_dict):
if args[0].split(os.path.sep)[-1] == "schemaenergy.py":
print_usage(args)
return
# Flags and values
print_E = False
print_m = False
output_file = sys.stdout
# Inputs:
# The alignment/fragment file name.
msa_file = arg_dict[ARG_MULTIPLE_SEQUENCE_ALIGNMENT_FILE]
if arg_dict.has_key(ARG_PRINT_E):
print_E = True
if arg_dict.has_key(ARG_PRINT_M):
print_m = True
# Read the alignment file to create a list of parents.
# The parents will appear in the list in the order in which they appear in the file.
parent_list = schema.readMultipleSequenceAlignmentFile(file(msa_file, 'r'))
parents = [p for (k,p) in parent_list]
crossovers = schema.readCrossoverFile(file(arg_dict[ARG_CROSSOVER_FILE], 'r'))
fragments = schema.getFragments(crossovers, parents[0])
# Get the contacts
pdb_contacts = schema.readContactFile(file(arg_dict[ARG_CONTACT_FILE], 'r'))
contacts = schema.getSCHEMAContactsWithCrossovers(pdb_contacts, parents, crossovers)
if arg_dict.has_key(ARG_OUTPUT_FILE):
output_file = file(arg_dict[ARG_OUTPUT_FILE], 'w')
# Now, what does the user want?
output_string = '%s'
output_file.write('# chimera')
if print_E:
output_string += '\t%d'
output_file.write('\tE')
if print_m:
output_string += '\t%d'
output_file.write('\tm')
output_string += '\n'
output_file.write('\n')
if arg_dict.has_key(ARG_CHIMERAS): # Print values for chimeras
chimeras = arg_dict[ARG_CHIMERAS]
# Could be a) a chimera, b) a list of chimeras, or c) a file of chimeras.
if type(chimeras) is list:
# It's a list of chimeras
for chimera_blocks in chimeras:
outputEnergies(chimera_blocks, contacts, fragments, parents, output_file, output_string, print_E, print_m)
elif os.path.isfile(chimeras):
# It's a file of chimeras
for line in file(chimeras,'r').readlines():
chimera_blocks = line.strip()
outputEnergies(chimera_blocks, contacts, fragments, parents, output_file, output_string, print_E, print_m)
else:
# It's a single chimera sequence
chimera_blocks = chimeras
outputEnergies(chimera_blocks, contacts, fragments, parents, output_file, output_string, print_E, print_m)
else:
# Enumerates all possible chimeras and their disruption and mutation values.
p = len(parents)
n = len(fragments)
Es = []
ms = []
for i in xrange(len(parents)**len(fragments)):
# The next two lines turn i into a chimera block pattern
# (e.g., 0 -> '11111111', 1 -> '11111112', 2 -> '11111113'...)
n2c = schema.base(i,p)
chimera_blocks = ''.join(['1']*(n-len(n2c))+['%d'%(int(x)+1,) for x in n2c])
(E, m) = outputEnergies(chimera_blocks, contacts, fragments, parents, output_file, output_string, print_E, print_m)
if (print_E):
Es.append(E)
if (print_m):
ms.append(m)
if (print_E):
mean_str = "# Average disruption <E> = %1.4f\n" % schema.mean(Es)
output_file.write(mean_str)
if (print_m):
mean_str = "# Average mutation <m> = %1.4f\n" % schema.mean(ms)
output_file.write(mean_str)
if arg_dict.has_key(ARG_OUTPUT_FILE):
output_file.close()
def main_wrapper():
main(sys.argv)
main_wrapper()
| gpl-3.0 | 2,953,055,289,161,466,000 | 34.138528 | 173 | 0.67488 | false |
phorust/howmanygiven | 61a/trends/graphics.py | 1 | 7390 | """The graphics module implements a simple GUI library."""
import sys
import math
try:
import tkinter
except Exception as e:
print('Could not load tkinter: ' + str(e))
FRAME_TIME = 1/30
class Canvas(object):
"""A Canvas object supports drawing and animation primitives.
draw_* methods return the id number of a shape object in the underlying Tk
object. This id can be passed to move_* and edit_* methods.
Canvas is a singleton; only one Canvas instance can be created.
"""
_instance = None
def __init__(self, width=1024, height=768, title='', color='White', tk=None):
# Singleton enforcement
if Canvas._instance is not None:
raise Exception('Only one canvas can be instantiated.')
Canvas._instance = self
# Attributes
self.color = color
self.width = width
self.height = height
# Root window
self._tk = tk or tkinter.Tk()
self._tk.protocol('WM_DELETE_WINDOW', sys.exit)
self._tk.title(title or 'Graphics Window')
self._tk.bind('<Button-1>', self._click)
self._click_pos = None
# Canvas object
self._canvas = tkinter.Canvas(self._tk, width=width, height=height)
self._canvas.pack()
self._draw_background()
self._canvas.update()
self._images = dict()
def clear(self, shape='all'):
"""Clear all shapes, text, and images."""
self._canvas.delete(shape)
if shape == 'all':
self._draw_background()
self._canvas.update()
def draw_polygon(self, points, color='Black', fill_color=None, filled=1, smooth=0, width=1):
"""Draw a polygon and return its tkinter id.
points -- a list of (x, y) pairs encoding pixel positions
"""
if fill_color == None:
fill_color = color
if filled == 0:
fill_color = ""
return self._canvas.create_polygon(flattened(points), outline=color, fill=fill_color,
smooth=smooth, width=width)
def draw_circle(self, center, radius, color='Black', fill_color=None, filled=1, width=1):
"""Draw a cirlce and return its tkinter id.
center -- an (x, y) pair encoding a pixel position
"""
if fill_color == None:
fill_color = color
if filled == 0:
fill_color = ""
x0, y0 = [c - radius for c in center]
x1, y1 = [c + radius for c in center]
return self._canvas.create_oval(x0, y0, x1, y1, outline=color, fill=fill_color, width=width)
def draw_image(self, pos, image_file=None, scale=1, anchor=tkinter.NW):
"""Draw an image from a file and return its tkinter id."""
key = (image_file, scale)
if key not in self._images:
image = tkinter.PhotoImage(file=image_file)
if scale >= 1:
image = image.zoom(int(scale))
else:
image = image.subsample(int(1/scale))
self._images[key] = image
image = self._images[key]
x, y = pos
return self._canvas.create_image(x, y, image=image, anchor=anchor)
def draw_text(self, text, pos, color='Black', font='Arial',
size=12, style='normal', anchor=tkinter.NW):
"""Draw text and return its tkinter id."""
x, y = pos
font = (font, str(size), style)
return self._canvas.create_text(x, y, fill=color, text=text, font=font, anchor=anchor)
def edit_text(self, id, text=None, color=None, font=None, size=12,
style='normal'):
"""Edit the text, color, or font of an existing text object."""
if color is not None:
self._canvas.itemconfigure(id, fill=color)
if text is not None:
self._canvas.itemconfigure(id, text=text)
if font is not None:
self._canvas.itemconfigure(id, font=(font, str(size), style))
def animate_shape(self, id, duration, points_fn, frame_count=0):
"""Animate an existing shape over points."""
max_frames = duration // FRAME_TIME
points = points_fn(frame_count)
self._canvas.coords(id, flattened(points))
if frame_count < max_frames:
def tail():
"""Continues the animation at the next frame."""
self.animate_shape(id, duration, points_fn, frame_count + 1)
self._tk.after(int(FRAME_TIME * 1000), tail)
def slide_shape(self, id, end_pos, duration, elapsed=0):
"""Slide an existing shape to end_pos."""
points = paired(self._canvas.coords(id))
start_pos = points[0]
max_frames = duration // FRAME_TIME
def points_fn(frame_count):
completed = frame_count / max_frames
offset = [(e - s) * completed for s, e in zip(start_pos, end_pos)]
return [shift_point(p, offset) for p in points]
self.animate_shape(id, duration, points_fn)
def wait_for_click(self, seconds=0):
"""Return (position, elapsed) pair of click position and elapsed time.
position: (x,y) pixel position of click
elapsed: milliseconds elapsed since call
seconds: maximum number of seconds to wait for a click
If there is still no click after the given time, return (None, seconds).
"""
elapsed = 0
while elapsed < seconds or seconds == 0:
if self._click_pos is not None:
pos = self._click_pos
self._click_pos = None
return pos, elapsed
self._sleep(FRAME_TIME)
elapsed += FRAME_TIME
return None, elapsed
def _draw_background(self):
w, h = self.width - 1, self.height - 1
corners = [(0,0), (0, h), (w, h), (w, 0)]
self.draw_polygon(corners, self.color, fill_color=self.color, filled=True, smooth=False)
def _click(self, event):
self._click_pos = (event.x, event.y)
def _sleep(self, seconds):
self._tk.update_idletasks()
self._tk.after(int(1000 * seconds), self._tk.quit)
self._tk.mainloop()
def flattened(points):
"""Return a flat list of coordinates from a list of pairs."""
coords = list()
[coords.extend(p) for p in points]
return tuple(coords)
def paired(coords):
"""Return a list of pairs from a flat list of coordinates."""
assert len(coords) % 2 == 0, 'Coordinates are not paired.'
points = []
x = None
for elem in coords:
if x is None:
x = elem
else:
points.append((x, elem))
x = None
return points
def translate_point(point, angle, distance):
"""Translate a point a distance in a direction (angle)."""
x, y = point
return (x + math.cos(angle) * distance, y + math.sin(angle) * distance)
def shift_point(point, offset):
"""Shift a point by an offset."""
x, y = point
dx, dy = offset
return (x + dx, y + dy)
def rectangle_points(pos, width, height):
"""Return the points of a rectangle starting at pos."""
x1, y1 = pos
x2, y2 = width + x1, height + y1
return [(x1, y1), (x1, y2), (x2, y2), (x2, y1)]
def format_color(r, g, b):
"""Format a color as a string.
r, g, b -- integers from 0 to 255
"""
return '#{0:02x}{1:02x}{2:02x}'.format(int(r * 255), int(g * 255), int(b * 255))
| mit | 1,031,208,024,837,745,900 | 34.190476 | 100 | 0.579973 | false |
michel-rodrigues/forum | source/comments/models.py | 1 | 1226 | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import models
class CommentManager(models.Manager):
def filter_by_instance(self, instance):
"""
Retorna os comentários de uma instancia de Post
"""
content_type = ContentType.objects.get_for_model(instance.__class__)
obj_id = instance.id
queryset = super(CommentManager, self).filter(
content_type=content_type,
object_id=obj_id
)
return queryset
class Comment(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
content = models.TextField(default='')
timestamp = models.DateTimeField(auto_now_add=True)
objects = CommentManager()
class Meta:
ordering = ['-timestamp']
def __str__(self):
return str(self.user.username)
| gpl-3.0 | -4,633,203,222,845,435,000 | 31.236842 | 76 | 0.666939 | false |
actlea/TopicalCrawler | TopicalCrawl/TopicalCrawl/TopicalCrawl/spiders/apprentice.py | 1 | 2542 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
"""
@author: actlea
@file: apprentice.py
@time: 16-3-19 下午2:28
@description:
"""
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.http import Request
from TopicalCrawl.items import ResponseItem
from TopicalCrawl.url import *
from TopicalCrawl.util import Random
from TopicalCrawl.classifier.multiclassifier import MultiClassifier
from TopicalCrawl.htmlParse import html2words
"""
class Apprentice(CrawlSpider):
name = 'apprentice'
headers = {
'Accept':'text/html',
'User-Agent': Random.random_header()
}
allowed_domains=[
# 'sports.sohu.com',
# 'hupu.com',
# 'sports.ifeng.com',
# 'sports.qq.com',
'sports.sina.com.cn'
]
# start_urls = [
# 'http://sports.sohu.com/',
# 'http://www.hupu.com/',
# 'http://sports.ifeng.com/',
# 'http://sports.qq.com/',
# 'http://sports.sina.com.cn/'
# ]
start_urls = ['http://sports.sina.com.cn/g/championsleague/']
maxPageNumbers = 2000
count = 0 #page has been download
rules = (
Rule(SgmlLinkExtractor(unique=True), callback='parse', follow=False),
)
print '-----------load supervisior ----------------'
supervisior = MultiClassifier('test-zh-logreg')
model_path = '/mnt/UbutunShare/TopicalCrawl/TopicalCrawl/classifier'
supervisior.load(path=model_path)
def parse(self, response):
item = ResponseItem()
item['response'] = response
item['count'] = self.count
yield item
self.count += 1
if self.count > self.maxPageNumbers:
return
#深度优先的爬虫
# for url in collect_urls(response.body, response.url, response.encoding):
# if is_url_visited(url, visited_url_set):
# continue
# yield Request(url, callback=self.parse, headers=self.headers)
for urlitem in get_link_word_by_pair(response.body. response.url , response.encoding):
url = urlitem['url']
label, priority = urlitem['label'], urlitem['interestness']
priority = priority*1000
if is_url_visited(url, visited_url_set) or int(label)==-1:
continue
req = Request(url, callback=self.parse, headers=self.headers, priority=priority)
req.meta['link'] = urlitem
yield req
"""
if __name__ == '__main__':
pass
| gpl-3.0 | 6,758,932,857,458,104,000 | 27.681818 | 94 | 0.606181 | false |
opesci/devito | tests/test_interpolation.py | 1 | 19299 | from math import sin, floor
import numpy as np
import pytest
from devito import (Grid, Operator, Dimension, SparseFunction, SparseTimeFunction,
Function, TimeFunction,
PrecomputedSparseFunction, PrecomputedSparseTimeFunction,
MatrixSparseTimeFunction)
from devito.symbolics import FLOAT
from examples.seismic import (demo_model, TimeAxis, RickerSource, Receiver,
AcquisitionGeometry)
from examples.seismic.acoustic import AcousticWaveSolver
import scipy.sparse
def unit_box(name='a', shape=(11, 11), grid=None):
"""Create a field with value 0. to 1. in each dimension"""
grid = grid or Grid(shape=shape)
a = Function(name=name, grid=grid)
dims = tuple([np.linspace(0., 1., d) for d in shape])
a.data[:] = np.meshgrid(*dims)[1]
return a
def unit_box_time(name='a', shape=(11, 11)):
"""Create a field with value 0. to 1. in each dimension"""
grid = Grid(shape=shape)
a = TimeFunction(name=name, grid=grid, time_order=1)
dims = tuple([np.linspace(0., 1., d) for d in shape])
a.data[0, :] = np.meshgrid(*dims)[1]
a.data[1, :] = np.meshgrid(*dims)[1]
return a
def points(grid, ranges, npoints, name='points'):
"""Create a set of sparse points from a set of coordinate
ranges for each spatial dimension.
"""
points = SparseFunction(name=name, grid=grid, npoint=npoints)
for i, r in enumerate(ranges):
points.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
return points
def time_points(grid, ranges, npoints, name='points', nt=10):
"""Create a set of sparse points from a set of coordinate
ranges for each spatial dimension.
"""
points = SparseTimeFunction(name=name, grid=grid, npoint=npoints, nt=nt)
for i, r in enumerate(ranges):
points.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
return points
def a(shape=(11, 11)):
grid = Grid(shape=shape)
a = Function(name='a', grid=grid)
xarr = np.linspace(0., 1., shape[0])
yarr = np.linspace(0., 1., shape[1])
a.data[:] = np.meshgrid(xarr, yarr)[1]
return a
def at(shape=(11, 11)):
grid = Grid(shape=shape)
a = TimeFunction(name='a', grid=grid)
xarr = np.linspace(0., 1., shape[0])
yarr = np.linspace(0., 1., shape[1])
a.data[:] = np.meshgrid(xarr, yarr)[1]
return a
def custom_points(grid, ranges, npoints, name='points'):
"""Create a set of sparse points from a set of coordinate
ranges for each spatial dimension.
"""
scale = Dimension(name="scale")
dim = Dimension(name="dim")
points = SparseFunction(name=name, grid=grid, dimensions=(scale, dim),
shape=(3, npoints), npoint=npoints)
for i, r in enumerate(ranges):
points.coordinates.data[:, i] = np.linspace(r[0], r[1], npoints)
return points
def precompute_linear_interpolation(points, grid, origin):
""" Sample precompute function that, given point and grid information
precomputes gridpoints and interpolation coefficients according to a linear
scheme to be used in PrecomputedSparseFunction.
"""
gridpoints = [tuple(floor((point[i]-origin[i])/grid.spacing[i])
for i in range(len(point))) for point in points]
interpolation_coeffs = np.zeros((len(points), 2, 2))
for i, point in enumerate(points):
for d in range(grid.dim):
interpolation_coeffs[i, d, 0] = ((gridpoints[i][d] + 1)*grid.spacing[d] -
point[d])/grid.spacing[d]
interpolation_coeffs[i, d, 1] = (point[d]-gridpoints[i][d]*grid.spacing[d])\
/ grid.spacing[d]
return gridpoints, interpolation_coeffs
def test_precomputed_interpolation():
""" Test interpolation with PrecomputedSparseFunction which accepts
precomputed values for interpolation coefficients
"""
shape = (101, 101)
points = [(.05, .9), (.01, .8), (0.07, 0.84)]
origin = (0, 0)
grid = Grid(shape=shape, origin=origin)
r = 2 # Constant for linear interpolation
# because we interpolate across 2 neighbouring points in each dimension
def init(data):
for i in range(data.shape[0]):
for j in range(data.shape[1]):
data[i, j] = sin(grid.spacing[0]*i) + sin(grid.spacing[1]*j)
return data
m = Function(name='m', grid=grid, initializer=init, space_order=0)
gridpoints, interpolation_coeffs = precompute_linear_interpolation(points,
grid, origin)
sf = PrecomputedSparseFunction(name='s', grid=grid, r=r, npoint=len(points),
gridpoints=gridpoints,
interpolation_coeffs=interpolation_coeffs)
eqn = sf.interpolate(m)
op = Operator(eqn)
op()
expected_values = [sin(point[0]) + sin(point[1]) for point in points]
assert(all(np.isclose(sf.data, expected_values, rtol=1e-6)))
def test_precomputed_interpolation_time():
""" Test interpolation with PrecomputedSparseFunction which accepts
precomputed values for interpolation coefficients, but this time
with a TimeFunction
"""
shape = (101, 101)
points = [(.05, .9), (.01, .8), (0.07, 0.84)]
origin = (0, 0)
grid = Grid(shape=shape, origin=origin)
r = 2 # Constant for linear interpolation
# because we interpolate across 2 neighbouring points in each dimension
u = TimeFunction(name='u', grid=grid, space_order=0, save=5)
for it in range(5):
u.data[it, :] = it
gridpoints, interpolation_coeffs = precompute_linear_interpolation(points,
grid, origin)
sf = PrecomputedSparseTimeFunction(name='s', grid=grid, r=r, npoint=len(points),
nt=5, gridpoints=gridpoints,
interpolation_coeffs=interpolation_coeffs)
assert sf.data.shape == (5, 3)
eqn = sf.interpolate(u)
op = Operator(eqn)
op(time_m=0, time_M=4)
for it in range(5):
assert np.allclose(sf.data[it, :], it)
@pytest.mark.parametrize('shape, coords', [
((11, 11), [(.05, .9), (.01, .8)]),
((11, 11, 11), [(.05, .9), (.01, .8), (0.07, 0.84)])
])
def test_interpolate(shape, coords, npoints=20):
"""Test generic point interpolation testing the x-coordinate of an
abitrary set of points going across the grid.
"""
a = unit_box(shape=shape)
p = points(a.grid, coords, npoints=npoints)
xcoords = p.coordinates.data[:, 0]
expr = p.interpolate(a)
Operator(expr)(a=a)
assert np.allclose(p.data[:], xcoords, rtol=1e-6)
@pytest.mark.parametrize('shape, coords', [
((11, 11), [(.05, .9), (.01, .8)]),
((11, 11, 11), [(.05, .9), (.01, .8), (0.07, 0.84)])
])
def test_interpolate_cumm(shape, coords, npoints=20):
"""Test generic point interpolation testing the x-coordinate of an
abitrary set of points going across the grid.
"""
a = unit_box(shape=shape)
p = points(a.grid, coords, npoints=npoints)
xcoords = p.coordinates.data[:, 0]
p.data[:] = 1.
expr = p.interpolate(a, increment=True)
Operator(expr)(a=a)
assert np.allclose(p.data[:], xcoords + 1., rtol=1e-6)
@pytest.mark.parametrize('shape, coords', [
((11, 11), [(.05, .9), (.01, .8)]),
((11, 11, 11), [(.05, .9), (.01, .8), (0.07, 0.84)])
])
def test_interpolate_time_shift(shape, coords, npoints=20):
"""Test generic point interpolation testing the x-coordinate of an
abitrary set of points going across the grid.
This test verifies the optional time shifting for SparseTimeFunctions
"""
a = unit_box_time(shape=shape)
p = time_points(a.grid, coords, npoints=npoints, nt=10)
xcoords = p.coordinates.data[:, 0]
p.data[:] = 1.
expr = p.interpolate(a, u_t=a.indices[0]+1)
Operator(expr)(a=a)
assert np.allclose(p.data[0, :], xcoords, rtol=1e-6)
p.data[:] = 1.
expr = p.interpolate(a, p_t=p.indices[0]+1)
Operator(expr)(a=a)
assert np.allclose(p.data[1, :], xcoords, rtol=1e-6)
p.data[:] = 1.
expr = p.interpolate(a, u_t=a.indices[0]+1,
p_t=p.indices[0]+1)
Operator(expr)(a=a)
assert np.allclose(p.data[1, :], xcoords, rtol=1e-6)
@pytest.mark.parametrize('shape, coords', [
((11, 11), [(.05, .9), (.01, .8)]),
((11, 11, 11), [(.05, .9), (.01, .8), (0.07, 0.84)])
])
def test_interpolate_array(shape, coords, npoints=20):
"""Test generic point interpolation testing the x-coordinate of an
abitrary set of points going across the grid.
"""
a = unit_box(shape=shape)
p = points(a.grid, coords, npoints=npoints)
xcoords = p.coordinates.data[:, 0]
expr = p.interpolate(a)
Operator(expr)(a=a, points=p.data[:])
assert np.allclose(p.data[:], xcoords, rtol=1e-6)
@pytest.mark.parametrize('shape, coords', [
((11, 11), [(.05, .9), (.01, .8)]),
((11, 11, 11), [(.05, .9), (.01, .8), (0.07, 0.84)])
])
def test_interpolate_custom(shape, coords, npoints=20):
"""Test generic point interpolation testing the x-coordinate of an
abitrary set of points going across the grid.
"""
a = unit_box(shape=shape)
p = custom_points(a.grid, coords, npoints=npoints)
xcoords = p.coordinates.data[:, 0]
p.data[:] = 1.
expr = p.interpolate(a * p.indices[0])
Operator(expr)(a=a)
assert np.allclose(p.data[0, :], 0.0 * xcoords, rtol=1e-6)
assert np.allclose(p.data[1, :], 1.0 * xcoords, rtol=1e-6)
assert np.allclose(p.data[2, :], 2.0 * xcoords, rtol=1e-6)
def test_interpolation_dx():
"""
Test interpolation of a SparseFunction from a Derivative of
a Function.
"""
u = unit_box(shape=(11, 11))
sf1 = SparseFunction(name='s', grid=u.grid, npoint=1)
sf1.coordinates.data[0, :] = (0.5, 0.5)
op = Operator(sf1.interpolate(u.dx))
assert sf1.data.shape == (1,)
u.data[:] = 0.0
u.data[5, 5] = 4.0
u.data[4, 5] = 2.0
u.data[6, 5] = 2.0
op.apply()
# Exactly in the middle of 4 points, only 1 nonzero is 4
assert sf1.data[0] == pytest.approx(-20.0)
@pytest.mark.parametrize('shape, coords', [
((11, 11), [(.05, .9), (.01, .8)]),
((11, 11, 11), [(.05, .9), (.01, .8), (0.07, 0.84)])
])
def test_interpolate_indexed(shape, coords, npoints=20):
"""Test generic point interpolation testing the x-coordinate of an
abitrary set of points going across the grid. Unlike other tests,
here we interpolate an expression built using the indexed notation.
"""
a = unit_box(shape=shape)
p = custom_points(a.grid, coords, npoints=npoints)
xcoords = p.coordinates.data[:, 0]
p.data[:] = 1.
expr = p.interpolate(a[a.grid.dimensions] * p.indices[0])
Operator(expr)(a=a)
assert np.allclose(p.data[0, :], 0.0 * xcoords, rtol=1e-6)
assert np.allclose(p.data[1, :], 1.0 * xcoords, rtol=1e-6)
assert np.allclose(p.data[2, :], 2.0 * xcoords, rtol=1e-6)
@pytest.mark.parametrize('shape, coords, result', [
((11, 11), [(.05, .95), (.45, .45)], 1.),
((11, 11, 11), [(.05, .95), (.45, .45), (.45, .45)], 0.5)
])
def test_inject(shape, coords, result, npoints=19):
"""Test point injection with a set of points forming a line
through the middle of the grid.
"""
a = unit_box(shape=shape)
a.data[:] = 0.
p = points(a.grid, ranges=coords, npoints=npoints)
expr = p.inject(a, FLOAT(1.))
Operator(expr)(a=a)
indices = [slice(4, 6, 1) for _ in coords]
indices[0] = slice(1, -1, 1)
assert np.allclose(a.data[indices], result, rtol=1.e-5)
@pytest.mark.parametrize('shape, coords, result', [
((11, 11), [(.05, .95), (.45, .45)], 1.),
((11, 11, 11), [(.05, .95), (.45, .45), (.45, .45)], 0.5)
])
def test_inject_time_shift(shape, coords, result, npoints=19):
"""Test generic point injection testing the x-coordinate of an
abitrary set of points going across the grid.
This test verifies the optional time shifting for SparseTimeFunctions
"""
a = unit_box_time(shape=shape)
a.data[:] = 0.
p = time_points(a.grid, ranges=coords, npoints=npoints)
expr = p.inject(a, FLOAT(1.), u_t=a.indices[0]+1)
Operator(expr)(a=a, time=1)
indices = [slice(1, 1, 1)] + [slice(4, 6, 1) for _ in coords]
indices[1] = slice(1, -1, 1)
assert np.allclose(a.data[indices], result, rtol=1.e-5)
a.data[:] = 0.
expr = p.inject(a, FLOAT(1.), p_t=p.indices[0]+1)
Operator(expr)(a=a, time=1)
indices = [slice(0, 0, 1)] + [slice(4, 6, 1) for _ in coords]
indices[1] = slice(1, -1, 1)
assert np.allclose(a.data[indices], result, rtol=1.e-5)
a.data[:] = 0.
expr = p.inject(a, FLOAT(1.), u_t=a.indices[0]+1, p_t=p.indices[0]+1)
Operator(expr)(a=a, time=1)
indices = [slice(1, 1, 1)] + [slice(4, 6, 1) for _ in coords]
indices[1] = slice(1, -1, 1)
assert np.allclose(a.data[indices], result, rtol=1.e-5)
@pytest.mark.parametrize('shape, coords, result', [
((11, 11), [(.05, .95), (.45, .45)], 1.),
((11, 11, 11), [(.05, .95), (.45, .45), (.45, .45)], 0.5)
])
def test_inject_array(shape, coords, result, npoints=19):
"""Test point injection with a set of points forming a line
through the middle of the grid.
"""
a = unit_box(shape=shape)
a.data[:] = 0.
p = points(a.grid, ranges=coords, npoints=npoints)
p2 = points(a.grid, ranges=coords, npoints=npoints, name='p2')
p2.data[:] = 1.
expr = p.inject(a, p)
Operator(expr)(a=a, points=p2.data[:])
indices = [slice(4, 6, 1) for _ in coords]
indices[0] = slice(1, -1, 1)
assert np.allclose(a.data[indices], result, rtol=1.e-5)
@pytest.mark.parametrize('shape, coords, result', [
((11, 11), [(.05, .95), (.45, .45)], 1.),
((11, 11, 11), [(.05, .95), (.45, .45), (.45, .45)], 0.5)
])
def test_inject_from_field(shape, coords, result, npoints=19):
"""Test point injection from a second field along a line
through the middle of the grid.
"""
a = unit_box(shape=shape)
a.data[:] = 0.
b = Function(name='b', grid=a.grid)
b.data[:] = 1.
p = points(a.grid, ranges=coords, npoints=npoints)
expr = p.inject(field=a, expr=b)
Operator(expr)(a=a, b=b)
indices = [slice(4, 6, 1) for _ in coords]
indices[0] = slice(1, -1, 1)
assert np.allclose(a.data[indices], result, rtol=1.e-5)
@pytest.mark.parametrize('shape', [(50, 50, 50)])
def test_position(shape):
t0 = 0.0 # Start time
tn = 500. # Final time
nrec = 130 # Number of receivers
# Create model from preset
model = demo_model('constant-isotropic', spacing=[15. for _ in shape],
shape=shape, nbl=10)
# Derive timestepping from model spacing
dt = model.critical_dt
time_range = TimeAxis(start=t0, stop=tn, step=dt)
# Source and receiver geometries
src_coordinates = np.empty((1, len(shape)))
src_coordinates[0, :] = np.array(model.domain_size) * .5
src_coordinates[0, -1] = 30.
rec_coordinates = np.empty((nrec, len(shape)))
rec_coordinates[:, 0] = np.linspace(0., model.domain_size[0], num=nrec)
rec_coordinates[:, 1:] = src_coordinates[0, 1:]
geometry = AcquisitionGeometry(model, rec_coordinates, src_coordinates,
t0=t0, tn=tn, src_type='Ricker', f0=0.010)
# Create solver object to provide relevant operators
solver = AcousticWaveSolver(model, geometry, time_order=2, space_order=4)
rec, u, _ = solver.forward(save=False)
# Define source geometry (center of domain, just below surface) with 100. origin
src = RickerSource(name='src', grid=model.grid, f0=0.01, time_range=time_range)
src.coordinates.data[0, :] = np.array(model.domain_size) * .5 + 100.
src.coordinates.data[0, -1] = 130.
# Define receiver geometry (same as source, but spread across x)
rec2 = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=nrec)
rec2.coordinates.data[:, 0] = np.linspace(100., 100. + model.domain_size[0],
num=nrec)
rec2.coordinates.data[:, 1:] = src.coordinates.data[0, 1:]
ox_g, oy_g, oz_g = tuple(o + 100. for o in model.grid.origin)
rec1, u1, _ = solver.forward(save=False, src=src, rec=rec2,
o_x=ox_g, o_y=oy_g, o_z=oz_g)
assert(np.allclose(rec.data, rec1.data, atol=1e-5))
def test_edge_sparse():
"""
Test that interpolation uses the correct point for the edge case
where the sparse point is at the origin with non rational grid spacing.
Due to round up error the interpolation would use the halo point instead of
the point (0, 0) without the factorizaion of the expressions.
"""
grid = Grid(shape=(16, 16), extent=(225., 225.), origin=(25., 35.))
u = unit_box(shape=(16, 16), grid=grid)
u._data_with_outhalo[:u.space_order, :] = -1
u._data_with_outhalo[:, :u.space_order] = -1
sf1 = SparseFunction(name='s', grid=u.grid, npoint=1)
sf1.coordinates.data[0, :] = (25.0, 35.0)
expr = sf1.interpolate(u)
subs = {d.spacing: v for d, v in zip(u.grid.dimensions, u.grid.spacing)}
op = Operator(expr, subs=subs)
op()
assert sf1.data[0] == 0
def test_msf_interpolate():
""" Test interpolation with MatrixSparseTimeFunction which accepts
precomputed values for interpolation coefficients, but this time
with a TimeFunction
"""
shape = (101, 101)
points = [(.05, .9), (.01, .8), (0.07, 0.84)]
origin = (0, 0)
grid = Grid(shape=shape, origin=origin)
r = 2 # Constant for linear interpolation
# because we interpolate across 2 neighbouring points in each dimension
u = TimeFunction(name='u', grid=grid, space_order=0, save=5)
for it in range(5):
u.data[it, :] = it
gridpoints, interpolation_coeffs = precompute_linear_interpolation(points,
grid, origin)
matrix = scipy.sparse.eye(len(points))
sf = MatrixSparseTimeFunction(
name='s', grid=grid, r=r, matrix=matrix, nt=5
)
sf.gridpoints.data[:] = gridpoints
sf.coefficients_x.data[:] = interpolation_coeffs[:, 0, :]
sf.coefficients_y.data[:] = interpolation_coeffs[:, 0, :]
assert sf.data.shape == (5, 3)
eqn = sf.interpolate(u)
op = Operator(eqn)
print(op)
sf.manual_scatter()
op(time_m=0, time_M=4)
sf.manual_gather()
for it in range(5):
assert np.allclose(sf.data[it, :], it)
# Now test injection
u.data[:] = 0
eqn_inject = sf.inject(field=u, expr=sf)
op2 = Operator(eqn_inject)
print(op2)
op2(time_m=0, time_M=4)
# There should be 4 points touched for each source point
# (5, 90), (1, 80), (7, 84) and x+1, y+1 for each
nzt, nzx, nzy = np.nonzero(u.data)
assert np.all(np.unique(nzx) == np.array([1, 2, 5, 6, 7, 8]))
assert np.all(np.unique(nzy) == np.array([80, 81, 84, 85, 90, 91]))
assert np.all(np.unique(nzt) == np.array([1, 2, 3, 4]))
# 12 points x 4 timesteps
assert nzt.size == 48
| mit | -1,173,478,634,189,437,700 | 33.586022 | 88 | 0.600135 | false |
ajyoon/brown | tests/test_core/test_path.py | 1 | 4876 | import unittest
import pytest
from brown.core import brown
from brown.core.brush import Brush
from brown.core.invisible_object import InvisibleObject
from brown.core.path import Path
from brown.core.path_element_type import PathElementType
from brown.core.pen import Pen
from brown.utils.point import Point
class TestPath(unittest.TestCase):
def setUp(self):
brown.setup()
def test_init(self):
mock_parent = InvisibleObject((0, 0), parent=None)
test_pen = Pen('#eeeeee')
test_brush = Brush('#dddddd')
path = Path((5, 6), test_pen, test_brush, mock_parent)
assert(isinstance(path.pos, Point))
assert(path.x == 5)
assert(path.y == 6)
assert(isinstance(path.current_draw_pos, Point))
assert(path.current_draw_pos == Point(0, 0))
assert(path.pen == test_pen)
assert(path.brush == test_brush)
def test_straight_line(self):
test_line = Path.straight_line((5, 6), (10, 11))
assert(isinstance(test_line.pos, Point))
assert(test_line.x == 5)
assert(test_line.y == 6)
assert(test_line.current_draw_pos == Point(10, 11))
# noinspection PyPropertyAccess
def test_current_path_pos_has_no_setter(self):
test_line = Path((0, 0))
with pytest.raises(AttributeError):
test_line.current_draw_pos = (7, 8)
def test_line_to(self):
path = Path((5, 6))
path.line_to(10, 12)
assert(len(path.elements) == 2)
assert(path.elements[-1].pos.x == 10)
assert(path.current_draw_pos == Point(10, 12))
def test_line_to_with_parent(self):
path = Path((5, 6))
parent = InvisibleObject((100, 50))
path.line_to(1, 3, parent)
assert(path.elements[-1].parent == parent)
def test_cubic_to_with_no_parents(self):
path = Path((5, 6))
path.cubic_to(10, 11, 0, 1, 5, 6)
assert(len(path.elements) == 4)
assert(path.elements[0].element_type == PathElementType.move_to)
assert(path.elements[0].pos == Point(0, 0))
assert(path.elements[1].element_type == PathElementType.control_point)
assert(path.elements[1].pos == Point(10, 11))
assert(path.elements[2].element_type == PathElementType.control_point)
assert(path.elements[2].pos == Point(0, 1))
assert(path.elements[3].element_type == PathElementType.curve_to)
assert(path.elements[3].pos == Point(5, 6))
assert(path.current_draw_pos.x == 5)
assert(path.current_draw_pos.y == 6)
def test_cubic_to_with_parents(self):
path = Path((0, 0))
parent_1 = InvisibleObject((100, 50))
parent_2 = InvisibleObject((100, 50))
parent_3 = InvisibleObject((100, 50))
path.cubic_to(10, 11, 0, 1, 5, 6, parent_1, parent_2, parent_3)
assert(len(path.elements) == 4)
assert(path.elements[0].element_type == PathElementType.move_to)
assert(path.elements[0].pos == Point(0, 0))
assert(path.elements[1].element_type == PathElementType.control_point)
assert(path.elements[1].pos == Point(10, 11))
assert(path.elements[1].parent == parent_1)
assert(path.elements[2].element_type == PathElementType.control_point)
assert(path.elements[2].pos == Point(0, 1))
assert(path.elements[2].parent == parent_2)
assert(path.elements[3].element_type == PathElementType.curve_to)
assert(path.elements[3].pos == Point(5, 6))
assert(path.elements[3].parent == parent_3)
assert(path.current_draw_pos.x == 105)
assert(path.current_draw_pos.y == 56)
def test_move_to_with_no_parent(self):
path = Path((5, 6))
path.move_to(10, 11)
assert(len(path.elements) == 1)
assert(path.elements[0].element_type == PathElementType.move_to)
assert(path.elements[0].pos == Point(10, 11))
assert(path.current_draw_pos.x == 10)
assert(path.current_draw_pos.y == 11)
def test_move_to_with_parent(self):
path = Path((0, 0))
parent = InvisibleObject((100, 50))
path.move_to(10, 11, parent)
assert(len(path.elements) == 1)
assert(path.elements[0].element_type == PathElementType.move_to)
assert(path.elements[0].pos == Point(10, 11))
assert(path.elements[0].parent == parent)
assert(path.current_draw_pos.x == 110)
assert(path.current_draw_pos.y == 61)
def test_close_subpath(self):
path = Path((5, 6))
path.line_to(10, 10)
path.line_to(10, 100)
path.close_subpath()
assert(len(path.elements) == 4)
assert(path.elements[3].element_type == PathElementType.move_to)
assert(path.elements[3].pos == Point(0, 0))
assert(path.current_draw_pos.x == 0)
assert(path.current_draw_pos.y == 0)
| gpl-3.0 | -7,057,796,230,933,259,000 | 38.642276 | 78 | 0.609516 | false |
google-research-datasets/natural-questions | simplify_nq_data.py | 1 | 2567 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script to apply `text_utils.simplify_nq_data` to all examples in a split.
We have provided the processed training set at the link below.
https://storage.cloud.google.com/natural_questions/v1.0-simplified/simplified-nq-train.jsonl.gz
The test set, used by NQ's competition website, is only provided in the original
NQ format. If you wish to use the simplified format, then you should call
`text_utils.simplify_nq_data` in your submitted system.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import glob
import gzip
import json
import os
import time
from absl import app
from absl import flags
import text_utils as text_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
"data_dir", None, "Path to directory containing original NQ"
"files, matching the pattern `nq-<split>-??.jsonl.gz`.")
def main(_):
"""Runs `text_utils.simplify_nq_example` over all shards of a split.
Prints simplified examples to a single gzipped file in the same directory
as the input shards.
"""
split = os.path.basename(FLAGS.data_dir)
outpath = os.path.join(FLAGS.data_dir,
"simplified-nq-{}.jsonl.gz".format(split))
with gzip.open(outpath, "wb") as fout:
num_processed = 0
start = time.time()
for inpath in glob.glob(os.path.join(FLAGS.data_dir, "nq-*-??.jsonl.gz")):
print("Processing {}".format(inpath))
with gzip.open(inpath, "rb") as fin:
for l in fin:
utf8_in = l.decode("utf8", "strict")
utf8_out = json.dumps(
text_utils.simplify_nq_example(json.loads(utf8_in))) + u"\n"
fout.write(utf8_out.encode("utf8"))
num_processed += 1
if not num_processed % 100:
print("Processed {} examples in {}.".format(num_processed,
time.time() - start))
if __name__ == "__main__":
app.run(main)
| apache-2.0 | 6,546,578,906,025,235,000 | 33.226667 | 95 | 0.676665 | false |
WaterIsland/DLStudy | mln/CR/test-handwrite.py | 1 | 2342 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This precision method refered by
# http://scikit-learn.org/stable/auto_examples/classification/plot_digits_classification.html
# https://github.com/sylvan5/PRML/blob/master/ch5/digits.py
#
import time
import cv2
import numpy as np
import Mln as mln
import dump as dp
import progress as prg
import image as img
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix, classification_report
teach_data = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0] # dummy
print '--start--'
print '@@ Show after learning @@'
neuro_obj = dp.obj_load('./learn-cr.dump')
def recognition_digit_image(fname, digit = 100):
im = cv2.imread(fname)
im = img.change_size_with_size(im, 28, 28)
im = img.change_grayscale(im)
im = 255 - im
input_data = im
input_data = input_data.astype(np.float64)
input_data = im / im.max()
input_data = np.reshape(input_data, (1, 28*28))
neuro_obj.test(input_data, teach_data)
output = neuro_obj.get_output()
if digit >=0 and digit <= 9:
print "judged:", neuro_obj.get_max_output_index(),
print ", target order:", np.where(np.fliplr(np.argsort(output)) == digit)[1] + 1,
print ", order array:", np.fliplr(np.argsort(output))
else:
print "judged:", neuro_obj.get_max_output_index(),
print ", order array:", np.fliplr(np.argsort(output))
cv2.imshow("input_data", im)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
recognition_digit_image("image/0.png", 0)
recognition_digit_image("image/1.png", 1)
recognition_digit_image("image/2.png", 2)
recognition_digit_image("image/3.png", 3)
recognition_digit_image("image/4.png", 4)
recognition_digit_image("image/5.png", 5)
recognition_digit_image("image/6.png", 6)
recognition_digit_image("image/7.png", 7)
recognition_digit_image("image/8.png", 8)
recognition_digit_image("image/9.png", 9)
'''
recognition_digit_image("image/number.png")
| mit | 907,138,524,347,847,800 | 33.955224 | 163 | 0.60333 | false |
mperignon/anuga-sedtransport | demo_files/run_raster_sed_transport_veg.py | 1 | 5666 | """
Example of use of sediment transport and vegetation drag operators over
a raster-derived topography
M. Perignon
[email protected]
July 2014
"""
#------------------------------------------------------------------------------
# Import necessary modules
#------------------------------------------------------------------------------
import anuga
from anuga import rectangular_cross
from anuga import Domain
from anuga import Dirichlet_boundary
"""
Import operators
"""
from anuga.operators.sed_transport.sed_transport_operator import Sed_transport_operator, Vegetation_operator
"""
Import operator-specific boundaries
"""
from anuga.operators.sed_transport.sed_transport_utils import Reflective_boundary_Sed, Dirichlet_boundary_Sed
"""
Import operator-specific version of domain function
"""
from anuga.operators.sed_transport.sed_transport_utils import create_domain_from_regions_sed
"""
Import file conversion and quantity setting functions for vegetation file
"""
from anuga.operators.sed_transport.file_conversion.generic_asc2dem import generic_asc2dem
from anuga.operators.sed_transport.file_conversion.generic_dem2pts import generic_dem2pts
from anuga.operators.sed_transport.sed_transport_utils import set_quantity_NNeigh
#===============================================================================
# Setup Functions
#===============================================================================
# Convert an elevation raster into a point file
anuga.asc2dem('topo.asc', use_cache = False, verbose = True)
anuga.dem2pts('topo.dem', use_cache = False, verbose = True)
"""
Include the process-specific quantities when creating the domain
"""
evolved_quantities = ['stage', 'xmomentum', 'ymomentum', 'concentration']
other_quantities=['elevation', 'friction', 'height', 'xvelocity', \
'yvelocity', 'x', 'y', 'vegetation', 'diffusivity']
# import bounding polygon text file, set boundary tags
bounding_polygon = anuga.read_polygon('outline.csv')
boundary_tags = {'bottom':[0],
'side1':[1],
'side2':[2],
'top':[3],
'side3':[4],
'side4':[5]}
"""
Create the domain with operator-specific function (accepts quantities)
"""
domain = create_domain_from_regions_sed(bounding_polygon,
boundary_tags = boundary_tags,
maximum_triangle_area = 200,
mesh_filename = 'topo.msh',
interior_regions = {},
evolved_quantities = evolved_quantities,
other_quantities = other_quantities,
use_cache = False,
verbose = True)
#------------------------------------------------------------------------------
# Setup parameters of computational domain
#------------------------------------------------------------------------------
domain.set_name('run_raster_sed_transport_veg') # Name of sww file
# Print some stats about mesh and domain
print 'Number of triangles = ', len(domain)
print 'The extent is ', domain.get_extent()
print domain.statistics()
domain.set_quantity('elevation',
filename = 'topo.pts',
use_cache = False,
verbose = True,
alpha = 0.1)
domain.set_quantity('stage', expression='elevation')
#------------------------------------------------------------------------------
# Sediment transport and vegetation operators
#------------------------------------------------------------------------------
"""
Convert a raster of vegetation types into a point file
Set the values of quantity 'vegetation' to values of point file
with Nearest Neighbour algorithm
"""
generic_asc2dem('veg.asc',
quantity_name = 'vegetation',
use_cache = False,
verbose = True)
generic_dem2pts('veg.dem',
quantity_name = 'vegetation',
use_cache = False,
verbose = True)
set_quantity_NNeigh(domain, 'vegetation', filename = 'veg.pts')
op1 = Sed_transport_operator(domain,
erosion = True,
deposition = True,
turbulence = True,
momentum_sinks = True,
verbose = True)
op2 = Vegetation_operator(domain,
vegfile = 'vegcodes.txt',
verbose = True)
domain.set_flow_algorithm('1_75')
domain.set_quantities_to_be_stored({'elevation': 2,'stage': 2,'xmomentum': 2, 'concentration': 2, 'vegetation': 1})
#------------------------------------------------------------------------------
# Setup boundary conditions
#------------------------------------------------------------------------------
max_elev = domain.quantities['elevation'].vertex_values.max()
min_elev = domain.quantities['elevation'].vertex_values.min()
Bd = Dirichlet_boundary_Sed([1528, 0., 0., 0.2])
Bi = anuga.Dirichlet_boundary([min_elev - 1, 0., 0.])
Br = Reflective_boundary_Sed(domain)
domain.set_boundary({'bottom':Bi,
'side1':Br,
'side2':Br,
'top':Bd,
'side3':Br,
'side4':Br,
'exterior':Br})
#------------------------------------------------------------------------------
# Evolve system through time
#------------------------------------------------------------------------------
for t in domain.evolve(yieldstep = 5, finaltime = 100):
print domain.timestepping_statistics() | gpl-2.0 | -5,832,266,252,321,722,000 | 31.146199 | 115 | 0.513237 | false |
alexfalcucc/anaconda | anaconda_lib/autopep/autopep_wrapper.py | 1 | 1460 | # -*- coding: utf8 -*-
# Copyright (C) 2013 - Oscar Campos <[email protected]>
# This program is Free Software see LICENSE file for details
"""
This file is a wrapper for autopep8 library.
"""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../linting'))
import threading
from .autopep8_lib import autopep8
class AnacondaAutopep8(threading.Thread):
"""Wrapper class around native autopep8 implementation
"""
def __init__(self, settings, code, callback):
threading.Thread.__init__(self)
self.code = code
self.callback = callback
self.options, _ = autopep8.parse_args(self.parse_settings(settings))
def run(self):
self.callback(autopep8.fix_string(self.code, options=self.options))
def parse_settings(self, settings):
"""Map anaconda settings to autopep8 settings
"""
args = []
args += ['-a'] * settings.get('aggressive', 0)
if len(settings.get('autoformat_ignore', [])) > 0:
args += ['--ignore={}'.format(
','.join(settings.get('autoformat_ignore')))]
if len(settings.get('autoformat_select', [])) > 0:
args += ['--select={}'.format(
','.join(settings.get('autoformat_select')))]
args += ['--max-line-length={}'.format(
settings.get('pep8_max_line_length', 150))]
args += ['anaconda_rocks']
return args
| gpl-3.0 | -3,411,383,632,525,060,600 | 27.627451 | 76 | 0.599315 | false |
ASzc/nagoya | nagoya/moromi.py | 1 | 9042 | #
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import os
import re
import collections
import itertools
import docker
import toposort
import nagoya.dockerext.build
import nagoya.buildcsys
import nagoya.cli.cfg
logger = logging.getLogger("nagoya.build")
#
# Exceptions
#
class InvalidFormat(Exception):
pass
#
# Helpers
#
def line_split(string):
return map(str.strip, string.split("\n"))
def optional_plural(cfg, key):
if key in cfg:
logger.debug("Optional config key {key} exists".format(**locals()))
for elem in line_split(cfg[key]):
yield elem
else:
logger.debug("Optional config key {key} does not exist".format(**locals()))
#
# Container system image build
#
container_system_option_names = {"system", "commits", "persists", "root"}
dest_spec_pattern = re.compile(r'^(?P<container>[^ ]+) to (?P<image>[^ ]+)$')
ContainerDest = collections.namedtuple("ContainerDest", ["container", "image"])
def parse_dest_spec(spec, opt_name, image_name):
match = dest_spec_pattern.match(spec)
if match:
return ContainerDest(**match.groupdict())
else:
raise InvalidFormat("Invalid {opt_name} specification '{spec}' for image {image_name}".format(**locals()))
def build_container_system(image_name, image_config, client, quiet, extra_env):
logger.info("Creating container system for {image_name}".format(**locals()))
sys_config = nagoya.cli.cfg.read_one(image_config["system"], ["detach", "run_once"])
with nagoya.buildcsys.BuildContainerSystem.from_dict(sys_config, client=client) as bcs:
bcs.cleanup = "remove"
bcs.quiet = quiet
bcs.root_name(image_config["root"])
if "entrypoint" in image_config:
entrypoint_spec = image_config["entrypoint"]
res_paths = parse_dir_spec(entrypoint_spec, "entrypoint", image_name)
bcs.root.working_dir = res_paths.dest_dir
bcs.root.entrypoint = res_paths.dest_path
bcs.volume_include(bcs.root, res_paths.src_path, res_paths.dest_path, executable=True)
for lib_spec in optional_plural(image_config, "libs"):
res_paths = parse_dir_spec(lib_spec, "lib", image_name)
bcs.volume_include(bcs.root, res_paths.src_path, res_paths.dest_path)
for commit_spec in optional_plural(image_config, "commits"):
dest = parse_dest_spec(commit_spec, "commits", image_name)
logger.debug("Container {dest.container} will be committed to {dest.image}".format(**locals()))
bcs.commit(dest.container, dest.image)
for persist_spec in optional_plural(image_config, "persists"):
dest = parse_dest_spec(persist_spec, "persists", image_name)
logger.debug("Container {dest.container} will be persisted to {dest.image}".format(**locals()))
bcs.persist(dest.container, dest.image)
#
# Standard image build
#
dir_spec_pattern = re.compile(r'^(?P<sourcepath>.+) (?:in (?P<inpath>.+)|at (?P<atpath>.+))$')
ResPaths = collections.namedtuple("ResCopyPaths", ["src_path", "dest_path", "dest_dir"])
def parse_dir_spec(spec, opt_name, image_name):
match = dir_spec_pattern.match(spec)
if match:
gd = match.groupdict()
src_path = gd["sourcepath"]
src_basename = os.path.basename(src_path)
if "inpath" in gd:
image_dir = gd["inpath"]
image_path = os.path.join(image_dir, src_basename)
elif "atpath" in gd:
image_path = gd["atpath"]
image_dir = os.path.dirname(image_path)
else:
raise Exception("dir_spec_pattern is broken")
return ResPaths(src_path, image_path, image_dir)
else:
raise InvalidFormat("Invalid {opt_name} specification '{spec}' for image {image_name}".format(**locals()))
# Workaround for Python 2 not having the nonlocal keyword
class Previous(object):
def __init__(self, initial):
self.value = initial
def __call__(self, new):
if self.value == new:
return True
else:
self.value = new
return False
def build_image(image_name, image_config, client, quiet, extra_env):
logger.info("Generating files for {image_name}".format(**locals()))
with nagoya.dockerext.build.BuildContext(image_name, image_config["from"], client, quiet) as context:
context.maintainer(image_config["maintainer"])
for port in optional_plural(image_config, "exposes"):
context.expose(port)
for volume in optional_plural(image_config, "volumes"):
context.volume(volume)
for lib_spec in optional_plural(image_config, "libs"):
res_paths = parse_dir_spec(lib_spec, "lib", image_name)
context.include(res_paths.src_path, res_paths.dest_path)
for env_spec in itertools.chain(optional_plural(image_config, "envs"), extra_env):
k,v = env_spec.split("=", 1)
context.env(k, v)
previous_workdir = Previous("")
def add_workdir(image_dir):
if not previous_workdir(image_dir):
context.workdir(image_dir)
for run_spec in optional_plural(image_config, "runs"):
res_paths = parse_dir_spec(run_spec, "run", image_name)
context.include(res_paths.src_path, res_paths.dest_path, executable=True)
add_workdir(res_paths.dest_dir)
context.run(res_paths.dest_path)
if "entrypoint" in image_config:
entrypoint_spec = image_config["entrypoint"]
res_paths = parse_dir_spec(entrypoint_spec, "entrypoint", image_name)
context.include(res_paths.src_path, res_paths.dest_path, executable=True)
add_workdir(res_paths.dest_dir)
context.entrypoint(res_paths.dest_path)
#
# Build images
#
def resolve_dep_order(images_config):
# Figure out what images are provided by this config
# Anything not provided is assumed to exist already
provided_images = dict()
for image_name,image_config in images_config.items():
if container_system_option_names.isdisjoint(image_config.keys()):
provided_images[image_name] = image_name
else:
for commit_spec in optional_plural(image_config, "commits"):
dest = parse_dest_spec(commit_spec, "commits", image_name)
provided_images[dest.image] = image_name
for persist_spec in optional_plural(image_config, "persists"):
            dest = parse_dest_spec(persist_spec, "persists", image_name)
provided_images[dest.image] = image_name
# Figure out the images required (among those provided) by images in this config
deps = dict()
for image_name,image_config in images_config.items():
req = set()
deps[image_name] = req
if container_system_option_names.isdisjoint(image_config.keys()):
from_name = image_config["from"].split(":", 1)[0]
if from_name in provided_images:
req.add(from_name)
else:
sys_config = nagoya.cli.cfg.read_one(image_config["system"])
for cont_config in sys_config.values():
image_name = cont_config["image"].split(":", 1)[0]
if image_name in provided_images:
req.add(image_name)
# Toposort to sync groups, use original order of keys to order within groups
image_names = []
for group in toposort.toposort(deps):
image_names.extend(sorted(group, key=lambda n: images_config.keys().index(n)))
return image_names
def build_images(config, quiet, env, images=None):
if images is None:
logger.info("Resolving image dependency order")
images = resolve_dep_order(config)
num_img = len(images)
logger.info("Building {0} image{1}".format(num_img, "s" if num_img > 1 else ""))
docker_client = docker.Client(timeout=10)
docker_client.ping()
for image in images:
logger.debug("Processing image {image}".format(**locals()))
image_config = config[image]
if not container_system_option_names.isdisjoint(image_config.keys()):
build_container_system(image, image_config, docker_client, quiet, env)
else:
build_image(image, image_config, docker_client, quiet, env)
logger.info("Done")
| lgpl-3.0 | -8,376,207,827,642,993,000 | 36.675 | 114 | 0.642336 | false |
DIvyanshu-Goel/Biped | biped/include/left_leg.py | 1 | 4723 | #!/usr/bin/env python
import sys
import rospy
import time
import numpy as np
from std_msgs.msg import *
from math import *
from dynamixel_msgs.msg import JointState
from biped.msg import *
from biped.srv import *
#for details on motor ids see Data_Server.py
start_pos = [0, 0, 0, 0, 0, 0];
motorid_LL = [19,20,21,22,23,24];
update_rate = 50;
###########################################################################################################################
def left_leg(goal_pos,time_limit):
global start_pos;
motorLL1_response = motor_data_client(motorid_LL[0]);
motorLL2_response = motor_data_client(motorid_LL[1]);
motorLL3_response = motor_data_client(motorid_LL[2]);
motorLL4_response = motor_data_client(motorid_LL[3]);
motorLL5_response = motor_data_client(motorid_LL[4]);
motorLL6_response = motor_data_client(motorid_LL[5]);
start_pos = [motorLL1_response.current_pos,motorLL2_response.current_pos,motorLL3_response.current_pos,motorLL4_response.current_pos,motorLL5_response.current_pos,motorLL6_response.current_pos];
curr_pos = start_pos;
#handlers for motor publishers
LL1 = rospy.Publisher('/LL1_controller/command', Float64, queue_size=10);
LL2 = rospy.Publisher('/LL2_controller/command', Float64, queue_size=10);
LL3 = rospy.Publisher('/LL3_controller/command', Float64, queue_size=10);
LL4 = rospy.Publisher('/LL4_controller/command', Float64, queue_size=10);
LL5 = rospy.Publisher('/LL5_controller/command', Float64, queue_size=10);
LL6 = rospy.Publisher('/LL6_controller/command', Float64, queue_size=10);
#initialize node for the specific subpart
#rospy.init_node('Left_leg_node', anonymous=True);
rate = rospy.Rate(update_rate) # 50hz update rate
time.sleep(0.05); # make the system sleep a while
time_count = 0 ;
time_limit = time_limit * update_rate;
while (rospy.is_shutdown() == 0 and time_count <= time_limit ):
global curr_pos;
curr_pos = calculate_trajectory(time_count,start_pos,goal_pos,time_limit);
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left leg motor 1" %curr_pos[0] );
LL1.publish(curr_pos[0] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left leg motor 2" %curr_pos[1] );
LL2.publish(curr_pos[1] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left leg motor 3" %curr_pos[2] );
LL3.publish(curr_pos[2] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left leg motor 4" %curr_pos[3] );
LL4.publish(curr_pos[3] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left leg motor 5" %curr_pos[4] );
LL5.publish(curr_pos[4] );
rospy.loginfo(rospy.get_caller_id() + " Publishing %s to left leg motor 6" %curr_pos[5] );
LL6.publish(curr_pos[5] );
time_count = time_count + 1;
time.sleep(0.03);
###########################################################################################################################
def calculate_trajectory(time_count,start_pos,goal_pos,time_limit):
    curr_position = list(start_pos);  # copy, so the caller's start_pos list is not mutated in place
curr_position[0] = start_pos[0] + ((goal_pos[0]-start_pos[0])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[1] = start_pos[1] + ((goal_pos[1]-start_pos[1])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[2] = start_pos[2] + ((goal_pos[2]-start_pos[2])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[3] = start_pos[3] + ((goal_pos[3]-start_pos[3])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[4] = start_pos[4] + ((goal_pos[4]-start_pos[4])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
curr_position[5] = start_pos[5] + ((goal_pos[5]-start_pos[5])/time_limit)*(time_count - (time_limit/2/3.14)*sin(2*3.14*time_count/time_limit));
return(curr_position);
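# The expression above is a cycloidal ramp: with T = time_limit,
#   pos(t) = start + (goal - start) * (t - (T / (2 * pi)) * sin(2 * pi * t / T)) / T,
# so every joint starts and stops with zero velocity over the move.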
###########################################################################################################################
def motor_data_client(x):
rospy.wait_for_service('Fetch_Motor_data')
client = rospy.ServiceProxy('Fetch_Motor_data', Fetch_Motor_Data)
resp1 = client(x);
return (resp1);
###########################################################################################################################
if __name__ == '__main__':
try:
        left_leg([0, 1, 1, 1, 1, 1], 1);  # six joint targets, one per motor in motorid_LL
        time.sleep(2);
        left_leg([0, 0, 0, 0, 0, 0], 2);
time.sleep(2);
except rospy.ROSInterruptException:
pass
| cc0-1.0 | 612,075,465,432,627,600 | 49.244681 | 198 | 0.584163 | false |
TheLazyHase/dragon_dice_simulator | business/dice/face/melee_with_special/kick.py | 1 | 1134 | # -*- coding: utf-8 *-*
# Copyright (c) 2013 Tisserant Pierre
#
# This file is part of Dragon dice simulator.
#
# Dragon dice simulator is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragon dice simulator is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Dragon dice simulator. If not, see <http://www.gnu.org/licenses/>.
from business.dice.face import MeleeWithSpecial
from business.effect import TargetedUnsecableDamageEffect
class Kick(MeleeWithSpecial):
@property
def name(self):
return '%s Kick' % self.amount
def get_special(self):
return TargetedUnsecableDamageEffect(1, increment=self.amount)
| gpl-3.0 | -1,001,280,138,872,206,300 | 39.5 | 83 | 0.73545 | false |
mmmichl/sqlalchemy-fixture-factory | tests/test_sqla_fix_fact.py | 1 | 7856 | # -*- coding: utf-8 -*-
"""
Tests for the fixture
"""
from __future__ import absolute_import, print_function, unicode_literals, division
from sqlalchemy_fixture_factory import sqla_fix_fact
from sqlalchemy_fixture_factory.sqla_fix_fact import BaseFix
from tests import TestCase
class TestFixFact(TestCase):
def test_basefix_create(self):
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
p = FixPerson(self.fix_fact)
assert p is not None
result = p.model()
test = self.Person(first_name='Franz')
assert type(result) == type(test)
assert result.first_name == test.first_name
def test_basefix_create_w_parameter(self):
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
p = FixPerson(self.fix_fact, first_name = 'Peter')
assert p is not None
result = p.model()
test = self.Person(first_name='Peter')
assert type(test) == type(result)
assert test.first_name == result.first_name
def test_basefix_create_ref(self):
class FixPersonAccount(BaseFix):
MODEL = self.Account
name = 'supercheck'
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
account = sqla_fix_fact.subFactoryGet(FixPersonAccount)
p = FixPerson(self.fix_fact)
assert p is not None
result = p.model()
test = self.Person(first_name='Franz', account=self.Account(name='supercheck'))
assert type(result) == type(test)
assert test.first_name == result.first_name
assert result.account is not None
assert test.account.name == result.account.name
p2 = FixPerson(self.fix_fact)
assert p != p2
assert p.account == p2.account
def test_basefix_create_ref_w_parameter(self):
class FixPersonAccount(BaseFix):
MODEL = self.Account
name = 'supercheck'
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
account = sqla_fix_fact.subFactoryGet(FixPersonAccount, name='nixcheck')
p = FixPerson(self.fix_fact, first_name='Peter')
assert p is not None
result = p.model()
test = self.Person(first_name='Peter', account=self.Account(name='nixcheck'))
assert type(result) == type(test)
assert test.first_name == result.first_name
assert result.account is not None
assert test.account.name == result.account.name
p2 = FixPerson(self.fix_fact)
assert p != p2
assert p.account == p2.account
def test_basefix_create_copy(self):
class FixPersonAccount(BaseFix):
MODEL = self.Account
name = 'supercheck'
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
account = sqla_fix_fact.subFactoryCreate(FixPersonAccount)
p = FixPerson(self.fix_fact)
assert p is not None
result = p.model()
test = self.Person(first_name='Franz', account=self.Account(name='supercheck'))
assert type(result) == type(test)
assert test.first_name == result.first_name
assert result.account is not None
assert test.account.name == result.account.name
p2 = FixPerson(self.fix_fact)
assert p != p2
assert p.model().account != p2.model().account
def test_save_fixture_in_db(self):
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
p = FixPerson(self.fix_fact).create()
assert p is not None
# check if primary key is set
assert p.id is not None
result = self.db_session.query(self.Person).all()
assert 1 == len(result)
assert result[0] == p
p2 = FixPerson(self.fix_fact).create()
assert p2 is not None
# check if primary key is set
assert p2.id is not None
result = self.db_session.query(self.Person).all()
assert 2 == len(result)
def test_build_fixture_only(self):
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
p = FixPerson(self.fix_fact).get()
assert p is not None
result = self.db_session.query(self.Person).all()
assert 1 == len(result)
assert result[0] == p
p2 = FixPerson(self.fix_fact).get()
assert p2 is not None
# check if primary key is set
assert p2.id is not None
result = self.db_session.query(self.Person).all()
assert 1 == len(result)
def test_create_with_reference_list(self):
class AdminRole(BaseFix):
MODEL = self.Role
name = 'admin'
class FixAccount(BaseFix):
MODEL = self.Account
name = 'peter'
roles = [
sqla_fix_fact.subFactoryGet(AdminRole)
]
a = FixAccount(self.fix_fact).create()
assert a is not None
assert a.id is not None
result = self.db_session.query(self.Account).get(a.id)
assert 'peter' == result.name
assert 'admin' == result.roles[0].name
def test_sub_factory_get_delivers_same_instance_on_multiple_instantiations(self):
class FixPersonAccount(BaseFix):
MODEL = self.Account
name = 'supercheck'
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
account = sqla_fix_fact.subFactoryGet(FixPersonAccount)
fix_person_1 = FixPerson(self.fix_fact).create()
assert fix_person_1 is not None
assert fix_person_1.id is not None
assert fix_person_1.account is not None
assert 1 == self.db_session.query(self.Person).count()
assert 1 == self.db_session.query(self.Account).count()
fix_person_2 = FixPerson(self.fix_fact).create()
fix_person_3 = FixPerson(self.fix_fact).create()
assert 3 == self.db_session.query(self.Person).count()
assert 1 == self.db_session.query(self.Account).count()
def test_model_instantiates_but_does_not_save_in_db(self):
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
fix_model = FixPerson(self.fix_fact).model()
assert fix_model is not None
assert 0 == self.db_session.query(self.Person).count()
def test_model_does_creates_sub_factories_create_references_in_db(self):
class FixPersonAccount(BaseFix):
MODEL = self.Account
name = 'supercheck'
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
account = sqla_fix_fact.subFactoryCreate(FixPersonAccount)
fix_model = FixPerson(self.fix_fact).model()
assert fix_model is not None
assert 0 == self.db_session.query(self.Person).count()
assert 1 == self.db_session.query(self.Account).count()
account_entry = self.db_session.query(self.Account).all()[0]
assert account_entry == fix_model.account
def test_model_does_not_create_sub_factories_model_references_in_db(self):
class FixPersonAccount(BaseFix):
MODEL = self.Account
name = 'supercheck'
class FixPerson(BaseFix):
MODEL = self.Person
first_name = 'Franz'
account = sqla_fix_fact.subFactoryModel(FixPersonAccount)
fix_model = FixPerson(self.fix_fact).model()
assert fix_model is not None
assert 0 == self.db_session.query(self.Person).count()
assert 0 == self.db_session.query(self.Account).count()
| mit | 6,830,134,704,725,960,000 | 28.533835 | 87 | 0.596741 | false |
HJLebbink/x86doc | Python/extract.py | 1 | 1292 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sys
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.layout import LAParams
from pdfminer.converter import PDFPageAggregator
from x86manual import x86ManParser
def main(argv):
for arg in argv[1:]:
fd = open(arg)
parser = PDFParser(fd)
document = PDFDocument(parser)
if not document.is_extractable:
print "Document not extractable."
return 1
params = LAParams(char_margin=1)
resMan = PDFResourceManager(caching=True)
device = PDFPageAggregator(resMan, laparams=params)
interpreter = PDFPageInterpreter(resMan, device)
parser = x86ManParser("html", params)
i = 1
for page in PDFPage.get_pages(fd, set(), caching=True, check_extractable=True):
print "Processing page %i" % i
interpreter.process_page(page)
page = device.get_result()
parser.process_page(page)
i += 1
parser.flush()
fd.close()
print "Conversion result: %i/%i" % (parser.success, parser.success + parser.fail)
with open("opcodes.json", "wb") as fd:
fd.write(parser.output_opcodes_json())
if __name__ == "__main__":
result = main(sys.argv)
sys.exit(result)
| mit | -6,772,827,665,649,511,000 | 27.711111 | 83 | 0.729102 | false |
FlightGear/flightgear | scripts/python/demo.py | 1 | 1204 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
from FlightGear import FlightGear
import time
def main():
fg = FlightGear('localhost', 5500)
# Wait five seconds for simulator to settle down
while 1:
if fg['/sim/time/elapsed-sec'] > 5:
break
time.sleep(1.0)
print(fg['/sim/time/elapsed-sec'])
# parking brake on
fg['/controls/parking-brake'] = 1
# heading = fg['/orientation/heading-deg']
# Switch to external view for for 'walk around'.
fg.view_next()
fg['/sim/current-view/goal-heading-offset-deg'] = 180.0
#fg.wait_for_prop_eq('/sim/current-view/heading-offset-deg', 180.0)
fg['/sim/current-view/goal-heading-offset-deg'] = 90.0
#fg.wait_for_prop_eq('/sim/current-view/heading-offset-deg', 90.0)
fg['/sim/current-view/goal-heading-offset-deg'] = 0.0
#fg.wait_for_prop_eq('/sim/current-view/heading-offset-deg', 0.0)
time.sleep(2.0)
# Switch back to cockpit view
fg.view_prev()
time.sleep(2.0)
# Flaps to take off position
fg['/controls/flaps'] = 0.34
#fg.wait_for_prop_eq('/surface-positions/flap-pos-norm', 0.34)
fg.quit()
if __name__ == '__main__':
main()
| gpl-2.0 | 6,510,073,827,120,023,000 | 23.571429 | 71 | 0.613787 | false |
anish/buildbot | master/buildbot/test/util/db.py | 1 | 11486 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from sqlalchemy.schema import MetaData
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from twisted.trial import unittest
from buildbot.db import enginestrategy
from buildbot.db import model
from buildbot.db import pool
from buildbot.db.connector import DBConnector
from buildbot.util.sautils import sa_version
from buildbot.util.sautils import withoutSqliteForeignKeys
def skip_for_dialect(dialect):
"""Decorator to skip a test for a particular SQLAlchemy dialect."""
def dec(fn):
def wrap(self, *args, **kwargs):
if self.db_engine.dialect.name == dialect:
raise unittest.SkipTest(
"Not supported on dialect '%s'" % dialect)
return fn(self, *args, **kwargs)
return wrap
return dec
class RealDatabaseMixin:
"""
A class that sets up a real database for testing. This sets self.db_url to
the URL for the database. By default, it specifies an in-memory SQLite
database, but if the BUILDBOT_TEST_DB_URL environment variable is set, it
will use the specified database, being careful to clean out *all* tables in
the database before and after the tests are run - so each test starts with
a clean database.
@ivar db_pool: a (real) DBThreadPool instance that can be used as desired
@ivar db_url: the DB URL used to run these tests
@ivar db_engine: the engine created for the test database
Note that this class uses the production database model. A
re-implementation would be virtually identical and just require extra
work to keep synchronized.
Similarly, this class uses the production DB thread pool. This achieves
a few things:
- affords more thorough tests for the pool
- avoids repetitive implementation
- cooperates better at runtime with thread-sensitive DBAPI's
Finally, it duplicates initialization performed in db.connector.DBConnector.setup().
Never call that method in tests that use RealDatabaseMixin, use
RealDatabaseWithConnectorMixin.
"""
def __thd_clean_database(self, conn):
# In general it's nearly impossible to do "bullet proof" database
# cleanup with SQLAlchemy that will work on a range of databases
# and they configurations.
#
# Following approaches were considered.
#
# 1. Drop Buildbot Model schema:
#
# model.Model.metadata.drop_all(bind=conn, checkfirst=True)
#
# Dropping schema from model is correct and working operation only
# if database schema is exactly corresponds to the model schema.
#
# If it is not (e.g. migration script failed or migration results in
# old version of model), then some tables outside model schema may be
# present, which may reference tables in the model schema.
# In this case either dropping model schema will fail (if database
# enforces referential integrity, e.g. PostgreSQL), or
# dropping left tables in the code below will fail (if database allows
# removing of tables on which other tables have references,
# e.g. SQLite).
#
# 2. Introspect database contents and drop found tables.
#
# meta = MetaData(bind=conn)
# meta.reflect()
# meta.drop_all()
#
# May fail if schema contains reference cycles (and Buildbot schema
# has them). Reflection looses metadata about how reference cycles
# can be teared up (e.g. use_alter=True).
# Introspection may fail if schema has invalid references
# (e.g. possible in SQLite).
#
# 3. What is actually needed here is accurate code for each engine
# and each engine configuration that will drop all tables,
# indexes, constraints, etc in proper order or in a proper way
# (using tables alternation, or DROP TABLE ... CASCADE, etc).
#
        # Conclusion: use approach 2, with known reference cycles manually
        # torn apart.
# pylint: disable=too-many-nested-blocks
try:
meta = MetaData(bind=conn)
# Reflect database contents. May fail, e.g. if table references
# non-existent table in SQLite.
meta.reflect()
# Table.foreign_key_constraints introduced in SQLAlchemy 1.0.
if sa_version()[:2] >= (1, 0):
# Restore `use_alter` settings to break known reference cycles.
# Main goal of this part is to remove SQLAlchemy warning
# about reference cycle.
# Looks like it's OK to do it only with SQLAlchemy >= 1.0.0,
# since it's not issued in SQLAlchemy == 0.8.0
# List of reference links (table_name, ref_table_name) that
# should be broken by adding use_alter=True.
table_referenced_table_links = [
('buildsets', 'builds'), ('builds', 'buildrequests')]
for table_name, ref_table_name in table_referenced_table_links:
if table_name in meta.tables:
table = meta.tables[table_name]
for fkc in table.foreign_key_constraints:
if fkc.referred_table.name == ref_table_name:
fkc.use_alter = True
# Drop all reflected tables and indices. May fail, e.g. if
# SQLAlchemy wouldn't be able to break circular references.
            # SQLAlchemy foreign-key support with SQLite is not yet perfect, so we must
            # deactivate foreign keys during that operation, even though we did our best
            # to use use_alter.
with withoutSqliteForeignKeys(conn.engine, conn):
meta.drop_all()
except Exception:
# sometimes this goes badly wrong; being able to see the schema
# can be a big help
if conn.engine.dialect.name == 'sqlite':
r = conn.execute("select sql from sqlite_master "
"where type='table'")
log.msg("Current schema:")
for row in r.fetchall():
log.msg(row.sql)
raise
def __thd_create_tables(self, conn, table_names):
table_names_set = set(table_names)
tables = [t for t in model.Model.metadata.tables.values()
if t.name in table_names_set]
# Create tables using create_all() method. This way not only tables
# and direct indices are created, but also deferred references
# (that use use_alter=True in definition).
model.Model.metadata.create_all(
bind=conn, tables=tables, checkfirst=True)
@defer.inlineCallbacks
def setUpRealDatabase(self, table_names=None, basedir='basedir',
want_pool=True, sqlite_memory=True):
"""
Set up a database. Ordinarily sets up an engine and a pool and takes
care of cleaning out any existing tables in the database. If
C{want_pool} is false, then no pool will be created, and the database
will not be cleaned.
@param table_names: list of names of tables to instantiate
@param basedir: (optional) basedir for the engine
@param want_pool: (optional) false to not create C{self.db_pool}
@param sqlite_memory: (optional) False to avoid using an in-memory db
@returns: Deferred
"""
if table_names is None:
table_names = []
self.__want_pool = want_pool
default_sqlite = 'sqlite://'
self.db_url = os.environ.get('BUILDBOT_TEST_DB_URL', default_sqlite)
if not sqlite_memory and self.db_url == default_sqlite:
self.db_url = "sqlite:///tmp.sqlite"
if not os.path.exists(basedir):
os.makedirs(basedir)
self.basedir = basedir
self.db_engine = enginestrategy.create_engine(self.db_url,
basedir=basedir)
# if the caller does not want a pool, we're done.
if not want_pool:
return None
self.db_pool = pool.DBThreadPool(self.db_engine, reactor=reactor)
log.msg("cleaning database %s" % self.db_url)
yield self.db_pool.do(self.__thd_clean_database)
yield self.db_pool.do(self.__thd_create_tables, table_names)
@defer.inlineCallbacks
def tearDownRealDatabase(self):
if self.__want_pool:
yield self.db_pool.do(self.__thd_clean_database)
yield self.db_pool.shutdown()
@defer.inlineCallbacks
def insertTestData(self, rows):
"""Insert test data into the database for use during the test.
@param rows: be a sequence of L{fakedb.Row} instances. These will be
sorted by table dependencies, so order does not matter.
@returns: Deferred
"""
# sort the tables by dependency
all_table_names = {row.table for row in rows}
ordered_tables = [t for t in model.Model.metadata.sorted_tables
if t.name in all_table_names]
def thd(conn):
# insert into tables -- in order
for tbl in ordered_tables:
for row in [r for r in rows if r.table == tbl.name]:
tbl = model.Model.metadata.tables[row.table]
try:
tbl.insert(bind=conn).execute(row.values)
except Exception:
log.msg("while inserting %s - %s" % (row, row.values))
raise
yield self.db_pool.do(thd)
class RealDatabaseWithConnectorMixin(RealDatabaseMixin):
# Same as RealDatabaseMixin, except that a real DBConnector is also setup in a correct way.
@defer.inlineCallbacks
def setUpRealDatabaseWithConnector(self, master, table_names=None, basedir='basedir',
want_pool=True, sqlite_memory=True):
yield self.setUpRealDatabase(table_names, basedir, want_pool, sqlite_memory)
master.config.db['db_url'] = self.db_url
master.db = DBConnector(self.basedir)
yield master.db.setServiceParent(master)
master.db.pool = self.db_pool
def tearDownRealDatabaseWithConnector(self):
return self.tearDownRealDatabase()
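# Illustrative usage sketch (not part of buildbot itself; table names are made up):
# a trial test case combines the mixin with the real database roughly like this.
#
#   class MyDbTest(RealDatabaseMixin, unittest.TestCase):
#       @defer.inlineCallbacks
#       def setUp(self):
#           yield self.setUpRealDatabase(table_names=['masters'])
#
#       def tearDown(self):
#           return self.tearDownRealDatabase()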
class TestCase(unittest.TestCase):
@defer.inlineCallbacks
def assertFailure(self, d, excp):
exception = None
try:
yield d
except Exception as e:
exception = e
self.assertIsInstance(exception, excp)
self.flushLoggedErrors(excp)
| gpl-2.0 | 3,904,388,709,226,155,000 | 40.615942 | 104 | 0.628678 | false |
stuart-knock/tvb-library | tvb/datatypes/mode_decompositions.py | 1 | 13929 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
The Mode Decomposition datatypes. This brings together the scientific and
framework methods that are associated with the Mode Decomposition datatypes.
.. moduleauthor:: Stuart A. Knock <[email protected]>
.. moduleauthor:: Paula Sanz Leon <[email protected]>
"""
import numpy
from tvb.basic.logger.builder import get_logger
import tvb.basic.traits.core as core
import tvb.basic.traits.types_basic as basic
import tvb.datatypes.arrays as arrays
import tvb.datatypes.time_series as time_series
from tvb.basic.traits.types_mapped import MappedType
LOG = get_logger(__name__)
class PrincipalComponents(MappedType):
"""
Result of a Principal Component Analysis (PCA).
"""
source = time_series.TimeSeries(
label="Source time-series",
doc="Links to the time-series on which the PCA is applied.")
weights = arrays.FloatArray(
label="Principal vectors",
doc="""The vectors of the 'weights' with which each time-series is
represented in each component.""",
file_storage=core.FILE_STORAGE_EXPAND)
fractions = arrays.FloatArray(
label="Fraction explained",
doc="""A vector or collection of vectors representing the fraction of
the variance explained by each principal component.""",
file_storage=core.FILE_STORAGE_EXPAND)
norm_source = arrays.FloatArray(
label="Normalised source time series",
file_storage=core.FILE_STORAGE_EXPAND)
component_time_series = arrays.FloatArray(
label="Component time series",
file_storage=core.FILE_STORAGE_EXPAND)
normalised_component_time_series = arrays.FloatArray(
label="Normalised component time series",
file_storage=core.FILE_STORAGE_EXPAND)
def write_data_slice(self, partial_result):
"""
Append chunk.
"""
self.store_data_chunk('weights', partial_result.weights, grow_dimension=2, close_file=False)
self.store_data_chunk('fractions', partial_result.fractions, grow_dimension=1, close_file=False)
partial_result.compute_norm_source()
self.store_data_chunk('norm_source', partial_result.norm_source, grow_dimension=1, close_file=False)
partial_result.compute_component_time_series()
self.store_data_chunk('component_time_series', partial_result.component_time_series,
grow_dimension=1, close_file=False)
partial_result.compute_normalised_component_time_series()
self.store_data_chunk('normalised_component_time_series', partial_result.normalised_component_time_series,
grow_dimension=1, close_file=False)
def read_fractions_data(self, from_comp, to_comp):
"""
        Return the fractions for the components in the interval from_comp to to_comp,
        with the sum of the fractions of the remaining components appended as the last element.
"""
from_comp = int(from_comp)
to_comp = int(to_comp)
all_data = self.get_data('fractions').flat
sum_others = 0
for idx, val in enumerate(all_data):
if idx < from_comp or idx > to_comp:
sum_others += val
return numpy.array(all_data[from_comp:to_comp].tolist() + [sum_others])
def read_weights_data(self, from_comp, to_comp):
"""
Return the weights data for the components in the interval [from_comp, to_comp].
"""
from_comp = int(from_comp)
to_comp = int(to_comp)
data_slice = slice(from_comp, to_comp, None)
weights_shape = self.get_data_shape('weights')
weights_slice = [slice(size) for size in weights_shape]
weights_slice[0] = data_slice
weights_data = self.get_data('weights', tuple(weights_slice))
return weights_data.flatten()
def configure(self):
"""
Invoke the compute methods for computable attributes that haven't been
set during initialization.
"""
super(PrincipalComponents, self).configure()
if self.trait.use_storage is False and sum(self.get_data_shape('weights')) != 0:
if self.norm_source.size == 0:
self.compute_norm_source()
if self.component_time_series.size == 0:
self.compute_component_time_series()
if self.normalised_component_time_series.size == 0:
self.compute_normalised_component_time_series()
def _find_summary_info(self):
"""
Gather scientifically interesting summary information from an instance
of this datatype.
"""
summary = {"Mode decomposition type": self.__class__.__name__}
summary["Source"] = self.source.title
# summary["Number of variables"] = self...
# summary["Number of mewasurements"] = self...
# summary["Number of components"] = self...
# summary["Number required for 95%"] = self...
return summary
def compute_norm_source(self):
"""Normalised source time-series."""
self.norm_source = ((self.source.data - self.source.data.mean(axis=0)) /
self.source.data.std(axis=0))
self.trait["norm_source"].log_debug(owner=self.__class__.__name__)
    # TODO: ??? Any value in making this a TimeSeries datatype ???
def compute_component_time_series(self):
"""Compnent time-series."""
# TODO: Generalise -- it currently assumes 4D TimeSeriesSimulator...
ts_shape = self.source.data.shape
component_ts = numpy.zeros(ts_shape)
for var in range(ts_shape[1]):
for mode in range(ts_shape[3]):
w = self.weights[:, :, var, mode]
ts = self.source.data[:, var, :, mode]
component_ts[:, var, :, mode] = numpy.dot(w, ts.T).T
self.component_time_series = component_ts
self.trait["component_time_series"].log_debug(owner=self.__class__.__name__)
    # TODO: ??? Any value in making this a TimeSeries datatype ???
def compute_normalised_component_time_series(self):
"""normalised_Compnent time-series."""
# TODO: Generalise -- it currently assumes 4D TimeSeriesSimulator...
ts_shape = self.source.data.shape
component_ts = numpy.zeros(ts_shape)
for var in range(ts_shape[1]):
for mode in range(ts_shape[3]):
w = self.weights[:, :, var, mode]
nts = self.norm_source[:, var, :, mode]
component_ts[:, var, :, mode] = numpy.dot(w, nts.T).T
self.normalised_component_time_series = component_ts
self.trait["normalised_component_time_series"].log_debug(owner=self.__class__.__name__)
class IndependentComponents(MappedType):
"""
Result of an Independent Component Analysis.
"""
source = time_series.TimeSeries(
label="Source time-series",
doc="Links to the time-series on which the ICA is applied.")
mixing_matrix = arrays.FloatArray(
label="Mixing matrix - Spatial Maps",
doc="""The linear mixing matrix (Mixing matrix) """)
unmixing_matrix = arrays.FloatArray(
label="Unmixing matrix - Spatial maps",
doc="""The estimated unmixing matrix used to obtain the unmixed
sources from the data""")
prewhitening_matrix = arrays.FloatArray(
label="Pre-whitening matrix",
doc=""" """)
n_components = basic.Integer(
label="Number of independent components",
doc=""" Observed data matrix is considered to be a linear combination
of :math:`n` non-Gaussian independent components""")
norm_source = arrays.FloatArray(
label="Normalised source time series. Zero centered and whitened.",
file_storage=core.FILE_STORAGE_EXPAND)
component_time_series = arrays.FloatArray(
label="Component time series. Unmixed sources.",
file_storage=core.FILE_STORAGE_EXPAND)
normalised_component_time_series = arrays.FloatArray(
label="Normalised component time series",
file_storage=core.FILE_STORAGE_EXPAND)
def write_data_slice(self, partial_result):
"""
Append chunk.
"""
self.store_data_chunk('unmixing_matrix', partial_result.unmixing_matrix, grow_dimension=2, close_file=False)
self.store_data_chunk('prewhitening_matrix', partial_result.prewhitening_matrix,
grow_dimension=2, close_file=False)
partial_result.compute_norm_source()
self.store_data_chunk('norm_source', partial_result.norm_source, grow_dimension=1, close_file=False)
partial_result.compute_component_time_series()
self.store_data_chunk('component_time_series', partial_result.component_time_series,
grow_dimension=1, close_file=False)
partial_result.compute_normalised_component_time_series()
self.store_data_chunk('normalised_component_time_series', partial_result.normalised_component_time_series,
grow_dimension=1, close_file=False)
partial_result.compute_mixing_matrix()
self.store_data_chunk('mixing_matrix', partial_result.mixing_matrix, grow_dimension=2, close_file=False)
def configure(self):
"""
Invoke the compute methods for computable attributes that haven't been
set during initialisation.
"""
super(IndependentComponents, self).configure()
if self.trait.use_storage is False and sum(self.get_data_shape('unmixing_matrix')) != 0:
if self.norm_source.size == 0:
self.compute_norm_source()
if self.component_time_series.size == 0:
self.compute_component_time_series()
if self.normalised_component_time_series.size == 0:
self.compute_normalised_component_time_series()
def compute_norm_source(self):
"""Normalised source time-series."""
self.norm_source = ((self.source.data - self.source.data.mean(axis=0)) /
self.source.data.std(axis=0))
def compute_component_time_series(self):
ts_shape = self.source.data.shape
component_ts_shape = (ts_shape[0], ts_shape[1], self.n_components, ts_shape[3])
component_ts = numpy.zeros(component_ts_shape)
for var in range(ts_shape[1]):
for mode in range(ts_shape[3]):
w = self.unmixing_matrix[:, :, var, mode]
k = self.prewhitening_matrix[:, :, var, mode]
ts = self.source.data[:, var, :, mode]
component_ts[:, var, :, mode] = numpy.dot(w, numpy.dot(k, ts.T)).T
self.component_time_series = component_ts
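    # In the loop above the component estimates are obtained as S = W . K . X for
    # each state variable and mode: pre-whitening with K followed by unmixing with W
    # applied to the observed time series X.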
def compute_normalised_component_time_series(self):
ts_shape = self.source.data.shape
component_ts_shape = (ts_shape[0], ts_shape[1], self.n_components, ts_shape[3])
component_nts = numpy.zeros(component_ts_shape)
for var in range(ts_shape[1]):
for mode in range(ts_shape[3]):
w = self.unmixing_matrix[:, :, var, mode]
k = self.prewhitening_matrix[:, :, var, mode]
nts = self.norm_source[:, var, :, mode]
component_nts[:, var, :, mode] = numpy.dot(w, numpy.dot(k, nts.T)).T
self.normalised_component_time_series = component_nts
def compute_mixing_matrix(self):
"""
        Compute the linear mixing matrix A, such that X = A * S,
        where X is the observed data and S contains the independent components.
"""
ts_shape = self.source.data.shape
mixing_matrix_shape = (ts_shape[2], self.n_components, ts_shape[1], ts_shape[3])
mixing_matrix = numpy.zeros(mixing_matrix_shape)
for var in range(ts_shape[1]):
for mode in range(ts_shape[3]):
w = self.unmixing_matrix[:, :, var, mode]
k = self.prewhitening_matrix[:, :, var, mode]
temp = numpy.matrix(numpy.dot(w, k))
mixing_matrix[:, :, var, mode] = numpy.array(numpy.dot(temp.T, (numpy.dot(temp, temp.T)).T))
self.mixing_matrix = mixing_matrix
def _find_summary_info(self):
"""
Gather scientifically interesting summary information from an instance
of this datatype.
"""
summary = {"Mode decomposition type": self.__class__.__name__}
summary["Source"] = self.source.title
return summary | gpl-2.0 | -2,460,027,885,842,812,000 | 41.993827 | 116 | 0.635724 | false |
EndingCredits/PyGame-Learning-Environment | ple/games/monsterkong/monsterPerson.py | 1 | 6130 | __author__ = 'Erilyth'
import pygame
import os
from person import Person
'''
This class defines all the Monsters present in our game.
Each Monster can only move on the top floor and cannot move vertically.
'''
class MonsterPerson(Person):
def __init__(self, raw_image, position, rng, dir, width=15, height=15):
super(MonsterPerson, self).__init__(raw_image, position, width, height)
self.__speed = 2
self.rng = rng
self.__direction = int(self.rng.rand() * 100) % 2
self.__cycles = 0
self.__stopDuration = 0
self.IMAGES = {
"monster0": pygame.image.load(os.path.join(dir, 'assets/monster0.png')).convert_alpha(),
"monster1": pygame.image.load(os.path.join(dir, 'assets/monster1.png')).convert_alpha(),
"monster2": pygame.image.load(os.path.join(dir, 'assets/monster2.png')).convert_alpha(),
"monster3": pygame.image.load(os.path.join(dir, 'assets/monster3.png')).convert_alpha(),
"monster01": pygame.image.load(os.path.join(dir, 'assets/monster01.png')).convert_alpha(),
"monster11": pygame.image.load(os.path.join(dir, 'assets/monster11.png')).convert_alpha(),
"monster21": pygame.image.load(os.path.join(dir, 'assets/monster21.png')).convert_alpha(),
"monster31": pygame.image.load(os.path.join(dir, 'assets/monster31.png')).convert_alpha(),
"monsterstill0": pygame.image.load(os.path.join(dir, 'assets/monsterstill0.png')).convert_alpha(),
"monsterstill10": pygame.image.load(os.path.join(dir, 'assets/monsterstill10.png')).convert_alpha(),
"monsterstill1": pygame.image.load(os.path.join(dir, 'assets/monsterstill1.png')).convert_alpha(),
"monsterstill11": pygame.image.load(os.path.join(dir, 'assets/monsterstill11.png')).convert_alpha()
}
# Getters and Setters
def getSpeed(self):
return self.__speed
    def setSpeed(self, speed):
        self.__speed = speed
def getStopDuration(self):
return self.__stopDuration
def setStopDuration(self, stopDuration):
self.__stopDuration = stopDuration
# Checks for collisions with walls in order to change direction when hit
# by a wall
def checkWall(self, colliderGroup):
if self.__direction == 0:
# Right collision with wall
self.updateWH(self.image, "H", 20, 40, 40)
if self.__direction == 1:
# Left collision with wall
self.updateWH(self.image, "H", -20, 40, 40)
Colliders = pygame.sprite.spritecollide(self, colliderGroup, False)
if self.__direction == 0:
# Right collision with wall
self.updateWH(self.image, "H", -20, 40, 40)
if self.__direction == 1:
# Left collision with wall
self.updateWH(self.image, "H", 20, 40, 40)
return Colliders
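    # The probe above shifts the hitbox 20 px in the walking direction, collects the
    # collisions, then undoes the shift, so the monster turns around just before it
    # would overlap a wall.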
# This is used to animate the monster
def continuousUpdate(self, GroupList, GroupList2):
# If the stop duration is 0 then monster is currently moving either
# left or right
if self.__stopDuration == 0:
# Currently moving right
if self.__direction == 0:
self.__cycles += 1
if self.__cycles % 24 < 6:
self.updateWH(
self.IMAGES["monster0"], "H", self.__speed, 45, 45)
elif self.__cycles % 24 < 12:
self.updateWH(
self.IMAGES["monster1"], "H", self.__speed, 45, 45)
elif self.__cycles % 24 < 18:
self.updateWH(
self.IMAGES["monster2"], "H", self.__speed, 45, 45)
else:
self.updateWH(
self.IMAGES["monster3"], "H", self.__speed, 45, 45)
if self.checkWall(GroupList):
self.__direction = 1
self.__cycles = 0
self.updateWH(self.image, "H", -self.__speed, 45, 45)
# Currently moving left
else:
self.__cycles += 1
if self.__cycles % 24 < 6:
self.updateWH(
self.IMAGES["monster01"], "H", -self.__speed, 45, 45)
elif self.__cycles % 24 < 12:
self.updateWH(
self.IMAGES["monster11"], "H", -self.__speed, 45, 45)
elif self.__cycles % 24 < 18:
self.updateWH(
self.IMAGES["monster21"], "H", -self.__speed, 45, 45)
else:
self.updateWH(
self.IMAGES["monster31"], "H", -self.__speed, 45, 45)
if self.checkWall(GroupList):
self.__direction = 0
self.__cycles = 0
self.updateWH(self.image, "H", self.__speed, 45, 45)
# Donkey Kong is currently not moving, which means he is launching a
# fireball
else:
self.__stopDuration -= 1
if self.__stopDuration == 0: # Once he finishes launching a fireball, we go back to our normal movement animation
self.updateWH(self.image, "V", 12, 50, 50)
if self.__stopDuration >= 10:
if self.__direction == 0:
self.updateWH(self.IMAGES["monsterstill0"], "H", 0, 45, 45)
else:
self.updateWH(
self.IMAGES["monsterstill10"], "H", 0, 45, 45)
elif self.__stopDuration >= 5:
if self.__direction == 0:
self.updateWH(self.IMAGES["monsterstill1"], "H", 0, 45, 45)
else:
self.updateWH(
self.IMAGES["monsterstill11"], "H", 0, 45, 45)
else:
if self.__direction == 0:
self.updateWH(self.IMAGES["monsterstill0"], "H", 0, 45, 45)
else:
self.updateWH(
self.IMAGES["monsterstill10"], "H", 0, 45, 45)
| mit | 3,208,077,670,063,567,000 | 43.744526 | 126 | 0.528222 | false |
rsheftel/pandas_market_calendars | tests/test_market_calendar.py | 1 | 21687 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import time
from itertools import chain
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_index_equal, assert_series_equal
from pandas.tseries.holiday import AbstractHolidayCalendar
from pytz import timezone
from pandas_market_calendars import get_calendar, get_calendar_names
from pandas_market_calendars.holidays_us import (Christmas, HurricaneSandyClosings, MonTuesThursBeforeIndependenceDay,
USNationalDaysofMourning, USNewYearsDay)
from pandas_market_calendars.market_calendar import MarketCalendar, clean_dates, days_at_time
class FakeCalendar(MarketCalendar):
@property
def open_time_default(self):
return time(11, 13)
@property
def close_time_default(self):
return time(11, 49)
@property
def name(self):
return "DMY"
@property
def tz(self):
return timezone("Asia/Ulaanbaatar")
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[USNewYearsDay, Christmas])
@property
def adhoc_holidays(self):
return list(chain(HurricaneSandyClosings, USNationalDaysofMourning))
@property
def special_opens(self):
return [(time(11, 15), AbstractHolidayCalendar(rules=[MonTuesThursBeforeIndependenceDay]))]
@property
def special_opens_adhoc(self):
return [(time(11, 20), ['2016-12-13'])]
@property
def special_closes(self):
return [(time(11, 30), AbstractHolidayCalendar(rules=[MonTuesThursBeforeIndependenceDay]))]
@property
def special_closes_adhoc(self):
return [(time(11, 40), ['2016-12-14'])]
class FakeBreakCalendar(MarketCalendar):
@property
def open_time_default(self):
return time(9, 30)
@property
def close_time_default(self):
return time(12, 00)
@property
def break_start(self):
return time(10, 00)
@property
def break_end(self):
return time(11, 00)
@property
def name(self):
return "BRK"
@property
def tz(self):
return timezone("America/New_York")
@property
def regular_holidays(self):
return AbstractHolidayCalendar(rules=[USNewYearsDay, Christmas])
@property
def special_opens_adhoc(self):
return [(time(10, 20), ['2016-12-29'])]
@property
def special_closes_adhoc(self):
return [(time(10, 40), ['2016-12-30'])]
@pytest.fixture
def patch_get_current_time(monkeypatch):
def get_fake_time():
return pd.Timestamp('2014-07-02 03:40', tz='UTC')
monkeypatch.setattr(MarketCalendar, '_get_current_time', get_fake_time)
def test_default_calendars():
for name in get_calendar_names():
print(name)
assert get_calendar(name) is not None
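# For orientation (not an assertion-backed test), typical library usage looks like:
#   nyse = get_calendar('NYSE')
#   schedule = nyse.schedule('2016-12-01', '2016-12-31')
# which returns a DataFrame of UTC market_open / market_close timestamps per session.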
def test_days_at_time():
def dat(day, day_offset, time_offset, tz, expected):
days = pd.DatetimeIndex([pd.Timestamp(day, tz=tz)])
result = days_at_time(days, time_offset, tz, day_offset)[0]
expected = pd.Timestamp(expected, tz=tz).tz_convert('UTC')
assert result == expected
args_list = [
# NYSE standard day
(
'2016-07-19', 0, time(9, 31), timezone('America/New_York'),
'2016-07-19 9:31',
),
# CME standard day
(
'2016-07-19', -1, time(17, 1), timezone('America/Chicago'),
'2016-07-18 17:01',
),
# CME day after DST start
(
'2004-04-05', -1, time(17, 1), timezone('America/Chicago'),
'2004-04-04 17:01'
),
# ICE day after DST start
(
'1990-04-02', -1, time(19, 1), timezone('America/Chicago'),
'1990-04-01 19:01',
),
]
for args in args_list:
dat(args[0], args[1], args[2], args[3], args[4])
def test_clean_dates():
start, end = clean_dates('2016-12-01', '2016-12-31')
assert start == pd.Timestamp('2016-12-01')
assert end == pd.Timestamp('2016-12-31')
start, end = clean_dates('2016-12-01 12:00', '2016-12-31 12:00')
assert start == pd.Timestamp('2016-12-01')
assert end == pd.Timestamp('2016-12-31')
start, end = clean_dates(pd.Timestamp('2016-12-01', tz='America/Chicago'),
pd.Timestamp('2016-12-31', tz='America/New_York'))
assert start == pd.Timestamp('2016-12-01')
assert end == pd.Timestamp('2016-12-31')
start, end = clean_dates(pd.Timestamp('2016-12-01 09:31', tz='America/Chicago'),
pd.Timestamp('2016-12-31 16:00', tz='America/New_York'))
assert start == pd.Timestamp('2016-12-01')
assert end == pd.Timestamp('2016-12-31')
def test_properties():
cal = FakeCalendar()
assert cal.name == 'DMY'
assert cal.tz == timezone('Asia/Ulaanbaatar')
def test_holidays():
cal = FakeCalendar()
actual = cal.holidays().holidays
assert pd.Timestamp('2016-12-26') in actual
assert pd.Timestamp('2012-01-02') in actual
assert pd.Timestamp('2012-12-25') in actual
assert pd.Timestamp('2012-10-29') in actual
assert pd.Timestamp('2012-10-30') in actual
def test_valid_dates():
cal = FakeCalendar()
expected = pd.DatetimeIndex([pd.Timestamp(x, tz='UTC') for x in ['2016-12-23', '2016-12-27', '2016-12-28',
'2016-12-29', '2016-12-30', '2017-01-03']])
actual = cal.valid_days('2016-12-23', '2017-01-03')
assert_index_equal(actual, expected)
def test_schedule():
cal = FakeCalendar()
assert cal.open_time == time(11, 13)
assert cal.close_time == time(11, 49)
expected = pd.DataFrame({'market_open': [pd.Timestamp('2016-12-01 03:13:00', tz='UTC'),
pd.Timestamp('2016-12-02 03:13:00', tz='UTC')],
'market_close': [pd.Timestamp('2016-12-01 03:49:00', tz='UTC'),
pd.Timestamp('2016-12-02 03:49:00', tz='UTC')]},
columns=['market_open', 'market_close'],
index=[pd.Timestamp('2016-12-01'), pd.Timestamp('2016-12-02')])
actual = cal.schedule('2016-12-01', '2016-12-02')
assert_frame_equal(actual, expected)
results = cal.schedule('2016-12-01', '2016-12-31')
assert len(results) == 21
expected = pd.Series({'market_open': pd.Timestamp('2016-12-01 03:13:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-01 03:49:00+0000', tz='UTC', freq='B')},
name=pd.Timestamp('2016-12-01'), index=['market_open', 'market_close'])
# because of change in pandas in v0.24, pre-0.24 versions need object dtype
if pd.__version__ < '0.24':
expected = expected.astype(object)
assert_series_equal(results.iloc[0], expected)
expected = pd.Series({'market_open': pd.Timestamp('2016-12-30 03:13:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-30 03:49:00+0000', tz='UTC', freq='B')},
name=pd.Timestamp('2016-12-30'), index=['market_open', 'market_close'])
# because of change in pandas in v0.24, pre-0.24 versions need object dtype
if pd.__version__ < '0.24':
expected = expected.astype(object)
assert_series_equal(results.iloc[-1], expected)
# one day schedule
expected = pd.DataFrame({'market_open': pd.Timestamp('2016-12-01 03:13:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-01 03:49:00+0000', tz='UTC', freq='B')},
index=pd.DatetimeIndex([pd.Timestamp('2016-12-01')], freq='C'),
columns=['market_open', 'market_close'])
actual = cal.schedule('2016-12-01', '2016-12-01')
if pd.__version__ < '1.1.0':
assert_frame_equal(actual, expected)
else:
assert_frame_equal(actual, expected, check_freq=False)
# start date after end date
with pytest.raises(ValueError):
cal.schedule('2016-02-02', '2016-01-01')
# using a different time zone
expected = pd.DataFrame({'market_open': pd.Timestamp('2016-11-30 22:13:00-05:00', tz='US/Eastern', freq='B'),
'market_close': pd.Timestamp('2016-11-30 22:49:00-05:00', tz='US/Eastern', freq='B')},
index=pd.DatetimeIndex([pd.Timestamp('2016-12-01')]),
columns=['market_open', 'market_close'])
actual = cal.schedule('2016-12-01', '2016-12-01', tz='US/Eastern')
if pd.__version__ < '1.1.0':
assert_frame_equal(actual, expected)
else:
assert_frame_equal(actual, expected, check_freq=False)
def test_schedule_w_breaks():
cal = FakeBreakCalendar()
assert cal.open_time == time(9, 30)
assert cal.close_time == time(12, 00)
assert cal.break_start == time(10, 00)
assert cal.break_end == time(11, 00)
expected = pd.DataFrame({'market_open': [pd.Timestamp('2016-12-01 14:30:00', tz='UTC'),
pd.Timestamp('2016-12-02 14:30:00', tz='UTC')],
'market_close': [pd.Timestamp('2016-12-01 17:00:00', tz='UTC'),
pd.Timestamp('2016-12-02 17:00:00', tz='UTC')],
'break_start': [pd.Timestamp('2016-12-01 15:00:00', tz='UTC'),
pd.Timestamp('2016-12-02 15:00:00', tz='UTC')],
'break_end': [pd.Timestamp('2016-12-01 16:00:00', tz='UTC'),
pd.Timestamp('2016-12-02 16:00:00', tz='UTC')]
},
columns=['market_open', 'market_close', 'break_start', 'break_end'],
index=[pd.Timestamp('2016-12-01'), pd.Timestamp('2016-12-02')])
actual = cal.schedule('2016-12-01', '2016-12-02')
assert_frame_equal(actual, expected)
results = cal.schedule('2016-12-01', '2016-12-31')
assert len(results) == 21
expected = pd.Series({'market_open': pd.Timestamp('2016-12-01 14:30:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-01 17:00:00+0000', tz='UTC', freq='B'),
'break_start': pd.Timestamp('2016-12-01 15:00:00+0000', tz='UTC', freq='B'),
'break_end': pd.Timestamp('2016-12-01 16:00:00+0000', tz='UTC', freq='B')
},
name=pd.Timestamp('2016-12-01'), index=['market_open', 'market_close', 'break_start',
'break_end'])
assert_series_equal(results.iloc[0], expected)
# special open is after break start
expected = pd.Series({'market_open': pd.Timestamp('2016-12-29 15:20:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-29 17:00:00+0000', tz='UTC', freq='B'),
'break_start': pd.Timestamp('2016-12-29 15:20:00+0000', tz='UTC', freq='B'),
'break_end': pd.Timestamp('2016-12-29 16:00:00+0000', tz='UTC', freq='B')},
name=pd.Timestamp('2016-12-29'), index=['market_open', 'market_close', 'break_start',
'break_end'])
assert_series_equal(results.iloc[-2], expected)
# special close is before break end
expected = pd.Series({'market_open': pd.Timestamp('2016-12-30 14:30:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-30 15:40:00+0000', tz='UTC', freq='B'),
'break_start': pd.Timestamp('2016-12-30 15:00:00+0000', tz='UTC', freq='B'),
'break_end': pd.Timestamp('2016-12-30 15:40:00+0000', tz='UTC', freq='B')},
name=pd.Timestamp('2016-12-30'), index=['market_open', 'market_close', 'break_start',
'break_end'])
assert_series_equal(results.iloc[-1], expected)
# using a different time zone
expected = pd.DataFrame({'market_open': pd.Timestamp('2016-12-28 09:30:00-05:00', tz='America/New_York', freq='B'),
'market_close': pd.Timestamp('2016-12-28 12:00:00-05:00', tz='America/New_York', freq='B'),
'break_start': pd.Timestamp('2016-12-28 10:00:00-05:00', tz='America/New_York', freq='B'),
'break_end': pd.Timestamp('2016-12-28 11:00:00-05:00', tz='America/New_York', freq='B')},
index=pd.DatetimeIndex([pd.Timestamp('2016-12-28')], freq='C'),
columns=['market_open', 'market_close', 'break_start', 'break_end'])
actual = cal.schedule('2016-12-28', '2016-12-28', tz='America/New_York')
if pd.__version__ < '1.1.0':
assert_frame_equal(actual, expected)
else:
assert_frame_equal(actual, expected, check_freq=False)
def test_schedule_w_times():
cal = FakeCalendar(time(12, 12), time(13, 13))
assert cal.open_time == time(12, 12)
assert cal.close_time == time(13, 13)
results = cal.schedule('2016-12-01', '2016-12-31')
assert len(results) == 21
expected = pd.Series({'market_open': pd.Timestamp('2016-12-01 04:12:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-01 05:13:00+0000', tz='UTC', freq='B')},
name=pd.Timestamp('2016-12-01'), index=['market_open', 'market_close'])
# because of change in pandas in v0.24, pre-0.24 versions need object dtype
if pd.__version__ < '0.24':
expected = expected.astype(object)
assert_series_equal(results.iloc[0], expected)
expected = pd.Series({'market_open': pd.Timestamp('2016-12-30 04:12:00+0000', tz='UTC', freq='B'),
'market_close': pd.Timestamp('2016-12-30 05:13:00+0000', tz='UTC', freq='B')},
name=pd.Timestamp('2016-12-30'), index=['market_open', 'market_close'])
# because of change in pandas in v0.24, pre-0.24 versions need object dtype
if pd.__version__ < '0.24':
expected = expected.astype(object)
assert_series_equal(results.iloc[-1], expected)
def test_regular_holidays():
cal = FakeCalendar()
results = cal.schedule('2016-12-01', '2017-01-05')
days = results.index
# check regular holidays
# Christmas
assert pd.Timestamp('2016-12-23') in days
assert pd.Timestamp('2016-12-26') not in days
# New Years
assert pd.Timestamp('2017-01-02') not in days
assert pd.Timestamp('2017-01-03') in days
def test_adhoc_holidays():
cal = FakeCalendar()
results = cal.schedule('2012-10-15', '2012-11-15')
days = results.index
# check adhoc holidays
# Hurricane Sandy
assert pd.Timestamp('2012-10-26') in days
assert pd.Timestamp('2012-10-29') not in days
assert pd.Timestamp('2012-10-30') not in days
assert pd.Timestamp('2012-10-31') in days
def test_special_opens():
cal = FakeCalendar()
results = cal.schedule('2012-07-01', '2012-07-06')
opens = results['market_open'].tolist()
# confirm that the day before July 4th is an 11:15 open not 11:13
assert pd.Timestamp('2012-07-02 11:13', tz='Asia/Ulaanbaatar').tz_convert('UTC') in opens
assert pd.Timestamp('2012-07-03 11:15', tz='Asia/Ulaanbaatar').tz_convert('UTC') in opens
assert pd.Timestamp('2012-07-04 11:13', tz='Asia/Ulaanbaatar').tz_convert('UTC') in opens
def test_special_opens_adhoc():
cal = FakeCalendar()
results = cal.schedule('2016-12-10', '2016-12-20')
opens = results['market_open'].tolist()
# confirm that 2016-12-13 is an 11:20 open not 11:13
assert pd.Timestamp('2016-12-12 11:13', tz='Asia/Ulaanbaatar').tz_convert('UTC') in opens
assert pd.Timestamp('2016-12-13 11:20', tz='Asia/Ulaanbaatar').tz_convert('UTC') in opens
assert pd.Timestamp('2016-12-14 11:13', tz='Asia/Ulaanbaatar').tz_convert('UTC') in opens
def test_special_closes():
cal = FakeCalendar()
results = cal.schedule('2012-07-01', '2012-07-06')
closes = results['market_close'].tolist()
# confirm that the day before July 4th is an 11:30 close not 11:49
assert pd.Timestamp('2012-07-02 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
assert pd.Timestamp('2012-07-03 11:30', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
assert pd.Timestamp('2012-07-04 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
# early close first date
results = cal.schedule('2012-07-03', '2012-07-04')
actual = results['market_close'].tolist()
expected = [pd.Timestamp('2012-07-03 11:30', tz='Asia/Ulaanbaatar').tz_convert('UTC'),
pd.Timestamp('2012-07-04 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC')]
assert actual == expected
# early close last date
results = cal.schedule('2012-07-02', '2012-07-03')
actual = results['market_close'].tolist()
expected = [pd.Timestamp('2012-07-02 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC'),
pd.Timestamp('2012-07-03 11:30', tz='Asia/Ulaanbaatar').tz_convert('UTC')]
assert actual == expected
def test_special_closes_adhoc():
cal = FakeCalendar()
results = cal.schedule('2016-12-10', '2016-12-20')
closes = results['market_close'].tolist()
# confirm that 2016-12-14 is an 11:40 close not 11:49
assert pd.Timestamp('2016-12-13 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
assert pd.Timestamp('2016-12-14 11:40', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
assert pd.Timestamp('2016-12-15 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
# now with the early close as end date
results = cal.schedule('2016-12-13', '2016-12-14')
closes = results['market_close'].tolist()
assert pd.Timestamp('2016-12-13 11:49', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
assert pd.Timestamp('2016-12-14 11:40', tz='Asia/Ulaanbaatar').tz_convert('UTC') in closes
def test_early_closes():
cal = FakeCalendar()
schedule = cal.schedule('2014-01-01', '2016-12-31')
results = cal.early_closes(schedule)
assert pd.Timestamp('2014-07-03') in results.index
assert pd.Timestamp('2016-12-14') in results.index
def test_open_at_time():
cal = FakeCalendar()
schedule = cal.schedule('2014-01-01', '2016-12-31')
# regular trading day
assert cal.open_at_time(schedule, pd.Timestamp('2014-07-02 03:40', tz='UTC')) is True
# early close
assert cal.open_at_time(schedule, pd.Timestamp('2014-07-03 03:40', tz='UTC')) is False
# holiday
assert cal.open_at_time(schedule, pd.Timestamp('2014-12-25 03:30', tz='UTC')) is False
# last bar of the day defaults to False
assert cal.open_at_time(schedule, pd.Timestamp('2016-09-07 11:49', tz='Asia/Ulaanbaatar')) is False
# last bar of the day is True if include_close is True
assert cal.open_at_time(schedule, pd.Timestamp('2016-09-07 11:49', tz='Asia/Ulaanbaatar'),
include_close=True) is True
# equivalent to 2014-07-02 03:40 UTC
assert cal.open_at_time(schedule, pd.Timestamp('2014-07-01 23:40:00-0400', tz='America/New_York')) is True
def test_open_at_time_breaks():
cal = FakeBreakCalendar()
schedule = cal.schedule('2016-12-20', '2016-12-30')
# between open and break
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 09:50', tz='America/New_York')) is True
# at break start
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 10:00', tz='America/New_York')) is False
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 10:00', tz='America/New_York'), include_close=True) is True
# during break
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 10:30', tz='America/New_York')) is False
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 10:59', tz='America/New_York')) is False
# at break end
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 11:00', tz='America/New_York')) is True
# between break and close
assert cal.open_at_time(schedule, pd.Timestamp('2016-12-28 11:30', tz='America/New_York')) is True
def test_is_open_now(patch_get_current_time):
cal = FakeCalendar()
schedule = cal.schedule('2014-01-01', '2016-12-31')
assert cal.is_open_now(schedule) is True
def test_bad_dates():
cal = FakeCalendar()
empty = pd.DataFrame(columns=['market_open', 'market_close'], index=pd.DatetimeIndex([], freq='C'))
# single weekend date
schedule = cal.schedule('2018-06-30', '2018-06-30')
assert_frame_equal(schedule, empty)
# two weekend dates
schedule = cal.schedule('2018-06-30', '2018-07-01')
assert_frame_equal(schedule, empty)
# single holiday
schedule = cal.schedule('2018-01-01', '2018-01-01')
assert_frame_equal(schedule, empty)
# weekend and holiday
schedule = cal.schedule('2017-12-30', '2018-01-01')
assert_frame_equal(schedule, empty)
| mit | -8,934,113,318,885,306,000 | 39.310409 | 122 | 0.602942 | false |
jordiguerrero/FresonCam | Fresonbot.py | 1 | 3705 | import Adafruit_BBIO.PWM as PWM
import Adafruit_BBIO.GPIO as GPIO
#import Adafruit_BBIO.ADC as ADC #IR
import subprocess
import math
import pinout
class Fresonbot(object):
    ticksPerTurn = 12 * 100  # 12 counts per revolution * 100:1 reduction gearbox
## you have to take this measures accurately
# WheelRadius = 31.8/2 # I took the diameter and divided by 2
# WheelDistance = 88.9 # between centers
WheelRadius = 16 # In mm
WheelDistance = 89 # In mm
duty_min = 3
duty_max = 14
duty_span = duty_max - duty_min
def __init__(self):
subprocess.call("bashScripts/enable_encoder_slots.sh")
GPIO.setup(pinout.PinMotorLeftPhase, GPIO.OUT)
GPIO.setup(pinout.PinMotorRightPhase, GPIO.OUT)
GPIO.output(pinout.PinMotorLeftPhase, 0)
GPIO.output(pinout.PinMotorRightPhase, 0)
PWM.start(pinout.PinMotorLeftPwm,0)
PWM.start(pinout.PinMotorRightPwm,0)
self.x = 0.0
self.y = 0.0
self.distance = 0.0
self.heading = 0.0
(TicksLeft, TicksRight) = self.getTicks();
self.StartTicksLeft = TicksLeft
self.StartTicksRight = TicksRight
# ADC.setup() # IR
# def motion(self,VelLeft,VelRight):
def motion(self,VelRight,VelLeft):
AbsVelLeft = abs(VelLeft)
AbsVelRight = abs(VelRight)
if (VelLeft < 0):
PhaseLeft = 1
else:
PhaseLeft = 0
if (VelRight < 0):
PhaseRight = 1
else:
PhaseRight = 0
if (AbsVelLeft > 100):
AbsVelLeft = 100
if (AbsVelRight > 100):
AbsVelRight = 100
GPIO.output(pinout.PinMotorLeftPhase, PhaseLeft)
GPIO.output(pinout.PinMotorRightPhase, PhaseRight)
PWM.set_duty_cycle(pinout.PinMotorLeftPwm,AbsVelLeft)
PWM.set_duty_cycle(pinout.PinMotorRightPwm,AbsVelRight)
return
def getTicks(self):
global TicksLeft
global TicksRight
fTicksLeft = "/sys/devices/ocp.3/48302000.epwmss/48302180.eqep/position"
fTicksRight = "/sys/devices/ocp.3/48304000.epwmss/48304180.eqep/position"
foTicksLeft = open(fTicksLeft, "r")
foTicksRight = open(fTicksRight, "r")
TicksLeft = foTicksLeft.read()
TicksLeft = int(TicksLeft.split('\n', 1)[0])
TicksRight = foTicksRight.read()
TicksRight = int(TicksRight.split('\n', 1)[0])
foTicksLeft.close()
foTicksRight.close()
return TicksLeft, TicksRight
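    # Dead-reckoning notes for getPosition() below, derived from the code
    # itself: each wheel's travel is d = 2*pi*WheelRadius*(delta_ticks/ticksPerTurn),
    # the heading change is (d_left - d_right)/WheelDistance, and the (x, y)
    # pose is integrated along the current heading. Distances are in mm and
    # the heading is kept in radians (headingDec holds the degrees copy).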
def getPosition(self):
(TicksLeft, TicksRight) = self.getTicks()
EndTicksLeft = TicksLeft
EndTicksRight = TicksRight
IncTicksLeft = EndTicksLeft - self.StartTicksLeft
IncTicksRight = EndTicksRight - self.StartTicksRight
distanceLeft = 2 * math.pi * self.WheelRadius * (float(IncTicksLeft) / self.ticksPerTurn)
distanceRight = 2 * math.pi * self.WheelRadius * (float(IncTicksRight) / self.ticksPerTurn)
newdistance = (distanceLeft + distanceRight) / 2
self.distance += newdistance
self.heading += (distanceLeft - distanceRight) / self.WheelDistance
self.x += newdistance * math.cos(self.heading)
self.y += newdistance * math.sin(self.heading)
self.headingDec = math.degrees(self.heading)
self.StartTicksLeft = EndTicksLeft
self.StartTicksRight = EndTicksRight
return (self.x, self.y, self.heading,self.distance)
def stop(self):
self.motion(0,0);
return
# def readIR(self):
# voltage = ADC.read(pinout.PinIRFront)
## return value1 * 1.8
# return 3.07427335017539*voltage**-1.18207892010248
| gpl-3.0 | -2,484,470,841,870,335,500 | 29.121951 | 99 | 0.633198 | false |
SciViews/sciviewsk | pylib/cile_r.py | 1 | 4738 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is SciViews-K by Philippe Grosjean et al.
#
# Contributor(s):
# Philippe Grosjean
# ActiveState Software Inc (code inspired from)
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
"""A Code Intelligence Language Engine for the R language.
A "Language Engine" is responsible for scanning content of
its language and generating CIX output that represents an outline of
the code elements in that content. See the CIX (Code Intelligence XML)
format:
http://community.activestate.com/faq/codeintel-cix-schema
Module Usage:
from cile_r import scan
mtime = os.stat("bar.R")[stat.ST_MTIME]
content = open("bar.R", "r").read()
scan(content, "bar.R", mtime=mtime)
"""
__version__ = "1.0.0"
import os
import sys
import time
import optparse
import logging
import pprint
import glob
# Note: c*i*ElementTree is the codeintel system's slightly modified
# cElementTree. Use it exactly as you would the normal cElementTree API:
# http://effbot.org/zone/element-index.htm
import ciElementTree as ET
from codeintel2.common import CILEError
#---- exceptions
class RCILEError(CILEError):
pass
#---- global data
log = logging.getLogger("cile.r")
#log.setLevel(logging.DEBUG)
#---- public module interface
def scan_buf(buf, mtime=None, lang="R"):
"""Scan the given RBuffer return an ElementTree (conforming
to the CIX schema) giving a summary of its code elements.
@param buf {RBuffer} is the R buffer to scan
@param mtime {int} is a modified time for the file (in seconds since
the "epoch"). If it is not specified the _current_ time is used.
Note that the default is not to stat() the file and use that
because the given content might not reflect the saved file state.
"""
# Dev Notes:
# - This stub implementation of the R CILE return an "empty"
# summary for the given content, i.e. CIX content that says "there
# are no code elements in this R content".
# - Use the following command (in the extension source dir) to
# debug/test your scanner:
# codeintel scan -p -l R <example-R-file>
# "codeintel" is a script available in the Komodo SDK.
log.info("scan '%s'", buf.path)
if mtime is None:
mtime = int(time.time())
# The 'path' attribute must use normalized dir separators.
if sys.platform.startswith("win"):
path = buf.path.replace('\\', '/')
else:
path = buf.path
tree = ET.Element("codeintel", version="2.0",
xmlns="urn:activestate:cix:2.0")
file = ET.SubElement(tree, "file", lang=lang, mtime=str(mtime))
blob = ET.SubElement(file, "scope", ilk="blob", lang=lang,
name=os.path.basename(path))
# Dev Note:
# This is where you process the R content and add CIX elements
# to 'blob' as per the CIX schema (cix-2.0.rng). Use the
# "buf.accessor" API (see class Accessor in codeintel2.accessor) to
# analyze. For example:
# - A token stream of the content is available via:
# buf.accessor.gen_tokens()
# Use the "codeintel html -b <example-R-file>" command as
# a debugging tool.
# - "buf.accessor.text" is the whole content of the file. If you have
# a separate tokenizer/scanner tool for R content, you may
# want to use it.
return tree
| lgpl-2.1 | 2,992,210,303,469,165,000 | 35.446154 | 76 | 0.688898 | false |
orionzhou/robin | formats/fastq.py | 1 | 37468 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Processing fastq files
"""
import os.path as op
import sys
import re
import logging
import json
import gzip
from itertools import islice
from Bio import SeqIO
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from maize.formats.base import must_open, DictFile
from maize.utils.cbook import percentage
from maize.apps.base import sh, which, mkdir, need_update
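# NOTE: several legacy sub-commands below (uniq, suffix, filter, trim, ...)
# still call an ``OptionParser`` helper (apparently jcvi-style) that is not
# imported in this module; until they are ported to argparse they will raise
# NameError when invoked.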
FastqExt = ("fastq", "fq")
qual_offset = lambda x: 33 if x == "sanger" else 64
allowed_dialect_conversions = {
    ">=1.8": ("<1.8",),
    "sra": ("<1.8",),
}
class FastqLite (object):
def __init__(self, name, seq, qual):
self.name = name
self.seq = seq
self.qual = qual
def __str__(self):
return "\n".join((self.name, self.seq, "+", self.qual))
def rc(self):
self.seq = rc(self.seq)
self.qual = self.qual[::-1]
class FastqRecord (object):
def __init__(self, fh, offset=0, key=None):
self.name = self.header = fh.readline()
if not self.name:
return
self.name = self.name.split()[0]
self.seq = fh.readline().rstrip()
self.l3 = fh.readline().rstrip()
self.qual = fh.readline().rstrip()
if offset != 0:
self.qual = "".join(chr(ord(x) + offset) for x in self.qual)
self.length = len(self.seq)
assert self.length == len(self.qual), \
"length mismatch: seq(%s) and qual(%s)" % (self.seq, self.qual)
if key:
self.name = key(self.name)
def __str__(self):
return "\n".join((self.name, self.seq, "+", self.qual))
def __len__(self):
return self.length
@property
def quality(self):
return [ord(x) for x in self.qual]
class FastqHeader(object):
def __init__(self, row):
header = row.strip().split(" ")
self.readId, self.readLen, self.readNum = None, None, None
self.multiplexId = 0
self.paired = False
if len(header) == 3 and "length" in header[2]:
self.dialect = "sra"
self.readId = header[0].lstrip('@')
m = re.search("length\=(\d+)", header[2])
if m:
self.readLen = m.group(1)
h = header[1].split(":")
self.instrument = h[0]
if len(h) == 7:
self.runId, self.flowcellId = int(h[1]), h[2]
self.laneNum, self.tileNum = int(h[3]), int(h[4])
self.xPos, self.yPos = h[5], h[6]
else:
self.runId, self.flowcellId = None, None
self.laneNum, self.tileNum = int(h[1]), int(h[2])
self.xPos, self.yPos = h[3], h[4]
else:
h = header[0].split(":")
self.instrument = h[0].lstrip('@')
if len(header) == 2 and header[1].find(":"):
self.dialect = ">=1.8" # Illumina Casava 1.8+ format
self.runId = int(h[1])
self.flowcellId = h[2]
self.laneNum = int(h[3])
self.tileNum = int(h[4])
self.xPos = int(h[5])
self.yPos = h[6]
if re.search("/", self.yPos):
self.paired = True
self.yPos, self.readNum = self.yPos.split("/")
a = header[1].split(":")
self.readNum = int(a[0])
self.isFiltered = a[1]
self.controlNum = int(a[2])
self.barcode = a[3]
else:
self.dialect = "<1.8" # Old Illumina Casava format (< 1.8)
self.laneNum = int(h[1])
self.tileNum = int(h[2])
self.xPos = int(h[3])
self.yPos = h[4]
m = re.search(r"(\d+)(#\S+)\/(\d+)", self.yPos)
if m:
self.paired = True
self.yPos, self.multiplexId, self.readNum = \
m.group(1), m.group(2), m.group(3)
def __str__(self):
if self.dialect == "sra":
h0 = self.readId
if self.readNum:
h0 += "/{0}".format(self.readNum)
h1elems = [self.instrument, self.laneNum, self.tileNum, \
self.xPos, self.yPos]
if self.runId and self.flowcellId:
h1elems[1:1] = [self.runId, self.flowcellId]
h1 = ":".join(str(x) for x in h1elems)
h2 = "length={0}".format(self.readLen)
return "@{0} {1} {2}".format(h0, h1, h2)
elif self.dialect == ">=1.8":
yPos = "{0}/{1}".format(self.yPos, self.readNum) if self.paired \
else self.yPos
h0 = ":".join(str(x) for x in (self.instrument, self.runId, \
self.flowcellId, self.laneNum, self.tileNum, \
self.xPos, yPos))
h1 = ":".join(str(x) for x in (self.readNum, self.isFiltered, \
self.controlNum, self.barcode))
return "@{0} {1}".format(h0, h1)
else:
yPos = "{0}#{1}/{2}".format(self.yPos, self.multiplexId, \
self.readNum) if self.paired else self.yPos
h0 = ":".join(str(x) for x in (self.instrument, self.laneNum, \
self.tileNum, self.xPos, yPos))
return "@{0}".format(h0)
def format_header(self, dialect=None, tag=None):
if dialect:
if self.dialect == dialect:
logging.error("Input and output dialect are the same")
elif dialect not in allowed_dialect_conversions[self.dialect]:
logging.error("Error: Cannot convert from `{0}` to `{1}` dialect".format(self.dialect, dialect))
logging.error("Allowed conversions: {0}".format(json.dumps(allowed_dialect_conversions, indent=4)))
sys.exit()
else:
self.dialect = dialect
if tag:
readNum = tag.split("/")[1]
self.readNum = readNum
self.paired = True
return str(self)
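# Illustrative use of FastqHeader (the header below is a made-up sample, not
# from a real run):
#   h = FastqHeader("@M00123:45:000000000-A1B2C:1:1101:15589:1333 1:N:0:ATCACG")
#   h.dialect   # '>=1.8'
#   h.readNum   # 1
#   str(h)      # rebuilds the header line in the current dialect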
def pairspf(pp, commonprefix=True):
if commonprefix:
pf = op.commonprefix(pp).rstrip("._-")
else:
pf = min(pp)
pf = op.basename(pf)
if not pf.strip():
pf = op.basename(pp[0])
return pf
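# Worked example (hypothetical file names):
#   pairspf(["sample_R1.fastq", "sample_R2.fastq"]) -> "sample_R"
# With commonprefix=False the basename of the lexicographically smaller name
# is returned instead.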
def iter_fastq(filename, offset=0, key=None):
if isinstance(filename, str):
logging.debug("Read file `{0}`".format(filename))
fh = must_open(filename)
else:
fh = filename
while True:
rec = FastqRecord(fh, offset=offset, key=key)
if not rec.name:
break
yield rec
yield None # sentinel
def uniq(args):
"""
%prog uniq fastqfile
Retain only first instance of duplicate reads. Duplicate is defined as
having the same read name.
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
fw = must_open(args.outfile, "w")
nduplicates = nreads = 0
seen = set()
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
name = rec.name
if name in seen:
nduplicates += 1
continue
seen.add(name)
        print(rec, file=fw)
logging.debug("Removed duplicate reads: {}".\
format(percentage(nduplicates, nreads)))
def suffix(args):
"""
%prog suffix fastqfile CAG
Filter reads based on suffix.
"""
p = OptionParser(suffix.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
fastqfile, sf = args
fw = must_open(args.outfile, "w")
nreads = nselected = 0
for rec in iter_fastq(fastqfile):
nreads += 1
if rec is None:
break
if rec.seq.endswith(sf):
            print(rec, file=fw)
nselected += 1
logging.debug("Selected reads with suffix {0}: {1}".\
format(sf, percentage(nselected, nreads)))
def calc_readlen(f, first):
from maize.utils.cbook import SummaryStats
L = []
ai = iter_fastq(f)
    rec = next(ai)
while rec:
L.append(rec.length)
if len(L) > first:
break
        rec = next(ai)
s = SummaryStats(L)
return s
def is_fastq(f):
fq = f.replace(".gz", "") if f.endswith(".gz") else f
return fq.endswith((".fastq", ".fq"))
def readlen(args):
"""
%prog readlen fastqfile
Calculate read length, will only try the first N reads. Output min, max, and
avg for each file.
"""
p = OptionParser(readlen.__doc__)
p.set_firstN()
sp1.add_argument("--silent", default=False, action="store_true",
help="Do not print read length stats")
sp1.add_argument("--nocheck", default=False, action="store_true",
help="Do not check file type suffix")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
f, = args
if (not args.nocheck) and (not is_fastq(f)):
logging.debug("File `{}` does not endswith .fastq or .fq".format(f))
return 0
s = calc_readlen(f, args.firstN)
if not args.silent:
print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))
return int(s.max)
def fasta(args):
"""
%prog fasta fastqfiles
Convert fastq to fasta and qual file.
"""
p = OptionParser(fasta.__doc__)
sp1.add_argument("--seqtk", default=False, action="store_true",
help="Use seqtk to convert")
p.set_outdir()
p.set_outfile(outfile=None)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastqfiles = args
outdir = args.outdir
if outdir and outdir != ".":
mkdir(outdir)
fastqfile = fastqfiles[0]
pf = op.basename(fastqfile)
gzinput = pf.endswith(".gz")
if gzinput:
pf = pf.rsplit(".", 1)[0]
pf, sf = pf.rsplit(".", 1)
if sf not in ("fq", "fastq"):
logging.debug("Assumed FASTA: suffix not `fq` or `fastq`")
return fastqfile, None
fastafile, qualfile = pf + ".fasta", pf + ".qual"
outfile = args.outfile or fastafile
outfile = op.join(outdir, outfile)
if args.seqtk:
if need_update(fastqfiles, outfile):
for i, fastqfile in enumerate(fastqfiles):
cmd = "seqtk seq -A {0} -L 30 -l 70".format(fastqfile)
# First one creates file, following ones append to it
sh(cmd, outfile=outfile, append=i)
else:
logging.debug("Outfile `{0}` already exists.".format(outfile))
return outfile, None
for fastqfile in fastqfiles:
SeqIO.convert(fastqfile, "fastq", fastafile, "fasta")
SeqIO.convert(fastqfile, "fastq", qualfile, "qual")
return fastafile, qualfile
def first(args):
"""
%prog first N fastqfile(s)
Get first N reads from file.
"""
from maize.apps.base import need_update
p = OptionParser(first.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
N = int(args[0])
nlines = N * 4
fastqfiles = args[1:]
fastqfile = fastqfiles[0]
outfile = args.outfile
if not need_update(fastqfiles, outfile):
logging.debug("File `{0}` exists. Will not overwrite.".format(outfile))
return
gz = fastqfile.endswith(".gz")
for fastqfile in fastqfiles:
if gz:
cmd = "zcat {0} | head -n {1}".format(fastqfile, nlines)
else:
cmd = "head -n {0} {1}".format(nlines, fastqfile)
sh(cmd, outfile=args.outfile, append=True)
def FastqPairedIterator(read1, read2):
if read1 == read2:
p1fp = p2fp = must_open(read1)
else:
p1fp = must_open(read1)
p2fp = must_open(read2)
return p1fp, p2fp
def isHighQv(qs, qvchar, pct=90):
cutoff = len(qs) * pct / 100
highs = sum(1 for x in qs if x >= qvchar)
return highs >= cutoff
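# Worked example: with a Sanger offset of 33 and qv=20, qvchar is
# chr(33 + 20) == '5', so isHighQv(qs, '5', pct=95) checks whether at least
# 95% of the quality characters in qs are >= '5'.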
def filter(args):
"""
%prog filter paired.fastq
Filter to get high qv reads. Use interleaved format (one file) or paired
format (two files) to filter on paired reads.
"""
p = OptionParser(filter.__doc__)
sp1.add_argument("-q", dest="qv", default=20, type="int",
help="Minimum quality score to keep [default: %default]")
sp1.add_argument("-p", dest="pct", default=95, type="int",
help="Minimum percent of bases that have [-q] quality "\
"[default: %default]")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 1:
r1 = r2 = args[0]
else:
r1, r2 = args
qv = args.qv
pct = args.pct
offset = guessoffset([r1])
qvchar = chr(offset + qv)
logging.debug("Call base qv >= {0} as good.".format(qvchar))
outfile = r1.rsplit(".", 1)[0] + ".q{0}.paired.fastq".format(qv)
fw = open(outfile, "w")
p1fp, p2fp = FastqPairedIterator(r1, r2)
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
q1 = a[-1].rstrip()
q2 = b[-1].rstrip()
if isHighQv(q1, qvchar, pct=pct) and isHighQv(q2, qvchar, pct=pct):
fw.writelines(a)
fw.writelines(b)
def checkShuffleSizes(p1, p2, pairsfastq, extra=0):
from maize.apps.base import getfilesize
pairssize = getfilesize(pairsfastq)
p1size = getfilesize(p1)
p2size = getfilesize(p2)
assert pairssize == p1size + p2size + extra, \
"The sizes do not add up: {0} + {1} + {2} != {3}".\
format(p1size, p2size, extra, pairssize)
def shuffle(args):
"""
%prog shuffle p1.fastq p2.fastq
Shuffle pairs into interleaved format.
"""
p = OptionParser(shuffle.__doc__)
p.set_tag()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
p1, p2 = args
pairsfastq = pairspf((p1, p2)) + ".fastq"
tag = args.tag
p1fp = must_open(p1)
p2fp = must_open(p2)
pairsfw = must_open(pairsfastq, "w")
nreads = 0
while True:
a = list(islice(p1fp, 4))
if not a:
break
b = list(islice(p2fp, 4))
if tag:
name = a[0].rstrip()
a[0] = name + "/1\n"
b[0] = name + "/2\n"
pairsfw.writelines(a)
pairsfw.writelines(b)
nreads += 2
pairsfw.close()
extra = nreads * 2 if tag else 0
checkShuffleSizes(p1, p2, pairsfastq, extra=extra)
logging.debug("File `{0}` verified after writing {1} reads.".\
format(pairsfastq, nreads))
return pairsfastq
def split(args):
"""
%prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776>
"""
from maize.apps.grid import Jobs
p = OptionParser(split.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
gz = pairsfastq.endswith(".gz")
pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0]
p1 = pf + ".1.fastq"
p2 = pf + ".2.fastq"
cmd = "zcat" if gz else "cat"
p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq)
p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq)
if gz:
p1cmd += " | gzip"
p2cmd += " | gzip"
p1 += ".gz"
p2 += ".gz"
p1cmd += " > " + p1
p2cmd += " > " + p2
args = [(p1cmd, ), (p2cmd, )]
m = Jobs(target=sh, args=args)
m.run()
checkShuffleSizes(p1, p2, pairsfastq)
def guessoffset(args):
"""
%prog guessoffset fastqfile
Guess the quality offset of the fastqfile, whether 33 or 64.
See encoding schemes: <http://en.wikipedia.org/wiki/FASTQ_format>
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS...............................
..........................XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
...............................IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII
.................................JJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJJ
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL...............................
!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh
| | | | |
33 59 64 73 104
S - Sanger Phred+33, raw reads typically (0, 40)
X - Solexa Solexa+64, raw reads typically (-5, 40)
I - Illumina 1.3+ Phred+64, raw reads typically (0, 40)
J - Illumina 1.5+ Phred+64, raw reads typically (3, 40)
L - Illumina 1.8+ Phred+33, raw reads typically (0, 40)
with 0=unused, 1=unused, 2=Read Segment Quality Control Indicator (bold)
"""
p = OptionParser(guessoffset.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
    rec = next(ai)
offset = 64
while rec:
quality = rec.quality
lowcounts = len([x for x in quality if x < 59])
highcounts = len([x for x in quality if x > 74])
diff = highcounts - lowcounts
if diff > 10:
break
elif diff < -10:
offset = 33
break
        rec = next(ai)
if offset == 33:
print >> sys.stderr, "Sanger encoding (offset=33)"
elif offset == 64:
print >> sys.stderr, "Illumina encoding (offset=64)"
return offset
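# Typical use from the other commands in this module, e.g.:
#   offset = guessoffset([infastq])   # 33 for Sanger/Illumina 1.8+, else 64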
def format(args):
"""
%prog format fastqfile
Format FASTQ file. Currently provides option to convert FASTQ header from
one dialect to another.
"""
p = OptionParser(format.__doc__)
sp1.add_argument("--convert", default=None, choices=[">=1.8", "<1.8", "sra"],
help="Convert fastq header to a different format" +
" [default: %default]")
p.set_tag(specify_tag=True)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
ai = iter_fastq(fastqfile)
    rec = next(ai)
dialect = None
while rec:
h = FastqHeader(rec.header)
if not dialect:
dialect = h.dialect
logging.debug("Input fastq dialect: `{0}`".format(dialect))
if args.convert:
logging.debug("Output fastq dialect: `{0}`".format(args.convert))
rec.name = h.format_header(dialect=args.convert, tag=args.tag)
print(rec)
        rec = next(ai)
def some(args):
"""
%prog some idsfile afastq [bfastq]
Select a subset of the reads with ids present in the idsfile.
`bfastq` is optional (only if reads are paired)
"""
p = OptionParser(some.__doc__)
opts, args = p.parse_args(args)
if len(args) not in (2, 3):
sys.exit(not p.print_help())
idsfile, afastq, = args[:2]
bfastq = args[2] if len(args) == 3 else None
ids = DictFile(idsfile, valuepos=None)
ai = iter_fastq(open(afastq))
    arec = next(ai)
if bfastq:
bi = iter_fastq(open(bfastq))
        brec = next(bi)
while arec:
if arec.name[1:] in ids:
print(arec)
if bfastq:
print(brec)
        arec = next(ai)
if bfastq:
            brec = next(bi)
def trim(args):
"""
%prog trim fastqfile
Wraps `fastx_trimmer` to trim from begin or end of reads.
"""
p = OptionParser(trim.__doc__)
sp1.add_argument("-f", dest="first", default=0, type="int",
help="First base to keep. Default is 1.")
sp1.add_argument("-l", dest="last", default=0, type="int",
help="Last base to keep. Default is entire read.")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
obfastqfile = op.basename(fastqfile)
fq = obfastqfile.rsplit(".", 1)[0] + ".ntrimmed.fastq"
if fastqfile.endswith(".gz"):
fq = obfastqfile.rsplit(".", 2)[0] + ".ntrimmed.fastq.gz"
cmd = "fastx_trimmer -Q33 "
if args.first:
cmd += "-f {0.first} ".format(opts)
if args.last:
cmd += "-l {0.last} ".format(opts)
sh(cmd, infile=fastqfile, outfile=fq)
def catread(args):
"""
%prog catread fastqfile1 fastqfile2
Concatenate paired end reads into one. Useful for example to do single-end
mapping and perform filtering on the whole read pair level.
"""
p = OptionParser(catread.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
r1, r2 = args
p1fp, p2fp = FastqPairedIterator(r1, r2)
outfile = pairspf((r1, r2)) + ".cat.fastq"
fw = must_open(outfile, "w")
while True:
a = list(islice(p1fp, 4))
if not a:
break
atitle, aseq, _, aqual = a
btitle, bseq, _, bqual = list(islice(p2fp, 4))
print >> fw, "\n".join((atitle.strip(), aseq.strip() + bseq.strip(), \
"+", aqual.strip() + bqual.strip()))
def splitread(args):
"""
%prog splitread fastqfile
Split fastqfile into two read fastqfiles, cut in the middle.
"""
p = OptionParser(splitread.__doc__)
sp1.add_argument("-n", dest="n", default=76, type="int",
help="Split at N-th base position [default: %default]")
sp1.add_argument("--rc", default=False, action="store_true",
help="Reverse complement second read [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
base = op.basename(pairsfastq).split(".")[0]
fq1 = base + ".1.fastq"
fq2 = base + ".2.fastq"
fw1 = must_open(fq1, "w")
fw2 = must_open(fq2, "w")
fp = must_open(pairsfastq)
n = args.n
minsize = n * 8 / 5
for name, seq, qual in FastqGeneralIterator(fp):
if len(seq) < minsize:
logging.error("Skipping read {0}, length={1}".format(name, len(seq)))
continue
name = "@" + name
rec1 = FastqLite(name, seq[:n], qual[:n])
rec2 = FastqLite(name, seq[n:], qual[n:])
if args.rc:
rec2.rc()
        print(rec1, file=fw1)
        print(rec2, file=fw2)
logging.debug("Reads split into `{0},{1}`".format(fq1, fq2))
fw1.close()
fw2.close()
def size(args):
"""
%prog size fastqfile
Find the total base pairs in a list of fastq files
"""
p = OptionParser(size.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
total_size = total_numrecords = 0
for f in args:
cur_size = cur_numrecords = 0
for rec in iter_fastq(f):
if not rec:
break
cur_numrecords += 1
cur_size += len(rec)
print(" ".join(str(x) for x in (op.basename(f), cur_numrecords, cur_size)))
total_numrecords += cur_numrecords
total_size += cur_size
if len(args) > 1:
print(" ".join(str(x) for x in ("Total", total_numrecords, total_size)))
def convert(args):
"""
%prog convert in.fastq
illumina fastq quality encoding uses offset 64, and sanger uses 33. This
script creates a new file with the correct encoding. Output gzipped file if
input is also gzipped.
"""
p = OptionParser(convert.__doc__)
p.set_phred()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
infastq, = args
phred = args.phred or str(guessoffset([infastq]))
ophred = {"64": "33", "33": "64"}[phred]
gz = infastq.endswith(".gz")
outfastq = infastq.rsplit(".", 1)[0] if gz else infastq
pf, sf = outfastq.rsplit(".", 1)
outfastq = "{0}.q{1}.{2}".format(pf, ophred, sf)
if gz:
outfastq += ".gz"
fin = "illumina" if phred == "64" else "sanger"
fout = "sanger" if phred == "64" else "illumina"
seqret = "seqret"
if infastq.endswith(".gz"):
cmd = "zcat {0} | ".format(infastq)
cmd += seqret + " fastq-{0}::stdin fastq-{1}::stdout".\
format(fin, fout)
else:
cmd = seqret + " fastq-{0}::{1} fastq-{2}::stdout".\
format(fin, infastq, fout)
sh(cmd, outfile=outfastq)
return outfastq
def pairinplace(args):
"""
%prog pairinplace bulk.fastq
Pair up the records in bulk.fastq by comparing the names for adjancent
records. If they match, print to bulk.pairs.fastq, else print to
bulk.frags.fastq.
"""
from maize.utils.iter import pairwise
p = OptionParser(pairinplace.__doc__)
p.set_rclip()
p.set_tag()
sp1.add_argument("--base",
help="Base name for the output files [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
fastqfile, = args
base = args.base or op.basename(fastqfile).split(".")[0]
frags = base + ".frags.fastq"
pairs = base + ".pairs.fastq"
if fastqfile.endswith(".gz"):
frags += ".gz"
pairs += ".gz"
fragsfw = must_open(frags, "w")
pairsfw = must_open(pairs, "w")
N = args.rclip
tag = args.tag
strip_name = (lambda x: x[:-N]) if N else None
fh_iter = iter_fastq(fastqfile, key=strip_name)
skipflag = False # controls the iterator skip
for a, b in pairwise(fh_iter):
if b is None: # hit the eof
break
if skipflag:
skipflag = False
continue
if a.name == b.name:
if tag:
a.name += "/1"
b.name += "/2"
            print(a, file=pairsfw)
            print(b, file=pairsfw)
skipflag = True
else:
            print(a, file=fragsfw)
# don't forget the last one, when b is None
if not skipflag:
        print(a, file=fragsfw)
logging.debug("Reads paired into `%s` and `%s`" % (pairs, frags))
return pairs
def fromsra(args):
"""
%prog fromsra srafile
Convert sra file to fastq using the sratoolkit `fastq-dump`
"""
p = OptionParser(fromsra.__doc__)
sp1.add_argument("--paired", default=False, action="store_true",
help="Specify if library layout is paired-end " + \
"[default: %default]")
sp1.add_argument("--compress", default=None, choices=["gzip", "bzip2"],
help="Compress output fastq files [default: %default]")
p.set_outdir()
p.set_grid()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
srafile, = args
paired = args.paired
compress = args.compress
outdir = args.outdir
script_path = which("fastq-dump")
if not script_path:
logging.error("Cannot find `fastq-dump` in the PATH")
sys.exit()
cmd = [script_path]
if compress:
cmd.append("--{0}".format(compress))
if paired:
cmd.append("--split-files")
if outdir:
cmd.append("--outdir {0}".format(outdir))
cmd.append(srafile)
outcmd = " ".join(cmd)
sh(outcmd, grid=args.grid)
class Error(Exception):
pass
class Line(str):
"""A line of text with associated filename and line number."""
def error(self, message):
"""Return an error relating to this line."""
return Error("{0}({1}): {2}\n{3}"
.format(self.filename, self.lineno, message, self))
class Lines(object):
"""Lines(filename, iterator) wraps 'iterator' so that it yields Line
objects, with line numbers starting from 1. 'filename' is used in
error messages.
"""
def __init__(self, filename, iterator):
self.filename = filename
self.lines = enumerate(iterator, start=1)
def __iter__(self):
return self
def __next__(self):
lineno, s = next(self.lines)
s = s.decode('utf-8')
line = Line(s)
line.filename = self.filename
line.lineno = lineno
return line
# For compatibility with Python 2.
next = __next__
def read_fastq(filename, iterator):
"""Read FASTQ data from 'iterator' (which may be a file object or any
other iterator that yields strings) and generate tuples (sequence
name, sequence data, quality data). 'filename' is used in error
messages.
"""
# This implementation follows the FASTQ specification given here:
# <http://nar.oxfordjournals.org/content/38/6/1767.full>
import re
at_seqname_re = re.compile(r'@(.+)$')
sequence_re = re.compile(r'[!-*,-~]*$')
plus_seqname_re = re.compile(r'\+(.*)$')
quality_re = re.compile(r'[!-~]*$')
lines = Lines(filename, iterator)
for line in lines:
# First line of block is @<seqname>.
m = at_seqname_re.match(line)
if not m:
raise line.error("Expected @<seqname> but found:")
seqname = m.group(1)
try:
# One or more lines of sequence data.
sequence = []
for line in lines:
m = sequence_re.match(line)
if not m:
break
sequence.append(m.group(0))
if not sequence:
raise line.error("Expected <sequence> but found:")
# The line following the sequence data consists of a plus
# sign and an optional sequence name (if supplied, it must
# match the sequence name from the start of the block).
m = plus_seqname_re.match(line)
if not m:
raise line.error("Expected +[<seqname>] but found:")
if m.group(1) not in ['', seqname]:
raise line.error("Expected +{} but found:".format(seqname))
# One or more lines of quality data, containing the same
# number of characters as the sequence data.
quality = []
n = sum(map(len, sequence))
while n > 0:
line = next(lines)
m = quality_re.match(line)
if not m:
raise line.error("Expected <quality> but found:")
n -= len(m.group(0))
if n < 0:
raise line.error("<quality> is longer than <sequence>:")
quality.append(m.group(0))
yield seqname, ''.join(sequence), ''.join(quality)
except StopIteration:
raise line.error("End of input before sequence was complete:")
def breakread(args):
    fhi = must_open(args.fi)
    fo, readlen = args.fo, args.readlen
    fo1 = "%s_1.fq.gz" % fo
    fo2 = "%s_2.fq.gz" % fo
fho1 = gzip.open(fo1, "wb")
fho2 = gzip.open(fo2, "wb")
for (seqid, seq, qual) in read_fastq(args.fi, fhi):
assert len(seq) == readlen * 2 and len(qual) == readlen * 2, \
"%s: seq[%d] qual[%d] not %d" % \
(seqid, len(seq), len(qual), readlen)
eles = seqid.split(" ")
if len(eles) > 2: seqid = " ".join(eles[0:2])
seq1, seq2 = seq[0:readlen], seq[readlen:readlen*2]
qual1, qual2 = qual[0:readlen], qual[readlen:readlen*2]
fho1.write(("@%s\n%s\n+\n%s\n" % (seqid, seq1, qual1)).encode('utf8'))
fho2.write(("@%s\n%s\n+\n%s\n" % (seqid, seq2, qual2)).encode('utf8'))
def UMIcount(args):
"""
%prog UMIcount fastqfile
Report number of occurances of each unique UMI
"""
    if args.fi.endswith(".gz"):
        fhi = gzip.open(args.fi, "r")
    else:
        fhi = must_open(args.fi)
ud = dict()
for (seqid, seq, qual) in read_fastq(args.fi, fhi):
umi = seqid.split(" ")[1].split("+")[1]
if umi in ud:
ud[umi] += 1
else:
ud[umi] = 1
fho = must_open(args.fo, 'w')
for umi, cnt in ud.items():
fho.write("%s\t%s\n" % (umi, cnt))
logging.debug("{} UMIs detected".format(len(ud)))
def main():
import argparse
parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'fastq utilities'
)
sp = parser.add_subparsers(title = 'available commands', dest = 'command')
sp1 = sp.add_parser("break", help = "break each fastq seq into two seqs of equal lengths")
sp1.add_argument('fi', help = 'input file (*.fastq or *.fastq.gz)')
sp1.add_argument('fo', help = 'output prefix (*_1.fq.gz and *_2.fq.gz)')
sp1.add_argument('readlen', type = int, help = 'read length')
sp1.set_defaults(func = breakread)
sp1 = sp.add_parser('UMIcount', help='count occurance of UMIs in file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('fi', help = 'input file (*.fastq or *.fastq.gz)')
sp1.add_argument('fo', help = 'output table of UMI occurances (*.tsv)')
sp1.set_defaults(func = UMIcount)
sp1 = sp.add_parser('size', help='total base pairs in the fastq files',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = size)
sp1 = sp.add_parser('shuffle', help='shuffle paired reads into the same file interleaved',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = shuffle)
sp1 = sp.add_parser('split', help='split paired reads into two files',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = split)
sp1 = sp.add_parser('splitread', help='split appended reads (from JGI)',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = splitread)
sp1 = sp.add_parser('catread', help='cat pairs together (reverse of splitread)',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = catread)
sp1 = sp.add_parser('pairinplace', help='collect pairs by checking adjacent ids',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = pairinplace)
sp1 = sp.add_parser('convert', help='convert between illumina and sanger offset',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = convert)
sp1 = sp.add_parser('first', help='get first N reads from file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = first)
sp1 = sp.add_parser('filter', help='filter to get high qv reads',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = filter)
sp1 = sp.add_parser('suffix', help='filter reads based on suffix',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = suffix)
sp1 = sp.add_parser('trim', help='trim reads using fastx_trimmer',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = trim)
sp1 = sp.add_parser('some', help='select a subset of fastq reads',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = some)
sp1 = sp.add_parser('guessoffset', help='guess the quality offset of the fastq records',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = guessoffset)
sp1 = sp.add_parser('readlen', help='calculate read length',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = readlen)
sp1 = sp.add_parser('format', help='format fastq file, convert header from casava 1.8+ to older format',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = format)
sp1 = sp.add_parser('fasta', help='convert fastq to fasta and qual file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = fasta)
sp1 = sp.add_parser('fromsra', help='convert sra to fastq using `fastq-dump`',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = fromsra)
sp1 = sp.add_parser('uniq', help='retain only first instance of duplicate (by name) reads',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = uniq)
args = parser.parse_args()
if args.command:
args.func(args)
else:
print('Error: need to specify a sub command\n')
parser.print_help()
if __name__ == '__main__':
main()
| gpl-2.0 | 7,219,325,160,960,035,000 | 30.14547 | 115 | 0.559678 | false |
Justyer/NightHeartDataPlatform | firefly/app/doc/views.py | 1 | 3838 | from flask import jsonify, json
from flask import render_template, session, redirect, url_for, current_app, request
from .. import db
from ..models import Doc, DataSrc
from . import doc
from pymongo import MongoClient
#add a new doc without content
@doc.route('/doc/add', methods=['POST'])
def newDataSrc():
data = json.loads(request.form.get('data'))
name = data['doc_name']
dc = Doc()
dc.doc_name = name
dc.account_id = session['account_id']
db.session.add(dc)
db.session.commit()
return redirect(url_for("main.getDoc"))
#skip checkpage
@doc.route('/doc/check/<int:id>', methods=['GET'])
def checkDoc(id):
session['doc_id'] = id
return render_template("checkdoc.html")
#skip editpage
@doc.route('/doc/edit/<int:id>', methods=['GET'])
def editDoc(id):
session['doc_id'] = id
return render_template("editdoc.html")
@doc.route('/doc/edit/init', methods=['GET'])
def editDocInit():
id = session['doc_id']
mongoCol = MongoClient("localhost", 27017)
db_doc = mongoCol.docdb
db_datasrc = mongoCol.datasrcdb
init_array = []
doc_find = db_doc.doccol.find_one({"doc_id":str(id)}, {"_id":0})
if doc_find is not None:
for i in range(0, len(doc_find['component'])):
component = {}
if doc_find['component'][i]['data_type'] in ['lt', 'p']:
component['data_id'] = doc_find['component'][i]['data_id']
component['type'] = doc_find['component'][i]['data_type']
component['data'] = doc_find['component'][i]['data_id']
else:
datasrc_find = db_datasrc.datasrccol.find_one({"data_id": doc_find['component'][i]['data_id']})
component['data_id'] = doc_find['component'][i]['data_id']
component['type'] = DataSrc.query.filter_by(data_id=component['data_id']).first().data_type
component['data'] = datasrc_find['data']
init_array.append(component)
title = Doc.query.filter_by(doc_id=id).first().doc_name
initDoc = {"doc_id": id, "component":init_array, "title": title}
return jsonify(initDoc)
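# The JSON returned above has this shape (values here are placeholders):
#   {"doc_id": 1, "title": "...",
#    "component": [{"data_id": ..., "type": "lt" | "p" | <data_type>, "data": ...}]}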
@doc.route('/doc/rename/<int:id>', methods=['POST'])
def renameDataSrc(id):
data = json.loads(request.form.get('data'))
rename = data['doc_name']
dc = Doc.query.filter_by(doc_id=id).first()
dc.doc_name = rename
db.session.commit()
return redirect(url_for("main.getDoc"))
#edit loadpage init coponent
@doc.route('/doc/delete/<int:id>', methods=['GET'])
def deleteDataSrc(id):
dc = Doc.query.filter_by(doc_id=id).first()
db.session.delete(dc)
db.session.commit()
mongoCol = MongoClient("localhost", 27017)
db1 = mongoCol.docdb
result = db1.doccol.find_one({'doc_id': str(id)})
if result is not None:
db1.doccol.remove(result)
return redirect(url_for("main.getDoc"))
@doc.route('/doc/init/select', methods=['GET'])
def initSelect():
account_id = session['account_id']
ds = DataSrc.query.filter_by(account_id=account_id).all()
ds_array = []
for item in ds:
if item.have_data != 0:
ds_dict = {}
ds_dict['data_id'] = item.data_id
ds_dict['data_name'] = item.data_name
ds_dict['data_type'] = item.data_type
ds_array.append(ds_dict)
initSel = {"initselect": ds_array}
return jsonify(initSel)
@doc.route('/doc/add/component/<int:id>', methods=['GET'])
def addComponent(id):
mongoCol = MongoClient("localhost", 27017)
    mongo_db = mongoCol.datasrcdb
    result = mongo_db.datasrccol.find_one({'data_id': str(id)}, {"_id":0})
component = {}
component['data_id'] = id
component['type'] = DataSrc.query.filter_by(data_id=id).first().data_type
component['data'] = result['data']
addcpt = {"addcomponent": [component]}
return jsonify(addcpt)
@doc.route('/doc/save', methods=['POST'])
def saveDoc():
data = json.loads(request.form.get('data'))
data_as = {"doc_id":str(data['doc_id']), "component":data['component']}
mongoCol = MongoClient("localhost", 27017)
db = mongoCol.docdb
db.doccol.update({"doc_id":str(data['doc_id'])}, data_as, upsert=True)
return jsonify({})
| mit | 422,848,269,793,811,500 | 30.719008 | 99 | 0.670922 | false |
kaushik94/sympy | sympy/solvers/pde.py | 2 | 36106 | """
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
    - pdsolve() - Solves PDEs
    - classify_pde() - Classifies PDEs into possible hints for pdsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from __future__ import print_function, division
from itertools import combinations_with_replacement
from sympy.simplify import simplify
from sympy.core import Add, S
from sympy.core.compatibility import (reduce, is_sequence, range)
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
allhints = (
"1st_linear_constant_coeff_homogeneous",
"1st_linear_constant_coeff",
"1st_linear_constant_coeff_Integral",
"1st_linear_variable_coeff"
)
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
"""
Solves any (supported) kind of partial differential equation.
**Usage**
pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
eq for function f(x,y), using method hint.
**Details**
``eq`` can be any supported partial differential equation (see
the pde docstring for supported methods). This can either
be an Equality, or an expression, which is assumed to be
equal to 0.
``f(x,y)`` is a function of two variables whose derivatives in that
variable make up the partial differential equation. In many
cases it is not necessary to provide this; it will be autodetected
(and an error raised if it couldn't be detected).
``hint`` is the solving method that you want pdsolve to use. Use
classify_pde(eq, f(x,y)) to get all of the possible hints for
a PDE. The default hint, 'default', will use whatever hint
is returned first by classify_pde(). See Hints below for
more options that you can use for hint.
``solvefun`` is the convention used for arbitrary functions returned
by the PDE solver. If not set by the user, it is set by default
to be F.
**Hints**
Aside from the various solving methods, there are also some
meta-hints that you can pass to pdsolve():
"default":
This uses whatever hint is returned first by
classify_pde(). This is the default argument to
pdsolve().
"all":
To make pdsolve apply all relevant classification hints,
use pdsolve(PDE, func, hint="all"). This will return a
dictionary of hint:solution terms. If a hint causes
pdsolve to raise the NotImplementedError, value of that
hint's key will be the exception object raised. The
dictionary will also include some special keys:
- order: The order of the PDE. See also ode_order() in
deutils.py
- default: The solution that would be returned by
default. This is the one produced by the hint that
appears first in the tuple returned by classify_pde().
"all_Integral":
This is the same as "all", except if a hint also has a
corresponding "_Integral" hint, it only returns the
"_Integral" hint. This is useful if "all" causes
pdsolve() to hang because of a difficult or impossible
integral. This meta-hint will also be much faster than
"all", because integrate() is an expensive routine.
See also the classify_pde() docstring for more info on hints,
and the pde docstring for a list of all supported hints.
**Tips**
- You can declare the derivative of an unknown function this way:
>>> from sympy import Function, Derivative
>>> from sympy.abc import x, y # x and y are the independent variables
>>> f = Function("f")(x, y) # f is a function of x and y
>>> # fx will be the partial derivative of f with respect to x
>>> fx = Derivative(f, x)
>>> # fy will be the partial derivative of f with respect to y
>>> fy = Derivative(f, y)
- See test_pde.py for many tests, which serves also as a set of
examples for how to use pdsolve().
- pdsolve always returns an Equality class (except for the case
when the hint is "all" or "all_Integral"). Note that it is not possible
to get an explicit solution for f(x, y) as in the case of ODE's
    - Do help(pde.pde_hintname) to get more information on a
      specific hint
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> pdsolve(eq)
Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))
"""
if not solvefun:
solvefun = Function('F')
# See the docstring of _desolve for more details.
hints = _desolve(eq, func=func, hint=hint, simplify=True,
type='pde', **kwargs)
eq = hints.pop('eq', False)
all_ = hints.pop('all', False)
if all_:
# TODO : 'best' hint should be implemented when adequate
# number of hints are added.
pdedict = {}
failed_hints = {}
gethints = classify_pde(eq, dict=True)
pdedict.update({'order': gethints['order'],
'default': gethints['default']})
for hint in hints:
try:
rv = _helper_simplify(eq, hint, hints[hint]['func'],
hints[hint]['order'], hints[hint][hint], solvefun)
except NotImplementedError as detail:
failed_hints[hint] = detail
else:
pdedict[hint] = rv
pdedict.update(failed_hints)
return pdedict
else:
return _helper_simplify(eq, hints['hint'], hints['func'],
hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
"""Helper function of pdsolve that calls the respective
pde functions to solve for the partial differential
equations. This minimizes the computation in
calling _desolve multiple times.
"""
if hint.endswith("_Integral"):
solvefunc = globals()[
"pde_" + hint[:-len("_Integral")]]
else:
solvefunc = globals()["pde_" + hint]
return _handle_Integral(solvefunc(eq, func, order,
match, solvefun), func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, **kwargs):
"""
Returns a tuple of possible pdsolve() classifications for a PDE.
The tuple is ordered so that first item is the classification that
pdsolve() uses to solve the PDE by default. In general,
classifications near the beginning of the list will produce
better solutions faster than those near the end, though there are
always exceptions. To make pdsolve use a different classification,
use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
docstring for different meta-hints you can use.
If ``dict`` is true, classify_pde() will return a dictionary of
hint:match expression terms. This is intended for internal use by
pdsolve(). Note that because dictionaries are ordered arbitrarily,
this will most likely not be in the same order as the tuple.
You can get help on different hints by doing help(pde.pde_hintname),
where hintname is the name of the hint without "_Integral".
See sympy.pde.allhints or the sympy.pde docstring for a list of all
supported hints that can be returned from classify_pde.
Examples
========
>>> from sympy.solvers.pde import classify_pde
>>> from sympy import Function, diff, Eq
>>> from sympy.abc import x, y
>>> f = Function('f')
>>> u = f(x, y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
>>> classify_pde(eq)
('1st_linear_constant_coeff_homogeneous',)
"""
prep = kwargs.pop('prep', True)
if func and len(func.args) != 2:
raise NotImplementedError("Right now only partial "
"differential equations of two variables are supported")
if prep or func is None:
prep, func_ = _preprocess(eq, func)
if func is None:
func = func_
if isinstance(eq, Equality):
if eq.rhs != 0:
return classify_pde(eq.lhs - eq.rhs, func)
eq = eq.lhs
f = func.func
x = func.args[0]
y = func.args[1]
fx = f(x,y).diff(x)
fy = f(x,y).diff(y)
# TODO : For now pde.py uses support offered by the ode_order function
# to find the order with respect to a multi-variable function. An
# improvement could be to classify the order of the PDE on the basis of
# individual variables.
order = ode_order(eq, f(x,y))
# hint:matchdict or hint:(tuple of matchdicts)
# Also will contain "default":<default hint> and "order":order items.
matching_hints = {'order': order}
if not order:
if dict:
matching_hints["default"] = None
return matching_hints
else:
return ()
eq = expand(eq)
a = Wild('a', exclude = [f(x,y)])
b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
e = Wild('e', exclude = [f(x,y), fx, fy])
n = Wild('n', exclude = [x, y])
# Try removing the smallest power of f(x,y)
# from the highest partial derivatives of f(x,y)
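    # For instance (hypothetical equation): in f(x,y)*fx + f(x,y)**2*fy + f(x,y)**3,
    # the smallest power of f(x,y) multiplying a highest-order derivative is 1
    # (from the fx term), so the whole equation is divided by f(x,y) below,
    # giving fx + f(x,y)*fy + f(x,y)**2.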
reduced_eq = None
if eq.is_Add:
var = set(combinations_with_replacement((x,y), order))
dummyvar = var.copy()
power = None
for i in var:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a]:
power = match[n]
dummyvar.remove(i)
break
dummyvar.remove(i)
for i in dummyvar:
coeff = eq.coeff(f(x,y).diff(*i))
if coeff != 1:
match = coeff.match(a*f(x,y)**n)
if match and match[a] and match[n] < power:
power = match[n]
if power:
den = f(x,y)**power
reduced_eq = Add(*[arg/den for arg in eq.args])
if not reduced_eq:
reduced_eq = eq
if order == 1:
reduced_eq = collect(reduced_eq, f(x, y))
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
if not r[e]:
## Linear first-order homogeneous partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d})
matching_hints["1st_linear_constant_coeff_homogeneous"] = r
else:
if r[b]**2 + r[c]**2 != 0:
## Linear first-order general partial-differential
## equation with constant coefficients
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_constant_coeff"] = r
matching_hints[
"1st_linear_constant_coeff_Integral"] = r
else:
b = Wild('b', exclude=[f(x, y), fx, fy])
c = Wild('c', exclude=[f(x, y), fx, fy])
d = Wild('d', exclude=[f(x, y), fx, fy])
r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
if r:
r.update({'b': b, 'c': c, 'd': d, 'e': e})
matching_hints["1st_linear_variable_coeff"] = r
# Order keys based on allhints.
retlist = []
for i in allhints:
if i in matching_hints:
retlist.append(i)
if dict:
# Dictionaries are ordered arbitrarily, so make note of which
# hint would come first for pdsolve(). Use an ordered dict in Py 3.
matching_hints["default"] = None
matching_hints["ordered_hints"] = tuple(retlist)
for i in allhints:
if i in matching_hints:
matching_hints["default"] = i
break
return matching_hints
else:
return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
"""
Checks if the given solution satisfies the partial differential
equation.
pde is the partial differential equation which can be given in the
form of an equation or an expression. sol is the solution for which
the pde is to be checked. This can also be given in an equation or
an expression form. If the function is not provided, the helper
function _preprocess from deutils is used to identify the function.
If a sequence of solutions is passed, the same sort of container will be
used to return the result for each solution.
The following methods are currently being implemented to check if the
solution satisfies the PDE:
1. Directly substitute the solution in the PDE and check. If the
solution hasn't been solved for f, then it will solve for f
provided solve_for_func hasn't been set to False.
If the solution satisfies the PDE, then a tuple (True, 0) is returned.
Otherwise a tuple (False, expr) where expr is the value obtained
after substituting the solution in the PDE. However if a known solution
returns False, it may be due to the inability of doit() to simplify it to zero.
Examples
========
>>> from sympy import Function, symbols, diff
>>> from sympy.solvers.pde import checkpdesol, pdsolve
>>> x, y = symbols('x y')
>>> f = Function('f')
>>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
>>> sol = pdsolve(eq)
>>> assert checkpdesol(eq, sol)[0]
>>> eq = x*f(x,y) + f(x,y).diff(x)
>>> checkpdesol(eq, sol)
(False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))
"""
# Converting the pde into an equation
if not isinstance(pde, Equality):
pde = Eq(pde, 0)
# If no function is given, try finding the function present.
if func is None:
try:
_, func = _preprocess(pde.lhs)
except ValueError:
funcs = [s.atoms(AppliedUndef) for s in (
sol if is_sequence(sol, set) else [sol])]
            funcs = set().union(*funcs)
if len(funcs) != 1:
raise ValueError(
'must pass func arg to checkpdesol for this case.')
func = funcs.pop()
# If the given solution is in the form of a list or a set
# then return a list or set of tuples.
if is_sequence(sol, set):
return type(sol)([checkpdesol(
pde, i, func=func,
solve_for_func=solve_for_func) for i in sol])
# Convert solution into an equation
if not isinstance(sol, Equality):
sol = Eq(func, sol)
elif sol.rhs == func:
sol = sol.reversed
# Try solving for the function
solved = sol.lhs == func and not sol.rhs.has(func)
if solve_for_func and not solved:
solved = solve(sol, func)
if solved:
if len(solved) == 1:
return checkpdesol(pde, Eq(func, solved[0]),
func=func, solve_for_func=False)
else:
return checkpdesol(pde, [Eq(func, t) for t in solved],
func=func, solve_for_func=False)
# try direct substitution of the solution into the PDE and simplify
if sol.lhs == func:
pde = pde.lhs - pde.rhs
s = simplify(pde.subs(func, sol.rhs).doit())
return s is S.Zero, s
raise NotImplementedError(filldedent('''
Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
r"""
Solves a first order linear homogeneous
partial differential equation with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0
where `a`, `b` and `c` are constants.
The general solution is of the form:
.. math::
f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}}
and can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
dx dy
>>> pprint(pdsolve(genform))
-c*(a*x + b*y)
---------------
2 2
a + b
f(x, y) = F(-a*y + b*x)*e
Examples
========
>>> from sympy.solvers.pde import (
... pde_1st_linear_constant_coeff_homogeneous)
>>> from sympy import pdsolve
>>> from sympy import Function, diff, pprint
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
>>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
x y
- - - -
2 2
f(x, y) = F(x - y)*e
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with constant coefficients.
The general form of this partial differential equation is
.. math:: a \frac{\partial f(x,y)}{\partial x}
+ b \frac{\partial f(x,y)}{\partial y}
+ c f(x,y) = G(x,y)
where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
function in `x` and `y`.
The general solution of the PDE is:
.. math::
f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2}
\int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2},
\frac{- a \eta + b \xi}{a^2 + b^2} \right)
e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right]
e^{- \frac{c \xi}{a^2 + b^2}}
\right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, ,
where `F(\eta)` is an arbitrary single-valued function. The solution
can be found in SymPy with ``pdsolve``::
>>> from sympy.solvers import pdsolve
>>> from sympy.abc import x, y, a, b, c
>>> from sympy import Function, pprint
>>> f = Function('f')
>>> G = Function('G')
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a*ux + b*uy + c*u - G(x,y)
>>> pprint(genform)
d d
a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)
dx dy
>>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
// a*x + b*y \
|| / |
|| | |
|| | c*xi |
|| | ------- |
|| | 2 2 |
|| | /a*xi + b*eta -a*eta + b*xi\ a + b |
|| | G|------------, -------------|*e d(xi)|
|| | | 2 2 2 2 | |
|| | \ a + b a + b / |
|| | |
|| / |
|| |
f(x, y) = ||F(eta) + -------------------------------------------------------|*
|| 2 2 |
\\ a + b /
<BLANKLINE>
\|
||
||
||
||
||
||
||
||
-c*xi ||
-------||
2 2||
a + b ||
e ||
||
/|eta=-a*y + b*x, xi=a*x + b*y
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, pprint, exp
>>> from sympy.abc import x,y
>>> f = Function('f')
>>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
>>> pdsolve(eq)
Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
# TODO : For now homogeneous first order linear PDE's having
# two variables are implemented. Once there is support for
# solving systems of ODE's, this can be extended to n variables.
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
expterm = exp(-S(d)/(b**2 + c**2)*xi)
functerm = solvefun(eta)
solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
# Integral should remain as it is in terms of xi,
# doit() should be done in _handle_Integral.
genterm = (1/S(b**2 + c**2))*Integral(
(1/expterm*e).subs(solvedict), (xi, b*x + c*y))
return Eq(f(x,y), Subs(expterm*(functerm + genterm),
(eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
r"""
Solves a first order linear partial differential equation
with variable coefficients. The general form of this partial
differential equation is
.. math:: a(x, y) \frac{\partial f(x, y)}{\partial x}
+ b(x, y) \frac{\partial f(x, y)}{\partial y}
+ c(x, y) f(x, y) = G(x, y)
where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
functions in `x` and `y`. This PDE is converted into an ODE by
making the following transformation:
1. `\xi` as `x`
2. `\eta` as the constant in the solution to the differential
equation `\frac{dy}{dx} = -\frac{b}{a}`
Making the previous substitutions reduces it to the linear ODE
.. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0
which can be solved using ``dsolve``.
>>> from sympy.solvers.pde import pdsolve
>>> from sympy.abc import x, y
>>> from sympy import Function, pprint
>>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
>>> u = f(x,y)
>>> ux = u.diff(x)
>>> uy = u.diff(y)
>>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
>>> pprint(genform)
d d
-G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
dx dy
Examples
========
>>> from sympy.solvers.pde import pdsolve
>>> from sympy import Function, diff, pprint, exp
>>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> u = f(x,y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
>>> pdsolve(eq)
Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)
References
==========
- Viktor Grigoryan, "Partial Differential Equations"
Math 124A - Fall 2010, pp.7
"""
from sympy.integrals.integrals import integrate
from sympy.solvers.ode import dsolve
xi, eta = symbols("xi eta")
f = func.func
x = func.args[0]
y = func.args[1]
b = match[match['b']]
c = match[match['c']]
d = match[match['d']]
e = -match[match['e']]
if not d:
# To deal with cases like b*ux = e or c*uy = e
if not (b and c):
if c:
try:
tsol = integrate(e/c, y)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(x) + tsol)
if b:
try:
tsol = integrate(e/b, x)
except NotImplementedError:
raise NotImplementedError("Unable to find a solution"
" due to inability of integrate")
else:
return Eq(f(x,y), solvefun(y) + tsol)
if not c:
# To deal with cases when c is 0, a simpler method is used.
# The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
plode = f(x).diff(x)*b + d*f(x) - e
sol = dsolve(plode, f(x))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
return Eq(f(x, y), rhs)
if not b:
# To deal with cases when b is 0, a simpler method is used.
# The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
plode = f(y).diff(y)*c + d*f(y) - e
sol = dsolve(plode, f(y))
syms = sol.free_symbols - plode.free_symbols - {x, y}
rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
return Eq(f(x, y), rhs)
dummy = Function('d')
h = (c/b).subs(y, dummy(x))
sol = dsolve(dummy(x).diff(x) - h, dummy(x))
if isinstance(sol, list):
sol = sol[0]
solsym = sol.free_symbols - h.free_symbols - {x, y}
if len(solsym) == 1:
solsym = solsym.pop()
etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
ysub = solve(eta - etat, y)[0]
deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
final = (dsolve(deq, f(x), hint='1st_linear')).rhs
if isinstance(final, list):
final = final[0]
finsyms = final.free_symbols - deq.free_symbols - {x, y}
rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
return Eq(f(x, y), rhs)
else:
raise NotImplementedError("Cannot solve the partial differential equation due"
" to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
r"""
Helper function to replace constants by functions in 1st_linear_variable_coeff
"""
eta = Symbol("eta")
if len(syms) == 1:
sym = syms.pop()
final = sol.subs(sym, func(funcarg))
else:
for key, sym in enumerate(syms):
final = sol.subs(sym, func(funcarg))
return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
"""Separate variables in partial differential equation either by additive
or multiplicative separation approach. It tries to rewrite an equation so
that one of the specified variables occurs on a different side of the
equation than the others.
:param eq: Partial differential equation
:param fun: Original function F(x, y, z)
:param sep: List of separated functions [X(x), u(y, z)]
:param strategy: Separation strategy. You can choose between additive
separation ('add') and multiplicative separation ('mul') which is
default.
Examples
========
>>> from sympy import E, Eq, Function, pde_separate, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
>>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
>>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
[Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]
See Also
========
pde_separate_add, pde_separate_mul
"""
do_add = False
if strategy == 'add':
do_add = True
elif strategy == 'mul':
do_add = False
else:
raise ValueError('Unknown strategy: %s' % strategy)
if isinstance(eq, Equality):
if eq.rhs != 0:
return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
else:
return pde_separate(Eq(eq, 0), fun, sep, strategy)
if eq.rhs != 0:
raise ValueError("Value should be 0")
# Handle arguments
orig_args = list(fun.args)
subs_args = []
for s in sep:
for j in range(0, len(s.args)):
subs_args.append(s.args[j])
if do_add:
functions = reduce(operator.add, sep)
else:
functions = reduce(operator.mul, sep)
# Check whether variables match
if len(subs_args) != len(orig_args):
raise ValueError("Variable counts do not match")
# Check for duplicate arguments like [X(x), u(x, y)]
if has_dups(subs_args):
raise ValueError("Duplicate substitution arguments detected")
# Check whether the variables match
if set(orig_args) != set(subs_args):
raise ValueError("Arguments do not match")
# Substitute original function with separated...
result = eq.lhs.subs(fun, functions).doit()
# Divide by terms when doing multiplicative separation
if not do_add:
eq = 0
for i in result.args:
eq += i/functions
result = eq
svar = subs_args[0]
dvar = subs_args[1:]
return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
"""
Helper function for searching additive separable solutions.
Consider an equation of two independent variables x, y and a dependent
    variable w, we look for the sum of two functions depending on different
arguments:
`w(x, y, z) = X(x) + y(y, z)`
Examples
========
>>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
>>> from sympy.abc import x, t
>>> u, X, T = map(Function, 'uXT')
>>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
>>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
[exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]
"""
return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
"""
Helper function for searching multiplicative separable solutions.
Consider an equation of two independent variables x, y and a dependent
variable w, we look for the product of two functions depending on different
arguments:
`w(x, y, z) = X(x)*u(y, z)`
Examples
========
>>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
>>> from sympy.abc import x, y
>>> u, X, Y = map(Function, 'uXY')
>>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
>>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
[Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]
"""
return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
"""Separate expression into two parts based on dependencies of variables."""
# FIRST PASS
# Extract derivatives depending our separable variable...
terms = set()
for term in eq.args:
if term.is_Mul:
for i in term.args:
if i.is_Derivative and not i.has(*others):
terms.add(term)
continue
elif term.is_Derivative and not term.has(*others):
terms.add(term)
# Find the factor that we need to divide by
div = set()
for term in terms:
ext, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
div.add(ext)
# FIXME: Find lcm() of all the divisors and divide with it, instead of
# current hack :(
# https://github.com/sympy/sympy/issues/4597
if len(div) > 0:
final = 0
for term in eq.args:
eqn = 0
for i in div:
eqn += term / i
final += simplify(eqn)
eq = final
# SECOND PASS - separate the derivatives
div = set()
lhs = rhs = 0
for term in eq.args:
        # Check whether the term is already free of the other variables...
if not term.has(*others):
lhs += term
continue
# ...otherwise, try to separate
temp, sep = term.expand().as_independent(dep)
# Failed?
if sep.has(*others):
return None
# Extract the divisors
div.add(sep)
rhs -= term.expand()
# Do the division
fulldiv = reduce(operator.add, div)
lhs = simplify(lhs/fulldiv).expand()
rhs = simplify(rhs/fulldiv).expand()
# ...and check whether we were successful :)
if lhs.has(*others) or rhs.has(dep):
return None
return [lhs, rhs]
| bsd-3-clause | -722,910,946,330,786,300 | 34.294233 | 127 | 0.541517 | false |
astokes/SynVinQR | etude/QR_here.py | 1 | 1432 | # QR_here.py displays the current URL encoded as a QR image.
import os, sys, inspect
#
# Much ugly bother to work with a local copy of pyqrcode
#
curpath = os.path.split(inspect.getfile(inspect.currentframe()))[0]
newpath = os.path.join(curpath, "../pyqrcode-read-only")
cmd_folder = os.path.realpath(os.path.abspath(newpath))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
try:
import pyqrcode
except ImportError:
print "you need to run the script to obtain the pyqrcode module"
sys.exit (1)
import bottle
import pymongo
import StringIO
print "binding to wildcard route at http://yourhost/"
@bottle.route('<path:re:.*>') # all-route regex wildcard
def index(path):
# path does not contain scheme, hostname, or port
route_URL = 'http://' + path
URL = bottle.request.url
# NOGO need to strip the parameters off the passed URL
print bottle.request.query.setdefault ('key', '')
# urllib.quote('/test', '')
from pymongo import Connection
connection = Connection('localhost', 27017)
# pyqrcode bails out if URL string is shorter than http:/
img = pyqrcode.MakeQRImage(URL, rounding = 0, fg = "black", bg = "burlywood", br = False)
bottle.response.set_header('Content-type', 'image/png')
img_buff = StringIO.StringIO()
img.save(img_buff, format='png')
img_buff.seek(0)
return img_buff.read()
bottle.run(host='0.0.0.0', port=8800)
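# Usage sketch (illustrative; host/port are whatever bottle binds to above):
# start the script, then request any path, e.g.
#   curl http://localhost:8800/example.com/some/page > qr.png
# The response is a PNG of a QR code encoding the requested URL.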
| bsd-2-clause | 5,605,485,311,043,706,000 | 27.078431 | 93 | 0.687849 | false |
sfu-fas/coursys | grad/importer/career.py | 1 | 12498 | from .parameters import SIMS_SOURCE, RELEVANT_PROGRAM_START, CMPT_CUTOFF
from .happenings import build_program_map, build_reverse_program_map
from .happenings import ProgramStatusChange, ApplProgramChange, GradResearchArea
from .tools import STRM_MAP
from coredata.queries import add_person
from grad.models import GradStudent, GradProgramHistory, GradStatus, Supervisor, SHORT_STATUSES, SUPERVISOR_TYPE
class GradCareer(object):
"""
One grad career as we understand it (a grad.models.GradStudent object).
"""
program_map = None
reverse_program_map = None
def __init__(self, emplid, adm_appl_nbr, app_stdnt_car_nbr, unit):
self.emplid = emplid
self.adm_appl_nbr = adm_appl_nbr
self.app_stdnt_car_nbr = app_stdnt_car_nbr
self.unit = unit
self.happenings = []
self.admit_term = None
self.stdnt_car_nbr = None
self.last_program = None
self.gradstudent = None
self.current_program = None # used to track program changes as we import
self.student_info = None
self.metadata = None
self.research_areas = set()
if not GradCareer.program_map:
GradCareer.program_map = build_program_map()
if not GradCareer.reverse_program_map:
GradCareer.reverse_program_map = build_reverse_program_map()
def __repr__(self):
return "%s@%s:%s" % (self.emplid, self.adm_appl_nbr, self.stdnt_car_nbr)
def add(self, h):
"""
Add happening to this career, and maintain the metadata we need.
"""
if h.adm_appl_nbr and not isinstance(h, GradResearchArea):
if not self.adm_appl_nbr:
self.adm_appl_nbr = h.adm_appl_nbr
if self.stdnt_car_nbr is None:
self.stdnt_car_nbr = h.stdnt_car_nbr
if self.adm_appl_nbr != h.adm_appl_nbr or (h.stdnt_car_nbr is not None and self.stdnt_car_nbr != h.stdnt_car_nbr):
raise ValueError
assert h.unit == self.unit
if hasattr(h, 'admit_term'):
# record most-recent admit term we find
self.admit_term = h.admit_term
if isinstance(h, ProgramStatusChange):
self.last_program = h.acad_prog
self.happenings.append(h)
h.in_career = True
def sort_happenings(self):
# sort ApplProgramChange after the corresponding ProgramStatusChange: let ProgramStatusChange win if they're on
# the same day.
self.happenings.sort(key=lambda h: (h.strm, h.effdt, 1 if isinstance(h, ApplProgramChange) else 0))
def import_key(self):
if self.adm_appl_nbr:
adm_appl_nbr = self.adm_appl_nbr
else:
adm_appl_nbr = None
return [self.emplid, adm_appl_nbr, self.unit.slug]
def possibly_active_on(self, effdt):
"""
Is this a date in which this career is conceivably active? i.e. might be taking courses or forming committees?
"""
matr = [(h.effdt, h.admit_term) for h in self.happenings if isinstance(h, ProgramStatusChange) and h.prog_action in ['MATR', 'RADM']]
# doing things happens after you are admitted (in the finally-selected admit_term).
if not matr:
return False
matr_strm = max(matr)[1]
matr_effdt = max(matr)[0]
matr_dt = STRM_MAP[matr_strm].start
if matr_dt > effdt:
return False
grads = [h.effdt for h in self.happenings if h.effdt >= matr_effdt and isinstance(h, ProgramStatusChange) and h.prog_status == 'CM']
# can do things up to graduation day
if grads:
return effdt <= max(grads)
ends = [h.effdt for h in self.happenings if h.effdt >= matr_effdt and isinstance(h, ProgramStatusChange) and h.prog_status in ['CN', 'DC']]
if ends:
# can't do things if you bailed
end_dt = max(ends)
return effdt < end_dt
# ongoing program, so anything after admission
return True
def program_as_of(self, effdt):
"""
What acad_prog is this career in as of effdt?
"""
statuses = [h for h in self.happenings if isinstance(h, ProgramStatusChange) and h.effdt <= effdt]
statuses.sort(key=lambda h: h.effdt)
if statuses:
return statuses[-1].acad_prog
else:
return None
# program selection methods:
def by_key(self, gs):
return gs.config.get(SIMS_SOURCE, 'none') == self.import_key()
def by_adm_appl_nbr(self, gs):
return (gs.config.get('adm_appl_nbr', 'none') == self.adm_appl_nbr)
def by_program_and_start(self, gs):
return (self.last_program in GradCareer.reverse_program_map[gs.program]
and gs.start_semester
and gs.start_semester.name == self.admit_term
and 'adm_appl_nbr' not in gs.config and SIMS_SOURCE not in gs.config)
def by_similar_program_and_start(self, gs):
return (self.last_program in GradCareer.reverse_program_map[gs.program]
and gs.start_semester
and gs.start_semester.offset_name(-2) <= self.admit_term <= gs.start_semester.offset_name(2)
and 'adm_appl_nbr' not in gs.config and SIMS_SOURCE not in gs.config
)
def by_program_history(self, gs):
gph = GradProgramHistory.objects.filter(student=gs, program=GradCareer.program_map[self.last_program], start_semester=gs.start_semester)
return gph.exists()
def by_hail_mary(self, gs):
return (self.last_program in GradCareer.reverse_program_map[gs.program]
and (not gs.start_semester
or gs.start_semester.offset_name(-4) <= self.admit_term <= gs.start_semester.offset_name(4))
and 'adm_appl_nbr' not in gs.config and SIMS_SOURCE not in gs.config
)
# ways we have to find a matching GradStudent, in decreasing order of rigidness
GS_SELECTORS = [ # (method_name, is_okay_to_find_multiple_matches?)
('by_key', False),
('by_adm_appl_nbr', False),
('by_program_and_start', True),
('by_similar_program_and_start', True),
#('by_program_history', False),
#('by_hail_mary', False),
]
def find_gradstudent(self, verbosity, dry_run):
gss = GradStudent.objects.filter(person__emplid=self.emplid, program__unit=self.unit).select_related('start_semester', 'program__unit', 'person')
gss = list(gss)
if self.admit_term < RELEVANT_PROGRAM_START:
return
for method, multiple_okay in GradCareer.GS_SELECTORS:
by_selector = [gs for gs in gss if getattr(self, method)(gs)]
#print method, by_selector
if len(by_selector) == 1:
return by_selector[0]
elif len(by_selector) > 1:
if multiple_okay:
return by_selector[-1]
else:
raise ValueError("Multiple records found by %s for %s." % (method, self))
if GradCareer.program_map[self.last_program].unit.slug == 'cmpt' and self.admit_term < CMPT_CUTOFF:
# Don't try to probe the depths of history for CMPT. You'll hurt yourself.
# We have nice clean adm_appl_nbrs for CMPT_CUTOFF onwards, so the reliable GS_SELECTORS will find the student
return
if verbosity:
print("New grad student career found: %s/%s in %s starting %s." % (self.emplid, self.unit.slug, self.last_program, self.admit_term))
# can't find anything in database: create new
gs = GradStudent(person=add_person(self.emplid, commit=(not dry_run)))
# everything else updated by gs.update_status_fields later
gs.program = GradCareer.program_map[self.last_program] # ...but this is needed to save
if not dry_run:
gs.save() # get gs.id filled in for foreign keys elsewhere
return gs
def fill_gradstudent(self, verbosity, dry_run):
gs = self.find_gradstudent(verbosity=verbosity, dry_run=dry_run)
# be extra sure we aren't seeing multiple-unit GradStudent objects
units = set(GradProgramHistory.objects.filter(student=gs).values_list('program__unit', flat=True))
if len(units) > 1:
if verbosity:
raise ValueError("Grad Student %s (%i) has programs in multiple units: that shouldn't be." % (gs.slug, gs.id))
self.gradstudent = gs
def get_student_info(self):
student_info = {
'student': self.gradstudent,
'career': self,
'statuses': list(GradStatus.objects.filter(student=self.gradstudent, hidden=False)
.select_related('start').order_by('start__name', 'start_date')),
'programs': list(GradProgramHistory.objects.filter(student=self.gradstudent)
.select_related('start_semester', 'program').order_by('start_semester__name', 'starting')),
'committee': list(Supervisor.objects.filter(student=self.gradstudent, removed=False) \
.exclude(supervisor_type='POT')),
'real_admit_term': self.admit_term,
}
return student_info
def update_local_data(self, verbosity, dry_run):
"""
Update local data for the GradStudent using what we found in SIMS
"""
# make sure we can find it easily next time
self.gradstudent.config[SIMS_SOURCE] = self.import_key()
if self.adm_appl_nbr:
self.gradstudent.config['adm_appl_nbr'] = self.adm_appl_nbr
if self.metadata:
self.metadata.update_local_data(self.gradstudent, verbosity=verbosity, dry_run=dry_run)
student_info = self.get_student_info()
self.student_info = student_info
for h in self.happenings:
# do this first for everything so a second pass can try harder to find things not matching in the first pass
h.find_local_data(student_info, verbosity=verbosity)
for h in self.happenings:
h.update_local_data(student_info, verbosity=verbosity, dry_run=dry_run)
# research area: let anything manually entered/changed win.
if self.research_areas and not self.gradstudent.research_area:
r = ' | '.join(self.research_areas)
self.gradstudent.research_area = r + ' (from application)'
if verbosity > 1:
print("* Setting research area for %s/%s." % (self.emplid, self.unit.slug))
# are there any GradProgramHistory objects happening before the student actually started (because they
# deferred)? If so, defer them too.
premature_gph = GradProgramHistory.objects.filter(student=self.gradstudent,
start_semester__name__lt=self.admit_term)
for gph in premature_gph:
gph.start_semester = STRM_MAP[self.admit_term]
if verbosity:
print("Deferring program start for %s/%s to %s." % (self.emplid, self.unit.slug, self.admit_term))
if not dry_run:
gph.save()
# TODO: should we set GradStudent.config['start_semester'] here and be done with it?
if not dry_run:
self.gradstudent.update_status_fields()
self.gradstudent.save_if_dirty()
def find_rogue_local_data(self, verbosity, dry_run):
"""
Find any local data that doesn't seem to belong and report it.
"""
extra_statuses = [s for s in self.student_info['statuses'] if SIMS_SOURCE not in s.config]
extra_programs = [p for p in self.student_info['programs'] if SIMS_SOURCE not in p.config]
extra_committee = [c for c in self.student_info['committee'] if SIMS_SOURCE not in c.config]
# if self.unit.slug == 'cmpt':
# # doesn't make sense for CMPT, since we're not importing everything else
# return
if verbosity:
for s in extra_statuses:
print("Rogue grad status: %s was %s in %s" % (self.emplid, SHORT_STATUSES[s.status], s.start.name))
for p in extra_programs:
print("Rogue program change: %s in %s as of %s." % (self.emplid, p.program.slug, p.start_semester.name))
for c in extra_committee:
print("Rogue committee member: %s is a %s for %s" % (c.sortname(), SUPERVISOR_TYPE[c.supervisor_type], self.emplid))
| gpl-3.0 | -4,438,936,686,740,990,500 | 43.635714 | 153 | 0.614898 | false |
graik/biskit | archive_biskit2/scripts/Mod/setup_validation.py | 1 | 1908 | #!/usr/bin/env python
##
## Biskit, a toolkit for the manipulation of macromolecular structures
## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You find a copy of the GNU General Public License in the file
## license.txt along with this program; if not, write to the Free
## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
## Contributions: Olivier PERIN
from Biskit.Mod.ValidationSetup import ValidationSetup as VS
import Biskit.tools as T
import sys, os, os.path
def _use( o ):
print """
Setup the cross-validation folder for one or several projects
Syntax: setup_validation.py [ -o |project folder(s)| ]
Options:
-o .. one or several project folders (default: current)
-? or -help .. this help screen
Default options:
"""
for key, value in o.items():
print "\t-",key, "\t",value
sys.exit(0)
if __name__ == '__main__':
options = T.cmdDict({'o':[ os.getcwd() ]})
if '?' in options or 'help' in options:
_use( options )
folders = T.toList( options['o'] )
if not os.path.exists( folders[0] +'/templates'):
print 'Current directory is not a valid modeling folder.'
_use( options )
T.flushPrint( "Creating folders and links...\n" )
for f in folders:
sv = VS(outFolder=f)
sv.go(f)
T.flushPrint( "done\n" )
| gpl-3.0 | 2,084,467,221,445,404,400 | 27.909091 | 70 | 0.650419 | false |
callofdutyops/YXH2016724098982 | eye_eval.py | 1 | 4555 | """Evaluation for eye.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import eye_model
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('eval_dir', '/tmp/eye_eval',
"""Directory where to write event logs.""")
tf.app.flags.DEFINE_string('eval_data', 'test',
"""Either 'test' or 'train_eval'.""")
tf.app.flags.DEFINE_string('checkpoint_dir', '/tmp/eye_train',
"""Directory where to read model checkpoints.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,
"""How often to run the eval.""")
tf.app.flags.DEFINE_integer('num_examples', 35,
"""Number of examples to run.""")
tf.app.flags.DEFINE_boolean('run_once', False,
"""Whether to run eval only once.""")
def eval_once(saver, summary_writer, top_k_op, summary_op):
"""Run Eval once.
Args:
saver: Saver.
summary_writer: Summary writer.
top_k_op: Top K op.
summary_op: Summary op.
"""
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
# Assuming model_checkpoint_path looks something like:
# /my-favorite-path/eye_train/model.ckpt-0,
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
num_iter = int(math.ceil(FLAGS.num_examples / FLAGS.batch_size))
true_count = 0 # Counts the number of correct predictions.
total_sample_count = num_iter * FLAGS.batch_size
step = 0
while step < num_iter and not coord.should_stop():
predictions = sess.run([top_k_op])
true_count += np.sum(predictions)
step += 1
# Compute precision @ 1.
precision = true_count / total_sample_count
print('%s: precision @ 1 = %.3f' % (datetime.now(), precision))
summary = tf.Summary()
summary.ParseFromString(sess.run(summary_op))
summary.value.add(tag='Precision @ 1', simple_value=precision)
summary_writer.add_summary(summary, global_step)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def evaluate():
"""Eval eye images for a number of steps."""
with tf.Graph().as_default() as g:
# Get images and labels of eye.
eval_data = FLAGS.eval_data == 'test'
images, labels = eye_model.inputs(eval_data=eval_data)
# Build a Graph that computes the logits predictions from the
# inference model.
logits = eye_model.inference(images)
# Calculate predictions.
top_k_op = tf.nn.in_top_k(logits, labels, 1)
# Restore the moving average version of the learned variables for eval.
variable_averages = tf.train.ExponentialMovingAverage(
eye_model.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
while True:
eval_once(saver, summary_writer, top_k_op, summary_op)
if FLAGS.run_once:
break
time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
if tf.gfile.Exists(FLAGS.eval_dir):
tf.gfile.DeleteRecursively(FLAGS.eval_dir)
tf.gfile.MakeDirs(FLAGS.eval_dir)
evaluate()
if __name__ == '__main__':
tf.app.run()
| mit | 1,335,802,770,314,994,700 | 35.44 | 82 | 0.591218 | false |
kayhayen/Nuitka | tests/basics/Classes34.py | 1 | 1298 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum
print("Enum class with duplicate enumeration values:")
try:
class Color(Enum):
red = 1
green = 2
blue = 3
red = 4
print("not allowed to get here")
except Exception as e:
print("Occurred", e)
print("Class variable that conflicts with closure variable:")
def testClassNamespaceOverridesClosure():
# See #17853.
x = 42
class X:
locals()["x"] = 43
y = x
print("should be 43:", X.y)
testClassNamespaceOverridesClosure()
| apache-2.0 | -3,027,807,181,180,732,400 | 25.489796 | 79 | 0.671032 | false |
PainNarrativesLab/TextTools | NgramTools.py | 1 | 5758 | """
Previously in TextTools
Created by adam on 11/11/15
"""
__author__ = 'adam'
import nltk
import nltk.collocations
import nltk.metrics
# NOTE: IModifier and NgramError are referenced below but are expected to be
# provided by the surrounding TextTools package; they are not defined here.
class NgramGetter(object):
"""
Abstract parent class for extracting ngrams.
Attributes:
        collocation_finder: One of nltk's collocation finder tools (e.g., BigramCollocationFinder)
top_likelihood_ratio:
measurement_tool: One of nltk's measurement tools (e.g., nltk.collocations.BigramAssocMeasures)
modifiers: IModifier instantiating tool for modifying the text before calculating ngrams
ngrams: List of ngrams
raw_freq: Frequency distribution of ngrams
sorted_ngrams: List of tuples sorted by self.scored_ngrams
top_pmi: Variable number of n-grams with the highest Pointwise Mutual Information (i.e., which occur together
more often than would be expected)
word_bag: List of text to process
"""
def __init__(self):
self.modifiers = []
self.ngram_filters = []
self.word_bag = []
self.ngrams = []
        if not getattr(self, 'measurement_tool', None):
            raise NotImplementedError
def add_modifier(self, iModifier):
assert(isinstance(iModifier, IModifier))
self.modifiers.append(iModifier)
def _run_modifiers(self):
"""
Calls the modifiers in sequence and stores the results back in word_bag
"""
for modifier in self.modifiers:
self.word_bag = [modifier.process(w) for w in self.word_bag]
def add_filter(self, iNgramFilter):
"""
Adds a filter to be run after the ngrams are created
:param iNgramFilter:
:return:
"""
self.ngram_filters.append(iNgramFilter)
def apply_filters(self):
for ftr in self.ngram_filters:
self.collocation_finder.apply_ngram_filter(ftr)
def process(self, word_bag, min_freq=3, get_top=10, **kwargs):
"""
Runs any modifiers (stemmers, lemmatizers, etc) on the list of terms and
then extracts the ngrams
Args:
get_top: The cut off for ngrams to get stats for
min_freq: Integer of minimum number of appearances of ngram to extract
word_bag: List of strings to extract ngrams from. Should already be filtered.
"""
raise NotImplementedError
def _calculate_statistics(self, get_top=10, **kwargs):
"""
Arguments:
get_top: The cut off for ngrams to get stats for
"""
        self.top_pmi = self.collocation_finder.nbest(self.measurement_tool.pmi, get_top)
self.raw_freq = self.collocation_finder.score_ngrams(self.measurement_tool.raw_freq)
self.sorted_ngrams = (ngram for ngram, score in self.raw_freq)
self.top_likelihood_ratio = self.collocation_finder.nbest(self.measurement_tool.likelihood_ratio, get_top)
class BigramGetter(NgramGetter):
"""
Extracts 2-grams from a word bag and calculates statistics
Attributes:
top_pmi: Variable number of n-grams with the highest Pointwise Mutual Information (i.e., which occur together
more often than would be expected)
top_likelihood_ratio:
raw_freq: Frequency distribution of ngrams
sorted_ngrams: List of tuples sorted by self.scored_ngrams
"""
def __init__(self):
self.measurement_tool = nltk.metrics.BigramAssocMeasures()
NgramGetter.__init__(self)
def process(self, word_bag, min_freq=3, get_top=10, **kwargs):
"""
Arguments:
word_bag: List of strings
"""
        assert(isinstance(word_bag, list))
        self.word_bag = word_bag  # store the input so _run_modifiers() and the finder operate on it
        try:
            self._run_modifiers()
            self.collocation_finder = nltk.collocations.BigramCollocationFinder.from_words(self.word_bag)
            self.collocation_finder.apply_freq_filter(min_freq)
        except NgramError:  # error while finding collocations for bigram
            pass
        try:
            self._calculate_statistics(get_top)
        except NgramError:  # error while calculating statistics for bigram
            pass
def _calculate_statistics(self, get_top=10, **kwargs):
"""
A number of measures are available to score collocations or other associations.
The arguments to measure functions are marginals of a contingency table,
in the bigram case (n_ii, (n_ix, n_xi), n_xx):
w1 ~w1
------ ------
w2 | n_ii | n_oi | = n_xi
------ ------
~w2 | n_io | n_oo |
------ ------
= n_ix TOTAL = n_xx
We test their calculation using some known values presented
in Manning and Schutze's text and other papers.
Student's t: examples from Manning and Schutze 5.3.2
"""
NgramGetter._calculate_statistics(self, get_top)
# self.measurement_tool.student_t()
# self.measurement_tool.chi_sq()
class TrigramGetter(NgramGetter):
"""
Extracts 3-grams from a word bag and calculates statistics
"""
def __init__(self):
self.measurement_tool = nltk.metrics.TrigramAssocMeasures()
NgramGetter.__init__(self)
def process(self, word_bag, min_freq=3, get_top=10, **kwargs):
"""
Arguments:
word_bag: List of strings
"""
        assert(isinstance(word_bag, list))
        self.word_bag = word_bag  # store the input so _run_modifiers() and the finder operate on it
        try:
            self._run_modifiers()
            self.collocation_finder = nltk.collocations.TrigramCollocationFinder.from_words(self.word_bag)
            self.collocation_finder.apply_freq_filter(min_freq)
        except NgramError:  # error while finding collocations for trigram
            pass
        try:
            self._calculate_statistics(get_top)
        except NgramError:  # error while calculating statistics for trigram
            pass
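# Minimal usage sketch (assumes nltk and its data are installed and that the
# surrounding TextTools package supplies IModifier/NgramError):
#
#   words = ['the', 'quick', 'brown', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
#   bigrams = BigramGetter()
#   bigrams.process(words, min_freq=1, get_top=5)
#   print(bigrams.top_pmi)              # highest-PMI bigrams
#   print(bigrams.top_likelihood_ratio)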
| mit | 7,105,661,231,997,221,000 | 35.910256 | 117 | 0.619833 | false |
naoliv/osmose-backend | plugins/Source.py | 1 | 3739 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Etienne Chové <[email protected]> 2010 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from plugins.Plugin import Plugin
import re
class Source(Plugin):
only_for = ["FR"]
def init(self, logger):
Plugin.init(self, logger)
self.errors[706] = { "item": 3020, "level": 1, "tag": ["source", "fix:chair"], "desc": T_(u"Illegal or incomplete source tag") }
self.errors[707] = { "item": 2040, "level": 3, "tag": ["source", "fix:chair"], "desc": T_(u"Missing source tag") }
self.IGN = re.compile(".*(\wign)|(ign\w).*")
def check(self, tags):
if u"AAAA" in tags[u"source"]:
return [(706,0,{"fr":u"Le tag source contient AAAA", "en":u"Source tag contains AAAA"})]
if u"Cartographes Associés" in tags[u"source"]:
return [(706,1,{"en":u"Cartographes Associés"})]
source = tags[u"source"].lower()
if u"google" in source:
return [(706,2,{"en":u"Google"})]
if u"geoportail" in source or u"géoportail" in source:
return [(706,3,{"en":u"Géoportail"})]
if u"ign" in source and not u"geofla" in source and not u"cartographie réglementaire" in source and not u"géodési" in source and not u"500" in source:
if not self.IGN.match(source):
return [(706,4,{"en":u"IGN"})]
if u"camptocamp" in source:
return [(706,5,{"en":u"CampToCamp"})]
def node(self, data, tags):
if u"source" not in tags:
return
return self.check(tags)
def way(self, data, tags, nds):
if u"source" not in tags:
if tags.get(u"boundary", None) == u"administrative":
return [(707,0,{})]
return
return self.check(tags)
def relation(self, data, tags, members):
if u"source" not in tags:
return
return self.check(tags)
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
def test(self):
a = Source(None)
a.init(None)
for d in [{u"source":u"nign"},
{u"source":u"ignoville"},
{u"source":u"IGN géodésique"},
{u"source":u"road sign"},
]:
assert not a.node(None, d), d
for d in [{u"source":u"IGN"}]:
self.check_err(a.node(None, d), d)
| gpl-3.0 | -6,301,568,131,360,903,000 | 42.870588 | 158 | 0.472513 | false |
tboyce1/home-assistant | homeassistant/components/device_tracker/owntracks.py | 2 | 16441 | """
Device tracker platform that adds support for OwnTracks over MQTT.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.owntracks/
"""
import asyncio
import base64
import json
import logging
from collections import defaultdict
import voluptuous as vol
import homeassistant.components.mqtt as mqtt
import homeassistant.helpers.config_validation as cv
from homeassistant.components import zone as zone_comp
from homeassistant.components.device_tracker import (
PLATFORM_SCHEMA, ATTR_SOURCE_TYPE, SOURCE_TYPE_BLUETOOTH_LE,
SOURCE_TYPE_GPS
)
from homeassistant.const import STATE_HOME
from homeassistant.core import callback
from homeassistant.util import slugify, decorator
REQUIREMENTS = ['libnacl==1.6.1']
_LOGGER = logging.getLogger(__name__)
HANDLERS = decorator.Registry()
BEACON_DEV_ID = 'beacon'
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_SECRET = 'secret'
CONF_WAYPOINT_IMPORT = 'waypoints'
CONF_WAYPOINT_WHITELIST = 'waypoint_whitelist'
CONF_MQTT_TOPIC = 'mqtt_topic'
CONF_REGION_MAPPING = 'region_mapping'
CONF_EVENTS_ONLY = 'events_only'
DEPENDENCIES = ['mqtt']
DEFAULT_OWNTRACKS_TOPIC = 'owntracks/#'
REGION_MAPPING = {}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
vol.Optional(CONF_EVENTS_ONLY, default=False): cv.boolean,
vol.Optional(CONF_MQTT_TOPIC, default=DEFAULT_OWNTRACKS_TOPIC):
mqtt.valid_subscribe_topic,
vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
cv.ensure_list, [cv.string]),
vol.Optional(CONF_SECRET): vol.Any(
vol.Schema({vol.Optional(cv.string): cv.string}),
cv.string),
vol.Optional(CONF_REGION_MAPPING, default=REGION_MAPPING): dict
})
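# Illustrative configuration.yaml entry for this platform (values are
# examples only; the keys mirror the schema above):
#
#   device_tracker:
#     - platform: owntracks
#       max_gps_accuracy: 200
#       waypoints: true
#       mqtt_topic: "owntracks/#"
#       events_only: false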
def get_cipher():
"""Return decryption function and length of key.
Async friendly.
"""
from libnacl import crypto_secretbox_KEYBYTES as KEYLEN
from libnacl.secret import SecretBox
def decrypt(ciphertext, key):
"""Decrypt ciphertext using key."""
return SecretBox(key).decrypt(ciphertext)
return (KEYLEN, decrypt)
@asyncio.coroutine
def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up an OwnTracks tracker."""
context = context_from_config(async_see, config)
@asyncio.coroutine
def async_handle_mqtt_message(topic, payload, qos):
"""Handle incoming OwnTracks message."""
try:
message = json.loads(payload)
except ValueError:
# If invalid JSON
_LOGGER.error("Unable to parse payload as JSON: %s", payload)
return
message['topic'] = topic
yield from async_handle_message(hass, context, message)
yield from mqtt.async_subscribe(
hass, context.mqtt_topic, async_handle_mqtt_message, 1)
return True
def _parse_topic(topic, subscribe_topic):
"""Parse an MQTT topic {sub_topic}/user/dev, return (user, dev) tuple.
Async friendly.
"""
subscription = subscribe_topic.split('/')
try:
user_index = subscription.index('#')
except ValueError:
_LOGGER.error("Can't parse subscription topic: '%s'", subscribe_topic)
raise
topic_list = topic.split('/')
try:
user, device = topic_list[user_index], topic_list[user_index + 1]
except IndexError:
_LOGGER.error("Can't parse topic: '%s'", topic)
raise
return user, device
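# For example (hypothetical values): with subscribe_topic 'owntracks/#',
# _parse_topic('owntracks/jane/phone', 'owntracks/#') returns ('jane', 'phone').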
def _parse_see_args(message, subscribe_topic):
"""Parse the OwnTracks location parameters, into the format see expects.
Async friendly.
"""
user, device = _parse_topic(message['topic'], subscribe_topic)
dev_id = slugify('{}_{}'.format(user, device))
kwargs = {
'dev_id': dev_id,
'host_name': user,
'gps': (message['lat'], message['lon']),
'attributes': {}
}
if 'acc' in message:
kwargs['gps_accuracy'] = message['acc']
if 'batt' in message:
kwargs['battery'] = message['batt']
if 'vel' in message:
kwargs['attributes']['velocity'] = message['vel']
if 'tid' in message:
kwargs['attributes']['tid'] = message['tid']
if 'addr' in message:
kwargs['attributes']['address'] = message['addr']
if 't' in message:
if message['t'] == 'c':
kwargs['attributes'][ATTR_SOURCE_TYPE] = SOURCE_TYPE_GPS
if message['t'] == 'b':
kwargs['attributes'][ATTR_SOURCE_TYPE] = SOURCE_TYPE_BLUETOOTH_LE
return dev_id, kwargs
def _set_gps_from_zone(kwargs, location, zone):
"""Set the see parameters from the zone parameters.
Async friendly.
"""
if zone is not None:
kwargs['gps'] = (
zone.attributes['latitude'],
zone.attributes['longitude'])
kwargs['gps_accuracy'] = zone.attributes['radius']
kwargs['location_name'] = location
return kwargs
def _decrypt_payload(secret, topic, ciphertext):
"""Decrypt encrypted payload."""
try:
keylen, decrypt = get_cipher()
except OSError:
_LOGGER.warning(
"Ignoring encrypted payload because libsodium not installed")
return None
if isinstance(secret, dict):
key = secret.get(topic)
else:
key = secret
if key is None:
_LOGGER.warning(
"Ignoring encrypted payload because no decryption key known "
"for topic %s", topic)
return None
key = key.encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b'\0')
try:
ciphertext = base64.b64decode(ciphertext)
message = decrypt(ciphertext, key)
message = message.decode("utf-8")
_LOGGER.debug("Decrypted payload: %s", message)
return message
except ValueError:
_LOGGER.warning(
"Ignoring encrypted payload because unable to decrypt using "
"key for topic %s", topic)
return None
def context_from_config(async_see, config):
"""Create an async context from Home Assistant config."""
max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
waypoint_import = config.get(CONF_WAYPOINT_IMPORT)
waypoint_whitelist = config.get(CONF_WAYPOINT_WHITELIST)
secret = config.get(CONF_SECRET)
region_mapping = config.get(CONF_REGION_MAPPING)
events_only = config.get(CONF_EVENTS_ONLY)
mqtt_topic = config.get(CONF_MQTT_TOPIC)
return OwnTracksContext(async_see, secret, max_gps_accuracy,
waypoint_import, waypoint_whitelist,
region_mapping, events_only, mqtt_topic)
class OwnTracksContext:
"""Hold the current OwnTracks context."""
def __init__(self, async_see, secret, max_gps_accuracy, import_waypoints,
waypoint_whitelist, region_mapping, events_only, mqtt_topic):
"""Initialize an OwnTracks context."""
self.async_see = async_see
self.secret = secret
self.max_gps_accuracy = max_gps_accuracy
self.mobile_beacons_active = defaultdict(set)
self.regions_entered = defaultdict(list)
self.import_waypoints = import_waypoints
self.waypoint_whitelist = waypoint_whitelist
self.region_mapping = region_mapping
self.events_only = events_only
self.mqtt_topic = mqtt_topic
@callback
def async_valid_accuracy(self, message):
"""Check if we should ignore this message."""
acc = message.get('acc')
if acc is None:
return False
try:
acc = float(acc)
except ValueError:
return False
if acc == 0:
_LOGGER.warning(
"Ignoring %s update because GPS accuracy is zero: %s",
message['_type'], message)
return False
if self.max_gps_accuracy is not None and \
acc > self.max_gps_accuracy:
_LOGGER.info("Ignoring %s update because expected GPS "
"accuracy %s is not met: %s",
message['_type'], self.max_gps_accuracy,
message)
return False
return True
@asyncio.coroutine
def async_see_beacons(self, hass, dev_id, kwargs_param):
"""Set active beacons to the current location."""
kwargs = kwargs_param.copy()
# Mobile beacons should always be set to the location of the
        # tracking device. We fetch the device state and make the necessary
# changes to kwargs.
device_tracker_state = hass.states.get(
"device_tracker.{}".format(dev_id))
if device_tracker_state is not None:
acc = device_tracker_state.attributes.get("gps_accuracy")
lat = device_tracker_state.attributes.get("latitude")
lon = device_tracker_state.attributes.get("longitude")
kwargs['gps_accuracy'] = acc
kwargs['gps'] = (lat, lon)
# the battery state applies to the tracking device, not the beacon
# kwargs location is the beacon's configured lat/lon
kwargs.pop('battery', None)
for beacon in self.mobile_beacons_active[dev_id]:
kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
kwargs['host_name'] = beacon
yield from self.async_see(**kwargs)
@HANDLERS.register('location')
@asyncio.coroutine
def async_handle_location_message(hass, context, message):
"""Handle a location message."""
if not context.async_valid_accuracy(message):
return
if context.events_only:
_LOGGER.debug("Location update ignored due to events_only setting")
return
dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
if context.regions_entered[dev_id]:
_LOGGER.debug(
"Location update ignored, inside region %s",
            context.regions_entered[dev_id][-1])
return
yield from context.async_see(**kwargs)
yield from context.async_see_beacons(hass, dev_id, kwargs)
@asyncio.coroutine
def _async_transition_message_enter(hass, context, message, location):
"""Execute enter event."""
zone = hass.states.get("zone.{}".format(slugify(location)))
dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
if zone is None and message.get('t') == 'b':
# Not a HA zone, and a beacon so mobile beacon.
# kwargs will contain the lat/lon of the beacon
# which is not where the beacon actually is
# and is probably set to 0/0
beacons = context.mobile_beacons_active[dev_id]
if location not in beacons:
beacons.add(location)
_LOGGER.info("Added beacon %s", location)
yield from context.async_see_beacons(hass, dev_id, kwargs)
else:
# Normal region
regions = context.regions_entered[dev_id]
if location not in regions:
regions.append(location)
_LOGGER.info("Enter region %s", location)
_set_gps_from_zone(kwargs, location, zone)
yield from context.async_see(**kwargs)
yield from context.async_see_beacons(hass, dev_id, kwargs)
@asyncio.coroutine
def _async_transition_message_leave(hass, context, message, location):
"""Execute leave event."""
dev_id, kwargs = _parse_see_args(message, context.mqtt_topic)
regions = context.regions_entered[dev_id]
if location in regions:
regions.remove(location)
beacons = context.mobile_beacons_active[dev_id]
if location in beacons:
beacons.remove(location)
_LOGGER.info("Remove beacon %s", location)
yield from context.async_see_beacons(hass, dev_id, kwargs)
else:
new_region = regions[-1] if regions else None
if new_region:
# Exit to previous region
zone = hass.states.get(
"zone.{}".format(slugify(new_region)))
_set_gps_from_zone(kwargs, new_region, zone)
_LOGGER.info("Exit to %s", new_region)
yield from context.async_see(**kwargs)
yield from context.async_see_beacons(hass, dev_id, kwargs)
return
_LOGGER.info("Exit to GPS")
# Check for GPS accuracy
if context.async_valid_accuracy(message):
yield from context.async_see(**kwargs)
yield from context.async_see_beacons(hass, dev_id, kwargs)
@HANDLERS.register('transition')
@asyncio.coroutine
def async_handle_transition_message(hass, context, message):
"""Handle a transition message."""
if message.get('desc') is None:
_LOGGER.error(
"Location missing from `Entering/Leaving` message - "
"please turn `Share` on in OwnTracks app")
return
# OwnTracks uses - at the start of a beacon zone
# to switch on 'hold mode' - ignore this
location = message['desc'].lstrip("-")
# Create a layer of indirection for Owntracks instances that may name
# regions differently than their HA names
if location in context.region_mapping:
location = context.region_mapping[location]
if location.lower() == 'home':
location = STATE_HOME
if message['event'] == 'enter':
yield from _async_transition_message_enter(
hass, context, message, location)
elif message['event'] == 'leave':
yield from _async_transition_message_leave(
hass, context, message, location)
else:
_LOGGER.error(
"Misformatted mqtt msgs, _type=transition, event=%s",
message['event'])
@asyncio.coroutine
def async_handle_waypoint(hass, name_base, waypoint):
"""Handle a waypoint."""
name = waypoint['desc']
pretty_name = '{} - {}'.format(name_base, name)
lat = waypoint['lat']
lon = waypoint['lon']
rad = waypoint['rad']
# check zone exists
entity_id = zone_comp.ENTITY_ID_FORMAT.format(slugify(pretty_name))
# Check if state already exists
if hass.states.get(entity_id) is not None:
return
zone = zone_comp.Zone(hass, pretty_name, lat, lon, rad,
zone_comp.ICON_IMPORT, False)
zone.entity_id = entity_id
yield from zone.async_update_ha_state()
@HANDLERS.register('waypoint')
@HANDLERS.register('waypoints')
@asyncio.coroutine
def async_handle_waypoints_message(hass, context, message):
"""Handle a waypoints message."""
if not context.import_waypoints:
return
if context.waypoint_whitelist is not None:
user = _parse_topic(message['topic'], context.mqtt_topic)[0]
if user not in context.waypoint_whitelist:
return
if 'waypoints' in message:
wayps = message['waypoints']
else:
wayps = [message]
_LOGGER.info("Got %d waypoints from %s", len(wayps), message['topic'])
name_base = ' '.join(_parse_topic(message['topic'], context.mqtt_topic))
for wayp in wayps:
yield from async_handle_waypoint(hass, name_base, wayp)
@HANDLERS.register('encrypted')
@asyncio.coroutine
def async_handle_encrypted_message(hass, context, message):
"""Handle an encrypted message."""
plaintext_payload = _decrypt_payload(context.secret, message['topic'],
message['data'])
if plaintext_payload is None:
return
decrypted = json.loads(plaintext_payload)
decrypted['topic'] = message['topic']
yield from async_handle_message(hass, context, decrypted)
@HANDLERS.register('lwt')
@HANDLERS.register('configuration')
@HANDLERS.register('beacon')
@HANDLERS.register('cmd')
@HANDLERS.register('steps')
@HANDLERS.register('card')
@asyncio.coroutine
def async_handle_not_impl_msg(hass, context, message):
"""Handle valid but not implemented message types."""
_LOGGER.debug('Not handling %s message: %s', message.get("_type"), message)
@asyncio.coroutine
def async_handle_unsupported_msg(hass, context, message):
"""Handle an unsupported or invalid message type."""
_LOGGER.warning('Received unsupported message type: %s.',
message.get('_type'))
@asyncio.coroutine
def async_handle_message(hass, context, message):
"""Handle an OwnTracks message."""
msgtype = message.get('_type')
handler = HANDLERS.get(msgtype, async_handle_unsupported_msg)
yield from handler(hass, context, message)
| apache-2.0 | -5,057,499,817,063,968,000 | 31.750996 | 79 | 0.637127 | false |
ytsvetko/adjective_supersense_classifier | src/eval.py | 1 | 1937 | #!/usr/bin/env python2.7
from __future__ import division
import sys
import json
import argparse
import collections
parser = argparse.ArgumentParser()
parser.add_argument("--predicted_results", required=True)
parser.add_argument("--held_out_seed", required=True)
parser.add_argument("--out_file", default=None)
args = parser.parse_args()
def CollectResults(predicted_results, seed, out_file):
def NormalizedHistogram(hist, sum_of_all):
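    # Build a cumulative distribution: entry i is the fraction of test words
    # whose gold label appears among the top i+1 predicted labels.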
normalized = []
total = 0
for index in xrange(max(hist)+1):
total += hist[index]
normalized.append(total / sum_of_all)
return normalized
hist = collections.Counter()
total_count = 0
for line in open(predicted_results):
instance, label, posteriors_str = line.strip().split("\t")
posteriors = json.loads(posteriors_str)
sorted_posteriors = sorted(posteriors.iteritems(), key=lambda x: x[1], reverse=True)
sorted_labels = [k.lower() for k,v in sorted_posteriors]
    # instance is formatted like "covert_substance_antonym"; the word is the token before the first "_"
word = instance.split("_")[0].lower()
if word in seed:
min_pos = 100000
for label in seed[word]:
min_pos = min(min_pos, sorted_labels.index(label))
hist[min_pos] += 1
total_count += 1
normalized_hist = NormalizedHistogram(hist, total_count)
out_file.write(repr(normalized_hist))
out_file.write("\n")
def LoadSeed(seed_filename):
result = collections.defaultdict(set)
for line in open(seed_filename):
line = line.strip()
if len(line) == 0:
continue
tokens = line.split("\t")
if len(tokens) == 2:
word, label = tokens
else:
word, label, rel = tokens
result[word.lower()].add(label.lower())
return result
def main():
if args.out_file is not None:
out_file = open(args.out_file, "a")
else:
out_file = sys.stdout
seed = LoadSeed(args.held_out_seed)
CollectResults(args.predicted_results, seed, out_file)
if __name__ == '__main__':
main()
| gpl-2.0 | 1,734,206,193,779,853,800 | 25.902778 | 88 | 0.662881 | false |
OCA/partner-contact | partner_deduplicate_filter/tests/test_partner_deduplicate_filter.py | 1 | 3138 | # Copyright 2016 Tecnativa - Pedro M. Baeza
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo.tests import common
from odoo.tools.safe_eval import safe_eval
class TestDeduplicateFilter(common.TransactionCase):
def setUp(self):
super(TestDeduplicateFilter, self).setUp()
self.partner_1 = self.env["res.partner"].create(
{
"name": "Partner 1",
"email": "[email protected]",
"is_company": True,
"parent_id": False,
}
)
self.partner_1.copy()
self.partner_2 = self.env["res.partner"].create(
{
"name": "Partner 2",
"email": "[email protected]",
"is_company": False,
"parent_id": self.partner_1.id,
}
)
self.partner_2.copy()
self.partner_3 = self.env["res.partner"].create(
{
"name": "Partner 3",
"email": "[email protected]",
"is_company": False,
"parent_id": False,
}
)
self.partner_3.copy()
self.wizard = self.env["base.partner.merge.automatic.wizard"].create(
{"group_by_email": True}
)
def test_deduplicate_exclude_is_company(self):
self.wizard.exclude_is_company = True
self.wizard.action_start_manual_process()
matched_founds = 0
for line in self.wizard.line_ids:
match_ids = safe_eval(line.aggr_ids)
if self.partner_1.id in match_ids:
self.assertTrue(False, "Partner with is company not excluded")
if self.partner_2.id in match_ids:
matched_founds += 1
if self.partner_3.id in match_ids:
matched_founds += 1
self.assertEqual(matched_founds, 2)
def test_deduplicate_exclude_not_parent(self):
self.wizard.exclude_not_parent = True
self.wizard.action_start_manual_process()
matched_founds = 0
for line in self.wizard.line_ids:
match_ids = safe_eval(line.aggr_ids)
if self.partner_1.id in match_ids:
self.assertTrue(False, "Partner without parent not excluded")
if self.partner_3.id in match_ids:
self.assertTrue(False, "Partner without parent not excluded")
if self.partner_2.id in match_ids:
matched_founds += 1
self.assertEqual(matched_founds, 1)
def test_deduplicate_exclude_parent(self):
self.wizard.exclude_parent = True
self.wizard.action_start_manual_process()
matched_founds = 0
for line in self.wizard.line_ids:
match_ids = safe_eval(line.aggr_ids)
if self.partner_2.id in match_ids:
self.assertTrue(False, "Partner with parent not excluded")
if self.partner_1.id in match_ids:
matched_founds += 1
if self.partner_3.id in match_ids:
matched_founds += 1
self.assertEqual(matched_founds, 2)
| agpl-3.0 | 7,859,822,055,651,330,000 | 37.268293 | 78 | 0.555449 | false |
maxikov/attfocus | 2dbluetooths/naive_hist_reg/set_builder.py | 1 | 1572 | #!/usr/bin/env python
import coordmap
import glob
import random
#data_path = "../data/"
data_path = "../data_1_foot_grid/"
test_ratio = 0.02
def read_by_locid(locid):
print "Reading locid:", locid
filename = glob.glob(data_path + str(locid) + "-*.csv")[0]
f = open(filename, "r")
res = read_one_file(f)
f.close()
return res
def read_one_file(f):
res = []
for line in f.readlines()[1:]:
if not line:
break
d = map(float, line.split(','))
res.append(d)
return res
def build_x_y():
global data_path
X = []
Y = []
if data_path == "../data/":
for loc in coordmap.locids:
data = read_by_locid(loc)
X += data
Y += [list(coordmap._1to2[loc]) for _ in xrange(len(data))]
elif data_path == "../data_1_foot_grid/":
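        # Grid cells are named by their coordinates: the origin is "0", points
        # on the y axis are just str(y), and all other cells are str(x)+str(y).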
for _x in xrange(0, 8+1):
for _y in xrange(0, 4+1):
if _x == 0 and _y == 0:
loc = "0"
elif _x == 0:
loc = str(_y)
else:
loc = str(_x) + str(_y)
data = read_by_locid(loc)
X += [x[-4:] for x in data]
Y += [[_x, _y] for _ in xrange(len(data))]
return X, Y
def build_sets(X, Y):
training = [[], []]
test = [[], []]
for i in xrange(len(X)):
dst_set = training if random.uniform(0, 1) > test_ratio else test
dst_set[0].append(X[i])
dst_set[1].append(Y[i])
return training, test
def write_sets(training, test):
f = open("training_set.py", "w")
f.write(str(training))
f.close()
f = open("test_set.py", "w")
f.write(str(test))
f.close()
def main():
X, Y = build_x_y()
training, test = build_sets(X, Y)
write_sets(training, test)
if __name__ == "__main__":
main()
| gpl-3.0 | -653,928,471,155,588,000 | 19.415584 | 67 | 0.570611 | false |
fatrix/django-fastapp | fastapp/migrations/0011_auto__chg_field_instance_last_beat.py | 2 | 8409 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Instance.last_beat'
db.alter_column(u'fastapp_instance', 'last_beat', self.gf('django.db.models.fields.DateTimeField')(null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Instance.last_beat'
#raise RuntimeError("Cannot reverse this migration. 'Instance.last_beat' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'Instance.last_beat'
db.alter_column(u'fastapp_instance', 'last_beat', self.gf('django.db.models.fields.DateTimeField')(auto_now=True))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'fastapp.apy': {
'Meta': {'object_name': 'Apy'},
'base': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'apys'", 'null': 'True', 'to': u"orm['fastapp.Base']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'default': "'def func(self):\\n pass'", 'max_length': '8192'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'fastapp.authprofile': {
'Meta': {'object_name': 'AuthProfile'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '72'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'authprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'fastapp.base': {
'Meta': {'object_name': 'Base'},
'content': ('django.db.models.fields.CharField', [], {'default': '\'{% extends "fastapp/index.html" %}\\n{% block content %}\\n{% endblock %}\\n\'', 'max_length': '8192', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'related_name': "'+'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'fastapp.counter': {
'Meta': {'object_name': 'Counter'},
'apy': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'counter'", 'unique': 'True', 'to': u"orm['fastapp.Apy']"}),
'executed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'failed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'fastapp.executor': {
'Meta': {'object_name': 'Executor'},
'base': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'executor'", 'unique': 'True', 'to': u"orm['fastapp.Base']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_instances': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'pid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'})
},
u'fastapp.host': {
'Meta': {'object_name': 'Host'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'fastapp.instance': {
'Meta': {'object_name': 'Instance'},
'executor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'to': u"orm['fastapp.Executor']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_alive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_beat': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'})
},
u'fastapp.setting': {
'Meta': {'object_name': 'Setting'},
'base': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'setting'", 'to': u"orm['fastapp.Base']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '8192'})
}
}
complete_apps = ['fastapp']
| mit | 655,555,603,534,695,400 | 69.075 | 201 | 0.551195 | false |
ThinkEE/Kameleon | kameleon/model/query/base.py | 1 | 1587 | ################################################################################
# MIT License
#
# Copyright (c) 2017 Jean-Charles Fosse & Johann Bigler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
################################################################################
class Query(object):
"""
Base object representing a query
"""
def __init__(self, model_class):
self.model_class = model_class
self.database = model_class._meta.database
self._where = None
def __repr__(self):
return '%s' % (self.model_class)
| mit | 2,246,978,086,995,422,200 | 43.083333 | 80 | 0.659105 | false |
rclmenezes/sqlalchemy | test/orm/test_naturalpks.py | 1 | 37764 | """
Primary key changing capabilities and passive/non-passive cascading updates.
"""
from __future__ import with_statement
from sqlalchemy.testing import eq_, ne_, \
assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey, Unicode
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session, backref, Session
from sqlalchemy.orm.session import make_transient
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
def _backend_specific_fk_args():
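    # Pick foreign key options the backend can honor: deferred constraints
    # where supported, plain FKs where ON UPDATE CASCADE is unavailable,
    # and cascading updates otherwise.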
if testing.requires.deferrable_fks.enabled:
fk_args = dict(deferrable=True, initially='deferred')
elif not testing.requires.on_update_cascade.enabled:
fk_args = dict()
else:
fk_args = dict(onupdate='cascade')
return fk_args
class NaturalPKTest(fixtures.MappedTest):
# MySQL 5.5 on Windows crashes (the entire server, not the client)
# if you screw around with ON UPDATE CASCADE type of stuff.
__requires__ = 'skip_mysql_on_windows', 'on_update_or_deferrable_fks'
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
users = Table('users', metadata,
Column('username', String(50), primary_key=True),
Column('fullname', String(100)),
test_needs_fk=True)
addresses = Table('addresses', metadata,
Column('email', String(50), primary_key=True),
Column('username', String(50),
ForeignKey('users.username', **fk_args)),
test_needs_fk=True)
items = Table('items', metadata,
Column('itemname', String(50), primary_key=True),
Column('description', String(100)),
test_needs_fk=True)
users_to_items = Table('users_to_items', metadata,
Column('username', String(50),
ForeignKey('users.username', **fk_args),
primary_key=True),
Column('itemname', String(50),
ForeignKey('items.itemname', **fk_args),
primary_key=True),
test_needs_fk=True)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
class Item(cls.Comparable):
pass
def test_entity(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
assert sess.query(User).get('jack') is u1
u1.username = 'ed'
sess.flush()
def go():
assert sess.query(User).get('ed') is u1
self.assert_sql_count(testing.db, go, 0)
assert sess.query(User).get('jack') is None
sess.expunge_all()
u1 = sess.query(User).get('ed')
eq_(User(username='ed', fullname='jack'), u1)
def test_load_after_expire(self):
users, User = self.tables.users, self.classes.User
mapper(User, users)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
assert sess.query(User).get('jack') is u1
users.update(values={User.username:'jack'}).execute(username='ed')
# expire/refresh works off of primary key. the PK is gone
        # in this case so there's no way to look it up.  criterion-
# based session invalidation could solve this [ticket:911]
sess.expire(u1)
assert_raises(sa.orm.exc.ObjectDeletedError, getattr, u1, 'username')
sess.expunge_all()
assert sess.query(User).get('jack') is None
assert sess.query(User).get('ed').fullname == 'jack'
def test_flush_new_pk_after_expire(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
assert sess.query(User).get('jack') is u1
sess.expire(u1)
u1.username = 'ed'
sess.flush()
sess.expunge_all()
assert sess.query(User).get('ed').fullname == 'jack'
@testing.requires.on_update_cascade
def test_onetomany_passive(self):
self._test_onetomany(True)
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
def _test_onetomany(self, passive_updates):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
'addresses':relationship(Address, passive_updates=passive_updates)
})
mapper(Address, addresses)
sess = create_session()
u1 = User(username='jack', fullname='jack')
u1.addresses.append(Address(email='jack1'))
u1.addresses.append(Address(email='jack2'))
sess.add(u1)
sess.flush()
assert sess.query(Address).get('jack1') is u1.addresses[0]
u1.username = 'ed'
sess.flush()
assert u1.addresses[0].username == 'ed'
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
u1 = sess.query(User).get('ed')
u1.username = 'jack'
def go():
sess.flush()
if not passive_updates:
# test passive_updates=False;
#load addresses, update user, update 2 addresses
self.assert_sql_count(testing.db, go, 4)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
assert User(username='jack', addresses=[
Address(username='jack'),
Address(username='jack')]) == \
sess.query(User).get('jack')
u1 = sess.query(User).get('jack')
u1.addresses = []
u1.username = 'fred'
sess.flush()
sess.expunge_all()
assert sess.query(Address).get('jack1').username is None
u1 = sess.query(User).get('fred')
eq_(User(username='fred', fullname='jack'), u1)
@testing.requires.on_update_cascade
def test_manytoone_passive(self):
self._test_manytoone(True)
def test_manytoone_nonpassive(self):
self._test_manytoone(False)
def test_manytoone_nonpassive_cold_mapping(self):
"""test that the mapper-level m2o dependency processor
is set up even if the opposite side relationship
hasn't yet been part of a flush.
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
with testing.db.begin() as conn:
conn.execute(users.insert(),
username='jack', fullname='jack'
)
conn.execute(addresses.insert(),
email='jack1', username='jack'
)
conn.execute(addresses.insert(),
email='jack2', username='jack'
)
mapper(User, users)
mapper(Address, addresses, properties={
'user': relationship(User,
passive_updates=False)
})
sess = create_session()
u1 = sess.query(User).first()
a1, a2 = sess.query(Address).all()
u1.username = 'ed'
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 3)
def _test_manytoone(self, passive_updates):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user': relationship(User, passive_updates=passive_updates)
})
sess = create_session()
a1 = Address(email='jack1')
a2 = Address(email='jack2')
u1 = User(username='jack', fullname='jack')
a1.user = u1
a2.user = u1
sess.add(a1)
sess.add(a2)
sess.flush()
u1.username = 'ed'
def go():
sess.flush()
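        # With passive_updates the FK change is delegated to ON UPDATE CASCADE,
        # so only the users row is updated; otherwise the ORM issues additional
        # UPDATEs for the dependent address rows itself.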
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 3)
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
assert a1.username == a2.username == 'ed'
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
@testing.requires.on_update_cascade
def test_onetoone_passive(self):
self._test_onetoone(True)
def test_onetoone_nonpassive(self):
self._test_onetoone(False)
def _test_onetoone(self, passive_updates):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users, properties={
"address":relationship(Address, passive_updates=passive_updates,
uselist=False)
})
mapper(Address, addresses)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
a1 = Address(email='jack1')
u1.address = a1
sess.add(a1)
sess.flush()
u1.username = 'ed'
def go():
sess.flush()
if passive_updates:
sess.expire(u1, ['address'])
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 2)
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
sess.expunge_all()
eq_([Address(username='ed')], sess.query(Address).all())
@testing.requires.on_update_cascade
def test_bidirectional_passive(self):
self._test_bidirectional(True)
def test_bidirectional_nonpassive(self):
self._test_bidirectional(False)
def _test_bidirectional(self, passive_updates):
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={
'user': relationship(User, passive_updates=passive_updates,
backref='addresses')})
sess = create_session()
a1 = Address(email='jack1')
a2 = Address(email='jack2')
u1 = User(username='jack', fullname='jack')
a1.user = u1
a2.user = u1
sess.add(a1)
sess.add(a2)
sess.flush()
u1.username = 'ed'
(ad1, ad2) = sess.query(Address).all()
eq_([Address(username='jack'), Address(username='jack')], [ad1, ad2])
def go():
sess.flush()
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 3)
eq_([Address(username='ed'), Address(username='ed')], [ad1, ad2])
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
u1 = sess.query(User).get('ed')
assert len(u1.addresses) == 2 # load addresses
u1.username = 'fred'
def go():
sess.flush()
# check that the passive_updates is on on the other side
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 3)
sess.expunge_all()
eq_([Address(username='fred'), Address(username='fred')],
sess.query(Address).all())
@testing.requires.on_update_cascade
def test_manytomany_passive(self):
self._test_manytomany(True)
# mysqldb executemany() of the association table fails to
# report the correct row count
@testing.fails_if(lambda: testing.against('mysql')
and not testing.against('+zxjdbc'))
def test_manytomany_nonpassive(self):
self._test_manytomany(False)
def _test_manytomany(self, passive_updates):
users, items, Item, User, users_to_items = (self.tables.users,
self.tables.items,
self.classes.Item,
self.classes.User,
self.tables.users_to_items)
mapper(User, users, properties={
'items':relationship(Item, secondary=users_to_items,
backref='users',
passive_updates=passive_updates)})
mapper(Item, items)
sess = create_session()
u1 = User(username='jack')
u2 = User(username='fred')
i1 = Item(itemname='item1')
i2 = Item(itemname='item2')
u1.items.append(i1)
u1.items.append(i2)
i2.users.append(u2)
sess.add(u1)
sess.add(u2)
sess.flush()
r = sess.query(Item).all()
# ComparableEntity can't handle a comparison with the backrefs
# involved....
eq_(Item(itemname='item1'), r[0])
eq_(['jack'], [u.username for u in r[0].users])
eq_(Item(itemname='item2'), r[1])
eq_(['jack', 'fred'], [u.username for u in r[1].users])
u2.username='ed'
def go():
sess.flush()
go()
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
sess.expunge_all()
r = sess.query(Item).all()
eq_(Item(itemname='item1'), r[0])
eq_(['jack'], [u.username for u in r[0].users])
eq_(Item(itemname='item2'), r[1])
eq_(['ed', 'jack'], sorted([u.username for u in r[1].users]))
sess.expunge_all()
u2 = sess.query(User).get(u2.username)
u2.username='wendy'
sess.flush()
r = sess.query(Item).with_parent(u2).all()
eq_(Item(itemname='item2'), r[0])
class TransientExceptionTest(_fixtures.FixtureTest):
run_inserts = None
def test_transient_exception(self):
"""An object that goes from a pk value to transient/pending
doesn't count as a "pk" switch.
"""
users, Address, addresses, User = (self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User)
mapper(User, users)
mapper(Address, addresses, properties={'user':relationship(User)})
sess = create_session()
u1 = User(id=5, name='u1')
ad1 = Address(email_address='e1', user=u1)
sess.add_all([u1, ad1])
sess.flush()
make_transient(u1)
u1.id = None
u1.username='u2'
sess.add(u1)
sess.flush()
eq_(ad1.user_id, 5)
sess.expire_all()
eq_(ad1.user_id, 5)
ne_(u1.id, 5)
ne_(u1.id, None)
eq_(sess.query(User).count(), 2)
class ReversePKsTest(fixtures.MappedTest):
"""reverse the primary keys of two entities and ensure bookkeeping
succeeds."""
@classmethod
def define_tables(cls, metadata):
Table(
'user', metadata,
Column('code', Integer, primary_key=True),
Column('status', Integer, primary_key=True),
Column('username', Unicode(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
def __init__(self, code, status, username):
self.code = code
self.status = status
self.username = username
def test_reverse(self):
user, User = self.tables.user, self.classes.User
PUBLISHED, EDITABLE, ARCHIVED = 1, 2, 3
mapper(User, user)
session = sa.orm.sessionmaker()()
a_published = User(1, PUBLISHED, u'a')
session.add(a_published)
session.commit()
a_editable = User(1, EDITABLE, u'a')
session.add(a_editable)
session.commit()
# do the switch in both directions -
# one or the other should raise the error
# based on platform dictionary ordering
a_published.status = ARCHIVED
a_editable.status = PUBLISHED
session.commit()
assert session.query(User).get([1, PUBLISHED]) is a_editable
assert session.query(User).get([1, ARCHIVED]) is a_published
a_published.status = PUBLISHED
a_editable.status = EDITABLE
session.commit()
assert session.query(User).get([1, PUBLISHED]) is a_published
assert session.query(User).get([1, EDITABLE]) is a_editable
class SelfReferentialTest(fixtures.MappedTest):
# mssql, mysql don't allow
# ON UPDATE on self-referential keys
__unsupported_on__ = ('mssql', 'mysql')
__requires__ = 'on_update_or_deferrable_fks',
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table('nodes', metadata,
Column('name', String(50), primary_key=True),
Column('parent', String(50),
ForeignKey('nodes.name', **fk_args)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class Node(cls.Comparable):
pass
def test_one_to_many_on_m2o(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children': relationship(Node,
backref=sa.orm.backref('parentnode',
remote_side=nodes.c.name,
passive_updates=False),
)})
sess = Session()
n1 = Node(name='n1')
sess.add(n1)
n2 = Node(name='n11', parentnode=n1)
n3 = Node(name='n12', parentnode=n1)
n4 = Node(name='n13', parentnode=n1)
sess.add_all([n2, n3, n4])
sess.commit()
n1.name = 'new n1'
sess.commit()
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
for n in sess.query(Node).filter(
Node.name.in_(['n11', 'n12', 'n13']))])
def test_one_to_many_on_o2m(self):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'children': relationship(Node,
backref=sa.orm.backref('parentnode',
remote_side=nodes.c.name),
passive_updates=False
)})
sess = Session()
n1 = Node(name='n1')
n1.children.append(Node(name='n11'))
n1.children.append(Node(name='n12'))
n1.children.append(Node(name='n13'))
sess.add(n1)
sess.commit()
n1.name = 'new n1'
sess.commit()
eq_(n1.children[1].parent, 'new n1')
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
for n in sess.query(Node).filter(
Node.name.in_(['n11', 'n12', 'n13']))])
@testing.requires.on_update_cascade
def test_many_to_one_passive(self):
self._test_many_to_one(True)
def test_many_to_one_nonpassive(self):
self._test_many_to_one(False)
def _test_many_to_one(self, passive):
Node, nodes = self.classes.Node, self.tables.nodes
mapper(Node, nodes, properties={
'parentnode':relationship(Node,
remote_side=nodes.c.name,
passive_updates=passive)
}
)
sess = Session()
n1 = Node(name='n1')
n11 = Node(name='n11', parentnode=n1)
n12 = Node(name='n12', parentnode=n1)
n13 = Node(name='n13', parentnode=n1)
sess.add_all([n1, n11, n12, n13])
sess.commit()
n1.name = 'new n1'
sess.commit()
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
for n in sess.query(Node).filter(
Node.name.in_(['n11', 'n12', 'n13']))])
class NonPKCascadeTest(fixtures.MappedTest):
__requires__ = 'skip_mysql_on_windows', 'on_update_or_deferrable_fks'
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table('users', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('username', String(50), unique=True),
Column('fullname', String(100)),
test_needs_fk=True)
Table('addresses', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('email', String(50)),
Column('username', String(50),
ForeignKey('users.username', **fk_args)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@testing.requires.on_update_cascade
def test_onetomany_passive(self):
self._test_onetomany(True)
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
def _test_onetomany(self, passive_updates):
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
u1 = User(username='jack', fullname='jack')
u1.addresses.append(Address(email='jack1'))
u1.addresses.append(Address(email='jack2'))
sess.add(u1)
sess.flush()
a1 = u1.addresses[0]
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('jack',), ('jack',)])
assert sess.query(Address).get(a1.id) is u1.addresses[0]
u1.username = 'ed'
sess.flush()
assert u1.addresses[0].username == 'ed'
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('ed',), ('ed',)])
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
u1 = sess.query(User).get(u1.id)
u1.username = 'jack'
def go():
sess.flush()
if not passive_updates:
# test passive_updates=False; load addresses,
# update user, update 2 addresses
self.assert_sql_count(testing.db, go, 4)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
assert User(username='jack',
addresses=[Address(username='jack'),
Address(username='jack')]) == \
sess.query(User).get(u1.id)
sess.expunge_all()
u1 = sess.query(User).get(u1.id)
u1.addresses = []
u1.username = 'fred'
sess.flush()
sess.expunge_all()
a1 = sess.query(Address).get(a1.id)
eq_(a1.username, None)
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[(None,), (None,)])
u1 = sess.query(User).get(u1.id)
eq_(User(username='fred', fullname='jack'), u1)
class CascadeToFKPKTest(fixtures.MappedTest, testing.AssertsCompiledSQL):
"""A primary key mutation cascades onto a foreign key that is itself a
primary key."""
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table('users', metadata,
Column('username', String(50), primary_key=True),
test_needs_fk=True)
Table('addresses', metadata,
Column('username', String(50),
ForeignKey('users.username', **fk_args),
primary_key=True
),
Column('email', String(50), primary_key=True),
Column('etc', String(50)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class Address(cls.Comparable):
pass
@testing.requires.on_update_cascade
def test_onetomany_passive(self):
self._test_onetomany(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
def test_o2m_change_passive(self):
self._test_o2m_change(True)
def test_o2m_change_nonpassive(self):
self._test_o2m_change(False)
def _test_o2m_change(self, passive_updates):
"""Change the PK of a related entity to another.
"on update cascade" is not involved here, so the mapper has
to do the UPDATE itself.
"""
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
a1 = Address(username='ed', email='ed@host1')
u1 = User(username='ed', addresses=[a1])
u2 = User(username='jack')
sess.add_all([a1, u1, u2])
sess.flush()
a1.username = 'jack'
sess.flush()
def test_o2m_move_passive(self):
self._test_o2m_move(True)
def test_o2m_move_nonpassive(self):
self._test_o2m_move(False)
def _test_o2m_move(self, passive_updates):
"""Move the related entity to a different collection,
changing its PK.
"""
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
a1 = Address(username='ed', email='ed@host1')
u1 = User(username='ed', addresses=[a1])
u2 = User(username='jack')
sess.add_all([a1, u1, u2])
sess.flush()
u1.addresses.remove(a1)
u2.addresses.append(a1)
sess.flush()
@testing.requires.on_update_cascade
def test_change_m2o_passive(self):
self._test_change_m2o(True)
@testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
def test_change_m2o_nonpassive(self):
self._test_change_m2o(False)
def _test_change_m2o(self, passive_updates):
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=passive_updates)
})
sess = create_session()
u1 = User(username='jack')
a1 = Address(user=u1, email='foo@bar')
sess.add_all([u1, a1])
sess.flush()
u1.username='edmodified'
sess.flush()
eq_(a1.username, 'edmodified')
sess.expire_all()
eq_(a1.username, 'edmodified')
def test_move_m2o_passive(self):
self._test_move_m2o(True)
def test_move_m2o_nonpassive(self):
self._test_move_m2o(False)
def _test_move_m2o(self, passive_updates):
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
# tests [ticket:1856]
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=passive_updates)
})
sess = create_session()
u1 = User(username='jack')
u2 = User(username='ed')
a1 = Address(user=u1, email='foo@bar')
sess.add_all([u1, u2, a1])
sess.flush()
a1.user = u2
sess.flush()
def test_rowswitch_doesntfire(self):
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=True)
})
sess = create_session()
u1 = User(username='ed')
a1 = Address(user=u1, email='ed@host1')
sess.add(u1)
sess.add(a1)
sess.flush()
sess.delete(u1)
sess.delete(a1)
u2 = User(username='ed')
a2 = Address(user=u2, email='ed@host1', etc='foo')
sess.add(u2)
sess.add(a2)
from sqlalchemy.testing.assertsql import CompiledSQL
# test that the primary key columns of addresses are not
# being updated as well, since this is a row switch.
self.assert_sql_execution(testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET etc=:etc WHERE "
"addresses.username = :addresses_username AND"
" addresses.email = :addresses_email",
{'etc': 'foo', 'addresses_username':'ed',
'addresses_email':'ed@host1'} ),
)
def _test_onetomany(self, passive_updates):
"""Change the PK of a related entity via foreign key cascade.
For databases that require "on update cascade", the mapper
has to identify the row by the new value, not the old, when
it does the update.
"""
User, Address, users, addresses = (self.classes.User,
self.classes.Address,
self.tables.users,
self.tables.addresses)
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(username='ed', email='ed@host1'),\
Address(username='ed', email='ed@host2')
u1 = User(username='ed', addresses=[a1, a2])
sess.add(u1)
sess.flush()
eq_(a1.username, 'ed')
eq_(a2.username, 'ed')
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('ed',), ('ed',)])
u1.username = 'jack'
a2.email='ed@host3'
sess.flush()
eq_(a1.username, 'jack')
eq_(a2.username, 'jack')
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('jack',), ('jack', )])
class JoinedInheritanceTest(fixtures.MappedTest):
"""Test cascades of pk->pk/fk on joined table inh."""
# mssql doesn't allow ON UPDATE on self-referential keys
__unsupported_on__ = ('mssql',)
__requires__ = 'skip_mysql_on_windows',
@classmethod
def define_tables(cls, metadata):
fk_args = _backend_specific_fk_args()
Table('person', metadata,
Column('name', String(50), primary_key=True),
Column('type', String(50), nullable=False),
test_needs_fk=True)
Table('engineer', metadata,
Column('name', String(50), ForeignKey('person.name', **fk_args),
primary_key=True),
Column('primary_language', String(50)),
Column('boss_name', String(50),
ForeignKey('manager.name', **fk_args)),
test_needs_fk=True
)
Table('manager', metadata,
Column('name', String(50),
ForeignKey('person.name', **fk_args),
primary_key=True),
Column('paperwork', String(50)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class Person(cls.Comparable):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
@testing.requires.on_update_cascade
def test_pk_passive(self):
self._test_pk(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
def test_pk_nonpassive(self):
self._test_pk(False)
@testing.requires.on_update_cascade
def test_fk_passive(self):
self._test_fk(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.fails_on_everything_except('sqlite', 'mysql+zxjdbc', 'oracle',
'postgresql+zxjdbc')
def test_fk_nonpassive(self):
self._test_fk(False)
def _test_pk(self, passive_updates):
Person, Manager, person, manager, Engineer, engineer = (self.classes.Person,
self.classes.Manager,
self.tables.person,
self.tables.manager,
self.classes.Engineer,
self.tables.engineer)
mapper(Person, person, polymorphic_on=person.c.type,
polymorphic_identity='person',
passive_updates=passive_updates)
mapper(Engineer, engineer, inherits=Person,
polymorphic_identity='engineer', properties={
'boss':relationship(Manager,
primaryjoin=manager.c.name==engineer.c.boss_name,
passive_updates=passive_updates
)
})
mapper(Manager, manager, inherits=Person,
polymorphic_identity='manager')
sess = sa.orm.sessionmaker()()
e1 = Engineer(name='dilbert', primary_language='java')
sess.add(e1)
sess.commit()
e1.name = 'wally'
e1.primary_language = 'c++'
sess.commit()
def _test_fk(self, passive_updates):
Person, Manager, person, manager, Engineer, engineer = (self.classes.Person,
self.classes.Manager,
self.tables.person,
self.tables.manager,
self.classes.Engineer,
self.tables.engineer)
mapper(Person, person, polymorphic_on=person.c.type,
polymorphic_identity='person',
passive_updates=passive_updates)
mapper(Engineer, engineer, inherits=Person,
polymorphic_identity='engineer', properties={
'boss':relationship(Manager,
primaryjoin=manager.c.name==engineer.c.boss_name,
passive_updates=passive_updates
)
})
mapper(Manager, manager, inherits=Person,
polymorphic_identity='manager')
sess = sa.orm.sessionmaker()()
m1 = Manager(name='dogbert', paperwork='lots')
e1, e2 = \
Engineer(name='dilbert', primary_language='java', boss=m1),\
Engineer(name='wally', primary_language='c++', boss=m1)
sess.add_all([
e1, e2, m1
])
sess.commit()
eq_(e1.boss_name, 'dogbert')
eq_(e2.boss_name, 'dogbert')
sess.expire_all()
m1.name = 'pointy haired'
e1.primary_language = 'scala'
e2.primary_language = 'cobol'
sess.commit()
eq_(e1.boss_name, 'pointy haired')
eq_(e2.boss_name, 'pointy haired')
| mit | 1,728,209,882,002,347,500 | 31.866841 | 84 | 0.531459 | false |
jangorecki/h2o-3 | h2o-py/h2o/demos.py | 1 | 11128 | # -*- encoding: utf-8 -*-
"""
Interactive demos for the h2o-py library.
:copyright: (c) 2016 H2O.ai
:license: Apache License Version 2.0 (see LICENSE for details)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import linecache
import os
import sys
import h2o
# noinspection PyUnresolvedReferences
from h2o.utils.compatibility import * # NOQA
from h2o.utils.typechecks import assert_is_type
def gbm(interactive=True, echo=True, testing=False):
"""GBM model demo."""
def demo_body(go):
"""
Demo of H2O's Gradient Boosting estimator.
This demo uploads a dataset to h2o, parses it, and shows a description.
        Then it divides the dataset into training and test sets, builds a GBM
from the training set, and makes predictions for the test set.
Finally, default performance metrics are displayed.
"""
go()
# Connect to H2O
h2o.init()
go()
# Upload the prostate dataset that comes included in the h2o python package
prostate = h2o.load_dataset("prostate")
go()
# Print a description of the prostate data
prostate.describe()
go()
# Randomly split the dataset into ~70/30, training/test sets
train, test = prostate.split_frame(ratios=[0.70])
go()
# Convert the response columns to factors (for binary classification problems)
train["CAPSULE"] = train["CAPSULE"].asfactor()
test["CAPSULE"] = test["CAPSULE"].asfactor()
go()
        # Build a (classification) GBM
from h2o.estimators import H2OGradientBoostingEstimator
prostate_gbm = H2OGradientBoostingEstimator(distribution="bernoulli", ntrees=10, max_depth=8,
min_rows=10, learn_rate=0.2)
prostate_gbm.train(x=["AGE", "RACE", "PSA", "VOL", "GLEASON"],
y="CAPSULE", training_frame=train)
go()
# Show the model
prostate_gbm.show()
go()
# Predict on the test set and show the first ten predictions
predictions = prostate_gbm.predict(test)
predictions.show()
go()
# Show default performance metrics
performance = prostate_gbm.model_performance(test)
performance.show()
# Execute:
_run_demo(demo_body, interactive, echo, testing)
def deeplearning(interactive=True, echo=True, testing=False):
"""Deep Learning model demo."""
def demo_body(go):
"""
Demo of H2O's Deep Learning model.
This demo uploads a dataset to h2o, parses it, and shows a description.
        Then it divides the dataset into training and test sets, builds a Deep Learning model
from the training set, and makes predictions for the test set.
Finally, default performance metrics are displayed.
"""
go()
# Connect to H2O
h2o.init()
go()
# Upload the prostate dataset that comes included in the h2o python package
prostate = h2o.load_dataset("prostate")
go()
# Print a description of the prostate data
prostate.describe()
go()
# Randomly split the dataset into ~70/30, training/test sets
train, test = prostate.split_frame(ratios=[0.70])
go()
# Convert the response columns to factors (for binary classification problems)
train["CAPSULE"] = train["CAPSULE"].asfactor()
test["CAPSULE"] = test["CAPSULE"].asfactor()
go()
        # Build a (classification) Deep Learning model
from h2o.estimators import H2ODeepLearningEstimator
prostate_dl = H2ODeepLearningEstimator(activation="Tanh", hidden=[10, 10, 10], epochs=10000)
prostate_dl.train(x=list(set(prostate.col_names) - {"ID", "CAPSULE"}),
y="CAPSULE", training_frame=train)
go()
# Show the model
prostate_dl.show()
go()
# Predict on the test set and show the first ten predictions
predictions = prostate_dl.predict(test)
predictions.show()
go()
# Show default performance metrics
performance = prostate_dl.model_performance(test)
performance.show()
# Execute:
_run_demo(demo_body, interactive, echo, testing)
def glm(interactive=True, echo=True, testing=False):
"""GLM model demo."""
def demo_body(go):
"""
Demo of H2O's Generalized Linear Estimator.
This demo uploads a dataset to h2o, parses it, and shows a description.
Then it divides the dataset into training and test sets, builds a GLM
from the training set, and makes predictions for the test set.
Finally, default performance metrics are displayed.
"""
go()
# Connect to H2O
h2o.init()
go()
# Upload the prostate dataset that comes included in the h2o python package
prostate = h2o.load_dataset("prostate")
go()
# Print a description of the prostate data
prostate.describe()
go()
# Randomly split the dataset into ~70/30, training/test sets
train, test = prostate.split_frame(ratios=[0.70])
go()
# Convert the response columns to factors (for binary classification problems)
train["CAPSULE"] = train["CAPSULE"].asfactor()
test["CAPSULE"] = test["CAPSULE"].asfactor()
go()
# Build a (classification) GLM
from h2o.estimators import H2OGeneralizedLinearEstimator
prostate_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=[0.5])
prostate_glm.train(x=["AGE", "RACE", "PSA", "VOL", "GLEASON"],
y="CAPSULE", training_frame=train)
go()
# Show the model
prostate_glm.show()
go()
# Predict on the test set and show the first ten predictions
predictions = prostate_glm.predict(test)
predictions.show()
go()
# Show default performance metrics
performance = prostate_glm.model_performance(test)
performance.show()
# Execute:
_run_demo(demo_body, interactive, echo, testing)
def _run_demo(body_fn, interactive, echo, testing):
"""
Execute the demo, echoing commands and pausing for user input.
:param body_fn: function that contains the sequence of demo's commands.
:param interactive: If True, the user will be prompted to continue the demonstration after every segment.
:param echo: If True, the python commands that are executed will be displayed.
:param testing: Used for pyunit testing. h2o.init() will not be called if set to True.
:type body_fn: function
"""
import colorama
from colorama import Style, Fore
colorama.init()
class StopExecution(Exception):
"""Helper class for cancelling the demo."""
assert_is_type(body_fn, type(_run_demo))
# Reformat description by removing extra spaces; then print it.
if body_fn.__doc__:
desc_lines = body_fn.__doc__.split("\n")
while desc_lines[0].strip() == "":
desc_lines = desc_lines[1:]
while desc_lines[-1].strip() == "":
desc_lines = desc_lines[:-1]
strip_spaces = min(len(line) - len(line.lstrip(" ")) for line in desc_lines[1:] if line.strip() != "")
maxlen = max(len(line) for line in desc_lines)
print(Fore.CYAN)
print("-" * maxlen)
for line in desc_lines:
print(line[strip_spaces:].rstrip())
print("-" * maxlen)
print(Style.RESET_ALL, end="")
# Prepare the executor function
def controller():
"""Print to console the next block of commands, and wait for keypress."""
try:
raise RuntimeError("Catch me!")
except RuntimeError:
print()
# Extract and print lines that will be executed next
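            # The deliberately raised exception gives us a traceback whose
            # parent frame is the demo body, so we can read the source lines
            # between successive go() calls and echo them.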
if echo:
tb = sys.exc_info()[2]
fr = tb.tb_frame.f_back
filename = fr.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, fr.f_lineno, fr.f_globals).rstrip()
indent_len = len(line) - len(line.lstrip(" "))
assert line[indent_len:] == "go()"
i = fr.f_lineno
output_lines = []
n_blank_lines = 0
while True:
i += 1
line = linecache.getline(filename, i, fr.f_globals).rstrip()
# Detect dedent
if line[:indent_len].strip() != "": break
line = line[indent_len:]
if line == "go()": break
style = Fore.LIGHTBLACK_EX if line.lstrip().startswith("#") else Style.BRIGHT
prompt = "... " if line.startswith(" ") else ">>> "
output_lines.append(Fore.CYAN + prompt + Fore.RESET + style + line + Style.RESET_ALL)
del style # Otherwise exception print-outs may get messed-up...
if line.strip() == "":
n_blank_lines += 1
if n_blank_lines > 5: break # Just in case we hit file end or something
else:
n_blank_lines = 0
                if n_blank_lines:
                    output_lines = output_lines[:-n_blank_lines]
                for line in output_lines:
                    print(line)
# Prompt for user input
if interactive:
print("\n" + Style.DIM + "(press any key)" + Style.RESET_ALL, end="")
key = _wait_for_keypress()
print("\r \r", end="")
if key.lower() == "q":
raise StopExecution()
# Replace h2o.init() with a stub when running in "test" mode
_h2o_init = h2o.init
if testing:
h2o.init = lambda *args, **kwargs: None
# Run the test
try:
body_fn(controller)
print("\n" + Fore.CYAN + "---- End of Demo ----" + Style.RESET_ALL)
except (StopExecution, KeyboardInterrupt):
print("\n" + Fore.RED + "---- Demo aborted ----" + Style.RESET_ALL)
# Clean-up
if testing:
h2o.init = _h2o_init
print()
colorama.deinit()
def _wait_for_keypress():
"""
Wait for a key press on the console and return it.
Borrowed from http://stackoverflow.com/questions/983354/how-do-i-make-python-to-wait-for-a-pressed-key
"""
result = None
if os.name == "nt":
# noinspection PyUnresolvedReferences
import msvcrt
result = msvcrt.getch()
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
| apache-2.0 | 3,266,901,048,643,783,700 | 33.666667 | 110 | 0.580787 | false |
SVilgelm/CloudFerry | tests/model/test_model.py | 1 | 15518 | # Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import uuid
from cloudferry import model
from tests.lib.utils import test_local_db
class ExampleReferenced(model.Model):
object_id = model.PrimaryKey()
qux = model.Integer(required=True)
def equals(self, other):
if super(ExampleReferenced, self).equals(other):
return True
return self.qux == other.qux
@classmethod
def create_object(cls, cloud, cloud_obj_id):
with model.Session() as session:
session.store(ExampleReferenced.load({
'object_id': {
'cloud': cloud,
'id': cloud_obj_id,
'type': cls.get_class_qualname(),
},
'qux': 1337,
}))
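# toy models used by the tests below to exercise PrimaryKey, Nested, Dependency and Reference fields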
class ExampleNested(model.Model):
foo = model.String(required=True)
ref = model.Dependency(ExampleReferenced, required=True)
refs = model.Dependency(ExampleReferenced, required=True, many=True)
ref_none = model.Dependency(ExampleReferenced, missing=None,
allow_none=True)
refs_none = model.Dependency(ExampleReferenced, missing=None,
many=True, allow_none=True)
class Simple(model.Model):
foo = model.String(required=True)
class Example(model.Model):
object_id = model.PrimaryKey()
bar = model.String(required=True)
baz = model.Nested(ExampleNested)
ref = model.Dependency(ExampleReferenced, required=True)
refs = model.Dependency(ExampleReferenced, required=True, many=True)
ref_none = model.Dependency(ExampleReferenced, missing=None,
allow_none=True)
refs_none = model.Dependency(ExampleReferenced, missing=None,
many=True, allow_none=True)
count = 0
@classmethod
def generate_data(cls, object_id=None, cloud='test_cloud'):
cls.count += 1
if object_id is None:
object_id = uuid.uuid5(uuid.NAMESPACE_DNS, 'test%d' % cls.count)
ref1 = uuid.uuid5(uuid.NAMESPACE_DNS, 'ref1_%d' % cls.count)
ref2 = uuid.uuid5(uuid.NAMESPACE_DNS, 'ref2_%d' % cls.count)
ExampleReferenced.create_object(cloud, str(ref1))
ExampleReferenced.create_object(cloud, str(ref2))
return {
'object_id': {
'cloud': cloud,
'id': str(object_id),
'type': Example.get_class_qualname(),
},
'bar': 'some non-random string',
'baz': {
'foo': 'other non-random string',
'ref': {
'cloud': cloud,
'id': str(ref1),
'type': ExampleReferenced.get_class_qualname(),
},
'refs': [{
'cloud': cloud,
'id': str(ref2),
'type': ExampleReferenced.get_class_qualname(),
}],
},
'ref': {
'cloud': cloud,
'id': str(ref1),
'type': ExampleReferenced.get_class_qualname(),
},
'refs': [{
'cloud': cloud,
'id': str(ref2),
'type': ExampleReferenced.get_class_qualname(),
}],
}
class ExampleRef(model.Model):
object_id = model.PrimaryKey()
ref = model.Reference(ExampleReferenced, allow_none=True)
def equals(self, other):
# pylint: disable=no-member
if super(ExampleRef, self).equals(other):
return True
if self.ref is None:
return other.ref is None
return self.ref.equals(other.ref)
@classmethod
def create_object(cls, cloud, unique_id, ref_unique_id):
data = {
'object_id': {
'cloud': cloud,
'id': unique_id,
'type': cls.get_class_qualname(),
},
}
if ref_unique_id is not None:
ref = {
'cloud': cloud,
'id': ref_unique_id,
'type': ExampleReferenced.get_class_qualname(),
}
else:
ref = None
data['ref'] = ref
return cls.load(data)
class ModelTestCase(test_local_db.DatabaseMockingTestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
self.cloud = mock.MagicMock()
self.cloud.name = 'test_cloud'
self.cloud2 = mock.MagicMock()
self.cloud2.name = 'test_cloud2'
def _validate_example_obj(self, object_id, obj, validate_refs=True,
bar_value='some non-random string'):
self.assertEqual(object_id, obj.object_id)
self.assertEqual(bar_value, obj.bar)
self.assertEqual('other non-random string', obj.baz.foo)
if validate_refs:
self.assertEqual(1337, obj.ref.qux)
self.assertEqual(1337, obj.refs[0].qux)
@staticmethod
def _make_id(model_class, cloud_obj_id, cloud='test_cloud'):
return {
'id': cloud_obj_id,
'cloud': cloud,
'type': model_class.get_class_qualname(),
}
def test_load(self):
data = Example.generate_data()
obj = Example.load(data)
self._validate_example_obj(
model.ObjectId(data['object_id']['id'], 'test_cloud'), obj, False)
def test_non_dirty(self):
obj = Example.load(Example.generate_data())
self.assertTrue(obj.is_dirty('objects'))
def test_simple_dirty(self):
obj = Example.load(Example.generate_data())
obj.bar = 'value is changed'
self.assertTrue(obj.is_dirty('objects'))
def test_nested_dirty(self):
obj = Example.load(Example.generate_data())
obj.baz.foo = 'value is changed'
self.assertTrue(obj.is_dirty('objects'))
def test_ref_dirty(self):
obj = Example.load(Example.generate_data())
ref_obj = ExampleReferenced.load({
'object_id': self._make_id(ExampleReferenced, 'hello'),
'qux': 313373,
})
obj.ref = ref_obj
self.assertTrue(obj.is_dirty('objects'))
def test_refs_dirty(self):
obj = Example.load(Example.generate_data())
ref_obj = ExampleReferenced.load({
'object_id': self._make_id(ExampleReferenced, 'hello'),
'qux': 313373,
})
obj.refs.append(ref_obj)
self.assertTrue(obj.is_dirty('objects'))
def test_nested_ref_dirty(self):
obj = Example.load(Example.generate_data())
ref_obj = ExampleReferenced.load({
'object_id': self._make_id(ExampleReferenced, 'hello'),
'qux': 313373,
})
obj.baz.ref = ref_obj
self.assertTrue(obj.is_dirty('objects'))
def test_nested_refs_dirty(self):
obj = Example.load(Example.generate_data())
ref_obj = ExampleReferenced.load({
'object_id': self._make_id(ExampleReferenced, 'hello'),
'qux': 313373,
})
obj.baz.refs.append(ref_obj)
self.assertTrue(obj.is_dirty('objects'))
def test_store_retrieve(self):
orig_obj = Example.load(Example.generate_data())
object_id = orig_obj.object_id
with model.Session() as session:
session.store(orig_obj)
# Validate retrieve working before commit
self._validate_example_obj(
object_id, session.retrieve(Example, object_id))
with model.Session() as session:
# Validate retrieve working after commit
self._validate_example_obj(
object_id, session.retrieve(Example, object_id))
def test_store_list(self):
orig_obj = Example.load(Example.generate_data())
object_id = orig_obj.object_id
with model.Session() as session:
session.store(orig_obj)
# Validate retrieve working before commit
self._validate_example_obj(object_id, session.list(Example)[0])
with model.Session() as session:
# Validate retrieve working after commit
self._validate_example_obj(object_id, session.list(Example)[0])
def test_store_list_cloud(self):
orig_obj1 = Example.load(Example.generate_data(cloud=self.cloud.name))
object1_id = orig_obj1.object_id
orig_obj2 = Example.load(Example.generate_data(cloud=self.cloud2.name))
object2_id = orig_obj2.object_id
with model.Session() as session:
session.store(orig_obj1)
session.store(orig_obj2)
# Validate retrieve working before commit
self._validate_example_obj(object1_id,
session.list(Example, self.cloud)[0])
self._validate_example_obj(object2_id,
session.list(Example, self.cloud2)[0])
# Validate retrieve working after commit
with model.Session() as session:
self._validate_example_obj(object1_id,
session.list(Example, self.cloud)[0])
with model.Session() as session:
self._validate_example_obj(object2_id,
session.list(Example, self.cloud2)[0])
def test_load_store(self):
orig_obj = Example.load(Example.generate_data())
object_id = orig_obj.object_id
with model.Session() as session:
session.store(orig_obj)
with model.Session() as session:
obj = session.retrieve(Example, object_id)
self._validate_example_obj(object_id, obj)
obj.baz.foo = 'changed'
obj.bar = 'changed too'
with model.Session() as session:
loaded_obj = session.retrieve(Example, object_id)
self.assertEqual('changed', loaded_obj.baz.foo)
self.assertEqual('changed too', loaded_obj.bar)
def test_many_nested(self):
class ExampleMany(model.Model):
object_id = model.PrimaryKey()
many = model.Nested(Simple, many=True)
many = ExampleMany.load({
'object_id': self._make_id(ExampleMany, 'foo'),
'many': [
{'foo': 'foo'},
{'foo': 'bar'},
{'foo': 'baz'},
],
})
self.assertEqual('foo', many.many[0].foo)
self.assertEqual('bar', many.many[1].foo)
self.assertEqual('baz', many.many[2].foo)
with model.Session() as session:
session.store(many)
with model.Session() as session:
obj = session.retrieve(
ExampleMany, model.ObjectId('foo', 'test_cloud'))
self.assertEqual('foo', obj.many[0].foo)
self.assertEqual('bar', obj.many[1].foo)
self.assertEqual('baz', obj.many[2].foo)
def test_example_name_ref(self):
class ExampleNameRef(model.Model):
object_id = model.PrimaryKey()
ref = model.Dependency(Example.get_class_qualname())
with model.Session() as session:
example = Example.load(Example.generate_data('foo-bar-baz'))
session.store(example)
obj = ExampleNameRef.load({
'object_id': self._make_id(ExampleNameRef, 'ExampleNameRef-1'),
'ref': self._make_id(Example, 'foo-bar-baz'),
})
self.assertIs(Example, obj.ref.get_class())
def test_nested_sessions(self):
orig_obj1 = Example.load(Example.generate_data(cloud=self.cloud.name))
object1_id = orig_obj1.object_id
orig_obj2 = Example.load(Example.generate_data(cloud=self.cloud2.name))
object2_id = orig_obj2.object_id
with model.Session() as s1:
s1.store(orig_obj1)
with model.Session() as s2:
s2.store(orig_obj2)
self._validate_example_obj(
object1_id, s2.retrieve(Example, object1_id))
self._validate_example_obj(
object2_id, s2.retrieve(Example, object2_id))
with model.Session() as s:
self._validate_example_obj(
object1_id, s.retrieve(Example, object1_id))
self._validate_example_obj(
object2_id, s2.retrieve(Example, object2_id))
def test_nested_sessions_save_updates_after_nested(self):
orig_obj1 = Example.load(Example.generate_data(cloud=self.cloud.name))
object1_id = orig_obj1.object_id
orig_obj2 = Example.load(Example.generate_data(cloud=self.cloud2.name))
object2_id = orig_obj2.object_id
with model.Session() as s1:
s1.store(orig_obj1)
with model.Session() as s2:
s2.store(orig_obj2)
self._validate_example_obj(
object1_id, s2.retrieve(Example, object1_id))
self._validate_example_obj(
object2_id, s2.retrieve(Example, object2_id))
orig_obj1.bar = 'some other non-random string'
with model.Session() as s:
self._validate_example_obj(
object1_id, s.retrieve(Example, object1_id),
bar_value='some other non-random string')
self._validate_example_obj(
object2_id, s2.retrieve(Example, object2_id))
def test_absent_reference_equals1(self):
object1 = ExampleRef.create_object(
'test_cloud1', 'example_ref_id', 'example_referenced_id')
object2 = ExampleRef.create_object(
'test_cloud2', 'example_ref_id', 'example_referenced_id')
self.assertTrue(object1.equals(object2))
def test_absent_reference_equals2(self):
object1 = ExampleRef.create_object(
'test_cloud1', 'example_ref_id', 'example_referenced_id')
object2 = ExampleRef.create_object(
'test_cloud2', 'example_ref_id', 'other_referenced_id')
self.assertFalse(object1.equals(object2))
def test_absent_reference_equals3(self):
object1 = ExampleRef.create_object(
'test_cloud1', 'example_ref_id', None)
object2 = ExampleRef.create_object(
'test_cloud2', 'example_ref_id', None)
self.assertTrue(object1.equals(object2))
def test_absent_reference_equals4(self):
with model.Session():
ExampleReferenced.create_object(
'test_cloud1', 'example_referenced_id')
ExampleReferenced.create_object(
'test_cloud2', 'other_referenced_id')
object1 = ExampleRef.create_object(
'test_cloud1', 'example_ref_id', 'example_referenced_id')
object2 = ExampleRef.create_object(
'test_cloud2', 'example_ref_id', 'other_referenced_id')
# We have equivalent objects referenced by example_referenced_id and
# other_referenced_id this time
self.assertTrue(object1.equals(object2))
| apache-2.0 | 5,586,063,717,737,296,000 | 37.316049 | 79 | 0.573399 | false |
opensim-org/opensim-core | OpenSim/Moco/Archive/Tests/plot_gait10dof18musc_activation.py | 1 | 3138 | # -------------------------------------------------------------------------- #
# OpenSim Moco: plot_gait10dof18musc_activation.py #
# -------------------------------------------------------------------------- #
# Copyright (c) 2017 Stanford University and the Authors #
# #
# Author(s): Christopher Dembia #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain a #
# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# -------------------------------------------------------------------------- #
import pylab as pl
import pandas as pd
import math
cycle_start = 0.58
cycle_end = 1.81
duration_of_gait_cycle = cycle_end - cycle_start
half_gait_cycle = 0.5 * duration_of_gait_cycle
muscles = ['glut_max', 'iliopsoas', 'rect_fem',
'hamstrings', 'bifemsh', 'vasti',
'gastroc', 'soleus', 'tib_ant']
sol = pd.read_csv('GlobalStaticOptimization_OCP_solution.csv', index_col=0,
skiprows=2)
#sol = pd.read_csv('INDYGO_OCP_solution.csv', index_col=0,
# skiprows=2)
# sol.plot()
#num_muscles = 0
#plot_names = list()
#for col in sol.columns:
# if col.endswith('activation'):
# num_muscles += 1
# plot_names.append(col)
col_indices_r = list()
col_indices_l = list()
for muscle in muscles:
for i, col in enumerate(sol.columns):
if muscle + '_r' in col and 'activation' in col:
col_indices_r.append(i)
if muscle + '_l' in col and 'activation' in col:
col_indices_l.append(i)
num_cols = 3
num_rows = 3 #math.ceil(float(num_muscles) / num_cols)
pl.figure(figsize=(4 * num_cols, 3 * num_rows))
pgc_r = 100.0 * (sol.index - cycle_start) / duration_of_gait_cycle
for i in range(len(muscles)):
ax = pl.subplot(num_rows, num_cols, i + 1)
col_label_r = sol.columns[col_indices_r[i]]
ax.plot(pgc_r, sol[col_label_r])
#col_label_l = sol.columns[col_indices_l[i]]
#ax.plot(sol.index + half_gait_cycle, sol[col_label_l])
ax.set_title(col_label_r.split('/')[-1].replace('_r_activation', ''))
ax.set_ylim(0, 1)
if i == 3:
ax.set_ylabel('activation')
if i < 6:
ax.set_xticklabels([])
i_col = i % num_cols
if i_col > 0:
ax.set_yticklabels([])
if i == 7:
ax.set_xlabel('time (% gait cycle)')
pl.savefig('gait10dof18musc_activation.png')
| apache-2.0 | -31,799,801,736,403,156 | 40.84 | 78 | 0.516571 | false |
cwangVT/DNNRegressor | RNN_model.py | 1 | 14101 | import os
import sys
import random
import numpy as np
import tensorflow as tf
import pandas as pd
import itertools
import copy
import math
import scipy.optimize
import matplotlib
matplotlib.use("Agg") # to work without X-window
import matplotlib.pyplot as plt
from DNN_model import *
from Prep_data import *
tf.logging.set_verbosity(tf.logging.ERROR)
from Create_Simulate_Data import y1, y2
def Predict_N_Step(sess, saver, pred, x_holder, input_data, keep_holder, n_step, n_features,index_input_state,
index_output_state, index_out = -1, n_state =1, n_output=1, initial_state=None, keep_rate = 1,
path_to_save = "tmp/my_model.ckpt", process = True, means=None, maxmin=None,load = False):
# === check input data shape
if load:
saver.restore(sess, path_to_save)
if x_holder.get_shape().as_list()[1]!=n_features or input_data.shape[1]!=n_features or n_state>n_features:
sys.exit("Error when predict time series: wrong input size")
if pred.get_shape().as_list()[1]!=n_state+n_output:
sys.exit("Error when predict time series: wrong output size")
    if initial_state is None: # if no specified initial state, read from input_data
initial_state = input_data[0][-n_state:]
elif np.array(initial_state).shape[1]!=n_state:
sys.exit("Error when predict time series: wrong initial state size")
# === start predicting
if process:
        if means is None or maxmin is None:
sys.exit("Error when predict time series: can not perform data processing")
predict_y = [] # predicted values (state and output labels)
total = 0 # final value (sum of output of each step)
for ii in range(n_step): # predict values for n steps
# create input features for current step
fs =np.array([[input_data[ii][jj] for jj in range(n_features)]])
# create state feautres for current step
fs[0][index_input_state] = initial_state
# calculate predicted states and output labels
pred_value = Pred_DNN(sess=sess, saver=saver,pred= pred, x_holder=x_holder, x_data = fs,
keep_prob=keep_holder, keep_rate=keep_rate, path_to_save = path_to_save)
# post process the predicted values
pred_value= np.array([[ Post_Process(pred_value[0][jj],n_features+jj, means,maxmin)
for jj in range(n_state+n_output)]])
# append predicted values to return list
# add output to "total"
predict_y.append(pred_value[0][:])
total+=pred_value[0][index_out]
# create state features for next step (preprocess current predicted state features )
initial_state = np.array([ Pre_Process(pred_value[0][index_output_state[jj]],index_input_state[jj], means,maxmin)
for jj in range(n_state)])
# return predict values for each step and sum of output values
return predict_y,total
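# Predict_N_Step_np mirrors Predict_N_Step, but evaluates the network with plain
# numpy weight/bias arrays via Recreate_NN instead of a live TensorFlow session.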
def Predict_N_Step_np(input_data, py_weights, py_biases,DIM, n_step, n_features,index_input_state,
index_output_state, index_out = -1, n_state =1, n_output=1, initial_state=None,
process = True, means=None, maxmin=None,load = False):
# === check input data shape
if py_weights[0].shape[0]!=n_features or input_data.shape[1]!=n_features or n_state>n_features:
sys.exit("Error when predict time series: wrong input size")
if py_weights[-1].shape[1]!=n_state+n_output:
sys.exit("Error when predict time series: wrong output size")
    if initial_state is None: # if no specified initial state, read from input_data
initial_state = input_data[0][-n_state:]
elif np.array(initial_state).shape[1]!=n_state:
sys.exit("Error when predict time series: wrong initial state size")
# === start predicting
if process:
        if means is None or maxmin is None:
sys.exit("Error when predict time series: can not perform data processing")
predict_y = [] # predicted values (state and output labels)
total = 0 # final value (sum of output of each step)
for ii in range(n_step): # predict values for n steps
# create input features for current step
fs =np.array([[input_data[ii][jj] for jj in range(n_features)]])
# create state feautres for current step
fs[0][index_input_state] = initial_state
# calculate predicted states and output labels
pred_value = Recreate_NN(fs,py_weights=py_weights,py_biases=py_biases,DIM=DIM)
# post process the predicted values
pred_value= np.array([[ Post_Process(pred_value[0][jj],n_features+jj, means,maxmin)
for jj in range(n_state+n_output)]])
# append predicted values to return list
# add output to "total"
predict_y.append(pred_value[0][:])
total+=pred_value[0][index_out]
# create state features for next step (preprocess current predicted state features )
initial_state = np.array([ Pre_Process(pred_value[0][index_output_state[jj]],index_input_state[jj], means,maxmin)
for jj in range(n_state)])
# return predict values for each step and sum of output values
return predict_y,total
if __name__ == "__main__":
# ===========================================================
# ======== Load data, create NN and perform training =======
# ===========================================================
path_to_save = "tmp/my_model.ckpt"
N_Features = 24
N_Labels = 9
input_fold = "input_data/"
output_fold = "output_data/"
X_train,Y_train,COLUMNS_ = Load_Prep_Data(input_fold+"train_set_preped.txt",N_Features,N_Labels)
X_valid,Y_valid,COLUMNS_ = Load_Prep_Data(input_fold+"valid_set_preped.txt",N_Features,N_Labels,COLUMNS_)
X_test, Y_test, COLUMNS_ = Load_Prep_Data(input_fold+"test_set_preped.txt",N_Features,N_Labels, COLUMNS_)
x_ = tf.placeholder("float64", [None, N_Features])
y_ = tf.placeholder("float64", [None,N_Labels])
keep_ = tf.placeholder(tf.float64)
DIM_ = [50,50,50,50,50]
embed_layer_, embed_coeff_ = Create_Embed_Layer(x_, N_Features)
layers0, weights0, biases0 = Create_DNN_Model(embed_layer_, y_, keep_,n_input = N_Features,n_classes=N_Labels, DIM= DIM_)
list_of_regular_ = [weights0]
list_of_train_ = [weights0,biases0]
list_of_weights_ = [] # do not use regularization
cost_, pred_ = Cost_Function(y_,layers0,list_of_regular_,beta = 0.0)
optimizer_ = Optimizer(cost_, learning_rate = 0.00005,var_list=list_of_train_ )
saver_ = tf.train.Saver()
config = tf.ConfigProto()
config = tf.ConfigProto(device_count = {'GPU':0 })
with tf.Session(config=config) as sess0_:
Train_DNN(sess0_, saver_, optimizer_, pred_, cost_,x_, X_train, X_valid, X_test,
y_, Y_train, Y_valid, Y_test, keep_, dropout_rate = 1,path_to_save = path_to_save,
training_epochs =0, batch_size=64, display_step=1, accuracy_step = 1, save_step = 10)
# =================================================
# =================================================
# ============ DNN with time series ===============
# =================================================
# =================================================
print "========= now entering DNN with time series ==========="
n_step_ = 20 # number of time steps
index_control_ = range(8) # index of control variables
# namely, the variables in optimization
index_input_state_ = range(16,24) # index of input state variables
index_output_state_ = range(8) # index of output state variables
index_out_ = -1 # index of final output
# the cost function in optimization
n_state_ = len(index_input_state_) # number of states
if n_state_ != len(index_output_state_):
sys.exit("different number of state-variables for input and output")
n_output_ = 1
n_control_ = len(index_control_)
if n_output_ + n_state_ != N_Labels:
sys.exit("wrong number of state-variables, output, or labels")
ctrl_range = [-1.0,0.0,1.0]
#ctrl_range = [-1.0,-0.75,-0.5,-0.25,0.0,0.25,0.5,0.75,1.0]
means_,maxmin_ = Load_Process_Param(input_fold+"preprocess.txt",COLUMNS_) # data for pre- and post processing
predict_y1 = [] # predicted states
predict_y2 = [] # predicted output
true_y1 =[] # true states
true_y2 =[] # true output
# ==================================================================
# ===== testing accuracy of DNN time series (with for loop) ========
# ==================================================================
with tf.Session(config=config) as sess1_:
# ===== initialize features for prediction (pre-processed)
# for continuous input
# fs = [[random.random()-0.5 for ii in range(N_Features)]]
# for ii in range(n_state_):
# fs[0][N_Features-n_state_+ii] = Pre_Process(0,N_Features-1, means_,maxmin_) # initial state set to 0
# for discrete input
fs = [[Pre_Process(random.choice(ctrl_range),ii, means_,maxmin_) for ii in range(N_Features)]]
for ii in range(n_state_):
fs[0][index_input_state_[ii]] = Pre_Process(0,index_input_state_[ii], means_,maxmin_) # initial state set to 0
# --------------- (for testing, manually set other variable )-----------------------------
# for ii in range(n_control_):
# fs[0][ii+n_control_] = Pre_Process(0.,ii+n_control_, means_,maxmin_)
# --------------- end for testing ---------------------------------------------------------
# ===== initialize features for true labels (post-processed)
true_fs = [[Post_Process(fs[0][ii],ii, means_,maxmin_) for ii in range(N_Features)]]
for ii in range(n_state_):
true_fs[0][index_input_state_[ii]] = 0 # initial state set to 0
# ===== load variables of DNN
saver_.restore(sess1_, path_to_save)
# ===== start predicting =========
input_data =[] # save the generated input data (control variable) for further testing
for step in range(n_step_):
input_data.append(fs[0])
# ========= true lable, use post-processed data
true_value = y1(true_fs[0])+y2(true_fs[0])
true_y1.append([true_value[index_output_state_[ii]] for ii in range(n_state_)])
true_y2.append(true_value[index_out_])
# ========= predict labels
# ==== get predicted labels; here input data (fs) and output data (pred_value) are pre-processed,
pred_value = Pred_DNN(sess1_, saver_, pred_, x_, fs, keep_, path_to_save = path_to_save)
# ==== get post-processed labels, and append them to output result
pred_value =[[ Post_Process(pred_value[0][index],N_Features+index, means_,maxmin_) for index in range(N_Labels)]]
predict_y1.append([pred_value[0][index_output_state_[ii]] for ii in range(n_state_)])
predict_y2.append(pred_value[0][index_out_])
# ==== pre-process predicted label and generate inputs for the next prediction
# continuous input features
# fs = [[random.random()-0.5 for ii in range(N_Features)]]
# for ii in range(n_state_):
# fs[0][N_Features-n_state_+ii] = Pre_Process(pred_value[0][ii],N_Features-n_state_+ii, means_,maxmin_)
# discrete input features
fs = [[Pre_Process(random.choice(ctrl_range),ii, means_,maxmin_) for ii in range(N_Features)]]
for ii in range(n_state_):
fs[0][index_input_state_[ii]] = Pre_Process(pred_value[0][index_output_state_[ii]],
index_input_state_[ii], means_,maxmin_)
# --------------- (for testing, manually set other variable )-----------------------------
# for ii in range(n_control_):
# fs[0][ii+n_control_] = Pre_Process(0.,ii+n_control_, means_,maxmin_)
# --------------- end for testing ---------------------------------------------------------
# ==== generate inputs for next true label
true_fs = [[Post_Process(fs[0][ii],ii, means_,maxmin_) for ii in range(N_Features)]]
for ii in range(n_state_):
true_fs[0][index_input_state_[ii]] = true_value[ii]
# print true & predicted states VS time step
fig = plt.figure(0)
plt.plot(predict_y1,'r--', label = 'predict')
plt.plot(true_y1,'b-.' , label = 'label')
# plt.legend(loc='upper left')
plt.xlabel('time step')
plt.ylabel('value')
plt.title("Y1")
plt.grid(True)
plt.savefig(input_fold+"test_Y1.png")
plt.close(fig)
# print true & predicted output VS time step
fig = plt.figure(1)
plt.plot(predict_y2,'r--', label = 'predict')
plt.plot(true_y2,'b-.' , label = 'label')
plt.legend(loc='upper left')
plt.xlabel('time step')
plt.ylabel('value')
plt.title("Y2")
plt.grid(True)
plt.savefig(input_fold+"test_Y2.png")
plt.close(fig)
# sys.exit()
# ======= end of testing accuracy of DNN time series ========
# ==================================================================
# ================= test Predict_N_Step function ===================
# ==================================================================
input_data = np.array(input_data)
with tf.Session(config=config) as sess2_:
saver_.restore(sess2_, path_to_save)
predict_v, total_E = Predict_N_Step(sess=sess2_, saver=saver_, pred=pred_, x_holder=x_,
input_data = input_data, keep_holder=keep_, n_step=n_step_, n_features=N_Features,
index_input_state = index_input_state_, index_output_state = index_output_state_,
n_state =n_state_, n_output=1, index_out=index_out_, initial_state=None, keep_rate = 1,
path_to_save = "tmp/my_model.ckpt", process = True, means=means_, maxmin=maxmin_)
# difference between Predict_N_Step and for loop
print "======== testing Predicting function ========"
print np.array(predict_v)[:,:-1] - (predict_y1)
print np.array(predict_v)[:,-1] - predict_y2
print "=========== should be closed to 0 ==========="
# sys.exit()
# ==================================================================
# =============== test Predict_N_Step_np function =================
# ==================================================================
with tf.Session(config=config) as sess3_:
saver_.restore(sess3_, path_to_save)
py_weights_, py_biases_ = Load_Variable(sess3_, saver_,weights0,biases0, path_to_save = path_to_save)
predict_v, total_E = Predict_N_Step_np(input_data=input_data, py_weights=py_weights_, py_biases=py_biases_,DIM = DIM_, n_step=n_step_,
n_features=N_Features, index_input_state = index_input_state_, index_output_state= index_output_state_,
index_out = index_out_, n_state = n_state_, n_output=1, means=means_, maxmin=maxmin_)
# difference between Predict_N_Step and for loop
print "======== testing Predicting function NP ========"
print np.array(predict_v)[:,:-1] - (predict_y1)
print np.array(predict_v)[:,-1] - predict_y2
print "=========== should be closed to 0 ==========="
| gpl-3.0 | -117,274,546,670,985,020 | 47.12628 | 135 | 0.624424 | false |
drnextgis/QGIS | python/plugins/processing/algs/taudem/TauDEMUtils.py | 1 | 3962 | # -*- coding: utf-8 -*-
"""
***************************************************************************
TauDEMUtils.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import object
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import subprocess
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import isMac
class TauDEMUtils(object):
TAUDEM_FOLDER = 'TAUDEM_FOLDER'
TAUDEM_MULTIFILE_FOLDER = 'TAUDEM_MULTIFILE_FOLDER'
TAUDEM_USE_SINGLEFILE = 'TAUDEM_USE_SINGLEFILE'
TAUDEM_USE_MULTIFILE = 'TAUDEM_USE_MULTIFILE'
MPIEXEC_FOLDER = 'MPIEXEC_FOLDER'
MPI_PROCESSES = 'MPI_PROCESSES'
@staticmethod
def taudemPath():
folder = ProcessingConfig.getSetting(TauDEMUtils.TAUDEM_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'pitremove')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'pitremove')):
folder = testfolder
return folder
@staticmethod
def mpiexecPath():
folder = ProcessingConfig.getSetting(TauDEMUtils.MPIEXEC_FOLDER)
if folder is None:
folder = ''
if isMac():
testfolder = os.path.join(QgsApplication.prefixPath(), 'bin')
if os.path.exists(os.path.join(testfolder, 'mpiexec')):
folder = testfolder
else:
testfolder = '/usr/local/bin'
if os.path.exists(os.path.join(testfolder, 'mpiexec')):
folder = testfolder
return folder
@staticmethod
def taudemDescriptionPath():
return os.path.normpath(
os.path.join(os.path.dirname(__file__), 'description'))
@staticmethod
def executeTauDEM(command, progress):
loglines = []
loglines.append(TauDEMUtils.tr('TauDEM execution console output'))
fused_command = ''.join(['"%s" ' % c for c in command])
progress.setInfo(TauDEMUtils.tr('TauDEM command:'))
progress.setCommand(fused_command.replace('" "', ' ').strip('"'))
proc = subprocess.Popen(
fused_command,
shell=True,
stdout=subprocess.PIPE,
stdin=subprocess.DEVNULL,
stderr=subprocess.STDOUT,
universal_newlines=True,
).stdout
for line in iter(proc.readline, ''):
progress.setConsoleInfo(line)
loglines.append(line)
ProcessingLog.addToLog(ProcessingLog.LOG_INFO, loglines)
@staticmethod
def tr(string, context=''):
if context == '':
context = 'TauDEMUtils'
return QCoreApplication.translate(context, string)
| gpl-2.0 | -4,206,243,438,572,159,500 | 35.018182 | 75 | 0.548713 | false |
coleifer/peewee | examples/twitter/app.py | 1 | 11440 | import datetime
from flask import Flask
from flask import g
from flask import redirect
from flask import request
from flask import session
from flask import url_for, abort, render_template, flash
from functools import wraps
from hashlib import md5
from peewee import *
# config - aside from our database, the rest is for use by Flask
DATABASE = 'tweepee.db'
DEBUG = True
SECRET_KEY = 'hin6bab8ge25*r=x&+5$0kn=-#log$pt^#@vrqjld!^2ci@g*b'
# create a flask application - this ``app`` object will be used to handle
# inbound requests, routing them to the proper 'view' functions, etc
app = Flask(__name__)
app.config.from_object(__name__)
# create a peewee database instance -- our models will use this database to
# persist information
database = SqliteDatabase(DATABASE)
# model definitions -- the standard "pattern" is to define a base model class
# that specifies which database to use. then, any subclasses will automatically
# use the correct storage. for more information, see:
# https://charlesleifer.com/docs/peewee/peewee/models.html#model-api-smells-like-django
class BaseModel(Model):
class Meta:
database = database
# the user model specifies its fields (or columns) declaratively, like django
class User(BaseModel):
username = CharField(unique=True)
password = CharField()
email = CharField()
join_date = DateTimeField()
# it often makes sense to put convenience methods on model instances, for
# example, "give me all the users this user is following":
def following(self):
# query other users through the "relationship" table
return (User
.select()
.join(Relationship, on=Relationship.to_user)
.where(Relationship.from_user == self)
.order_by(User.username))
def followers(self):
return (User
.select()
.join(Relationship, on=Relationship.from_user)
.where(Relationship.to_user == self)
.order_by(User.username))
def is_following(self, user):
return (Relationship
.select()
.where(
(Relationship.from_user == self) &
(Relationship.to_user == user))
.exists())
def gravatar_url(self, size=80):
return 'http://www.gravatar.com/avatar/%s?d=identicon&s=%d' % \
(md5(self.email.strip().lower().encode('utf-8')).hexdigest(), size)
# this model contains two foreign keys to user -- it essentially allows us to
# model a "many-to-many" relationship between users. by querying and joining
# on different columns we can expose who a user is "related to" and who is
# "related to" a given user
class Relationship(BaseModel):
from_user = ForeignKeyField(User, backref='relationships')
to_user = ForeignKeyField(User, backref='related_to')
class Meta:
indexes = (
# Specify a unique multi-column index on from/to-user.
(('from_user', 'to_user'), True),
)
# a dead simple one-to-many relationship: one user has 0..n messages, exposed by
# the foreign key. because we set backref='messages', a user's messages are
# accessible as a special attribute, User.messages
class Message(BaseModel):
user = ForeignKeyField(User, backref='messages')
content = TextField()
pub_date = DateTimeField()
# simple utility function to create tables
def create_tables():
with database:
database.create_tables([User, Relationship, Message])
# flask provides a "session" object, which allows us to store information across
# requests (stored by default in a secure cookie). this function allows us to
# mark a user as being logged-in by setting some values in the session data:
def auth_user(user):
session['logged_in'] = True
session['user_id'] = user.id
session['username'] = user.username
flash('You are logged in as %s' % (user.username))
# get the user from the session
def get_current_user():
if session.get('logged_in'):
return User.get(User.id == session['user_id'])
# view decorator which indicates that the requesting user must be authenticated
# before they can access the view. it checks the session to see if they're
# logged in, and if not redirects them to the login view.
def login_required(f):
@wraps(f)
def inner(*args, **kwargs):
if not session.get('logged_in'):
return redirect(url_for('login'))
return f(*args, **kwargs)
return inner
# given a template and a SelectQuery instance, render a paginated list of
# objects from the query inside the template
def object_list(template_name, qr, var_name='object_list', **kwargs):
kwargs.update(
page=int(request.args.get('page', 1)),
pages=qr.count() / 20 + 1)
kwargs[var_name] = qr.paginate(kwargs['page'])
return render_template(template_name, **kwargs)
# retrieve a single object matching the specified query or 404 -- this uses the
# shortcut "get" method on model, which retrieves a single object or raises a
# DoesNotExist exception if no matching object exists
# https://charlesleifer.com/docs/peewee/peewee/models.html#Model.get)
def get_object_or_404(model, *expressions):
try:
return model.get(*expressions)
except model.DoesNotExist:
abort(404)
# custom template filter -- flask allows you to define these functions and then
# they are accessible in the template -- this one returns a boolean whether the
# given user is following another user.
@app.template_filter('is_following')
def is_following(from_user, to_user):
return from_user.is_following(to_user)
# Request handlers -- these two hooks are provided by flask and we will use them
# to create and tear down a database connection on each request.
@app.before_request
def before_request():
g.db = database
g.db.connect()
@app.after_request
def after_request(response):
g.db.close()
return response
# views -- these are the actual mappings of url to view function
@app.route('/')
def homepage():
# depending on whether the requesting user is logged in or not, show them
# either the public timeline or their own private timeline
if session.get('logged_in'):
return private_timeline()
else:
return public_timeline()
@app.route('/private/')
def private_timeline():
# the private timeline exemplifies the use of a subquery -- we are asking for
# messages where the person who created the message is someone the current
# user is following. these messages are then ordered newest-first.
user = get_current_user()
messages = (Message
.select()
.where(Message.user << user.following())
.order_by(Message.pub_date.desc()))
return object_list('private_messages.html', messages, 'message_list')
@app.route('/public/')
def public_timeline():
# simply display all messages, newest first
messages = Message.select().order_by(Message.pub_date.desc())
return object_list('public_messages.html', messages, 'message_list')
@app.route('/join/', methods=['GET', 'POST'])
def join():
if request.method == 'POST' and request.form['username']:
try:
with database.atomic():
# Attempt to create the user. If the username is taken, due to the
# unique constraint, the database will raise an IntegrityError.
user = User.create(
username=request.form['username'],
password=md5((request.form['password']).encode('utf-8')).hexdigest(),
email=request.form['email'],
join_date=datetime.datetime.now())
# mark the user as being 'authenticated' by setting the session vars
auth_user(user)
return redirect(url_for('homepage'))
except IntegrityError:
flash('That username is already taken')
return render_template('join.html')
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST' and request.form['username']:
try:
pw_hash = md5(request.form['password'].encode('utf-8')).hexdigest()
user = User.get(
(User.username == request.form['username']) &
(User.password == pw_hash))
except User.DoesNotExist:
flash('The password entered is incorrect')
else:
auth_user(user)
return redirect(url_for('homepage'))
return render_template('login.html')
@app.route('/logout/')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('homepage'))
@app.route('/following/')
@login_required
def following():
user = get_current_user()
return object_list('user_following.html', user.following(), 'user_list')
@app.route('/followers/')
@login_required
def followers():
user = get_current_user()
return object_list('user_followers.html', user.followers(), 'user_list')
@app.route('/users/')
def user_list():
users = User.select().order_by(User.username)
return object_list('user_list.html', users, 'user_list')
@app.route('/users/<username>/')
def user_detail(username):
# using the "get_object_or_404" shortcut here to get a user with a valid
# username or short-circuit and display a 404 if no user exists in the db
user = get_object_or_404(User, User.username == username)
# get all the users messages ordered newest-first -- note how we're accessing
# the messages -- user.message_set. could also have written it as:
# Message.select().where(Message.user == user)
messages = user.messages.order_by(Message.pub_date.desc())
return object_list('user_detail.html', messages, 'message_list', user=user)
@app.route('/users/<username>/follow/', methods=['POST'])
@login_required
def user_follow(username):
user = get_object_or_404(User, User.username == username)
try:
with database.atomic():
Relationship.create(
from_user=get_current_user(),
to_user=user)
except IntegrityError:
pass
flash('You are following %s' % user.username)
return redirect(url_for('user_detail', username=user.username))
@app.route('/users/<username>/unfollow/', methods=['POST'])
@login_required
def user_unfollow(username):
user = get_object_or_404(User, User.username == username)
(Relationship
.delete()
.where(
(Relationship.from_user == get_current_user()) &
(Relationship.to_user == user))
.execute())
flash('You are no longer following %s' % user.username)
return redirect(url_for('user_detail', username=user.username))
@app.route('/create/', methods=['GET', 'POST'])
@login_required
def create():
user = get_current_user()
if request.method == 'POST' and request.form['content']:
message = Message.create(
user=user,
content=request.form['content'],
pub_date=datetime.datetime.now())
flash('Your message has been created')
return redirect(url_for('user_detail', username=user.username))
return render_template('create.html')
@app.context_processor
def _inject_user():
return {'current_user': get_current_user()}
# allow running from the command line
if __name__ == '__main__':
create_tables()
app.run()
| mit | 629,514,362,000,838,700 | 35.433121 | 89 | 0.658741 | false |
stevei101/flask-neo4j | flask_neo4j.py | 1 | 5909 | from __future__ import print_function
import time
import logging
from py2neo import Graph,Node
from py2neo.ext import ogm
from py2neo.packages.httpstream.http import SocketError
log = logging.getLogger('flask.neo4j')
logging.basicConfig()
# Find the stack on which we want to store the GraphDatabaseService instance.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Neo4j(object):
"""Automatically connects to Neo4j graph database using parameters defined
in Flask configuration.
One can use this extension by providing the Flask app on instantiation or
by calling the :meth:`init_app` method on an instance object of `Neo4j`. An example
of providing the application on instantiation: ::
app = Flask(__name__)
n4j = Neo4j(app)
...and an example calling the :meth:`init_app` method instead: ::
n4j = Neo4j()
def init_app():
app = Flask(__name__)
n4j.init_app(app)
return app
    One can also provide a dict of indexes that will be used to automatically
get or create indexes in the graph database ::
app = Flask(__name__)
graph_indexes = {'Species': neo4j.Node}
n4j = Neo4j(app, graph_indexes)
print n4j.gdb.neo4j_version
species_index = n4j.index['Species']
...
"""
def __init__(self, app=None, indexes=None):
self.app = app
self._indexes = indexes
if app is not None:
self.init_app(app)
print ("flask.ext.Neo4j init_app called")
def init_app(self, app):
"""Initialize the `app` for use with this :class:`~Neo4j`. This is
called automatically if `app` is passed to :meth:`~Neo4j.__init__`.
The app is configured according to these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:param flask.Flask app: the application configured for use with
this :class:`~Neo4j`
"""
self.app = app
app.n4j = self
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['neo4j'] = self
# Use the newstyle teardown_appcontext if it's available,
# otherwise fall back to the request context
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
else:
app.teardown_request(self.teardown)
def teardown(self, exception):
ctx = stack.top # TODO clean up teardown related to graph_db behavior
if hasattr(ctx, 'graph_db'):
# py2neo does not have an 'open' connection that needs closing
ctx.graph_db = None
@property
def gdb(self):
"""The graph database service instance as a property, for convenience.
Note: The property will use these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:return: the graph database service as a property
"""
retry = False
if 'CONNECTION_RETRY' in self.app.config:
retry = self.app.config['CONNECTION_RETRY']
retry_interval = 5
if 'RETRY_INTERVAL' in self.app.config:
retry_interval = self.app.config['RETRY_INTERVAL']
retry_count = 0
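        # attempt the initial connection; optionally retry up to 3 times on socket errors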
try:
self.graph_db = Graph(self.app.config['GRAPH_DATABASE'])
except SocketError as se:
log.error('SocketError: {0}'.format(se.message))
if retry:
while retry_count < 3:
log.debug('Waiting {0}secs before Connection Retry to GraphDatabaseService'.format(
retry_interval
))
time.sleep(retry_interval)
#time.sleep(1)
retry_count += 1
try:
self.graph_db = Graph(self.app.config['GRAPH_DATABASE'])
except SocketError as sse:
log.error('SocketError: {0}'.format(sse.message))
if not hasattr(self, 'index'):
self.index = {}
# add all the indexes as app attributes
if self._indexes is not None:
for i, i_type in self._indexes.iteritems():
log.debug('getting or creating graph index:{0} {1}'.format(
i, i_type
))
self.index[i] = \
self.graph_db.legacy.get_or_create_index(i_type, i)
return self.graph_db
@property
def store(self):
"""
The object graph mapping store available as a property.
Note: The property will use these configuration variables
``CONNECTION_RETRY``
``RETRY_INTERVAL``
:return: the object graph mapping store property
"""
store = ogm.Store(self.gdb)
return store
def delete_index(self, index_name):
"""
Simple delete index capability that takes only a name.
Note: uses the index_types as remembered from indexes variable given at
initialization.
:param index_name: the name of the index to delete from the database
"""
i_type = self._indexes[index_name]
self.graph_db.legacy.delete_index(content_type=i_type, index_name=index_name)
if __name__ == '__main__':
from flask import Flask
app = Flask(__name__)
app.config['GRAPH_DATABASE'] = 'http://localhost:7474/db/data/'
graph_indexes = {'Species': Node}
flask4j = Neo4j(app, graph_indexes)
print (flask4j.gdb.neo4j_version)
species_index = flask4j.index['Species']
print ('species index:', species_index)
flask4j.delete_index('Species')
| mit | 5,536,978,590,505,155,000 | 34.172619 | 103 | 0.594855 | false |
AdaptivePELE/AdaptivePELE | AdaptivePELE/tests/testMD.py | 1 | 3682 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import glob
import shutil
import unittest
import AdaptivePELE.adaptiveSampling as adaptiveSampling
class TestMD(unittest.TestCase):
def check_succesful_simulation(self, output, epochs, nTrajs):
for epoch in range(epochs):
self.assertTrue(os.path.exists(os.path.join(output, "%d" % epoch, "clustering", "summary.txt")))
            self.assertEqual(len(glob.glob(os.path.join(output, "%d" % epoch, "trajectory*"))), nTrajs)
            self.assertEqual(len(glob.glob(os.path.join(output, "%d" % epoch, "report*"))), nTrajs)
self.assertTrue(os.path.exists(os.path.join(output, "%d" % epoch, "clustering", "object.pkl")))
def testOpenMM3ptb(self):
output_path = "tests/data/openmm_3ptb"
controlFile = "tests/data/templetized_controlFile_3ptb_md.conf"
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 2, 4)
# cleanup
shutil.rmtree(output_path)
def testOpenMM3ptb_noligand(self):
output_path = "tests/data/openmm_3ptb_no_ligand"
controlFile = "tests/data/templetized_controlFile_3ptb_no_ligand_md.conf"
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 1, 4)
# cleanup
shutil.rmtree(output_path)
def testOpenMM3ptb_cyl(self):
output_path = "tests/data/openmm_3ptb_cyl"
controlFile = "tests/data/templetized_controlFile_3ptb_cyl_md.conf"
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 2, 4)
# cleanup
shutil.rmtree(output_path)
def testOpenMM1ab1(self):
output_path = "tests/data/openmm_1ab1"
controlFile = "tests/data/templetized_controlFile_1ab1_md.conf"
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 2, 4)
# cleanup
shutil.rmtree(output_path)
def testRestartAt0(self):
output_path = "tests/data/openmm_restart_0"
controlFile = "tests/data/templetized_controlFile_restart_0_md.conf"
if os.path.exists(output_path):
shutil.rmtree(output_path)
shutil.copytree("tests/data/restart_0", output_path)
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 2, 4)
# cleanup
shutil.rmtree(output_path)
def testRestartAt1(self):
output_path = "tests/data/openmm_restart_1"
controlFile = "tests/data/templetized_controlFile_restart_1_md.conf"
if os.path.exists(output_path):
shutil.rmtree(output_path)
shutil.copytree("tests/data/restart_1", output_path)
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 2, 4)
# cleanup
shutil.rmtree(output_path)
def test_simulation_cofactors(self):
output_path = "tests/data/cofactors"
controlFile = "tests/data/cofactors.conf"
if os.path.exists(output_path):
shutil.rmtree(output_path)
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 1, 4)
# cleanup
shutil.rmtree(output_path)
def test_simulation_cofactors_ligand(self):
output_path = "tests/data/cofactors_ligand"
controlFile = "tests/data/cofactors_ligand.conf"
if os.path.exists(output_path):
shutil.rmtree(output_path)
adaptiveSampling.main(controlFile)
self.check_succesful_simulation(output_path, 1, 4)
# cleanup
shutil.rmtree(output_path)
| mit | 6,358,680,688,635,963,000 | 38.591398 | 108 | 0.663498 | false |
RedHatSatellite/satellite-clone | library/parse_backup_metadata.py | 1 | 2177 | import yaml
import re
from ansible.module_utils.basic import *
# module: parse_backup_metadata
# description:
#   - Return the Satellite version
#     specified in a Satellite backup
# notes:
#   - The Satellite version is determined from the Satellite rpm
#     version using the backup's rpm list from metadata.yml
# options:
# metadata_path:
# description:
# - Full path (including file name) to metadata.yml
# required: true
SUPPORTED_VERSIONS = ["6.5", "6.6", "6.7", "6.8", "6.9"]
def find_rpm(rpms, pattern):
matches = [r for r in rpms if pattern.match(r)]
if len(matches) > 0:
return matches[0]
else:
return False
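# extract "major.minor" from the first rpm entry matching pattern,
# e.g. an entry like "satellite-6.9.2-1..." yields "6.9"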
def get_rpm_version(rpms, pattern, hyphen_split=1, version_split=2):
rpm_pattern = re.compile(pattern)
rpm = find_rpm(rpms, rpm_pattern)
if rpm:
rpm_version = rpm.split("-")[hyphen_split]
return '.'.join(rpm_version.split('.')[0:version_split])
else:
return False
def parse_backup_metadata(params):
with open(params["metadata_path"]) as data_file:
data = yaml.load(data_file)
rpm_key = ":rpms" if ":rpms" in data else "rpms"
rpms = data[rpm_key]
    satellite_version = get_rpm_version(rpms, r"^satellite-\d+.*")
if not satellite_version or satellite_version not in SUPPORTED_VERSIONS:
msg = "Satellite version is not supported or found. " \
"Only Satellite {0} is supported.".format(", ".join(SUPPORTED_VERSIONS))
return False, dict(msg=msg)
msg = "{0} backup found".format(satellite_version)
result = dict(satellite_version=satellite_version,
msg=msg,
changed=False)
return True, result
def main():
fields = {
"metadata_path": {"required": True, "type": "str"}
}
module = AnsibleModule(argument_spec=fields)
success, result = parse_backup_metadata(module.params)
if success:
module.exit_json(**result)
else:
module.fail_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 | 5,713,598,998,334,430,000 | 28.026667 | 86 | 0.62701 | false |
liveaverage/baruwa | src/baruwa/reports/management/commands/sendpdfreports.py | 1 | 17166 | #
# Baruwa - Web 2.0 MailScanner front-end.
# Copyright (C) 2010-2012 Andrew Colin Kissa <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# vim: ai ts=4 sts=4 et sw=4
#
import re
import datetime
from optparse import make_option
from StringIO import StringIO
from reportlab.lib import colors
from reportlab.lib.units import inch
from reportlab.graphics.shapes import Rect
from reportlab.graphics.shapes import Drawing
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus import SimpleDocTemplate, Spacer, Table, \
TableStyle, Paragraph, Image, PageBreak
from django.conf import settings
from django.db.models import Count, Sum, Q
from django.contrib.auth.models import User
from django.core.validators import email_re
from django.utils.translation import ugettext as _
from django.template.loader import render_to_string
from django.template.defaultfilters import filesizeformat
from django.core.mail import EmailMessage, SMTPConnection
from django.core.management.base import BaseCommand, CommandError
from baruwa.messages.models import Message
from baruwa.messages.models import MessageTotals
from baruwa.accounts.models import UserProfile, UserAddresses
from baruwa.messages.templatetags.messages_extras import tds_trunc
from baruwa.utils.graphs import PieChart, PIE_CHART_COLORS, BarChart
def draw_square(color):
"draws a square"
square = Drawing(5, 5)
sqr = Rect(0, 2.5, 5, 5)
sqr.fillColor = color
sqr.strokeColor = color
square.add(sqr)
return square
def checkdate(date):
"check a date string"
year, month, day = map(int, date.split('-'))
try:
datetime.date(year, month, day)
return True
except ValueError:
return False
class Command(BaseCommand):
"Generate and email PDF reports"
help = _("Generates and sends PDF summary reports via email")
option_list = BaseCommand.option_list + (
make_option('--bydomain', action='store_true', dest='by_domain',
default=False, help='Generate reports per domain'),
make_option('--domain', dest='domain_name', default='all',
help='Specify the domain to report on, use "all" for all the domains'),
make_option('--copyadmin', action='store_true', dest='copy_admin',
default=False, help='Send a copy of the report to the admin'),
make_option('--period', dest='period', default=None,
help='Period to report on: valid options are '
'"day(s)","week(s)"" Examples: '
'--period="1 day" --period="2 weeks"'),
make_option('--full', action='store_true', dest='include_daily',
default=False, help='Include the daily totals table'),
make_option('--startdate', dest='startdate', default=None,
help='Start date to report on: YYYY-MM-DD'),
make_option('--enddate', dest='enddate', default=None,
help='End date to report on: YYYY-MM-DD'),
)
def handle(self, *args, **options):
if len(args) != 0:
raise CommandError(_("Command doesn't accept any arguments"))
by_domain = options.get('by_domain')
domain_name = options.get('domain_name')
copy_admin = options.get('copy_admin')
period = options.get('period')
include_daily = options.get('include_daily')
startdate = options.get('startdate')
end_date = options.get('enddate')
enddate = None
if startdate and end_date:
if not checkdate(startdate) or not checkdate(end_date):
raise CommandError(_("The startdate, enddate specified is invalid"))
daterange = (startdate, end_date)
else:
daterange = None
period_re = re.compile(r"(?P<num>(\d+))\s+(?P<period>(day|week|month))(?:s)?")
if period:
match = period_re.match(period)
if not match:
raise CommandError(_("The period you specified is invalid"))
num = match.group('num')
ptype = match.group('period')
if not ptype.endswith('s'):
ptype = ptype + 's'
delta = datetime.timedelta(**{ptype: int(num)})
enddate = datetime.date.today() - delta
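        # Illustrative walk-through of the option parsing above (values are
        # made up): --period="2 weeks" yields num="2" and ptype="weeks", so
        # delta = datetime.timedelta(weeks=2) and enddate becomes today minus
        # two weeks; the report queries below then only consider messages
        # newer than that date.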
table_style = TableStyle([
('FONT', (0, 0), (-1, -1), 'Helvetica'),
('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
('FONTSIZE', (0, 0), (-1, -1), 8),
('GRID', (0, 0), (-1, -1), 0.15, colors.black),
('ALIGN', (0, 0), (-1, 0), 'CENTER'),
('ALIGN', (4, 1), (-1, -1), 'CENTER'),
('ALIGN', (0, 0), (0, -1), 'CENTER'),
('VALIGN', (4, 1), (-1, -1), 'MIDDLE'),
('SPAN', (4, 1), (-1, -1)),
])
styles = getSampleStyleSheet()
reports = [
[
'from_address', {'from_address__exact': ""}, 'num_count',
'Top senders by quantity'],
[
'from_address', {'from_address__exact': ""}, 'total_size',
'Top senders by volume'],
[
'from_domain', {'from_domain__exact': ""}, 'num_count',
'Top sender domains by quantity'],
[
'from_domain', {'from_domain__exact': ""}, 'total_size',
'Top sender domains by volume'],
[
'to_address', {'to_address__exact': ""}, 'num_count',
'Top recipients by quantity'],
[
'to_address', {'to_address__exact': ""}, 'total_size',
'Top recipients by volume'],
[
'to_domain', {'to_domain__exact': "",
'to_domain__isnull': False}, 'num_count',
'Top recipient domains by quantity'],
[
'to_domain', {'to_domain__exact': "",
'to_domain__isnull': False}, 'total_size',
'Top recipient domains by volume'],
]
emails = []
admin_addrs = []
if copy_admin:
mails = User.objects.values('email').filter(is_superuser=True)
admin_addrs = [mail['email'] for mail in mails]
from_email = getattr(settings, 'DEFAULT_FROM_EMAIL',
'postmaster@localhost')
url = getattr(settings, 'QUARANTINE_REPORT_HOSTURL', '')
logo_dir = getattr(settings, 'MEDIA_ROOT', '')
img = Image(logo_dir + '/imgs/css/logo.jpg')
def build_chart(data, column, order, title):
"build chart"
headings = [('', _('Address'), _('Count'), _('Volume'), '')]
rows = [[draw_square(PIE_CHART_COLORS[index]),
tds_trunc(row[column], 45), row['num_count'],
filesizeformat(row['total_size']), '']
for index, row in enumerate(data)]
if len(rows) != 10:
missing = 10 - len(rows)
add_rows = [
('', '', '', '', '') for ind in range(missing)
]
rows.extend(add_rows)
headings.extend(rows)
dat = [row[order] for row in data]
total = sum(dat)
labels = [
("%.1f%%" % ((1.0 * row[order] / total) * 100))
for row in data
]
pie = PieChart()
pie.chart.labels = labels
pie.chart.data = dat
headings[1][4] = pie
table_with_style = Table(headings, [0.2 * inch,
2.8 * inch, 0.5 * inch, 0.7 * inch, 3.2 * inch])
table_with_style.setStyle(table_style)
paragraph = Paragraph(title, styles['Heading1'])
return [paragraph, table_with_style]
def build_parts(account, enddate, isdom=None, daterange=None):
"build parts"
parts = []
sentry = 0
for report in reports:
column = report[0]
exclude_kwargs = report[1]
order_by = "-%s" % report[2]
order = report[2]
title = report[3]
if isdom:
#dom
data = Message.objects.values(column).\
filter(Q(from_domain=account.address) | \
Q(to_domain=account.address)).\
exclude(**exclude_kwargs).annotate(
num_count=Count(column), total_size=Sum('size')
).order_by(order_by)
                    if daterange:
                        # filter() returns a new queryset, so keep the result
                        data = data.filter(date__range=(daterange[0], daterange[1]))
                    elif enddate:
                        data = data.filter(date__gt=enddate)
data = data[:10]
else:
#all users
data = Message.report.all(user, enddate, daterange).values(
column).exclude(**exclude_kwargs).annotate(
num_count=Count(column), total_size=Sum('size')
).order_by(order_by)
data = data[:10]
if data:
sentry += 1
pgraphs = build_chart(data, column, order, title)
parts.extend(pgraphs)
parts.append(Spacer(1, 70))
if (sentry % 2) == 0:
parts.append(PageBreak())
parts.append(Paragraph(_('Message Totals'), styles['Heading1']))
if isdom:
#doms
msg_totals = MessageTotals.objects.doms(account.address, enddate)
else:
#norm
filters = []
addrs = [
addr.address for addr in UserAddresses.objects.filter(
user=account
).exclude(enabled__exact=0)]
if enddate:
efilter = {
'filter': 3,
'field': 'date',
'value': str(enddate)
}
filters.append(efilter)
msg_totals = MessageTotals.objects.all(
account, filters, addrs,
profile.account_type,
daterange)
mail_total = []
spam_total = []
virus_total = []
dates = []
if include_daily:
rows = [(
Table([[draw_square(colors.white),
Paragraph('Date', styles["Heading6"])]],
[0.35 * inch, 1.50 * inch, ]),
Table([[draw_square(colors.green),
Paragraph('Mail totals', styles["Heading6"])]],
[0.35 * inch, 1.50 * inch, ]),
Table([[draw_square(colors.pink),
Paragraph('Spam totals', styles["Heading6"])]],
[0.35 * inch, 1.50 * inch, ]),
Table([[draw_square(colors.red),
Paragraph('Virus totals', styles["Heading6"])]],
[0.35 * inch, 1.50 * inch, ]),
)]
for ind, msgt in enumerate(msg_totals):
if ind % 10:
dates.append('')
else:
dates.append(str(msgt.date))
mail_total.append(int(msgt.mail_total))
spam_total.append(int(msgt.spam_total))
virus_total.append(int(msgt.virus_total))
if include_daily:
rows.append((str(msgt.date), msgt.mail_total,
msgt.spam_total, msgt.virus_total))
graph = BarChart()
graph.chart.data = [
tuple(mail_total), tuple(spam_total),
tuple(virus_total)
]
graph.chart.categoryAxis.categoryNames = dates
graph_table = Table([[graph]], [7.4 * inch])
parts.append(graph_table)
if include_daily:
rows.append(('Totals', sum(mail_total), sum(spam_total),
sum(virus_total)))
parts.append(Spacer(1, 20))
graph_table = Table(rows, [1.85 * inch, 1.85 * inch,
1.85 * inch, 1.85 * inch, ])
graph_table.setStyle(TableStyle([
('FONTSIZE', (0, 0), (-1, -1), 8),
('FONT', (0, 0), (-1, -1), 'Helvetica'),
('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
('GRID', (0, 0), (-1, -1), 0.15, colors.black),
('FONT', (0, -1), (-1, -1), 'Helvetica-Bold'),
#('BACKGROUND', (0, -1), (-1, -1), colors.green),
]))
parts.append(graph_table)
return parts
def build_pdf(charts):
"Build a PDF"
pdf = StringIO()
doc = SimpleDocTemplate(pdf, topMargin=50, bottomMargin=18)
logo = [(img, _('Baruwa mail report'))]
logo_table = Table(logo, [2.0 * inch, 5.4 * inch])
logo_table.setStyle(TableStyle([
('FONT', (0, 0), (-1, 0), 'Helvetica-Bold'),
('ALIGN', (0, 0), (-1, 0), 'LEFT'),
('ALIGN', (1, 0), (-1, 0), 'RIGHT'),
('FONTSIZE', (1, 0), (-1, 0), 10),
('LINEBELOW', (0, 0), (-1, -1), 0.15, colors.black),
]))
parts = [logo_table]
parts.append(Spacer(1, 20))
parts.extend(charts)
try:
doc.build(parts)
except IndexError:
pass
return pdf
def gen_email(pdf, user, owner):
"generate and return email"
text_content = render_to_string('reports/pdf_report.txt',
{'user': user, 'url': url})
subject = _('Baruwa usage report for: %(user)s') % {
'user': owner}
if email_re.match(user.username):
toaddr = user.username
if email_re.match(user.email):
toaddr = user.email
if admin_addrs:
msg = EmailMessage(subject, text_content, from_email, [toaddr], admin_addrs)
else:
msg = EmailMessage(subject, text_content, from_email, [toaddr])
msg.attach('baruwa.pdf', pdf.getvalue(), "application/pdf")
print _("* Queue %(user)s's report to: %(addr)s") % {
'user': owner, 'addr': toaddr}
pdf.close()
return msg
print _("=================== Processing reports ======================")
if by_domain:
#do domain query
#print "camacamlilone"
domains = UserAddresses.objects.filter(Q(enabled=1), Q(address_type=1))
if domain_name != 'all':
domains = domains.filter(address=domain_name)
if not domains:
print _("========== domain name %(dom)s does not exist ==========") % {
'dom': domain_name
}
for domain in domains:
if email_re.match(domain.user.email):
parts = build_parts(domain, enddate, True, daterange)
if parts:
pdf = build_pdf(parts)
email = gen_email(pdf, domain.user, domain.address)
emails.append(email)
else:
#do normal query
profiles = UserProfile.objects.filter(send_report=1)
for profile in profiles:
try:
user = profile.user
if email_re.match(user.email) or email_re.match(user.username):
parts = build_parts(user, enddate, False, daterange)
if parts:
pdf = build_pdf(parts)
email = gen_email(pdf, user, user.username)
emails.append(email)
except User.DoesNotExist:
pass
if emails:
try:
conn = SMTPConnection()
conn.send_messages(emails)
print _("====== sending %(num)s messages =======") % {
'num': str(len(emails))}
except Exception, exception:
print _("Sending failed ERROR: %(error)s") % {'error': str(exception)}
| gpl-2.0 | 2,146,115,334,760,439,300 | 39.968974 | 92 | 0.498078 | false |
allanice001/RJ45 | plugins/op.py | 1 | 5033 | from util import hook
def mode_cmd(mode, text, inp, chan, conn, notice):
""" generic mode setting function """
split = inp.split(" ")
if split[0].startswith("#"):
channel = split[0]
target = split[1]
notice("Attempting to {} {} in {}...".format(text, target, channel))
conn.send("MODE {} {} {}".format(channel, mode, target))
else:
channel = chan
target = split[0]
notice("Attempting to {} {} in {}...".format(text, target, channel))
conn.send("MODE {} {} {}".format(channel, mode, target))
@hook.command(permissions=["op_ban", "op"])
def ban(inp, conn=None, chan=None, notice=None):
"ban [channel] <user> -- Makes the bot ban <user> in [channel]. "\
"If [channel] is blank the bot will ban <user> in "\
"the channel the command was used in."
mode_cmd("+b", "ban", inp, chan, conn, notice)
@hook.command(permissions=["op_ban", "op"])
def unban(inp, conn=None, chan=None, notice=None):
"unban [channel] <user> -- Makes the bot unban <user> in [channel]. "\
"If [channel] is blank the bot will unban <user> in "\
"the channel the command was used in."
mode_cmd("-b", "unban", inp, chan, conn, notice)
@hook.command(permissions=["op_quiet", "op"])
def quiet(inp, conn=None, chan=None, notice=None):
"quiet [channel] <user> -- Makes the bot quiet <user> in [channel]. "\
"If [channel] is blank the bot will quiet <user> in "\
"the channel the command was used in."
mode_cmd("+q", "quiet", inp, chan, conn, notice)
@hook.command(permissions=["op_quiet", "op"])
def unquiet(inp, conn=None, chan=None, notice=None):
"unquiet [channel] <user> -- Makes the bot unquiet <user> in [channel]. "\
"If [channel] is blank the bot will unquiet <user> in "\
"the channel the command was used in."
mode_cmd("-q", "unquiet", inp, chan, conn, notice)
@hook.command(permissions=["op_voice", "op"])
def voice(inp, conn=None, chan=None, notice=None):
"voice [channel] <user> -- Makes the bot voice <user> in [channel]. "\
"If [channel] is blank the bot will voice <user> in "\
"the channel the command was used in."
mode_cmd("+v", "voice", inp, chan, conn, notice)
@hook.command(permissions=["op_voice", "op"])
def devoice(inp, conn=None, chan=None, notice=None):
"devoice [channel] <user> -- Makes the bot devoice <user> in [channel]. "\
"If [channel] is blank the bot will devoice <user> in "\
"the channel the command was used in."
mode_cmd("-v", "devoice", inp, chan, conn, notice)
@hook.command(permissions=["op_op", "op"])
def op(inp, conn=None, chan=None, notice=None):
"op [channel] <user> -- Makes the bot op <user> in [channel]. "\
"If [channel] is blank the bot will op <user> in "\
"the channel the command was used in."
mode_cmd("+o", "op", inp, chan, conn, notice)
@hook.command(permissions=["op_op", "op"])
def deop(inp, conn=None, chan=None, notice=None):
"deop [channel] <user> -- Makes the bot deop <user> in [channel]. "\
"If [channel] is blank the bot will deop <user> in "\
"the channel the command was used in."
mode_cmd("-o", "deop", inp, chan, conn, notice)
@hook.command(permissions=["op_topic", "op"])
def topic(inp, conn=None, chan=None):
"topic [channel] <topic> -- Change the topic of a channel."
split = inp.split(" ")
if split[0].startswith("#"):
message = " ".join(split[1:])
chan = split[0]
out = "TOPIC {} :{}".format(chan, message)
else:
message = " ".join(split)
out = "TOPIC {} :{}".format(chan, message)
conn.send(out)
@hook.command(permissions=["op_kick", "op"])
def kick(inp, chan=None, conn=None, notice=None):
"kick [channel] <user> [reason] -- Makes the bot kick <user> in [channel] "\
"If [channel] is blank the bot will kick the <user> in "\
"the channel the command was used in."
split = inp.split(" ")
if split[0].startswith("#"):
channel = split[0]
target = split[1]
if len(split) > 2:
reason = " ".join(split[2:])
out = "KICK {} {}: {}".format(channel, target, reason)
else:
out = "KICK {} {}".format(channel, target)
else:
channel = chan
target = split[0]
if len(split) > 1:
reason = " ".join(split[1:])
out = "KICK {} {}: {}".format(channel, target, reason)
else:
out = "KICK {} {}".format(channel, target)
notice("Attempting to kick {} from {}...".format(target, channel))
conn.send(out)
@hook.command(permissions=["op_rem", "op"])
def remove(inp, chan=None, conn=None, notice=None):
"remove [channel] [user] -- Force a user to part from a channel."
split = inp.split(" ")
if split[0].startswith("#"):
message = " ".join(split[1:])
chan = split[0]
out = "REMOVE {} :{}".format(chan, message)
else:
message = " ".join(split)
out = "REMOVE {} :{}".format(chan, message)
conn.send(out)
| gpl-2.0 | 2,041,800,450,125,925,000 | 36.281481 | 80 | 0.588317 | false |
0hoo/libearth | tests/subscribe_test.py | 1 | 14162 | from datetime import datetime
from pytest import fixture, mark
from libearth.feed import Feed, Link, Person, Text
from libearth.stage import Stage
from libearth.subscribe import Body, Category, Subscription, SubscriptionList
from libearth.schema import read
from libearth.tz import utc
from .stage_test import fx_repo, fx_session
@fixture
def fx_subscription():
return Subscription(
label='Title',
feed_uri='http://example.com/rss.xml',
alternate_uri='http://example.com/'
)
def test_count_empty_list():
subs = SubscriptionList()
assert len(subs) == 0
subs = SubscriptionList(body=Body())
assert len(subs) == 0
def test_count_duplicated_url(fx_subscription):
subs = SubscriptionList()
subs.add(fx_subscription)
assert len(subs) == 1
assert list(subs) == [fx_subscription]
subs.add(fx_subscription)
assert len(subs) == 1
assert list(subs) == [fx_subscription]
def test_count_after_remove(fx_subscription):
subs = SubscriptionList()
subs.add(fx_subscription)
assert len(subs) == 1
assert list(subs) == [fx_subscription]
subs.discard(fx_subscription)
assert not subs
assert len(subs) == 0
assert list(subs) == []
XML = b'''
<opml xmlns:e="http://earthreader.org/subscription-list/" version="2.0">
<head>
<title>Earth Reader's Subscriptions</title>
<dateCreated>Sat, 18 Jun 2005 12:11:52 +0000</dateCreated>
<ownerName>Earth Reader Team</ownerName>
<ownerEmail>[email protected]</ownerEmail>
<ownerId>http://earthreader.org/</ownerId>
<expansionState>a,b,c,d</expansionState>
<vertScrollState>1</vertScrollState>
<windowTop>12</windowTop>
<windowLeft>34</windowLeft>
<windowBottom>56</windowBottom>
<windowRight>78</windowRight>
</head>
<body>
<outline text="CNET News.com" type="rss" version="RSS2"
xmlUrl="http://news.com/2547-1_3-0-5.xml"/>
<outline text="test.com" type="rss" xmlUrl="http://test.com/"
e:id="2f0bdb1d4987309e304ad0d7f982a37791fb06d4" />
</body>
</opml>
'''
XML_CATEGORY = b'''
<opml version="2.0">
<head>
<title>Earth Reader's Subscriptions</title>
<dateCreated>Sat, 18 Jun 2005 12:11:52 +0000</dateCreated>
<ownerName>Earth Reader Team</ownerName>
<ownerEmail>[email protected]</ownerEmail>
<ownerId>http://earthreader.org/</ownerId>
<expansionState>a,b,c,d</expansionState>
<vertScrollState>1</vertScrollState>
<windowTop>12</windowTop>
<windowLeft>34</windowLeft>
<windowBottom>56</windowBottom>
<windowRight>78</windowRight>
</head>
<body>
<outline text="Game" title="Game" type="category">
<outline text="valve" title="valve" xmlUrl="http://valve.com/" />
<outline text="nintendo" title="nintendo"
xmlUrl="http://nintendo.com/" />
</outline>
<outline text="Music" title="Music" type="category">
<outline text="capsule" title="capsule"
xmlUrl="http://www.capsule-web.com/" />
</outline>
</body>
</opml>
'''
XML_DUPLICATION = b'''
<opml version="2.0">
<head>
<title>Earth Reader's Subscriptions</title>
<dateCreated>Sat, 18 Jun 2005 12:11:52 +0000</dateCreated>
<ownerName>Earth Reader Team</ownerName>
<ownerEmail>[email protected]</ownerEmail>
<ownerId>http://earthreader.org/</ownerId>
</head>
<body>
<outline text="Duplicated" title="Duplicated" type="category">
<outline text="dup" title="dup" xmlUrl="http://example.com/" />
<outline text="dup" title="dup" xmlUrl="http://example.com/" />
</outline>
<outline text="Duplicated" title="Duplicated" type="category">
</outline>
</body>
</opml>
'''
XML_RECURSIVE = b'''
<opml version="2.0">
<head>
<title>Earth Reader's Subscriptions</title>
<dateCreated>Sat, 18 Jun 2005 12:11:52 +0000</dateCreated>
<ownerName>Earth Reader Team</ownerName>
<ownerEmail>[email protected]</ownerEmail>
<ownerId>http://earthreader.org/</ownerId>
<expansionState>a,b,c,d</expansionState>
<vertScrollState>1</vertScrollState>
<windowTop>12</windowTop>
<windowLeft>34</windowLeft>
<windowBottom>56</windowBottom>
<windowRight>78</windowRight>
</head>
<body>
<outline text="Game" title="Game" type="category">
<outline text="valve" title="valve" xmlUrl="http://valve.com/" />
<outline text="nintendo" title="nintendo"
xmlUrl="http://nintendo.com/" />
<outline text="Riot" title="Riot" type="category">
<outline text="LOL" title="LOL"
xmlUrl="http://leagueoflegend.com" />
</outline>
</outline>
<outline text="Music" title="Music" type="category">
<outline text="capsule" title="capsule"
xmlUrl="http://www.capsule-web.com/" />
</outline>
</body>
</opml>
'''
@fixture
def fx_subscription_list():
return read(SubscriptionList, [XML])
def test_subscription_list_datetime(fx_subscription_list):
expected_datetime = datetime(2005, 6, 18, 12, 11, 52, tzinfo=utc)
assert fx_subscription_list.head.created_at == expected_datetime
assert fx_subscription_list.head.updated_at is None
def test_subscription_list_title(fx_subscription_list):
assert fx_subscription_list.head.title == "Earth Reader's Subscriptions"
assert fx_subscription_list.title == "Earth Reader's Subscriptions"
fx_subscription_list.title = "Hong Minhee's Subscriptions"
assert fx_subscription_list.head.title == "Hong Minhee's Subscriptions"
def test_subscription_list_owner(fx_subscription_list):
assert fx_subscription_list.head.owner_name == 'Earth Reader Team'
assert (fx_subscription_list.head.owner_email ==
'earthreader' '@' 'librelist.com')
assert fx_subscription_list.head.owner_uri == 'http://earthreader.org/'
assert fx_subscription_list.owner == Person(
name='Earth Reader Team',
email='earthreader' '@' 'librelist.com',
uri='http://earthreader.org/'
)
fx_subscription_list.owner = Person(
name='Hong Minhee',
email='minhee' '@' 'dahlia.kr',
uri='http://dahlia.kr/'
)
assert fx_subscription_list.head.owner_name == 'Hong Minhee'
assert fx_subscription_list.head.owner_email == 'minhee' '@' 'dahlia.kr'
assert fx_subscription_list.head.owner_uri == 'http://dahlia.kr/'
fx_subscription_list.owner = None
assert fx_subscription_list.owner is None
assert fx_subscription_list.head.owner_name is None
assert fx_subscription_list.head.owner_email is None
assert fx_subscription_list.head.owner_uri is None
def test_subscription_list_iter(fx_subscription_list):
assert frozenset(fx_subscription_list) == frozenset([
Subscription(label='CNET News.com',
feed_uri='http://news.com/2547-1_3-0-5.xml'),
Subscription(label='test.com', feed_uri='http://test.com/')
])
def test_subscription_list_update(fx_subscription_list):
sub = next(iter(fx_subscription_list))
assert sub.label == 'CNET News.com'
sub.label = 'updated'
assert sub.label == 'updated'
assert next(iter(fx_subscription_list)).label == 'updated'
def test_subscription_feed_id(fx_subscription_list):
test_com = next(s for s in fx_subscription_list if s.label == 'test.com')
assert test_com.feed_id == '2f0bdb1d4987309e304ad0d7f982a37791fb06d4'
cnet = next(s for s in fx_subscription_list if s.label == 'CNET News.com')
assert cnet.feed_id == '95e2b8d3378bc34d13685583528d616f9b8dce1b'
@fixture
def fx_categorized_subscription_list():
return read(SubscriptionList, [XML_CATEGORY])
def test_subscription_list_contains_category(fx_categorized_subscription_list):
subs = fx_categorized_subscription_list
expected = {
Category(label='Game'): frozenset([
Subscription(label='valve', feed_uri='http://valve.com/'),
Subscription(label='nintendo', feed_uri='http://nintendo.com/')
]),
Category(label='Music'): frozenset([
Subscription(label='capsule',
feed_uri='http://www.capsule-web.com/')
])
}
assert frozenset(subs) == frozenset(expected)
for outline in subs:
print(outline.label)
assert outline.type == 'category'
print(list(outline))
assert frozenset(outline) == expected[outline]
def test_subscription_list_category_update(fx_categorized_subscription_list):
subs = fx_categorized_subscription_list
category = next(iter(subs))
category.add(Subscription(label='added', feed_uri='http://example.com/'))
assert len(category) == 3
assert len(next(iter(subs))) == 3
def test_subscription_set_categories_subscriptions():
subs = SubscriptionList()
subs.add(Category(label='Category A'))
subs.add(Subscription(label='Subscription A', feed_uri='http://feeda.com/'))
subs.add(Category(label='Category B'))
subs.add(Subscription(label='Subscription B', feed_uri='http://feedb.com/'))
subs.add(Category(label='Category C'))
subs.add(Subscription(label='Subscription C', feed_uri='http://feedc.com/'))
assert subs.categories == {
'Category A': Category(label='Category A'),
'Category B': Category(label='Category B'),
'Category C': Category(label='Category C')
}
assert subs.subscriptions == frozenset([
Subscription(label='Subscription A', feed_uri='http://feeda.com/'),
Subscription(label='Subscription B', feed_uri='http://feedb.com/'),
Subscription(label='Subscription C', feed_uri='http://feedc.com/')
])
@fixture
def fx_duplicated_subscription_list():
return read(SubscriptionList, [XML_DUPLICATION])
def test_subscription_set_iter_uniqueness(fx_duplicated_subscription_list):
assert len(list(fx_duplicated_subscription_list)) == 1
category = next(iter(fx_duplicated_subscription_list))
assert len(list(category)) == 1
@fixture
def fx_recursive_subscription_list():
return read(SubscriptionList, [XML_RECURSIVE])
def test_recursive_subscription_list(fx_recursive_subscription_list):
assert len(fx_recursive_subscription_list.recursive_subscriptions) == 4
game_category = fx_recursive_subscription_list.categories['Game']
assert len(game_category.recursive_subscriptions) == 3
XML_NO_HEAD = b'''
<opml version="2.0">
<body>
<outline text="CNET News.com" type="rss" version="RSS2"
xmlUrl="http://news.com/2547-1_3-0-5.xml"/>
<outline text="test.com" type="rss" xmlUrl="http://test.com/"/>
</body>
</opml>
'''
@fixture
def fx_headless_subscription_list():
return read(SubscriptionList, [XML_NO_HEAD])
def test_no_head(fx_headless_subscription_list):
subs = fx_headless_subscription_list
assert subs.owner is None
assert subs.title is None
repr(subs) # should not raise AttributeError
def test_no_head_set_title(fx_headless_subscription_list):
fx_headless_subscription_list.title = 'Title'
assert fx_headless_subscription_list.title == 'Title'
assert fx_headless_subscription_list.head.title == 'Title'
def test_no_head_set_owner(fx_headless_subscription_list):
owner = Person(
name='Earth Reader Team',
email='earthreader' '@' 'librelist.com',
uri='http://earthreader.org/'
)
fx_headless_subscription_list.owner = owner
assert fx_headless_subscription_list.owner == owner
assert fx_headless_subscription_list.head.owner_name == owner.name
assert fx_headless_subscription_list.head.owner_email == owner.email
assert fx_headless_subscription_list.head.owner_uri == owner.uri
@mark.parametrize('subs', [
SubscriptionList(),
Category()
])
def test_subscription_set_subscribe(subs):
feed = Feed(
id='urn:earthreader:test:test_subscription_set_subscribe',
title=Text(value='Feed title')
)
feed.links.extend([
Link(uri='http://example.com/index.xml',
relation='self',
mimetype='application/atom+xml'),
Link(uri='http://example.com/',
relation='alternate',
mimetype='text/html')
])
rv = subs.subscribe(feed)
sub = next(iter(subs))
assert rv is sub
assert sub.feed_id == '0691e2f0c3ea1d7fa9da48e14a46ac8077815ad3'
assert sub.label == 'Feed title'
assert sub.feed_uri == 'http://example.com/index.xml'
assert sub.alternate_uri == 'http://example.com/'
def test_stage_subscription_list(fx_repo, fx_session):
stage = Stage(fx_session, fx_repo)
with stage:
stage.subscriptions = SubscriptionList()
subs = stage.subscriptions
subs.add(Category(label='Test'))
stage.subscriptions = subs
with stage:
assert (frozenset(stage.subscriptions) ==
frozenset([Category(label='Test')]))
def test_subscription_set_contains(fx_recursive_subscription_list,
fx_subscription):
tree = fx_recursive_subscription_list
game_c = next(c for c in tree if c.label == 'Game')
riot_c = next(c for c in game_c if c.label == 'Riot')
lol_s = next(s for s in riot_c if s.label == 'LOL')
none_c = Category(label='None')
assert none_c not in tree
assert not tree.contains(none_c)
assert not tree.contains(none_c, recursively=True)
assert fx_subscription not in tree
assert not tree.contains(fx_subscription)
assert not tree.contains(fx_subscription, recursively=True)
assert lol_s not in tree
assert not tree.contains(lol_s)
assert tree.contains(lol_s, recursively=True)
assert riot_c not in tree
assert not tree.contains(riot_c)
assert tree.contains(riot_c, recursively=True)
assert game_c in tree
assert tree.contains(game_c)
assert tree.contains(game_c, recursively=True)
| gpl-2.0 | -5,292,767,241,286,772,000 | 34.582915 | 80 | 0.65358 | false |
etingof/pysnmp | examples/hlapi/v3arch/asyncore/sync/manager/cmdgen/getnext-multiple-oids-to-eom.py | 1 | 1150 | """
Fetch two OID branches
++++++++++++++++++++++
Send a series of SNMP GETNEXT requests using the following options:
* with SNMPv2c, community 'public'
* over IPv4/UDP
* to an Agent at demo.snmplabs.com:161
* for two OIDs in string form
* stop when response OIDs leave the scopes of initial OIDs
Functionally similar to:
| $ snmpwalk -v2c -c public demo.snmplabs.com 1.3.6.1.2.1.2.2.1.2 1.3.6.1.2.1.2.2.1.3
"""#
from pysnmp.hlapi import *
iterator = nextCmd(
SnmpEngine(),
CommunityData('public'),
UdpTransportTarget(('demo.snmplabs.com', 161)),
ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.2.1.2.2.1.2')),
ObjectType(ObjectIdentity('1.3.6.1.2.1.2.2.1.3')),
lexicographicMode=False
)
for errorIndication, errorStatus, errorIndex, varBinds in iterator:
if errorIndication:
print(errorIndication)
break
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex)-1][0] or '?'))
break
else:
for varBind in varBinds:
print(' = '.join([ x.prettyPrint() for x in varBind ]))
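# Illustrative output (values are made up; the exact OID rendering depends on
# which MIBs pysnmp can resolve locally), one "OID = value" line per binding:
#
#   IF-MIB::ifDescr.1 = lo
#   IF-MIB::ifType.1 = softwareLoopback
#   ...
#
# The loop ends once responses fall outside the two requested sub-trees,
# because lexicographicMode=False limits the walk to their scope.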
| bsd-2-clause | 8,306,644,326,331,172,000 | 25.744186 | 85 | 0.636522 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/answers/publisher.py | 1 | 1383 | # Copyright 2010 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Answers's custom publication."""
__metaclass__ = type
__all__ = [
'AnswersBrowserRequest',
'AnswersLayer',
'answers_request_publication_factory',
]
from zope.interface import implements
from zope.publisher.interfaces.browser import (
IBrowserRequest,
IDefaultBrowserLayer,
)
from lp.services.webapp.publication import LaunchpadBrowserPublication
from lp.services.webapp.servers import (
LaunchpadBrowserRequest,
VHostWebServiceRequestPublicationFactory,
)
class AnswersLayer(IBrowserRequest, IDefaultBrowserLayer):
"""The Answers layer."""
class AnswersBrowserRequest(LaunchpadBrowserRequest):
"""Instances of AnswersBrowserRequest provide `AnswersLayer`."""
implements(AnswersLayer)
def __init__(self, body_instream, environ, response=None):
super(AnswersBrowserRequest, self).__init__(
body_instream, environ, response)
# Many of the responses from Answers vary based on language.
self.response.setHeader(
'Vary', 'Cookie, Authorization, Accept-Language')
def answers_request_publication_factory():
return VHostWebServiceRequestPublicationFactory(
'answers', AnswersBrowserRequest, LaunchpadBrowserPublication)
| agpl-3.0 | 8,352,647,769,189,373,000 | 29.733333 | 70 | 0.735358 | false |
NemoNessuno/SecretSanta | models.py | 1 | 4065 | from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, Boolean, Date, Table, Enum
from sqlalchemy.orm import validates, relationship
from db_handler import Base
from password import Password
class User(Base):
__tablename__ = 'users'
email = Column(String(120), unique=True, primary_key=True)
password = Column(Password)
admin = Column(Boolean)
# Or specify a cost factor other than the default 12
# password = Column(Password(rounds=10))
@validates('password')
def _validate_password(self, key, password):
self.validated = getattr(type(self), key).type.validator(password)
return self.validated
def is_active(self):
return True
def get_id(self):
return self.email
def is_authenticated(self):
return self.validated
def is_admin(self):
return self.admin
def is_anonymous(self):
return False
def __init__(self, email=None, admin=False):
self.email = email
self.admin = admin
self.validated = False
def __repr__(self):
return '<User {}>'.format(self.email)
round_questions = Table(
'associations', Base.metadata,
Column('round_id', Integer, ForeignKey('rounds.id')),
Column('question_id', Integer, ForeignKey('questions.id'))
)
class Round(Base):
__tablename__ = 'rounds'
id = Column(Integer, primary_key=True)
running = Column(Boolean)
created_at = Column(Date)
questions = relationship("Question", secondary=round_questions)
def __init__(self, running=True, created_at=datetime.now()):
self.running = running
self.created_at = created_at
def __repr__(self):
return "<Round - Created at: {} Running: {}>".format(self.created_at,
self.running)
class Participation(Base):
__tablename__ = 'participations'
id = Column(Integer, primary_key=True)
round_id = Column(Integer, ForeignKey('rounds.id'))
cur_round = relationship("Round", foreign_keys=[round_id])
description_id = Column(Integer, ForeignKey('descriptions.id'))
description = relationship("Description", foreign_keys=[description_id])
other_description_id = Column(Integer, ForeignKey('descriptions.id'))
other_description = relationship("Description", foreign_keys=[other_description_id])
eligible = Column(Boolean)
def __init__(self, cur_round=None, description=None, other_description=None, eligible=False):
self.cur_round = cur_round
self.description = description
self.other_description = other_description
self.eligible = eligible
def __repr__(self):
return "<Participation {}: Round: {} Eligible: {}>".format(self.id, self.round_id, self.eligible)
class Description(Base):
__tablename__ = 'descriptions'
id = Column('id', Integer, primary_key=True)
user_id = Column(String(120), ForeignKey('users.email'))
user = relationship("User", foreign_keys=[user_id])
answers = []
def __init__(self, user=None, questions=None):
if questions is None:
questions = []
self.user = user
self.questions = questions
class Question(Base):
__tablename__ = 'questions'
id = Column('id', Integer, primary_key=True)
text = Column(String(512))
q_type = Column('type', Enum('text', 'image', 'sound'))
def __init__(self, text=None, q_type='text'):
self.text = text
self.q_type = q_type
class Answer(Base):
__tablename__ = 'answers'
id = Column('id', Integer, primary_key=True)
description_id = Column(Integer, ForeignKey('descriptions.id'))
description = relationship("Description", foreign_keys=[description_id])
question_id = Column(Integer, ForeignKey('questions.id'))
question = relationship("Question", foreign_keys=[question_id])
text = Column(String(256))
def __init__(self, description=None, question=None, text=None):
self.description = description
self.question = question
self.text = text
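# A rough usage sketch (session setup and field values are assumed, not part
# of this module): a Round groups Questions via the round_questions table, a
# player's Description collects Answers to those Questions, and a
# Participation ties a Round to a player's Description and, optionally, to
# another Description (other_description).
#
#   new_round = Round()
#   new_round.questions.append(Question(text="Favourite colour?"))
#   desc = Description(user=some_user, questions=new_round.questions)
#   answer = Answer(description=desc, question=new_round.questions[0], text="Red")
#   part = Participation(cur_round=new_round, description=desc)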
| mit | 8,756,999,794,135,497,000 | 30.511628 | 105 | 0.650431 | false |
pybel/pybel | src/pybel/struct/query/seeding.py | 1 | 4933 | # -*- coding: utf-8 -*-
"""Query builder."""
import json
import logging
import random
from collections import UserList
from typing import Any, Dict, List, Set, TextIO, Union
from .constants import SEED_TYPE_ANNOTATION, SEED_TYPE_INDUCTION, SEED_TYPE_NEIGHBORS, SEED_TYPE_SAMPLE
from .selection import get_subgraph
from ...dsl import BaseEntity
from ...struct import union
from ...tokens import parse_result_to_dsl
logger = logging.getLogger(__name__)
SEED_METHOD = 'type'
SEED_DATA = 'data'
MaybeNodeList = Union[BaseEntity, List[BaseEntity], List[Dict]]
class Seeding(UserList):
"""Represents a container of seeding methods to apply to a network."""
def append_induction(self, nodes: MaybeNodeList) -> 'Seeding':
"""Add a seed induction method.
:param nodes: A node or list of nodes
:returns: self for fluid API
"""
return self._append_seed_handle_nodes(SEED_TYPE_INDUCTION, nodes)
def append_neighbors(self, nodes: MaybeNodeList) -> 'Seeding':
"""Add a seed by neighbors.
:param nodes: A node or list of nodes
:returns: self for fluid API
"""
return self._append_seed_handle_nodes(SEED_TYPE_NEIGHBORS, nodes)
def append_annotation(self, annotation: str, values: Set[str]) -> 'Seeding':
"""Add a seed induction method for single annotation's values.
:param annotation: The annotation to filter by
:param values: The values of the annotation to keep
:returns: self for fluid API
"""
return self._append_seed(
SEED_TYPE_ANNOTATION, {
'annotations': {
annotation: values,
},
},
)
def append_sample(self, **kwargs) -> 'Seeding':
"""Add seed induction methods.
Kwargs can have ``number_edges`` or ``number_seed_nodes``.
:returns: self for fluid API
"""
data = {
'seed': random.randint(0, 1000000),
}
data.update(kwargs)
return self._append_seed(SEED_TYPE_SAMPLE, data)
def _append_seed(self, seed_type: str, data: Any) -> 'Seeding':
"""Add a seeding method and returns self.
:returns: self for fluid API
"""
self.append({
SEED_METHOD: seed_type,
SEED_DATA: data,
})
return self
def _append_seed_handle_nodes(self, seed_type: str, nodes: MaybeNodeList) -> 'Seeding':
"""Add a seeding method and returns self.
:param seed_type: The seed type
:param nodes: A node or list of nodes
:returns: self for fluid API
"""
return self._append_seed(seed_type, _handle_nodes(nodes))
def run(self, graph):
"""Seed the graph or return none if not possible.
:type graph: pybel.BELGraph
:rtype: Optional[pybel.BELGraph]
"""
if not self:
logger.debug('no seeding, returning graph: %s', graph)
return graph
subgraphs = []
for seed in self:
seed_method, seed_data = seed[SEED_METHOD], seed[SEED_DATA]
logger.debug('seeding with %s: %s', seed_method, seed_data)
subgraph = get_subgraph(graph, seed_method=seed_method, seed_data=seed_data)
if subgraph is None:
logger.debug('seed returned empty graph: %s', seed)
continue
subgraphs.append(subgraph)
if not subgraphs:
logger.debug('no subgraphs returned')
return
return union(subgraphs)
def to_json(self) -> List[Dict]:
"""Serialize this seeding container to a JSON object."""
return list(self)
def dump(self, file, sort_keys: bool = True, **kwargs) -> None:
"""Dump this seeding container to a file as JSON."""
json.dump(self.to_json(), file, sort_keys=sort_keys, **kwargs)
def dumps(self, sort_keys: bool = True, **kwargs) -> str:
"""Dump this query to a string as JSON."""
return json.dumps(self.to_json(), sort_keys=sort_keys, **kwargs)
@staticmethod
def from_json(data) -> 'Seeding':
"""Build a seeding container from a JSON list."""
return Seeding(data)
@staticmethod
def load(file: TextIO) -> 'Seeding':
"""Load a seeding container from a JSON file."""
return Seeding.from_json(json.load(file))
@staticmethod
def loads(s: str) -> 'Seeding':
"""Load a seeding container from a JSON string."""
return Seeding.from_json(json.loads(s))
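# A minimal usage sketch (the graph, node and annotation values below are
# assumed, not taken from this module):
#
#     seeding = Seeding()
#     seeding.append_neighbors(node)                  # node is a BaseEntity
#     seeding.append_annotation('Cell', {'neuron'})   # keep one annotation value
#     subgraph = seeding.run(graph)                   # may be None if no seed matched
#     serialized = seeding.dumps()                    # round-trips via Seeding.loads()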
def _handle_nodes(nodes: MaybeNodeList) -> List[BaseEntity]:
"""Handle node(s) that might be dictionaries."""
if isinstance(nodes, BaseEntity):
return [nodes]
return [
(
parse_result_to_dsl(node)
if not isinstance(node, BaseEntity) else
node
)
for node in nodes
]
| mit | -4,059,376,988,022,714,000 | 29.263804 | 103 | 0.595986 | false |
CodeNameGhost/shiva | thirdparty/scapy/layers/pflog.py | 1 | 2921 | ## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <[email protected]>
## This program is published under a GPLv2 license
"""
PFLog: OpenBSD PF packet filter logging.
"""
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet import IP
if conf.ipv6_enabled:
from scapy.layers.inet6 import IPv6
from scapy.config import conf
class PFLog(Packet):
name = "PFLog"
# from OpenBSD src/sys/net/pfvar.h and src/sys/net/if_pflog.h
fields_desc = [ ByteField("hdrlen", 0),
ByteEnumField("addrfamily", 2, {socket.AF_INET: "IPv4",
socket.AF_INET6: "IPv6"}),
ByteEnumField("action", 1, {0: "pass", 1: "drop",
2: "scrub", 3: "no-scrub",
4: "nat", 5: "no-nat",
6: "binat", 7: "no-binat",
8: "rdr", 9: "no-rdr",
10: "syn-proxy-drop" }),
ByteEnumField("reason", 0, {0: "match", 1: "bad-offset",
2: "fragment", 3: "short",
4: "normalize", 5: "memory",
6: "bad-timestamp",
7: "congestion",
8: "ip-options",
9: "proto-cksum",
10: "state-mismatch",
11: "state-insert",
12: "state-limit",
13: "src-limit",
14: "syn-proxy" }),
StrFixedLenField("iface", "", 16),
StrFixedLenField("ruleset", "", 16),
SignedIntField("rulenumber", 0),
SignedIntField("subrulenumber", 0),
SignedIntField("uid", 0),
IntField("pid", 0),
SignedIntField("ruleuid", 0),
IntField("rulepid", 0),
ByteEnumField("direction", 255, {0: "inout", 1: "in",
2:"out", 255: "unknown"}),
StrFixedLenField("pad", b"\x00\x00\x00", 3 ) ]
def mysummary(self):
return self.sprintf("%PFLog.addrfamily% %PFLog.action% on %PFLog.iface% by rule %PFLog.rulenumber%")
bind_layers(PFLog, IP, addrfamily=socket.AF_INET)
if conf.ipv6_enabled:
bind_layers(PFLog, IPv6, addrfamily=socket.AF_INET6)
conf.l2types.register(117, PFLog)
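# Illustrative use (file name is assumed): captures taken from an OpenBSD
# pflog(4) interface use DLT_PFLOG (linktype 117), so after the registration
# above they dissect straight into PFLog / IP layers:
#
#   from scapy.all import rdpcap
#   pkts = rdpcap("pflog0.pcap")
#   pkts[0].summary()   # e.g. "IPv4 drop on em0 by rule 3"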
| mit | 6,969,530,504,792,507,000 | 48.508475 | 108 | 0.412872 | false |
atumanov/ray | python/ray/tests/utils.py | 1 | 2682 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
import sys
import tempfile
import time
import ray
def _pid_alive(pid):
"""Check if the process with this PID is alive or not.
Args:
pid: The pid to check.
Returns:
This returns false if the process is dead. Otherwise, it returns true.
"""
try:
os.kill(pid, 0)
return True
except OSError:
return False
def wait_for_pid_to_exit(pid, timeout=20):
start_time = time.time()
while time.time() - start_time < timeout:
if not _pid_alive(pid):
return
time.sleep(0.1)
raise Exception("Timed out while waiting for process to exit.")
def run_and_get_output(command):
with tempfile.NamedTemporaryFile() as tmp:
p = subprocess.Popen(command, stdout=tmp, stderr=tmp)
if p.wait() != 0:
raise RuntimeError("ray start did not terminate properly")
with open(tmp.name, "r") as f:
result = f.readlines()
return "\n".join(result)
def run_string_as_driver(driver_script):
"""Run a driver as a separate process.
Args:
driver_script: A string to run as a Python script.
Returns:
The script's output.
"""
# Save the driver script as a file so we can call it using subprocess.
with tempfile.NamedTemporaryFile() as f:
f.write(driver_script.encode("ascii"))
f.flush()
out = ray.utils.decode(
subprocess.check_output([sys.executable, f.name]))
return out
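# Illustrative use (the script body is made up): the helper writes the string
# to a temporary file, runs it with the current interpreter and returns
# whatever the driver printed.
#
#   out = run_string_as_driver("print('hello from a separate driver')")
#   assert "hello from a separate driver" in out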
def run_string_as_driver_nonblocking(driver_script):
"""Start a driver as a separate process and return immediately.
Args:
driver_script: A string to run as a Python script.
Returns:
A handle to the driver process.
"""
# Save the driver script as a file so we can call it using subprocess. We
# do not delete this file because if we do then it may get removed before
# the Python process tries to run it.
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(driver_script.encode("ascii"))
f.flush()
return subprocess.Popen(
[sys.executable, f.name], stdout=subprocess.PIPE)
def relevant_errors(error_type):
return [info for info in ray.errors() if info["type"] == error_type]
def wait_for_errors(error_type, num_errors, timeout=10):
start_time = time.time()
while time.time() - start_time < timeout:
if len(relevant_errors(error_type)) >= num_errors:
return
time.sleep(0.1)
raise Exception("Timing out of wait.")
| apache-2.0 | -7,067,280,162,942,937,000 | 26.9375 | 78 | 0.636465 | false |
ntoll/checklistDSL | checklistdsl/lex.py | 1 | 2961 | """
Tokeniser for a checklist DSL. (I was using Ply, but it sucked and this is a
*lot* simpler.)
(c) 2012 Nicholas H.Tollervey
"""
import re
class Token(object):
"""
Represents a token matched by the lexer.
"""
def __init__(self, token, value, roles=None, size=None):
"""
token - the type of token this is.
value - the matched value.
roles - named roles who have authority to action the item.
size - the "size" of the heading. 1 = big, 6 = small.
"""
self.token = token
self.value = value
self.roles = roles
self.size = size
def __repr__(self):
return '%s: "%s"' % (self.token, self.value)
"""
A dictionary that contains the regex used to match tokens and the associated
token types.
"""
MATCHER = {
# == Heading == (becomes an h* element where * is number of equal signs)
'(?P<depth_start>=+)(?P<value>[^=]+)(?P<depth_end>=+)': 'HEADING',
# // This is a comment (ignored)
'\/\/(?P<value>.*)': 'COMMENT',
# [] item 1 (becomes a check box)
'\[\] *(?P<roles>{.*}|) *(?P<value>.*)': 'AND_ITEM',
# () item 1 (becomes a radio button)
'\(\) *(?P<roles>{.*}|) *(?P<value>.*)': 'OR_ITEM',
# --- (becomes an <hr/>)
'^-{3,}$': 'BREAK',
# Some text (becomes a <p>)
'(?P<value>[^=\/\[\(].*)': 'TEXT'
}
def get_tokens(data):
"""
Given some raw data will return a list of matched tokens. An example of the
simplest possible lexer.
"""
result = []
# Split on newline and throw away empty (un-needed) lines
split_by_lines = [line.strip() for line in data.split('\n')
if line.strip()]
for line in split_by_lines:
for regex in MATCHER.keys():
match = re.match(regex, line)
if match:
# Grab the named groups.
val = match.groupdict().get('value', '').strip()
roles = match.groupdict().get('roles', '').replace(
'{', '').replace('}', '').strip()
depth_start = match.groupdict().get('depth_start', '')
# Post process roles.
if roles:
roles = [role.lower().strip() for role in roles.split(',')]
else:
roles = None
# Post process depth_start to give the size of the heading.
if depth_start:
size = len(depth_start)
else:
size = None
# Instantiate the token depending on the match for the val
# named group.
if val:
token = Token(MATCHER[regex], val, roles=roles, size=size)
else:
token = Token(MATCHER[regex], match.string)
# Ignore comments
if token.token != 'COMMENT':
result.append(token)
break
return result
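# Illustrative example (the checklist text is made up):
#
#   tokens = get_tokens("== Shopping ==\n"
#                       "// market closes at noon\n"
#                       "[] {alice, bob} buy milk\n"
#                       "() take the car\n"
#                       "Anything else becomes a paragraph\n")
#
# would yield a HEADING token (value "Shopping", size 2), an AND_ITEM token
# (value "buy milk", roles ['alice', 'bob']), an OR_ITEM token and a TEXT
# token; the comment line is matched but dropped.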
| mit | 6,374,691,532,482,705,000 | 32.269663 | 79 | 0.504222 | false |
jepler/linuxcnc-mirror | share/gscreen/skins/spartan/spartan_handler.py | 5 | 20745 | # This is a component of LinuxCNC
# Copyright 2013 Chris Morley <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#############################################################################
# This is a gscreen skin customized for a Bridgeport Interact Mill that used a
# Heidenhain TNC 151a controller.
# Chris Brady Oct 2015
#
import hal
import gtk
import gladevcp.makepins # needed for the dialog's calculator widget
import pango
import hal_glib
_MAN = 0
_MDI = 1
_AUTO = 2
_LOCKTOGGLE = 1
# This is a handler file for using Gscreen's infrastructure
# to load a completely custom glade screen
# The only thing that really matters is that it's saved as a GTK builder project,
# the toplevel window is called window1 (the default name) and you connect a destroy
# window signal, else you can't close down linuxcnc
class HandlerClass:
# This will be pretty standard to gain access to everything
# emc is for control and status of linuxcnc
# data is important data from gscreen and linuxcnc
# widgets is all the widgets from the glade files
# gscreen is for access to gscreens methods
def __init__(self, halcomp,builder,useropts,gscreen):
self.emc = gscreen.emc
self.data = gscreen.data
self.widgets = gscreen.widgets
self.gscreen = gscreen
# Hide all menus at the bottom of the default gscreen page
self.widgets.mode0.hide()
self.widgets.mode1.hide()
self.widgets.mode2.hide()
self.widgets.mode3.hide()
self.widgets.mode4.hide()
self.widgets.button_mode.hide()
self.widgets.diameter_mode.hide()
self.widgets.aux_coolant_m7.hide()
self.widgets.aux_coolant_m8.hide()
self.widgets.show_dtg.hide()
self.widgets.diameter_mode.hide()
self.widgets.button_flood.hide()
self.widgets.button_run.hide()
# Initialize variables
self.data.lathe_mode=False
self.data.graphic_ypos=0
self.data.graphic_xpos=0
self.data.view=0
self.data.homed=0
self.data.jog_rates=[30,50,80,120]
self.data.jog_incrs=[0.0002,0.001,0.01,0.1]
self.data.jog_rate_idx=2
self.widgets.jog_r1.set_label("%5.4f"% self.data.jog_incrs[0])
self.widgets.jog_r2.set_label("%4.3f"% self.data.jog_incrs[1])
self.widgets.jog_r3.set_label("%3.2f"% self.data.jog_incrs[2])
self.widgets.jog_r4.set_label("%2.1f"% self.data.jog_incrs[3])
self.widgets.button_mode.hide()
self.widgets.button_home_all.hide()
    # every 100 milliseconds this gets called
    # we add calls to the regular functions for the widgets we are using,
    # and add any extra calls/code
def periodic(self):
self.gscreen.update_dro()
self.gscreen.update_active_gcodes()
self.gscreen.update_active_mcodes()
self.gscreen.update_feed_speed_label()
self.gscreen.update_tool_label()
self.update_estop_led()
self.gscreen.update_machine_on_led()
self.gscreen.update_jog_rate_label()
self.gscreen.update_mode_label()
self.gscreen.update_units_button_label()
self.update_override_label()
self.update_spindle()
def update_spindle(self):
# Actual speed from hal
# Limit speed representation to 1 decimal point
        speed = int(self.gscreen.halcomp["spindle-spd-disp"]*10)/10.0
self.widgets.meter_spindle_speed.set_property("value", speed)
# Initialize hal pins that we need access to
def initialize_pins(self):
self.gscreen.init_spindle_pins()
self.gscreen.init_coolant_pins()
self.gscreen.init_jog_pins()
self.gscreen.init_override_pins()
self.gscreen.init_control_pins()
self.gscreen.halcomp.newpin("spindle-spd-disp", hal.HAL_FLOAT, hal.HAL_IN)
self.gscreen.halcomp.newpin("jog-spd-out", hal.HAL_FLOAT, hal.HAL_OUT)
self.gscreen.halcomp.newpin("jog-inc-out", hal.HAL_FLOAT, hal.HAL_OUT)
self.data['ext-estop'] = hal_glib.GPin(self.gscreen.halcomp.newpin('ext-estop', hal.HAL_BIT, hal.HAL_IN))
self.data['ext-estop'].connect('value-changed', self.on_estop_in)
self.data['enc-fault-x'] = hal_glib.GPin(self.gscreen.halcomp.newpin('enc-fault-x', hal.HAL_BIT, hal.HAL_IN))
self.data['enc-fault-x'].connect('value-changed', self.on_x_enc_fault)
self.data['enc-fault-y'] = hal_glib.GPin(self.gscreen.halcomp.newpin('enc-fault-y', hal.HAL_BIT, hal.HAL_IN))
self.data['enc-fault-y'].connect('value-changed', self.on_y_enc_fault)
        self.data['enc-fault-z'] = hal_glib.GPin(self.gscreen.halcomp.newpin('enc-fault-z', hal.HAL_BIT, hal.HAL_IN))
        self.data['enc-fault-z'].connect('value-changed', self.on_z_enc_fault)
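    # Illustrative HAL hook-up for the pins created above (the component name
    # and the source pins are assumptions; they depend on the machine's HAL
    # files):
    #
    #   net estop-ext    gscreen.ext-estop    <=  <your-estop-input-pin>
    #   net x-enc-fault  gscreen.enc-fault-x  <=  <your-x-encoder-fault-pin>
    #   net spindle-fb   gscreen.spindle-spd-disp  <=  <your-spindle-speed-signal>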
def on_emc_off(self,*args):
self.widgets.button_clear.show()
self.widgets.button_mode.hide()
self.widgets.button_home_all.hide()
# Force mode to manual
self.data.mode_order = (self.data._MAN,self.data._MDI,self.data._AUTO)
label = self.data.mode_labels
self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
self.mode_changed(self.data.mode_order[0])
def on_btn_clear(self,widget):
if self.gscreen.halcomp["ext-estop"] == False:
self.emc.estop_reset(1)
self.emc.machine_on(1)
self.widgets.button_clear.hide()
self.widgets.button_home_all.show()
def on_estop_in(self,widget):
self.widgets.mode0.hide()
if self.gscreen.halcomp["ext-estop"] == True:
self.emc.estop_reset(1)
self.emc.machine_on(1)
else:
self.emc.machine_off(1)
self.emc.estop(1)
def update_estop_led(self):
if self.data.estopped:
self.widgets.led_estop.set_active(False)
else:
self.widgets.led_estop.set_active(True)
def on_x_enc_fault(self,hal_object):
print"X Encoder Fault"
self.gscreen.add_alarm_entry(_("X Axis Encoder Error"))
def on_y_enc_fault(self,hal_object):
print"Y Encoder Fault"
self.gscreen.add_alarm_entry(_("Y Axis Encoder Error"))
def on_z_enc_fault(self,hal_object):
print"Z Encoder Fault"
self.gscreen.add_alarm_entry(_("Z Axis Encoder Error"))
def homing(self,*args):
self.mode_changed(self.data._MAN)
self.widgets.button_mode.hide()
self.widgets.button_home_all.show()
self.widgets.button_move_to.set_sensitive(0)
def on_hal_status_all_homed(self,widget):
self.gscreen.on_hal_status_all_homed(1)
self.data.homed=1
self.widgets.button_home_all.hide()
self.widgets.button_mode.show()
self.widgets.jog_r3.set_active(1)
self.on_jog_rate(self.widgets.jog_r3)
self.gscreen.sensitize_widgets(self.data.sensitive_all_homed,1)
self.widgets.button_move_to.set_sensitive(1)
def on_interp_run(self,*args):
self.gscreen.sensitize_widgets(self.data.sensitive_run_idle,False)
self.widgets.button_reload.set_sensitive(0)
def on_interp_idle(self,widget):
self.gscreen.on_hal_status_interp_idle(widget)
self.widgets.button_reload.set_sensitive(1)
def on_jog_rate(self,widget):
if widget == self.widgets.jog_r1:
self.data.jog_rate_idx=0
speed = self.data.jog_rates[0]
self.widgets.jog_r2.set_active(0)
self.widgets.jog_r3.set_active(0)
self.widgets.jog_r4.set_active(0)
self.gscreen.halcomp["jog-spd-out"] = speed
self.gscreen.halcomp["jog-inc-out"] = self.data.jog_incrs[0]
elif widget == self.widgets.jog_r2:
self.data.jog_rate_idx=1
speed = self.data.jog_rates[1]
self.widgets.jog_r1.set_active(0)
self.widgets.jog_r3.set_active(0)
self.widgets.jog_r4.set_active(0)
self.gscreen.halcomp["jog-spd-out"] = speed
self.gscreen.halcomp["jog-inc-out"] = self.data.jog_incrs[1]
elif widget == self.widgets.jog_r3:
self.data.jog_rate_idx=2
speed = self.data.jog_rates[2]
self.widgets.jog_r1.set_active(0)
self.widgets.jog_r2.set_active(0)
self.widgets.jog_r4.set_active(0)
self.gscreen.halcomp["jog-spd-out"] = speed
self.gscreen.halcomp["jog-inc-out"] = self.data.jog_incrs[2]
elif widget == self.widgets.jog_r4:
self.data.jog_rate_idx=3
speed = self.data.jog_rates[3]
self.widgets.jog_r1.set_active(0)
self.widgets.jog_r2.set_active(0)
self.widgets.jog_r3.set_active(0)
self.gscreen.halcomp["jog-spd-out"] = speed
self.gscreen.halcomp["jog-inc-out"] = self.data.jog_incrs[3]
def jog_point1(self,widget):
if self.data.mode_order[0] == self.data._MAN: # if in manual mode
print "jog point1"
if widget == self.widgets.jog_plus:
self.do_jog(True,True)
else:
self.do_jog(False,True)
def do_jog(self,direction,action):
# if manual mode, if jogging
# if only one axis button pressed
# jog positive at selected rate
if self.data.mode_order[0] == self.data._MAN:
if len(self.data.active_axis_buttons) > 1:
print self.data.active_axis_buttons
elif self.data.active_axis_buttons[0][0] == None:
self.gscreen.homed_status_message = self.widgets.statusbar1.push(1,"No axis selected to jog")
else:
if not self.data.active_axis_buttons[0][0] == "s":
if not action: cmd = 0
elif direction: cmd = 1
else: cmd = -1
self.emc.jogging(1)
jogincr = self.data.jog_incrs[self.data.jog_rate_idx]
self.emc.incremental_jog(self.data.active_axis_buttons[0][1],cmd,jogincr)
def on_mode_clicked(self,widget,event):
# only change machine modes on click
if event.type == gtk.gdk.BUTTON_PRESS:
a,b,c = self.data.mode_order
self.data.mode_order = b,c,a
label = self.data.mode_labels
self.widgets.button_mode.set_label(label[self.data.mode_order[0]])
self.mode_changed(self.data.mode_order[0])
def mode_changed(self,mode):
print "Mode Change", mode
if mode == self.data._MAN:
self.widgets.notebook_mode.hide()
self.widgets.hal_mdihistory.hide()
self.widgets.dro_frame.show()
self.widgets.vmode0.show()
self.widgets.vmode1.hide()
self.widgets.button_run.set_active(0)
self.widgets.button_jog_mode.set_active(1)
self.widgets.button_view.emit("clicked")
elif mode == self.data._MDI:
if self.data.plot_hidden:
self.toggle_offset_view()
self.emc.set_mdi_mode()
self.widgets.hal_mdihistory.show()
self.widgets.vmode0.show()
self.widgets.vmode1.hide()
self.widgets.button_run.set_active(0)
self.widgets.notebook_mode.hide()
self.widgets.button_jog_mode.set_active(0)
elif mode == self.data._AUTO:
self.widgets.vmode0.hide()
self.widgets.vmode1.show()
self.widgets.button_run.set_active(0)
if self.data.full_graphics:
self.widgets.notebook_mode.hide()
else:
self.widgets.notebook_mode.show()
self.widgets.hal_mdihistory.hide()
self.widgets.button_jog_mode.set_active(0)
def on_button_flood(self,widget):
if self.widgets.button_flood.get_active():
self.gscreen.halcomp["aux-coolant-m8-out"] = True
else:
self.gscreen.halcomp["aux-coolant-m8-out"] = False
def on_ign_toolc_pressed(self, widget):
data = widget.get_active()
def on_tool_change(self,widget):
if self.widgets.ignore_toolchange.get_active() == True:
self.gscreen.halcomp["tool-changed"] = True
else:
h = self.gscreen.halcomp
c = h['change-tool']
n = h['tool-number']
cd = h['tool-changed']
print "tool change",c,cd,n
if c:
message = _("Please change to tool # %s, then click OK."% n)
self.gscreen.warning_dialog(message, True,pinname="TOOLCHANGE")
else:
h['tool-changed'] = False
def on_button_edit_clicked(self,widget):
state = widget.get_active()
if not state:
self.gscreen.edited_gcode_check()
self.widgets.notebook_main.set_current_page(0)
self.widgets.notebook_main.set_show_tabs(not (state))
self.edit_mode(state)
if state:
self.widgets.search_box.show()
else:
self.widgets.search_box.hide()
def edit_mode(self,data):
print "edit mode pressed",data
self.gscreen.sensitize_widgets(self.data.sensitive_edit_mode,not data)
if data:
self.widgets.mode6.show()
self.widgets.dro_frame.hide()
self.widgets.gcode_view.set_sensitive(1)
self.data.edit_mode = True
self.widgets.show_box.hide()
self.widgets.notebook_mode.show()
self.widgets.display_btns.hide()
else:
self.widgets.mode6.hide()
self.widgets.dro_frame.show()
self.widgets.gcode_view.set_sensitive(0)
self.data.edit_mode = False
self.widgets.show_box.show()
self.widgets.display_btns.show()
def on_button_full_view_clicked(self,widget):
self.set_full_graphics_view(widget.get_active())
def on_manual_spindle(self,widget):
if self.data.mode_order[0] == self.data._AUTO:
return
if self.widgets.button_man_spindle.get_active():
self.widgets.button_man_spindle.set_label("Stop")
self.emc.spindle_forward(1,self.data.spindle_start_rpm)
else:
print "Spindle stop"
self.widgets.button_man_spindle.set_label("Start")
self.emc.spindle_off(1)
def on_spindle_plus(self,widget):
if self.data.mode_order[0] != self.data._AUTO:
self.emc.spindle_faster(1)
def on_spindle_minus(self,widget):
if self.data.mode_order[0] != self.data._AUTO:
self.emc.spindle_slower(1)
def on_view_change(self,widget):
mode = self.data.mode_order[0]
if mode == self.data._AUTO:
self.data.view = self.data.view+1
if self.data.view > 3:
self.data.view = 0
view = self.data.view
else:
view = 0
print "view", view
if view == 0:
# Gremlin + Gcode + DRO
self.data.full_graphics = False
self.widgets.show_box.show()
if mode == self.data._AUTO:
self.widgets.notebook_mode.show()
self.widgets.dro_frame.show()
self.widgets.display_btns.show()
self.widgets.gremlin.set_property('enable_dro',False)
elif view == 1:
# Gremlin style DRO
self.data.full_graphics = True
self.widgets.show_box.show()
self.widgets.notebook_mode.hide()
self.widgets.dro_frame.hide()
self.widgets.gremlin.set_property('enable_dro',True)
elif view == 2:
# Gremlin + DRO
self.data.full_graphics = True
self.widgets.dro_frame.show()
self.widgets.notebook_mode.hide()
self.widgets.show_box.show()
self.widgets.gremlin.set_property('enable_dro',False)
elif view == 3:
# DRO + Gcode
self.data.full_graphics = False
self.widgets.dro_frame.show()
if mode == self.data._AUTO:
self.widgets.notebook_mode.show()
self.widgets.gcode_view.set_sensitive(0)
self.widgets.show_box.hide()
self.widgets.display_btns.hide()
self.widgets.gremlin.set_property('enable_dro',False)
def update_override_label(self):
self.widgets.fo.set_text("FO: %3d%%"%(round(self.data.feed_override,2)*100))
self.widgets.mv.set_text("RO: %3d%%"%(round(self.data.rapid_override,2)*100))
self.widgets.so.set_text("SO: %3d%%"%(round(self.data.spindle_override,2)*100))
# Gremlin display buttons
def on_d_zoomp_pressed(self,widget):
self.widgets.gremlin.zoom_in()
def on_d_zoomm_pressed(self,widget):
self.widgets.gremlin.zoom_out()
def on_d_up_pressed(self,widget):
self.data.graphic_ypos = self.data.graphic_ypos-8
self.widgets.gremlin.pan(self.data.graphic_xpos,self.data.graphic_ypos)
def on_d_down_pressed(self,widget):
self.data.graphic_ypos = self.data.graphic_ypos+8
self.widgets.gremlin.pan(self.data.graphic_xpos,self.data.graphic_ypos)
def on_d_right_pressed(self,widget):
self.data.graphic_xpos = self.data.graphic_xpos+8
self.widgets.gremlin.pan(self.data.graphic_xpos,self.data.graphic_ypos)
def on_d_left_pressed(self,widget):
self.data.graphic_xpos = self.data.graphic_xpos-8
self.widgets.gremlin.pan(self.data.graphic_xpos,self.data.graphic_ypos)
# Connect to gscreens regular signals and add a couple more
def connect_signals(self,handlers):
self.gscreen.connect_signals(handlers)
# connect to handler file callbacks:
self.gscreen.widgets.d_zoomp.connect("clicked", self.on_d_zoomp_pressed)
self.gscreen.widgets.d_zoomm.connect("clicked", self.on_d_zoomm_pressed)
self.gscreen.widgets.d_up.connect("clicked", self.on_d_up_pressed)
self.gscreen.widgets.d_down.connect("clicked", self.on_d_down_pressed)
self.gscreen.widgets.d_left.connect("clicked", self.on_d_left_pressed)
self.gscreen.widgets.d_right.connect("clicked", self.on_d_right_pressed)
self.gscreen.widgets.button_man_spindle.connect("clicked", self.on_manual_spindle)
self.gscreen.widgets.button_spindle_plus.connect("clicked", self.on_spindle_plus)
self.gscreen.widgets.button_spindle_minus.connect("clicked", self.on_spindle_minus)
self.gscreen.widgets.button_view.connect("clicked", self.on_view_change)
self.gscreen.widgets.button_mode.connect("button_press_event", self.on_mode_clicked)
self.gscreen.widgets.button_edit.connect("clicked", self.on_button_edit_clicked)
self.gscreen.widgets.button_flood.connect("clicked", self.on_button_flood)
self.gscreen.widgets.ignore_toolchange.connect("clicked", self.on_ign_toolc_pressed)
self.gscreen.widgets.jog_r1.connect("pressed", self.on_jog_rate)
self.gscreen.widgets.jog_r2.connect("pressed", self.on_jog_rate)
self.gscreen.widgets.jog_r3.connect("pressed", self.on_jog_rate)
self.gscreen.widgets.jog_r4.connect("pressed", self.on_jog_rate)
self.gscreen.widgets.jog_plus.connect("clicked", self.jog_point1)
self.gscreen.widgets.jog_minus.connect("clicked", self.jog_point1)
self.gscreen.widgets.button_homing.connect("clicked", self.homing)
self.widgets.hal_status.connect("all-homed",self.on_hal_status_all_homed)
self.widgets.hal_status.connect("state-off",self.on_emc_off)
self.gscreen.widgets.button_clear.connect("clicked", self.on_btn_clear)
self.widgets.hal_status.connect("interp-idle",self.on_interp_idle)
self.widgets.hal_status.connect("interp-run",self.on_interp_run)
# standard handler call
def get_handlers(halcomp,builder,useropts,gscreen):
return [HandlerClass(halcomp,builder,useropts,gscreen)]
| lgpl-2.1 | -4,601,279,397,498,643,000 | 43.326923 | 117 | 0.618752 | false |
dufferzafar/mitmproxy | mitmproxy/console/master.py | 1 | 23553 | from __future__ import absolute_import, print_function, division
import mailcap
import mimetypes
import os
import os.path
import shlex
import signal
import stat
import subprocess
import sys
import tempfile
import traceback
import weakref
import six
import urwid
from typing import Optional # noqa
from mitmproxy import builtins
from mitmproxy import contentviews
from mitmproxy import controller
from mitmproxy import exceptions
from mitmproxy import flow
from mitmproxy import script
from mitmproxy import utils
import mitmproxy.options
from mitmproxy.console import flowlist
from mitmproxy.console import flowview
from mitmproxy.console import grideditor
from mitmproxy.console import help
from mitmproxy.console import options
from mitmproxy.console import palettepicker
from mitmproxy.console import palettes
from mitmproxy.console import signals
from mitmproxy.console import statusbar
from mitmproxy.console import window
from mitmproxy.filt import FMarked
from netlib import tcp, strutils
EVENTLOG_SIZE = 500
class ConsoleState(flow.State):
def __init__(self):
flow.State.__init__(self)
self.focus = None
self.follow_focus = None
self.default_body_view = contentviews.get("Auto")
self.flowsettings = weakref.WeakKeyDictionary()
self.last_search = None
self.last_filter = ""
self.mark_filter = False
def __setattr__(self, name, value):
self.__dict__[name] = value
signals.update_settings.send(self)
def add_flow_setting(self, flow, key, value):
d = self.flowsettings.setdefault(flow, {})
d[key] = value
def get_flow_setting(self, flow, key, default=None):
d = self.flowsettings.get(flow, {})
return d.get(key, default)
def add_flow(self, f):
super(ConsoleState, self).add_flow(f)
self.update_focus()
return f
def update_flow(self, f):
super(ConsoleState, self).update_flow(f)
self.update_focus()
return f
def set_limit(self, limit):
ret = super(ConsoleState, self).set_limit(limit)
self.set_focus(self.focus)
return ret
def get_focus(self):
if not self.view or self.focus is None:
return None, None
return self.view[self.focus], self.focus
def set_focus(self, idx):
if self.view:
if idx is None or idx < 0:
idx = 0
elif idx >= len(self.view):
idx = len(self.view) - 1
self.focus = idx
else:
self.focus = None
def update_focus(self):
if self.focus is None:
self.set_focus(0)
elif self.follow_focus:
self.set_focus(len(self.view) - 1)
def set_focus_flow(self, f):
self.set_focus(self.view.index(f))
def get_from_pos(self, pos):
if len(self.view) <= pos or pos < 0:
return None, None
return self.view[pos], pos
def get_next(self, pos):
return self.get_from_pos(pos + 1)
def get_prev(self, pos):
return self.get_from_pos(pos - 1)
def delete_flow(self, f):
if f in self.view and self.view.index(f) <= self.focus:
self.focus -= 1
if self.focus < 0:
self.focus = None
ret = super(ConsoleState, self).delete_flow(f)
self.set_focus(self.focus)
return ret
def get_nearest_matching_flow(self, flow, filt):
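        """
        Return the flow nearest to the given flow in the current view that
        matches filt, searching outward in both directions, or None.
        """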
fidx = self.view.index(flow)
dist = 1
fprev = fnext = True
while fprev or fnext:
fprev, _ = self.get_from_pos(fidx - dist)
fnext, _ = self.get_from_pos(fidx + dist)
if fprev and fprev.match(filt):
return fprev
elif fnext and fnext.match(filt):
return fnext
dist += 1
return None
def enable_marked_filter(self):
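        """
        Restrict the view to marked flows, remembering the current filter so
        it can be restored and keeping the focus on (or near) the current flow.
        """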
marked_flows = [f for f in self.flows if f.marked]
if not marked_flows:
return
marked_filter = "~%s" % FMarked.code
# Save Focus
last_focus, _ = self.get_focus()
nearest_marked = self.get_nearest_matching_flow(last_focus, marked_filter)
self.last_filter = self.limit_txt
self.set_limit(marked_filter)
# Restore Focus
if last_focus.marked:
self.set_focus_flow(last_focus)
else:
self.set_focus_flow(nearest_marked)
self.mark_filter = True
def disable_marked_filter(self):
marked_filter = "~%s" % FMarked.code
# Save Focus
last_focus, _ = self.get_focus()
nearest_marked = self.get_nearest_matching_flow(last_focus, marked_filter)
self.set_limit(self.last_filter)
self.last_filter = ""
# Restore Focus
if last_focus.marked:
self.set_focus_flow(last_focus)
else:
self.set_focus_flow(nearest_marked)
self.mark_filter = False
def clear(self):
marked_flows = [f for f in self.view if f.marked]
super(ConsoleState, self).clear()
for f in marked_flows:
self.add_flow(f)
f.marked = True
if len(self.flows.views) == 0:
self.focus = None
else:
self.focus = 0
self.set_focus(self.focus)
class Options(mitmproxy.options.Options):
def __init__(
self,
eventlog=False, # type: bool
follow=False, # type: bool
intercept=False, # type: bool
limit=None, # type: Optional[str]
palette=None, # type: Optional[str]
palette_transparent=False, # type: bool
no_mouse=False, # type: bool
**kwargs
):
self.eventlog = eventlog
self.follow = follow
self.intercept = intercept
self.limit = limit
self.palette = palette
self.palette_transparent = palette_transparent
self.no_mouse = no_mouse
super(Options, self).__init__(**kwargs)
class ConsoleMaster(flow.FlowMaster):
palette = []
def __init__(self, server, options):
flow.FlowMaster.__init__(self, options, server, ConsoleState())
self.stream_path = None
# This line is just for type hinting
self.options = self.options # type: Options
self.options.errored.connect(self.options_error)
r = self.set_intercept(options.intercept)
if r:
print("Intercept error: {}".format(r), file=sys.stderr)
sys.exit(1)
if options.limit:
self.set_limit(options.limit)
self.set_stream_large_bodies(options.stream_large_bodies)
self.palette = options.palette
self.palette_transparent = options.palette_transparent
self.logbuffer = urwid.SimpleListWalker([])
self.follow = options.follow
if options.client_replay:
self.client_playback_path(options.client_replay)
if options.server_replay:
self.server_playback_path(options.server_replay)
self.view_stack = []
if options.app:
self.start_app(self.options.app_host, self.options.app_port)
signals.call_in.connect(self.sig_call_in)
signals.pop_view_state.connect(self.sig_pop_view_state)
signals.replace_view_state.connect(self.sig_replace_view_state)
signals.push_view_state.connect(self.sig_push_view_state)
signals.sig_add_log.connect(self.sig_add_log)
self.addons.add(options, *builtins.default_addons())
def __setattr__(self, name, value):
self.__dict__[name] = value
signals.update_settings.send(self)
def options_error(self, opts, exc):
signals.status_message.send(
message=str(exc),
expire=1
)
def sig_add_log(self, sender, e, level):
if self.options.verbosity < utils.log_tier(level):
return
if level == "error":
signals.status_message.send(
message = "Error: %s" % str(e)
)
e = urwid.Text(("error", str(e)))
else:
e = urwid.Text(str(e))
self.logbuffer.append(e)
if len(self.logbuffer) > EVENTLOG_SIZE:
self.logbuffer.pop(0)
self.logbuffer.set_focus(len(self.logbuffer) - 1)
def add_log(self, e, level):
signals.add_log(e, level)
def sig_call_in(self, sender, seconds, callback, args=()):
def cb(*_):
return callback(*args)
self.loop.set_alarm_in(seconds, cb)
def sig_replace_view_state(self, sender):
"""
A view has been pushed onto the stack, and is intended to replace
        the current view rather than creating a new stack entry.
"""
if len(self.view_stack) > 1:
del self.view_stack[1]
def sig_pop_view_state(self, sender):
"""
Pop the top view off the view stack. If no more views will be left
after this, prompt for exit.
"""
if len(self.view_stack) > 1:
self.view_stack.pop()
self.loop.widget = self.view_stack[-1]
else:
signals.status_prompt_onekey.send(
self,
prompt = "Quit",
keys = (
("yes", "y"),
("no", "n"),
),
callback = self.quit,
)
def sig_push_view_state(self, sender, window):
"""
Push a new view onto the view stack.
"""
self.view_stack.append(window)
self.loop.widget = window
self.loop.draw_screen()
def _run_script_method(self, method, s, f):
status, val = s.run(method, f)
if val:
if status:
signals.add_log("Method %s return: %s" % (method, val), "debug")
else:
signals.add_log(
"Method %s error: %s" %
(method, val[1]), "error")
def run_script_once(self, command, f):
if not command:
return
signals.add_log("Running script on flow: %s" % command, "debug")
try:
s = script.Script(command)
s.load()
except script.ScriptException as e:
signals.status_message.send(
message='Error loading "{}".'.format(command)
)
signals.add_log('Error loading "{}":\n{}'.format(command, e), "error")
return
if f.request:
self._run_script_method("request", s, f)
if f.response:
self._run_script_method("response", s, f)
if f.error:
self._run_script_method("error", s, f)
s.unload()
signals.flow_change.send(self, flow = f)
def toggle_eventlog(self):
self.options.eventlog = not self.options.eventlog
self.view_flowlist()
signals.replace_view_state.send(self)
def _readflows(self, path):
"""
        Utility function that reads a list of flows
or prints an error to the UI if that fails.
Returns
- None, if there was an error.
- a list of flows, otherwise.
"""
try:
return flow.read_flows_from_paths(path)
except exceptions.FlowReadException as e:
signals.status_message.send(message=str(e))
def client_playback_path(self, path):
if not isinstance(path, list):
path = [path]
flows = self._readflows(path)
if flows:
self.start_client_playback(flows, False)
def server_playback_path(self, path):
if not isinstance(path, list):
path = [path]
flows = self._readflows(path)
if flows:
self.start_server_playback(
flows,
self.options.kill, self.options.rheaders,
False, self.options.nopop,
self.options.replay_ignore_params,
self.options.replay_ignore_content,
self.options.replay_ignore_payload_params,
self.options.replay_ignore_host
)
def spawn_editor(self, data):
text = not isinstance(data, bytes)
fd, name = tempfile.mkstemp('', "mproxy", text=text)
if six.PY2:
os.close(fd)
with open(name, "w" if text else "wb") as f:
f.write(data)
else:
with open(fd, "w" if text else "wb") as f:
f.write(data)
# if no EDITOR is set, assume 'vi'
c = os.environ.get("EDITOR") or "vi"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd)
except:
signals.status_message.send(
message="Can't start editor: %s" % " ".join(c)
)
else:
with open(name, "r" if text else "rb") as f:
data = f.read()
self.ui.start()
os.unlink(name)
return data
def spawn_external_viewer(self, data, contenttype):
if contenttype:
contenttype = contenttype.split(";")[0]
ext = mimetypes.guess_extension(contenttype) or ""
else:
ext = ""
fd, name = tempfile.mkstemp(ext, "mproxy")
os.write(fd, data)
os.close(fd)
# read-only to remind the user that this is a view function
os.chmod(name, stat.S_IREAD)
cmd = None
shell = False
if contenttype:
c = mailcap.getcaps()
cmd, _ = mailcap.findmatch(c, contenttype, filename=name)
if cmd:
shell = True
if not cmd:
# hm which one should get priority?
c = os.environ.get("PAGER") or os.environ.get("EDITOR")
if not c:
c = "less"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd, shell=shell)
except:
signals.status_message.send(
message="Can't start external viewer: %s" % " ".join(c)
)
self.ui.start()
os.unlink(name)
def set_palette(self, name):
self.palette = name
self.ui.register_palette(
palettes.palettes[name].palette(self.palette_transparent)
)
self.ui.clear()
def ticker(self, *userdata):
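        """
        Poll the master for pending events, redraw the screen if anything
        changed and reschedule itself on the urwid loop every 10ms.
        """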
changed = self.tick(timeout=0)
if changed:
self.loop.draw_screen()
signals.update_settings.send()
self.loop.set_alarm_in(0.01, self.ticker)
def run(self):
self.ui = urwid.raw_display.Screen()
self.ui.set_terminal_properties(256)
self.set_palette(self.palette)
self.loop = urwid.MainLoop(
urwid.SolidFill("x"),
screen = self.ui,
handle_mouse = not self.options.no_mouse,
)
self.ab = statusbar.ActionBar()
if self.options.rfile:
ret = self.load_flows_path(self.options.rfile)
if ret and self.state.flow_count():
signals.add_log(
"File truncated or corrupted. "
"Loaded as many flows as possible.",
"error"
)
elif ret and not self.state.flow_count():
self.shutdown()
print("Could not load file: {}".format(ret), file=sys.stderr)
sys.exit(1)
self.loop.set_alarm_in(0.01, self.ticker)
if self.options.http2 and not tcp.HAS_ALPN: # pragma: no cover
def http2err(*args, **kwargs):
signals.status_message.send(
message = "HTTP/2 disabled - OpenSSL 1.0.2+ required."
" Use --no-http2 to silence this warning.",
expire=5
)
self.loop.set_alarm_in(0.01, http2err)
# It's not clear why we need to handle this explicitly - without this,
# mitmproxy hangs on keyboard interrupt. Remove if we ever figure it
# out.
def exit(s, f):
raise urwid.ExitMainLoop
signal.signal(signal.SIGINT, exit)
self.loop.set_alarm_in(
0.0001,
lambda *args: self.view_flowlist()
)
self.start()
try:
self.loop.run()
except Exception:
self.loop.stop()
sys.stdout.flush()
print(traceback.format_exc(), file=sys.stderr)
print("mitmproxy has crashed!", file=sys.stderr)
print("Please lodge a bug report at:", file=sys.stderr)
print("\thttps://github.com/mitmproxy/mitmproxy", file=sys.stderr)
print("Shutting down...", file=sys.stderr)
sys.stderr.flush()
self.shutdown()
def view_help(self, helpctx):
signals.push_view_state.send(
self,
window = window.Window(
self,
help.HelpView(helpctx),
None,
statusbar.StatusBar(self, help.footer),
None
)
)
def view_options(self):
for i in self.view_stack:
if isinstance(i["body"], options.Options):
return
signals.push_view_state.send(
self,
window = window.Window(
self,
options.Options(self),
None,
statusbar.StatusBar(self, options.footer),
options.help_context,
)
)
def view_palette_picker(self):
signals.push_view_state.send(
self,
window = window.Window(
self,
palettepicker.PalettePicker(self),
None,
statusbar.StatusBar(self, palettepicker.footer),
palettepicker.help_context,
)
)
def view_grideditor(self, ge):
signals.push_view_state.send(
self,
window = window.Window(
self,
ge,
None,
statusbar.StatusBar(self, grideditor.base.FOOTER),
ge.make_help()
)
)
def view_flowlist(self):
if self.ui.started:
self.ui.clear()
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
if self.options.eventlog:
body = flowlist.BodyPile(self)
else:
body = flowlist.FlowListBox(self)
if self.follow:
self.toggle_follow_flows()
signals.push_view_state.send(
self,
window = window.Window(
self,
body,
None,
statusbar.StatusBar(self, flowlist.footer),
flowlist.help_context
)
)
def view_flow(self, flow, tab_offset=0):
self.state.set_focus_flow(flow)
signals.push_view_state.send(
self,
window = window.Window(
self,
flowview.FlowView(self, self.state, flow, tab_offset),
flowview.FlowViewHeader(self, flow),
statusbar.StatusBar(self, flowview.footer),
flowview.help_context
)
)
def _write_flows(self, path, flows):
if not path:
return
path = os.path.expanduser(path)
try:
f = open(path, "wb")
fw = flow.FlowWriter(f)
for i in flows:
fw.add(i)
f.close()
except IOError as v:
signals.status_message.send(message=v.strerror)
def save_one_flow(self, path, flow):
return self._write_flows(path, [flow])
def save_flows(self, path):
return self._write_flows(path, self.state.view)
def load_flows_callback(self, path):
if not path:
return
ret = self.load_flows_path(path)
return ret or "Flows loaded from %s" % path
def load_flows_path(self, path):
reterr = None
try:
flow.FlowMaster.load_flows_file(self, path)
except exceptions.FlowReadException as e:
reterr = str(e)
signals.flowlist_change.send(self)
return reterr
def accept_all(self):
self.state.accept_all(self)
def set_limit(self, txt):
v = self.state.set_limit(txt)
signals.flowlist_change.send(self)
return v
def set_intercept(self, txt):
return self.state.set_intercept(txt)
def change_default_display_mode(self, t):
v = contentviews.get_by_shortcut(t)
self.state.default_body_view = v
self.refresh_focus()
def edit_scripts(self, scripts):
self.options.scripts = [x[0] for x in scripts]
def stop_client_playback_prompt(self, a):
if a != "n":
self.stop_client_playback()
def stop_server_playback_prompt(self, a):
if a != "n":
self.stop_server_playback()
def quit(self, a):
if a != "n":
raise urwid.ExitMainLoop
def shutdown(self):
self.state.killall(self)
flow.FlowMaster.shutdown(self)
def clear_flows(self):
self.state.clear()
signals.flowlist_change.send(self)
def toggle_follow_flows(self):
# toggle flow follow
self.state.follow_focus = not self.state.follow_focus
# jump to most recent flow if follow is now on
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
signals.flowlist_change.send(self)
def delete_flow(self, f):
self.state.delete_flow(f)
signals.flowlist_change.send(self)
def refresh_focus(self):
if self.state.view:
signals.flow_change.send(
self,
flow = self.state.view[self.state.focus]
)
def process_flow(self, f):
should_intercept = any(
[
self.state.intercept and f.match(self.state.intercept) and not f.request.is_replay,
f.intercepted,
]
)
if should_intercept:
f.intercept(self)
f.reply.take()
signals.flowlist_change.send(self)
signals.flow_change.send(self, flow = f)
def clear_events(self):
self.logbuffer[:] = []
# Handlers
@controller.handler
def error(self, f):
f = flow.FlowMaster.error(self, f)
if f:
self.process_flow(f)
return f
@controller.handler
def request(self, f):
f = flow.FlowMaster.request(self, f)
if f:
self.process_flow(f)
return f
@controller.handler
def response(self, f):
f = flow.FlowMaster.response(self, f)
if f:
self.process_flow(f)
return f
@controller.handler
def tcp_message(self, f):
super(ConsoleMaster, self).tcp_message(f)
message = f.messages[-1]
direction = "->" if message.from_client else "<-"
self.add_log("{client} {direction} tcp {direction} {server}".format(
client=repr(f.client_conn.address),
server=repr(f.server_conn.address),
direction=direction,
), "info")
self.add_log(strutils.bytes_to_escaped_str(message.content), "debug")
| mit | 3,386,347,636,308,486,700 | 29.273779 | 99 | 0.548295 | false |
nel215/reinforcement-learning | rl/agent/td_lambda.py | 1 | 1026 | # coding: utf-8
class TDLambda(object):
def __init__(self, action_space, alpha=0.1, gamma=0.9, ld=0.5):
self.action_space = action_space
self.V = {}
self.E = {} # eligibility trace
self.alpha = alpha
self.gamma = gamma
self.ld = ld
self.obs = None
self.reward = None
self.next_obs = None
def v_function(self, obs):
if obs not in self.V:
self.V[obs] = 10
return self.V[obs]
def action(self, obs):
# random policy
return self.action_space.sample()
def store(self, obs, act, reward, next_obs):
self.obs = obs
self.reward = reward
self.next_obs = next_obs
def update(self):
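        # Backward-view TD(lambda): refresh the eligibility trace of the current
        # state (e = 1 + gamma * lambda * e_prev), compute the TD error
        # delta = r + gamma * V(s') - V(s) (stored in `loss`), and update
        # V(s) by alpha * delta * e.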
et = 1 + self.gamma * self.ld * self.E.get(self.obs, 1)
self.E[self.obs] = et
loss = self.reward + self.gamma * self.v_function(self.next_obs) - self.v_function(self.obs)
self.V[self.obs] += self.alpha * loss * et
def reset(self):
self.E = {}
| apache-2.0 | 209,233,239,150,808,030 | 26 | 100 | 0.542885 | false |
saurabh6790/frappe | frappe/model/naming.py | 1 | 10734 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import now_datetime, cint, cstr
import re
from six import string_types
from frappe.model import log_types
def set_new_name(doc):
"""
Sets the `name` property for the document based on various rules.
1. If amended doc, set suffix.
2. If `autoname` method is declared, then call it.
3. If `autoname` property is set in the DocType (`meta`), then build it using the `autoname` property.
4. If no rule defined, use hash.
:param doc: Document to be named.
"""
doc.run_method("before_naming")
autoname = frappe.get_meta(doc.doctype).autoname or ""
if autoname.lower() != "prompt" and not frappe.flags.in_import:
doc.name = None
if getattr(doc, "amended_from", None):
_set_amended_name(doc)
return
elif getattr(doc.meta, "issingle", False):
doc.name = doc.doctype
elif getattr(doc.meta, "istable", False):
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
set_naming_from_document_naming_rule(doc)
if not doc.name:
doc.run_method("autoname")
if not doc.name and autoname:
set_name_from_naming_options(autoname, doc)
# if the autoname option is 'field:' and no name was derived, we need to
# notify
if not doc.name and autoname.startswith("field:"):
fieldname = autoname[6:]
frappe.throw(_("{0} is required").format(doc.meta.get_label(fieldname)))
# at this point, we fall back to name generation with the hash option
if not doc.name and autoname == "hash":
doc.name = make_autoname("hash", doc.doctype)
if not doc.name:
doc.name = make_autoname("hash", doc.doctype)
doc.name = validate_name(
doc.doctype,
doc.name,
frappe.get_meta(doc.doctype).get_field("name_case")
)
def set_name_from_naming_options(autoname, doc):
"""
Get a name based on the autoname field option
"""
_autoname = autoname.lower()
if _autoname.startswith("field:"):
doc.name = _field_autoname(autoname, doc)
elif _autoname.startswith("naming_series:"):
set_name_by_naming_series(doc)
elif _autoname.startswith("prompt"):
_prompt_autoname(autoname, doc)
elif _autoname.startswith("format:"):
doc.name = _format_autoname(autoname, doc)
elif "#" in autoname:
doc.name = make_autoname(autoname, doc=doc)
def set_naming_from_document_naming_rule(doc):
'''
Evaluate rules based on "Document Naming Series" doctype
'''
if doc.doctype in log_types:
return
# ignore_ddl if naming is not yet bootstrapped
for d in frappe.get_all('Document Naming Rule',
dict(document_type=doc.doctype, disabled=0), order_by='priority desc', ignore_ddl=True):
frappe.get_cached_doc('Document Naming Rule', d.name).apply(doc)
if doc.name:
break
def set_name_by_naming_series(doc):
"""Sets name by the `naming_series` property"""
if not doc.naming_series:
doc.naming_series = get_default_naming_series(doc.doctype)
if not doc.naming_series:
frappe.throw(frappe._("Naming Series mandatory"))
doc.name = make_autoname(doc.naming_series+".#####", "", doc)
def make_autoname(key="", doctype="", doc=""):
"""
Creates an autoname from the given key:
**Autoname rules:**
* The key is separated by '.'
* '####' represents a series. The string before this part becomes the prefix:
Example: ABC.#### creates a series ABC0001, ABC0002 etc
* 'MM' represents the current month
* 'YY' and 'YYYY' represent the current year
*Example:*
* DE/./.YY./.MM./.##### will create a series like
DE/09/01/0001 where 09 is the year, 01 is the month and 0001 is the series
"""
if key == "hash":
return frappe.generate_hash(doctype, 10)
if "#" not in key:
key = key + ".#####"
elif "." not in key:
error_message = _("Invalid naming series (. missing)")
if doctype:
error_message = _("Invalid naming series (. missing) for {0}").format(doctype)
frappe.throw(error_message)
parts = key.split('.')
n = parse_naming_series(parts, doctype, doc)
return n
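# parse_naming_series walks the dot-separated parts of the key: a run of '#'
# becomes the zero-padded series counter, date tokens (YY, MM, DD, YYYY,
# timestamp, FY) are expanded, '{fieldname}' or a plain fieldname is read from
# the document, and any other part is kept as a literal.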
def parse_naming_series(parts, doctype='', doc=''):
n = ''
if isinstance(parts, string_types):
parts = parts.split('.')
series_set = False
today = now_datetime()
for e in parts:
part = ''
if e.startswith('#'):
if not series_set:
digits = len(e)
part = getseries(n, digits)
series_set = True
elif e == 'YY':
part = today.strftime('%y')
elif e == 'MM':
part = today.strftime('%m')
elif e == 'DD':
part = today.strftime("%d")
elif e == 'YYYY':
part = today.strftime('%Y')
elif e == 'timestamp':
part = str(today)
elif e == 'FY':
part = frappe.defaults.get_user_default("fiscal_year")
elif e.startswith('{') and doc:
e = e.replace('{', '').replace('}', '')
part = doc.get(e)
elif doc and doc.get(e):
part = doc.get(e)
else:
part = e
if isinstance(part, string_types):
n += part
return n
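# getseries reads and increments the counter stored in `tabSeries` for the
# given prefix (creating the row on first use) and returns it zero-padded to
# `digits` characters.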
def getseries(key, digits):
# series created ?
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (key,))
if current and current[0][0] is not None:
current = current[0][0]
# yes, update it
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` + 1 WHERE `name`=%s", (key,))
current = cint(current) + 1
else:
# no, create it
frappe.db.sql("INSERT INTO `tabSeries` (`name`, `current`) VALUES (%s, 1)", (key,))
current = 1
return ('%0'+str(digits)+'d') % current
def revert_series_if_last(key, name, doc=None):
"""
Reverts the series for particular naming series:
* key is naming series - SINV-.YYYY-.####
* name is actual name - SINV-2021-0001
	1. This function splits the key into two parts: prefix (SINV-YYYY) & hashes (####).
2. Use prefix to get the current index of that naming series from Series table
3. Then revert the current index.
*For custom naming series:*
	1. hash can exist anywhere; if it exists in hashes then it takes the normal flow.
	2. If hash doesn't exist in hashes, we get the hash from prefix, then update name and prefix accordingly.
*Example:*
1. key = SINV-.YYYY.-
* If key doesn't have hash it will add hash at the end
* prefix will be SINV-YYYY based on this will get current index from Series table.
2. key = SINV-.####.-2021
* now prefix = SINV-#### and hashes = 2021 (hash doesn't exist)
* will search hash in key then accordingly get prefix = SINV-
3. key = ####.-2021
* prefix = #### and hashes = 2021 (hash doesn't exist)
* will search hash in key then accordingly get prefix = ""
"""
if ".#" in key:
prefix, hashes = key.rsplit(".", 1)
if "#" not in hashes:
# get the hash part from the key
hash = re.search("#+", key)
if not hash:
return
name = name.replace(hashes, "")
prefix = prefix.replace(hash.group(), "")
else:
prefix = key
if '.' in prefix:
prefix = parse_naming_series(prefix.split('.'), doc=doc)
count = cint(name.replace(prefix, ""))
current = frappe.db.sql("SELECT `current` FROM `tabSeries` WHERE `name`=%s FOR UPDATE", (prefix,))
if current and current[0][0]==count:
frappe.db.sql("UPDATE `tabSeries` SET `current` = `current` - 1 WHERE `name`=%s", prefix)
def get_default_naming_series(doctype):
"""get default value for `naming_series` property"""
naming_series = frappe.get_meta(doctype).get_field("naming_series").options or ""
if naming_series:
naming_series = naming_series.split("\n")
return naming_series[0] or naming_series[1]
else:
return None
def validate_name(doctype, name, case=None, merge=False):
if not name:
frappe.throw(_("No Name Specified for {0}").format(doctype))
if name.startswith("New "+doctype):
frappe.throw(_("There were some errors setting the name, please contact the administrator"), frappe.NameError)
if case == "Title Case":
name = name.title()
if case == "UPPER CASE":
name = name.upper()
name = name.strip()
if not frappe.get_meta(doctype).get("issingle") and (doctype == name) and (name != "DocType"):
frappe.throw(_("Name of {0} cannot be {1}").format(doctype, name), frappe.NameError)
special_characters = "<>"
if re.findall("[{0}]+".format(special_characters), name):
message = ", ".join("'{0}'".format(c) for c in special_characters)
frappe.throw(_("Name cannot contain special characters like {0}").format(message), frappe.NameError)
return name
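# append_number_if_name_exists returns `value` unchanged if no matching document
# exists; otherwise it appends `separator` and the next free counter, e.g.
# "value-2" when "value-1" is already taken.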
def append_number_if_name_exists(doctype, value, fieldname="name", separator="-", filters=None):
if not filters:
filters = dict()
filters.update({fieldname: value})
exists = frappe.db.exists(doctype, filters)
regex = "^{value}{separator}\\d+$".format(value=re.escape(value), separator=separator)
if exists:
last = frappe.db.sql("""SELECT `{fieldname}` FROM `tab{doctype}`
WHERE `{fieldname}` {regex_character} %s
ORDER BY length({fieldname}) DESC,
`{fieldname}` DESC LIMIT 1""".format(
doctype=doctype,
fieldname=fieldname,
regex_character=frappe.db.REGEX_CHARACTER),
regex)
if last:
count = str(cint(last[0][0].rsplit(separator, 1)[1]) + 1)
else:
count = "1"
value = "{0}{1}{2}".format(value, separator, count)
return value
def _set_amended_name(doc):
am_id = 1
am_prefix = doc.amended_from
if frappe.db.get_value(doc.doctype, doc.amended_from, "amended_from"):
am_id = cint(doc.amended_from.split("-")[-1]) + 1
am_prefix = "-".join(doc.amended_from.split("-")[:-1]) # except the last hyphen
doc.name = am_prefix + "-" + str(am_id)
return doc.name
def _field_autoname(autoname, doc, skip_slicing=None):
"""
Generate a name using `DocType` field. This is called when the doctype's
`autoname` field starts with 'field:'
"""
fieldname = autoname if skip_slicing else autoname[6:]
name = (cstr(doc.get(fieldname)) or "").strip()
return name
def _prompt_autoname(autoname, doc):
"""
Generate a name using Prompt option. This simply means the user will have to set the name manually.
This is called when the doctype's `autoname` field starts with 'prompt'.
"""
# set from __newname in save.py
if not doc.name:
frappe.throw(_("Name not set via prompt"))
def _format_autoname(autoname, doc):
"""
Generate autoname by replacing all instances of braced params (fields, date params ('DD', 'MM', 'YY'), series)
Independent of remaining string or separators.
Example pattern: 'format:LOG-{MM}-{fieldname1}-{fieldname2}-{#####}'
"""
first_colon_index = autoname.find(":")
autoname_value = autoname[first_colon_index + 1:]
def get_param_value_for_match(match):
param = match.group()
# trim braces
trimmed_param = param[1:-1]
return parse_naming_series([trimmed_param], doc=doc)
# Replace braced params with their parsed value
name = re.sub(r"(\{[\w | #]+\})", get_param_value_for_match, autoname_value)
return name
| mit | -6,251,812,142,639,861,000 | 28.98324 | 112 | 0.671325 | false |
YunoHost/moulinette | moulinette/interfaces/cli.py | 1 | 17327 | # -*- coding: utf-8 -*-
import os
import sys
import getpass
import locale
import logging
from argparse import SUPPRESS
from collections import OrderedDict
from datetime import date, datetime
import argcomplete
from moulinette import msignals, m18n
from moulinette.core import MoulinetteError
from moulinette.interfaces import (
BaseActionsMapParser,
BaseInterface,
ExtendedArgumentParser,
)
from moulinette.utils import log
logger = log.getLogger("moulinette.cli")
# CLI helpers ----------------------------------------------------------
CLI_COLOR_TEMPLATE = "\033[{:d}m\033[1m"
END_CLI_COLOR = "\033[m"
colors_codes = {
"red": CLI_COLOR_TEMPLATE.format(31),
"green": CLI_COLOR_TEMPLATE.format(32),
"yellow": CLI_COLOR_TEMPLATE.format(33),
"blue": CLI_COLOR_TEMPLATE.format(34),
"purple": CLI_COLOR_TEMPLATE.format(35),
"cyan": CLI_COLOR_TEMPLATE.format(36),
"white": CLI_COLOR_TEMPLATE.format(37),
}
def colorize(astr, color):
"""Colorize a string
Return a colorized string for printing in shell with style ;)
Keyword arguments:
- astr -- String to colorize
- color -- Name of the color
"""
if os.isatty(1):
return "{:s}{:s}{:s}".format(colors_codes[color], astr, END_CLI_COLOR)
else:
return astr
def plain_print_dict(d, depth=0):
"""Print in a plain way a dictionary recursively
Print a dictionary recursively for scripting usage to the standard output.
Output formatting:
>>> d = {'key': 'value', 'list': [1,2], 'dict': {'key2': 'value2'}}
>>> plain_print_dict(d)
#key
value
#list
1
2
#dict
##key2
value2
Keyword arguments:
- d -- The dictionary to print
- depth -- The recursive depth of the dictionary
"""
# skip first key printing
if depth == 0 and (isinstance(d, dict) and len(d) == 1):
_, d = d.popitem()
if isinstance(d, (tuple, set)):
d = list(d)
if isinstance(d, list):
for v in d:
plain_print_dict(v, depth + 1)
elif isinstance(d, dict):
for k, v in d.items():
print("{}{}".format("#" * (depth + 1), k))
plain_print_dict(v, depth + 1)
else:
if isinstance(d, unicode):
d = d.encode("utf-8")
print(d)
def pretty_date(_date):
"""Display a date in the current time zone without ms and tzinfo
Argument:
- date -- The date or datetime to display
"""
import pytz # Lazy loading, this takes like 3+ sec on a RPi2 ?!
# Deduce system timezone
nowutc = datetime.now(tz=pytz.utc)
nowtz = datetime.now()
nowtz = nowtz.replace(tzinfo=pytz.utc)
offsetHour = nowutc - nowtz
offsetHour = int(round(offsetHour.total_seconds() / 3600))
localtz = "Etc/GMT%+d" % offsetHour
# Transform naive date into UTC date
if _date.tzinfo is None:
_date = _date.replace(tzinfo=pytz.utc)
# Convert UTC date into system locale date
_date = _date.astimezone(pytz.timezone(localtz))
if isinstance(_date, datetime):
return _date.strftime("%Y-%m-%d %H:%M:%S")
else:
return _date.strftime("%Y-%m-%d")
def pretty_print_dict(d, depth=0):
"""Print in a pretty way a dictionary recursively
Print a dictionary recursively with colors to the standard output.
Keyword arguments:
- d -- The dictionary to print
- depth -- The recursive depth of the dictionary
"""
keys = d.keys()
if not isinstance(d, OrderedDict):
keys = sorted(keys)
for k in keys:
v = d[k]
k = colorize(str(k), "purple")
if isinstance(v, (tuple, set)):
v = list(v)
if isinstance(v, list) and len(v) == 1:
v = v[0]
if isinstance(v, dict):
print("{:s}{}: ".format(" " * depth, k))
pretty_print_dict(v, depth + 1)
elif isinstance(v, list):
print("{:s}{}: ".format(" " * depth, k))
for key, value in enumerate(v):
if isinstance(value, tuple):
pretty_print_dict({value[0]: value[1]}, depth + 1)
elif isinstance(value, dict):
pretty_print_dict({key: value}, depth + 1)
else:
if isinstance(value, unicode):
value = value.encode("utf-8")
                    elif isinstance(value, date):
                        value = pretty_date(value)
                    print("{:s}- {}".format(" " * (depth + 1), value))
else:
if isinstance(v, unicode):
v = v.encode("utf-8")
elif isinstance(v, date):
v = pretty_date(v)
print("{:s}{}: {}".format(" " * depth, k, v))
def get_locale():
"""Return current user eocale"""
try:
lang = locale.getdefaultlocale()[0]
except Exception:
# In some edge case the locale lib fails ...
# c.f. https://forum.yunohost.org/t/error-when-trying-to-enter-user-information-in-admin-panel/11390/11
lang = os.getenv("LANG")
if not lang:
return ""
return lang[:2]
# CLI Classes Implementation -------------------------------------------
class TTYHandler(logging.StreamHandler):
"""TTY log handler
A handler class which prints logging records for a tty. The record is
    nevertheless formatted depending on whether it is connected to a tty(-like)
device.
    If it's the case, the level name - optionally colorized - is prepended
to the message and the result is stored in the record as `message_key`
attribute. That way, a custom formatter can be defined. The default is
to output just the formatted message.
Anyway, if the stream is not a tty, just the message is output.
    Note that records with a level greater than or equal to WARNING are sent to
stderr. Otherwise, they are sent to stdout.
"""
LEVELS_COLOR = {
log.NOTSET: "white",
log.DEBUG: "white",
log.INFO: "cyan",
log.SUCCESS: "green",
log.WARNING: "yellow",
log.ERROR: "red",
log.CRITICAL: "red",
}
def __init__(self, message_key="fmessage"):
logging.StreamHandler.__init__(self)
self.message_key = message_key
def format(self, record):
"""Enhance message with level and colors if supported."""
msg = record.getMessage()
if self.supports_color():
level = ""
if self.level <= log.DEBUG:
# add level name before message
level = "%s " % record.levelname
elif record.levelname in ["SUCCESS", "WARNING", "ERROR", "INFO"]:
# add translated level name before message
level = "%s " % m18n.g(record.levelname.lower())
color = self.LEVELS_COLOR.get(record.levelno, "white")
msg = "{0}{1}{2}{3}".format(colors_codes[color], level, END_CLI_COLOR, msg)
if self.formatter:
# use user-defined formatter
record.__dict__[self.message_key] = msg
return self.formatter.format(record)
return msg
def emit(self, record):
# set proper stream first
if record.levelno >= log.WARNING:
self.stream = sys.stderr
else:
self.stream = sys.stdout
logging.StreamHandler.emit(self, record)
def supports_color(self):
"""Check whether current stream supports color."""
if hasattr(self.stream, "isatty") and self.stream.isatty():
return True
return False
class ActionsMapParser(BaseActionsMapParser):
"""Actions map's Parser for the CLI
Provide actions map parsing methods for a CLI usage. The parser for
the arguments is represented by a ExtendedArgumentParser object.
Keyword arguments:
- parser -- The ExtendedArgumentParser object to use
- subparser_kwargs -- Arguments to pass to the sub-parser group
- top_parser -- An ArgumentParser object whose arguments should
be take into account but not parsed
"""
def __init__(
self, parent=None, parser=None, subparser_kwargs=None, top_parser=None, **kwargs
):
super(ActionsMapParser, self).__init__(parent)
if subparser_kwargs is None:
subparser_kwargs = {"title": "categories", "required": False}
self._parser = parser or ExtendedArgumentParser()
self._subparsers = self._parser.add_subparsers(**subparser_kwargs)
self.global_parser = parent.global_parser if parent else None
if top_parser:
self.global_parser = self._parser.add_argument_group("global arguments")
# Append each top parser action to the global group
for action in top_parser._actions:
action.dest = SUPPRESS
self.global_parser._add_action(action)
# Implement virtual properties
interface = "cli"
# Implement virtual methods
@staticmethod
def format_arg_names(name, full):
if name.startswith("-") and full:
return [name, full]
return [name]
def has_global_parser(self):
return True
def add_category_parser(self, name, category_help=None, **kwargs):
"""Add a parser for a category
Keyword arguments:
- category_help -- A brief description for the category
Returns:
A new ActionsMapParser object for the category
"""
parser = self._subparsers.add_parser(
name, description=category_help, help=category_help, **kwargs
)
return self.__class__(self, parser, {"title": "subcommands", "required": True})
def add_subcategory_parser(self, name, subcategory_help=None, **kwargs):
"""Add a parser for a subcategory
Keyword arguments:
- subcategory_help -- A brief description for the category
Returns:
A new ActionsMapParser object for the category
"""
parser = self._subparsers.add_parser(
name,
type_="subcategory",
description=subcategory_help,
help=subcategory_help,
**kwargs
)
return self.__class__(self, parser, {"title": "actions", "required": True})
def add_action_parser(
self,
name,
tid,
action_help=None,
deprecated=False,
deprecated_alias=[],
**kwargs
):
"""Add a parser for an action
Keyword arguments:
- action_help -- A brief description for the action
        - deprecated -- Whether the action is deprecated
- deprecated_alias -- A list of deprecated action alias names
Returns:
A new ExtendedArgumentParser object for the action
"""
return self._subparsers.add_parser(
name,
type_="action",
help=action_help,
description=action_help,
deprecated=deprecated,
deprecated_alias=deprecated_alias,
)
def add_global_arguments(self, arguments):
for argument_name, argument_options in arguments.items():
# will adapt arguments name for cli or api context
names = self.format_arg_names(
str(argument_name), argument_options.pop("full", None)
)
self.global_parser.add_argument(*names, **argument_options)
def auth_required(self, args, **kwargs):
# FIXME? idk .. this try/except is duplicated from parse_args below
# Just to be able to obtain the tid
try:
ret = self._parser.parse_args(args)
except SystemExit:
raise
except:
logger.exception("unable to parse arguments '%s'", " ".join(args))
raise MoulinetteError("error_see_log")
tid = getattr(ret, "_tid", None)
if self.get_conf(tid, "authenticate"):
authenticator = self.get_conf(tid, "authenticator")
# If several authenticator, use the default one
if isinstance(authenticator, dict):
if "default" in authenticator:
authenticator = "default"
else:
# TODO which one should we use?
pass
return authenticator
else:
return False
def parse_args(self, args, **kwargs):
try:
ret = self._parser.parse_args(args)
except SystemExit:
raise
except:
logger.exception("unable to parse arguments '%s'", " ".join(args))
raise MoulinetteError("error_see_log")
else:
self.prepare_action_namespace(getattr(ret, "_tid", None), ret)
self._parser.dequeue_callbacks(ret)
return ret
class Interface(BaseInterface):
"""Command-line Interface for the moulinette
Initialize an interface connected to the standard input/output
stream and to a given actions map.
Keyword arguments:
- actionsmap -- The ActionsMap instance to connect to
"""
def __init__(self, actionsmap):
# Set user locale
m18n.set_locale(get_locale())
# Connect signals to handlers
msignals.set_handler("display", self._do_display)
if os.isatty(1):
msignals.set_handler("authenticate", self._do_authenticate)
msignals.set_handler("prompt", self._do_prompt)
self.actionsmap = actionsmap
def run(self, args, output_as=None, password=None, timeout=None):
"""Run the moulinette
Process the action corresponding to the given arguments 'args'
and print the result.
Keyword arguments:
- args -- A list of argument strings
- output_as -- Output result in another format. Possible values:
- json: return a JSON encoded string
- plain: return a script-readable output
- none: do not output the result
- password -- The password to use in case of authentication
- timeout -- Number of seconds before this command will timeout because it can't acquire the lock (meaning that another command is currently running), by default there is no timeout and the command will wait until it can get the lock
"""
if output_as and output_as not in ["json", "plain", "none"]:
raise MoulinetteError("invalid_usage")
# auto-complete
argcomplete.autocomplete(self.actionsmap.parser._parser)
# Set handler for authentication
if password:
msignals.set_handler("authenticate", lambda a: a(password=password))
else:
if os.isatty(1):
msignals.set_handler("authenticate", self._do_authenticate)
try:
ret = self.actionsmap.process(args, timeout=timeout)
except (KeyboardInterrupt, EOFError):
raise MoulinetteError("operation_interrupted")
if ret is None or output_as == "none":
return
# Format and print result
if output_as:
if output_as == "json":
import json
from moulinette.utils.serialize import JSONExtendedEncoder
print(json.dumps(ret, cls=JSONExtendedEncoder))
else:
plain_print_dict(ret)
elif isinstance(ret, dict):
pretty_print_dict(ret)
else:
print(ret)
# Signals handlers
def _do_authenticate(self, authenticator):
"""Process the authentication
Handle the core.MoulinetteSignals.authenticate signal.
"""
# TODO: Allow token authentication?
help = authenticator.extra.get("help")
msg = m18n.n(help) if help else m18n.g("password")
return authenticator(password=self._do_prompt(msg, True, False, color="yellow"))
def _do_prompt(self, message, is_password, confirm, color="blue"):
"""Prompt for a value
Handle the core.MoulinetteSignals.prompt signal.
Keyword arguments:
- color -- The color to use for prompting message
"""
if is_password:
prompt = lambda m: getpass.getpass(colorize(m18n.g("colon", m), color))
else:
prompt = lambda m: raw_input(colorize(m18n.g("colon", m), color))
value = prompt(message)
if confirm:
m = message[0].lower() + message[1:]
if prompt(m18n.g("confirm", prompt=m)) != value:
raise MoulinetteError("values_mismatch")
return value
def _do_display(self, message, style):
"""Display a message
Handle the core.MoulinetteSignals.display signal.
"""
if isinstance(message, unicode):
message = message.encode("utf-8")
if style == "success":
print("{} {}".format(colorize(m18n.g("success"), "green"), message))
elif style == "warning":
print("{} {}".format(colorize(m18n.g("warning"), "yellow"), message))
elif style == "error":
print("{} {}".format(colorize(m18n.g("error"), "red"), message))
else:
print(message)
| agpl-3.0 | -4,121,375,827,570,136,000 | 31.087037 | 245 | 0.581347 | false |
ken-muturi/pombola | pombola/settings/south_africa_base.py | 1 | 2220 | from .apps import *
COUNTRY_APP = 'south_africa'
OPTIONAL_APPS = (
'speeches',
'za_hansard',
'pombola.interests_register',
'pombola.spinner',
)
OPTIONAL_APPS += APPS_REQUIRED_BY_SPEECHES
SPEECH_SUMMARY_LENGTH = 30
BLOG_RSS_FEED = ''
BREADCRUMB_URL_NAME_MAPPINGS = {
'info': ['Information', '/info/'],
'organisation': ['People', '/organisation/all/'],
'person': ['Politicians', '/person/all/'],
'place': ['Places', '/place/all/'],
'search': ['Search', '/search/'],
'mp-corner': ['MP Corner', '/blog/category/mp-corner'],
'newsletter': ['MONITOR Newsletter', '/info/newsletter'],
}
TWITTER_USERNAME = 'PeoplesAssem_SA'
TWITTER_WIDGET_ID = '431408035739607040'
MAP_BOUNDING_BOX_NORTH = -22.06
MAP_BOUNDING_BOX_EAST = 32.95
MAP_BOUNDING_BOX_SOUTH = -35.00
MAP_BOUNDING_BOX_WEST = 16.30
MAPIT_COUNTRY = 'ZA'
COUNTRY_CSS = {
'south-africa': {
'source_filenames': (
'sass/south-africa.scss',
'css/jquery.ui.datepicker.css',
),
'output_filename': 'css/south-africa.css'
}
}
COUNTRY_JS = {
'tabs': {
'source_filenames': (
'js/tabs.js',
),
'output_filename': 'js/tabs.js',
'template_name': 'pipeline/js-array.html',
},
'za-map-drilldown': {
'source_filenames': (
'js/za-map-drilldown.js',
),
'output_filename': 'js/za-map-drilldown.js',
'template_name': 'pipeline/js-array.html',
},
    'election-countdown': {
'source_filenames': (
'js/election_countdown.js',
),
'output_filename': 'js/election_countdown.js',
'template_name': 'pipeline/js-array.html',
},
'advanced-search': {
'source_filenames': (
'js/libs/jquery.ui.datepicker.js',
'js/advanced-search.js',
),
'output_filename': 'js/advanced-search.js',
'template_name': 'pipeline/js-array.html',
},
'interests-filter': {
'source_filenames' : (
'js/interests-filter.js',
),
'output_filename': 'js/interests-filter.js',
'template_name': 'pipeline/js-array.html',
}
}
INFO_PAGES_ALLOW_RAW_HTML = True
| agpl-3.0 | 5,006,298,211,363,065,000 | 24.517241 | 61 | 0.563063 | false |
sethbrin/QM | version3/Xq2EFT/test_eft_calculator.py | 1 | 1468 | #!/usr/bin/env python2
import numpy as np
from time import time
from eft_calculator import EFT_calculator
import tools
def load_coordinates(name):
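    # Read the six atom coordinate lines from the tail of a test input file
    # (apparently two 3-atom fragments, split into coors[:3] and coors[3:] by the caller).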
lines = open('random/'+name).readlines()[-7:-1]
coors = [[float(item) for item in line.split()[2:5]] for line in lines]
return np.array(coors)
def test_random_set():
ener = []
force = []
torque = []
t0 = time()
Ofile = open('wtr01.txt', 'w')
fout1 = open('wtr_wtr_force.txt','w')
fout2 = open('wtr_wtr_torque.txt','w')
for i in range(2, 2000):
#for i in range(2, 3):
name = 'test%04d.inp' % i
coors = load_coordinates(name)
eft = calculator.eval(coors[:3], coors[3:])
Ofile.write('%s %12.7f\n'%(name, eft[0]))
Ofile.flush()
fout1.write('%s %12.7f %12.7f %12.7f\n'%(name, eft[1], eft[2], eft[3]))
fout1.flush()
fout2.write('%s %12.7f %12.7f %12.7f\n'%(name, eft[4], eft[5], eft[6]))
fout2.flush()
ener.append(eft[0])
force.append(eft[1:4])
torque.append(eft[4:7])
t1 = time()
print 'took %.1f s to evaluate the random set' % (t1 - t0)
Ofile.close()
fout1.close()
fout2.close()
return ener, force, torque
if __name__ == '__main__':
order = 2
calculator = EFT_calculator(order)
t0 = time()
calculator.setup('grid.dat')
t1 = time()
print 'took %.1f s to fill the grid' % (t1 - t0)
ener, force, torque = test_random_set()
| mit | 3,478,274,189,580,841,500 | 26.698113 | 79 | 0.56267 | false |
lcharleux/abapy | abapy/future.py | 1 | 21311 | import numpy as np
import copy, math
from scipy.spatial import distance, Delaunay
def tri_area(vertices):
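  """
  Returns the area of a triangle given its 3 vertices (half the norm of the cross product of two edge vectors).
  """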
u = vertices[0]
v = vertices[1]
w = vertices[2]
return np.linalg.norm(np.cross( v-u, w-u)) / 2.
def tetra_volume(vertices):
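  """
  Returns the signed volume of a tetrahedron given its 4 vertices (scalar triple product divided by 6).
  """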
u = vertices[0]
v = vertices[1]
w = vertices[2]
x = vertices[3]
return np.cross(v-u, w-u).dot(x-u) / 6.
class MasterDict(dict):
"""
A dict with reverse reference to a master mesh.
"""
def __init__(self, master, *args,**kwargs):
self.master = master
dict.__init__(self,*args,**kwargs)
def __setitem__(self, key, val):
val.master = self.master
dict.__setitem__(self, key, val)
#-------------------------------------------------------------------------------
# Fields
class Field(object):
"""
A field meta class.
"""
def __init__(self, label, loc, data = {}, mesh = None):
self.label = label
self.data = data
    self.master = mesh
self.loc = loc
def toarrays(self):
k = np.array(self.data.keys())
v = np.array(self.data.values())
return k, v
class ScalarField(Field):
"""
A scalar field class.
"""
pass
class VectorField(Field):
"""
A vector field class.
"""
pass
class TensorField(Field):
"""
A tensor field class.
"""
pass
#-------------------------------------------------------------------------------
# Nodes and related stuff
class Node(object):
def __init__(self, coords, sets = set(), master = None):
self.coords = np.asarray(coords, dtype= np.float64)
self.sets = set(sets)
self.master = master
def __repr__(self):
return "#NODE" + self.__str__()
def __str__(self):
return "{0}, sets={1}".format(self.coords, self.sets)
#-------------------------------------------------------------------------------
# Elements and related stuff
class Element(object):
_extrudable = False
def __init__(self, conn, sets = set(), surfaces = None, master = None):
self.conn = np.asarray(conn[:self._nvert], dtype = np.uint32)
self.master = master
self.sets = set(sets)
ns = self.ns()
self.surfaces = [set() for i in xrange(ns)]
if surfaces != None:
for k in surfaces.keys():
self.surfaces[k].add( surfaces[k] )
def __repr__(self):
return "#ELEMENT " + self.__str__()
def __str__(self):
name = self.type()
conn = ",".join([str(l) for l in self.conn])
return "{0}({1}), sets={2}, surf={3}".format(name, conn,
self.sets,
self.surfaces)
def edges(self):
"""
Returns the connectivity of the edges.
"""
return self.conn[self._edges]
def faces(self):
"""
Returns the faces of a volumic element, None otherwise.
"""
if self._space == 3:
return np.array([ self.conn[f] for f in self._faces_conn])
def simplex_conn(self):
"""
Returns the connectivity of the simplexes forming the element.
"""
return np.array([ self.conn[f] for f in self._simplex_conn])
def type(self):
return self.__class__.__name__
def ns(self):
if self._space == 1: n = 1
if self._space == 2: n = len(self._edges)
if self._space == 3: n = len(self._faces_conn)
return n
def extrude(self, offset, layer):
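    """
    Returns this element extruded to the layer above as a new element of the
    _extrude_as type, or None if the element type is not extrudable.
    """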
if self._extrudable:
newtype = self._extrude_as
oldconn = self.conn
newconn = np.concatenate([
oldconn + offset * layer,
oldconn + offset * (layer + 1)])[self._extrude_order]
return globals()[newtype](conn = newconn, sets = self.sets)
def collapsed(self):
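    """
    Returns the number of duplicated node labels in the connectivity (0 if the element is not collapsed).
    """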
return len(self.conn) - len(set(self.conn))
def collapsed_faces(self):
if self._space == 3:
faces = [self.conn[c] for c in self._faces_conn]
return np.array([ len(f) - len(set(f)) for f in faces ])
def collapsed_edges(self):
if self._space >= 2:
edges = self.conn[self._edges]
return np.array([ len(e) - len(set(e)) for e in edges ])
def simplex_decomposition(self):
conn = self.conn
if self._space == 3:
simplices = self.simplex_conn()
simplices2 = []
for i in range(len(simplices)):
simplex = simplices[i]
if (len(set(simplex)) == 4):
if tetra_volume([self.master.nodes[l].coords for l in simplex]) > 0.:
simplices2.append(simplices[i])
return [Tetra4(simplex) for simplex in simplices2]
"""
points = [self.master.nodes[l].coords for l in conn]
tetras = Delaunay(points).simplices
tetras2 = []
for i in xrange(len(tetras)):
if tetra_volume([points[j] for j in tetras[i]]) < 0.:
t = np.array([tetras[i][j] for j in [1, 0, 2, 3]])
else:
t = np.array([tetras[i][j] for j in [0, 1, 2, 3]])
return [Tetra4(conn[t]) for t in tetras]
"""
if self._space == 2:
if self.type() == "Tri3":
if len(conn) == 3:
return [self]
if self.type() == "Quad4":
if len(conn) == 4:
return [Tri3(self.conn[c]) for c in [[0, 1, 2], [1, 2, 3]]]
if len(conn) == 3:
count = np.array([(self.conn == i).sum() for i in conn])
rind = conn[np.where(count == 2)[0][0]]
rconn = self.conn.copy()
for i in range(4):
if (rconn[1] == rind) and (rconn[-1] == rind):
return [Tri3(rconn[[0,1,2]])]
rconn = np.roll(rconn, 1)
def clean_connectivity(self):
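    """
    Returns a list of valid elements replacing this one: [self] if the
    connectivity has no repeated nodes, its simplex decomposition otherwise.
    """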
if self.collapsed() == 0:
return [self]
else:
return self.simplex_decomposition()
  def node_set_to_surface(self, nodesetlabel, surfacelabel):
    nodelabels = set([k for k in self.master.nodes.keys()
                      if nodesetlabel in self.master.nodes[k].sets])
    for i in xrange(self.ns()):
      if self._space == 3:
        surfconn = self.conn[self._faces_conn[i]]
        # TO be completed
        if nodelabels.issuperset(surfconn):
          self.surfaces[i].add(surfacelabel)
def volume(self, add = True):
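    """
    Returns the volume (3D) or area (2D) of the element, summed over its
    simplex decomposition, or the per-simplex values if add is False.
    """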
vertices = np.array([self.master.nodes[l].coords for l in self.conn ])
simplices = vertices[self._simplex_conn]
if self._space == 3:
v = np.array([tetra_volume(simplex) for simplex in simplices])
if add: v = v.sum()
return v
if self._space == 2:
v = np.array([tri_area(simplex) for simplex in simplices])
if add: v = v.sum()
return v
def centroid(self):
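    """
    Returns the centroid of the element as the volume (or area) weighted
    average of the centroids of its simplices.
    """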
vertices = np.array([self.master.nodes[l].coords for l in self.conn ])
simplices = vertices[self._simplex_conn]
centroids = simplices.mean(axis = 1)
volumes = self.volume(add = False)[:,None]
return (centroids * volumes).sum(axis = 0) / volumes.sum()
class Line2(Element):
"""
A 1D 2 nodes line.
"""
_nvert = 2
_space = 1
_extrudable = True
_extrude_as = "Quad4"
_extrude_order = np.array([0, 1, 3, 2])
_simplex_conn = np.array([[0, 1]])
class Tri3(Element):
"""
A 2D 3 noded triangular element
"""
_nvert = 3
_space = 2
_edges = np.array([[0, 1],
[1, 2],
[2, 0]])
_extrudable = True
_extrude_as = "Prism6"
_extrude_order = np.array([0, 1, 2, 3, 4, 5])
_simplex_conn = np.array([[0, 1, 2]])
class Quad4(Element):
"""
A 2D 4 noded quadrangular element
"""
_nvert = 4
_space = 2
_edges = np.array([[0, 1],
[1, 2],
[2, 3],
[3, 0]])
_extrudable = True
_extrude_as = "Hexa8"
_extrude_order = np.array([0, 1, 2, 3, 4, 5, 6, 7])
_simplex_conn = np.array([[0, 1, 3],
[1, 2, 3]])
class Tetra4(Element):
"""
A 3D 4 noded tetrahedral element
"""
_nvert = 4
_space = 3
_faces_conn = np.array([[0, 1, 2],
[0, 3, 1],
[1, 3, 2],
[2, 3, 0]])
_faces_type = ["Tri3", "Tri3", "Tri3", "Tri3"]
_edges = np.array([[0, 1],
[1, 2],
[2, 0],
[0, 3],
[1, 3],
[2, 3]])
  _simplex_conn = np.array([[0, 1, 2, 3]])
def clean_connectivity(self):
if self.collapsed():
return None
else:
return [self]
class Pyra5(Element):
"""
A 3D 5 noded pyramidal element
"""
_nvert = 5
_space = 3
_faces_conn = np.array([[0, 1, 2, 3],
[0, 1, 4],
[1, 2, 4],
[2, 3, 4],
[3, 0, 4]])
_faces_type = ["Quad4", "Tri3", "Tri3", "Tri3", "Tri3"]
_edges = np.array([[0, 1],
[1, 2],
[2, 3],
[3, 0],
[0, 4],
[1, 4],
[2, 4],
[3, 4]])
_simplex_conn = np.array([[0, 1, 3, 4],
[1, 2, 3, 4]])
class Prism6(Element):
"""
A 3D 6 noded prismatic element
"""
_nvert = 6
_space = 3
_faces_conn = np.array([[0, 1, 2],
[3, 5, 4],
[0, 3, 4, 1],
[1, 4, 5, 2],
[2, 5, 3, 0]])
_faces_type = ["Tri3", "Tri3", "Quad4", "Quad4", "Quad4"]
_edges = np.array([[0, 1],
[1, 2],
[2, 0],
[3, 4],
[4, 5],
[5, 3],
[0, 3],
[1, 4],
[2, 5]])
_simplex_conn = np.array([[0, 1, 2, 3],
[1, 2, 3, 4],
[2, 3, 4, 5]])
class Hexa8(Element):
"""
A 3D 8 noded hexahedric element
"""
_nvert = 8
_space = 3
_faces_conn = np.array([[0, 1, 2, 3],
[4, 7, 6, 5],
[0, 4, 5, 1],
[1, 5, 6, 2],
[2, 6, 7, 3],
[3, 7, 4, 0]])
_faces_type = ["Quad4",
"Quad4",
"Quad4",
"Quad4",
"Quad4",
"Quad4"]
_edges = np.array([[0, 1],
[1, 2],
[2, 3],
[3, 0],
[4, 5],
[5, 6],
[6, 7],
[7, 4],
[0, 4],
[1, 5],
[2, 6],
[3, 7]])
_simplex_conn = np.array([[0, 1, 3, 4],
[1, 2, 3, 4],
[3, 2, 7, 4],
[2, 6, 7, 4],
[1, 5, 2, 4],
[2, 5, 6, 4]])
#-------------------------------------------------------------------------------
# Mesh & related stuff
class Mesh(object):
def __init__(self, nodes = {}, elements = {}):
self.nodes = MasterDict(self)
for k, v in nodes.iteritems(): self.nodes[k] = v
self.elements = MasterDict(self)
for k, v in elements.iteritems(): self.elements[k] = v
def __repr__(self):
nn = len(self.nodes.values())
ne = len(self.elements.values())
return "#MESH: {0} nodes / {1} elements".format(nn, ne)
def __str__(self):
nodes, elements = self.nodes, self.elements
nk = sorted(nodes.keys())
ns = "\n".join( ["{0} {1}".format(k, str(nodes[k])) for k in nk])
ek = sorted(elements.keys())
es = "\n".join( ["{0} {1}".format(k, str(elements[k])) for k in ek])
"""
nsets, esets = self.nsets, self.esets
nsk = sorted(nsets.keys())
nss = "\n".join( ["{0} {1}".format(k, str(nsets[k])) for k in nsk])
esk = sorted(esets.keys())
print esk
ess = "\n".join( ["{0} {1}".format(k, str(esets[k])) for k in esk])
"""
return "MESH:\n*NODES:\n{0}\n*ELEMENTS:\n{1}".format(ns, es)
"""
def _add_set(self, kind, key, labels):
if kind == "nset": target = self.nodes
if kind == "eset": target = self.elements
for label in labels:
target[label].sets.add(key)
def add_nset(self, *args, **kwargs):
self._add_set(kind = "nset", *args, **kwargs)
def add_eset(self, *args, **kwargs):
self._add_set(kind = "eset", *args, **kwargs)
"""
def export(path):
return
def load(path):
return
def extrude(self, translation, layers):
translation = np.array(translation, dtype = np.float64)[:3]
newmesh = Mesh()
# Nodes:
node_offset = max(self.nodes.keys())
for l, n in self.nodes.iteritems():
for j in xrange(layers+1):
newnode = Node(coords = n.coords + translation * float(j) / layers,
sets = n.sets)
newmesh.nodes[l + j * node_offset] = newnode
# Elements:
element_offset = max(self.elements.keys())
for l, e in self.elements.iteritems():
for layer in xrange(layers):
newelement = e.extrude(offset = node_offset, layer = layer)
if newelement != None:
newmesh.elements[l + layer * element_offset] = newelement
return newmesh
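  # Illustrative use of extrude (a sketch only; the variable names and values
  # below are assumptions): extrude a 2D surface mesh into a solid along z
  # with 4 element layers.
  #   solid = surface_mesh.extrude(translation = [0., 0., 1.], layers = 4)
  # Each node is duplicated layers+1 times along the translation vector and
  # every extrudable element yields one new element per layer.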
def copy(self):
return copy.deepcopy(self)
def nodes_to_array(self):
labels = np.array([k for k in self.nodes.keys()])
n = len(labels)
p = np.empty((n,3))
for i in range(n):
p[i] = self.nodes[labels[i]].coords
return labels , p
def transform(self, transformation):
labels, p = self.nodes_to_array()
n = len(labels)
x, y, z = p.transpose()
newcoords = np.asarray(transformation(x, y, z),
dtype = np.float64).transpose()
newmesh = self.copy()
for i in range(n):
newmesh.nodes[labels[i]].coords = newcoords[i]
return newmesh
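  # Illustrative use of transform (a sketch only): the callable receives the
  # x, y, z coordinate arrays and must return the transformed triple.
  #   shear = lambda x, y, z: (x + 0.1 * z, y, z)
  #   sheared_mesh = mesh.transform(shear)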
def overlapping_nodes(self, crit_dist = 1.e-6):
def calc_row_idx(k, n):
return int(math.ceil((1/2.) * (- (-8*k + 4 *n**2 -4*n - 7)**0.5 + 2*n -1) - 1))
def elem_in_i_rows(i, n):
return i * (n - 1 - i) + (i*(i + 1))/2
def calc_col_idx(k, i, n):
return int(n - elem_in_i_rows(i + 1, n) + k)
def condensed_to_square(k, n):
i = calc_row_idx(k, n)
j = calc_col_idx(k, i, n)
return np.array([i, j])
labels, points = self.nodes_to_array()
dist = distance.pdist(points)
n = len(labels)
loc = np.where(dist<=crit_dist)[0]
pairs = [labels[condensed_to_square(l, n)] for l in loc]
mapping = dict(zip(labels, labels))
for pair in pairs:
pair.sort()
mapping[pair[1]] = min(mapping[pair[1]], pair[0])
return mapping
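  # Typical merge workflow (illustrative sketch): find nodes closer than
  # crit_dist, then rewrite the connectivity through the returned mapping.
  #   mapping = mesh.overlapping_nodes(crit_dist = 1.e-6)
  #   merged  = mesh.merge_nodes(mapping)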
def merge_nodes(self, mapping):
newmesh = self.copy()
for elem in newmesh.elements.values():
conn = elem.conn
for i in range(len(conn)):
conn[i] = mapping[conn[i]]
for label in newmesh.nodes.keys():
if mapping[label] != label:
del newmesh.nodes[label]
return newmesh
def simplex_decomposition(self):
"""
Returns a list of new simplex elements sharing the same vertices with the
    original one with a viable connectivity.
"""
newmesh = self.copy()
newmesh.elements.clear()
label = 1
for el in self.elements.values():
simplices = el.simplex_decomposition()
if simplices != None:
for simplex in simplices:
newmesh.elements[label] = simplex
label += 1
return newmesh
def clean_connectivity(self):
newmesh = self.copy()
newmesh.elements.clear()
label = 1
for el in self.elements.values():
newels = el.clean_connectivity()
if newels != None:
for newel in newels:
newmesh.elements[label] = newel
label += 1
return newmesh
#-------------------------------------------------------------------------------
# Parsers & writers
# Abaqus INP
def parseInp(path):
# Some useful tools
def lineInfo(line):
out = {"type": "data"}
if line[0] == "*":
if line[1] == "*":
out["type"] = "comment"
out["text"] = line[2:]
else:
out["type"] = "command"
words = line[1:].split(",")
out["value"] = words[0].strip()
out["options"] = {}
for word in words[1:]:
key, value = [s.strip() for s in word.split("=")]
out["options"][key] = value
return out
def elementMapper(inpeltype):
if inpeltype == "t3d2": return "Line2"
if inpeltype[:3] in ["cps", "cpe", "cax"]:
if inpeltype[3] == "3": return "Tri3"
if inpeltype[3] == "4": return "Quad4"
if inpeltype[:3] in ["c3d"]:
if inpeltype[3] == "4": return "Tetra4"
if inpeltype[3] == "5": return "Pyra5"
if inpeltype[3] == "6": return "Prism6"
if inpeltype[3] == "8": return "Hexa8"
# Output mesh
m = Mesh()
# File preprocessing
lines = np.array([l.strip().lower() for l in open(path).readlines()])
# Data processing
env = None
setlabel = None
for line in lines:
d = lineInfo(line)
if d["type"] == "command":
env = d["value"]
# Nodes
if env == "node":
opt = d["options"]
currentset = None
if "nset" in opt.keys(): currentset = opt["nset"]
# Elements
if env == "element":
opt = d["options"]
eltype = elementMapper(opt["type"])
currentset = None
if "elset" in opt.keys(): currentset = opt["elset"]
# Nsets
if env == "nset":
opt = d["options"]
currentset = opt["nset"]
# Elsets
if env == "elset":
opt = d["options"]
currentset = opt["elset"]
if d["type"] == "data":
words = line.strip().split(",")
if env == "node":
label = int(words[0])
coords = np.array( [float(w) for w in words[1:]], dtype = np.float64 )
if currentset == None:
m.nodes[label] = Node(coords = coords)
else:
m.nodes[label] = Node(coords = coords, sets = set([currentset]))
if env == "element":
label = int(words[0])
conn = np.array( [int(w) for w in words[1:]], dtype = np.int32)
if currentset == None:
m.elements[label] = globals()[eltype](conn = conn)
else:
m.elements[label] = globals()[eltype](conn = conn, sets = set([currentset]))
if env == "nset":
[m.nodes[int(w)].sets.add(currentset) for w in words if len(w) != 0]
if env == "elset":
[m.elements[int(w)].sets.add(currentset) for w in words if len(w) != 0]
return m
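# Example round trip (illustrative only; the file names and the element-type
# mapping passed to writeInp are assumptions made for this sketch):
#   mesh = parseInp("part.inp")
#   writeInp(mesh, {"Hexa8": "C3D8", "Prism6": "C3D6"}, path = "part_out.inp")
#   writeMsh(mesh, path = "part.msh")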
def writeInp(mesh, mapping, path = None):
def exportset(s, d):
out = ""
labels = [str(k) for k,v in d.iteritems() if s in v.sets]
for i in xrange(len(labels)):
out += labels[i]
if (i+1)%10 != 0:
out += ", "
else:
out += ",\n"
if out[-1] != "\n": out += "\n"
return out
# Nodes
out = "*NODE\n"
for label, node in mesh.nodes.iteritems():
out += "{0}, {1}\n".format(label, ", ".join([ str(c) for c in node.coords]))
# Elements
etypes = set([e.type() for e in mesh.elements.values()])
for etype in etypes:
out +="*ELEMENT, TYPE={0}\n".format( mapping[etype])
for label, elem in mesh.elements.iteritems():
if elem.type() == etype:
out += "{0}, {1}\n".format(label, ", ".join([ str(c) for c in elem.conn]))
# Sets
nsets = set().union(*[n.sets for n in mesh.nodes.values()])
for s in nsets:
out += "*NSET, NSET={0}\n".format(s) + exportset(s , mesh.nodes)
esets = set().union(*[e.sets for e in mesh.elements.values()])
for s in esets:
out += "*ELSET, ELSET={0}\n".format(s) + exportset(s , mesh.elements)
if path == None:
return out
else:
open(path, "w").write(out)
def writeMsh(mesh, path = None):
elementMap = {"Tri3": 2, "Quad4":3, "Tetra4":4, "Hexa8":5, "Prism6":6, "Pyra5": 7}
pattern = """$MeshFormat
2.2 0 8
$EndMeshFormat
$Nodes
{0}
$EndNodes
$Elements
{1}
$EndElements"""
nodeout = "{0}\n".format(len(mesh.nodes.keys()))
nodelist = []
for k in mesh.nodes.keys():
node = mesh.nodes[k]
coords = node.coords
nodelist.append("{0} {1} {2} {3}".format(k, coords[0], coords[1], coords[2]) )
nodeout += "\n".join(nodelist)
elemout = ""
elemout = "{0}\n".format(len(mesh.elements.keys()))
elemlist = []
for k in mesh.elements.keys():
element = mesh.elements[k]
elemlist.append("{0} {1} 1 1 {2}".format(
k,
elementMap[element.__class__.__name__],
" ".join( [str(l) for l in element.conn ] ) ))
elemout += "\n".join(elemlist)
if path == None:
return pattern.format(nodeout, elemout)
else:
open(path, "w").write(pattern.format(nodeout, elemout))
| gpl-2.0 | 6,606,528,650,505,557,000 | 28.973277 | 92 | 0.483412 | false |
isc-projects/forge | tests/dhcpv6/process/test_v6_renew.py | 1 | 20506 | """DHCPv6 Renew"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import references
import misc
@pytest.mark.v6
@pytest.mark.renew
def test_v6_message_renew_reply():
# Testing server ability to perform message exchange RENEW - REPLY
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# <-- REPLY
# correct message RENEW -->
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# IA-NA with suboption IA-Address
#
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::5-3000::55')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid():
# Two clients try to renew address, using the same IA_ID but different Client-ID
misc.test_setup()
srv_control.set_time('renew-timer', 50)
srv_control.set_time('rebind-timer', 60)
srv_control.set_time('preferred-lifetime', 70)
srv_control.set_time('valid-lifetime', 80)
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
    # The client tries to renew an address that is not his.
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'validlft', 0)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
srv_msg.response_check_suboption_content(5, 3, 'validlft', 80)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid_2():
# Two clients try to renew address, using the same IA_ID but different Client-ID
misc.test_setup()
srv_control.set_time('renew-timer', 50)
srv_control.set_time('rebind-timer', 60)
srv_control.set_time('preferred-lifetime', 70)
srv_control.set_time('valid-lifetime', 80)
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
    # The client tries to renew an address that is his.
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
    # Sub-option 5 of option 3 MUST contain the client's own lease 3000::2 with a non-zero validlft.
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
srv_msg.response_check_suboption_content(5, 3, 'validlft', 80)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1', expect_include=False)
references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid_expired():
# Two clients try to renew address, using the same IA_ID but different Client-ID
misc.test_setup()
srv_control.set_time('renew-timer', 5)
srv_control.set_time('rebind-timer', 6)
srv_control.set_time('preferred-lifetime', 7)
srv_control.set_time('valid-lifetime', 8)
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.forge_sleep(10, 'seconds')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.renew
def test_v6_message_renew_reply_different_clients_the_same_iaid_expired_2():
# Two clients try to renew address, using the same IA_ID but different Client-ID
misc.test_setup()
srv_control.set_time('renew-timer', 5)
srv_control.set_time('rebind-timer', 6)
srv_control.set_time('preferred-lifetime', 7)
srv_control.set_time('valid-lifetime', 8)
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::2')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:01')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.forge_sleep(10, 'seconds')
misc.test_procedure()
    # The client tries to renew an address that is his.
srv_msg.client_sets_value('Client', 'ia_id', 666)
srv_msg.client_sets_value('Client', 'DUID', '00:03:00:01:f6:f5:f4:f3:f2:22')
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(1)
srv_msg.response_check_include_option(2)
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
    # Sub-option 5 of option 3 MUST contain the client's own lease 3000::2 with a non-zero validlft.
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::2')
srv_msg.response_check_suboption_content(5, 3, 'validlft', 8)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1', expect_include=False)
references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.renew
def test_v6_message_renew_reply_time_zero():
# Testing server ability to perform message exchange RENEW - REPLY
# In case when we expect that address is not appropriate for the link.
# Message details Client Server
# SOLICIT -->
# <-- ADVERTISE
# REQUEST -->
# Save IA_NA with IA_Addr <-- REPLY
# Reconfigure Server
# SOLICIT -->
# <-- ADVERTISE
# Create leases REQUEST -->
# for the same client <-- REPLY
# Use saved IA_NA RENEW -->
# (proper client ID, IA_NA, but wrong address)
# <-- REPLY
# Pass Criteria:
# REPLY MUST include option:
# client-id
# server-id
# IA-NA with suboption IA-Address with validlft set to 0.
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::66-3000::66')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.client_save_option('IA_NA')
misc.reconfigure()
srv_control.config_srv_subnet('3000::/64', '3000::100-3000::155')
srv_control.build_and_send_config_files()
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_does_include('Client', 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_copy_option('IA_NA')
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('REQUEST')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
misc.test_procedure()
srv_msg.client_copy_option('server-id')
srv_msg.client_add_saved_option()
srv_msg.client_does_include('Client', 'client-id')
srv_msg.client_send_msg('RENEW')
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', 'REPLY')
srv_msg.response_check_include_option(3)
srv_msg.response_check_option_content(3, 'sub-option', 5)
srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::66')
srv_msg.response_check_suboption_content(5, 3, 'validlft', 0)
references.references_check('RFC')
| isc | 6,372,698,305,668,353,000 | 37.910816 | 91 | 0.664391 | false |
lepisma/quint | quint/quint.py | 1 | 2711 | """
The main code implementing simple matrix Q-learning
"""
import numpy as np
import random
class quint:
"""
The class for creating a Q-learning system
"""
def __init__(self, reward_matrix, gamma, not_allowed_action = -1):
"""
Initializes a learner using the reward matrix
Reward Matrix structure
-----------------------
Columns represent actions
Rows represent states
Values inside represent reward
        The not_allowed_action reward value marks actions that are not allowed in a given state
"""
self.reward_matrix = reward_matrix
self.gamma = gamma
self.not_allowed_action = not_allowed_action
self.q_matrix = np.zeros(reward_matrix.shape) # Setting all q values to 0
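    # Illustrative setup (a sketch; the numbers below are assumptions): a tiny
    # 3-state / 3-action problem where -1 marks forbidden actions and moving
    # to state 2 (the goal) is rewarded with 100.
    #
    #   reward = np.array([[ -1,   0, 100],
    #                      [  0,  -1, 100],
    #                      [ -1,   0, 100]])
    #   learner = quint(reward, gamma = 0.8)
    #   learner.learn(goal_state = 2, iterations = 100)
    #   learner.normalize()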
def learn(self, goal_state, iterations):
"""
Learns the optimum path to goal_state and updates the q_matrix
"""
for x in range(iterations):
initial_state = random.choice(range(self.reward_matrix.shape[0]))
while initial_state != goal_state:
                # Loop until we reach the goal state
actions = self.reward_matrix[initial_state]
initial_action = random.choice(actions)
while initial_action == self.not_allowed_action:
initial_action = random.choice(actions)
initial_action = np.where(actions == initial_action)[0][0]
next_state = self.act(initial_state, initial_action)
# update q matrix
self.q_matrix[initial_state, initial_action] = self.reward_matrix[initial_state, initial_action] + self.gamma * self.max_q(next_state)
initial_state = next_state
def act(self, current_state, action):
"""
Performs action on current state and returns the resulting state
* Assuming action number 'x' takes to state 'x'
* Override this method to implement your own actions
"""
return action
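    # Override sketch (illustrative only): a 1-D corridor where action 0 moves
    # one state left and action 1 moves one state right.
    #
    #   def act(self, current_state, action):
    #       return current_state - 1 if action == 0 else current_state + 1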
def max_q(self, state):
"""
Returns the maximum q value available in the given state considering all action
"""
max_q = 0
actions = self.reward_matrix[state]
for action_id in range(len(actions)):
if actions[action_id] != self.not_allowed_action:
if self.q_matrix[state, action_id] > max_q:
max_q = self.q_matrix[state, action_id]
return max_q
def normalize(self):
"""
Normalizes the q values
"""
max_value = float(self.q_matrix.max())
self.q_matrix /= max_value
    def find_optimum_path(self, state, goal_state):
"""
Returns the actions (and corresponding states) to follow to reach goal_state from given state
"""
actions = []
states = [state]
while state != goal_state:
action = np.where(self.q_matrix[state] == self.max_q(state))[0][0]
actions.append(action)
state = self.act(state, action)
states.append(state)
return actions, states | mit | -6,707,977,200,800,861,000 | 24.828571 | 138 | 0.671708 | false |
Petraea/jsonbot | jsb/lib/rest/server.py | 1 | 9836 | # jsb/socklib/rest/server.py
#
#
## jsb imports
from jsb.utils.exception import handle_exception, exceptionmsg
from jsb.utils.trace import calledfrom
from jsb.lib.persiststate import ObjectState
from jsb.lib.threads import start_new_thread
from jsb.version import version
## basic imports
from SocketServer import BaseServer, ThreadingMixIn
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urllib import unquote_plus
from asyncore import dispatcher
from cgi import escape
import time
import sys
import select
import types
import socket
import logging
## RestServerBase class
class RestServerBase(HTTPServer):
""" REST web server """
allow_reuse_address = True
daemon_thread = True
def start(self):
""" start the REST server. """
self.name = calledfrom(sys._getframe(0))
self.stop = False
self.running = False
self.handlers = {}
self.webmods = {}
self.state = ObjectState()
self.state.define('whitelistenable', 0)
self.state.define('whitelist', [])
self.state.define('blacklist', [])
self.state.define('disable', [])
self.poll = select.poll()
self.poll.register(self)
start_new_thread(self.serve, ())
def shutdown(self):
""" shutdown the REST server. """
try:
self.stop = True
time.sleep(0.2)
self.server_close()
except Exception, ex: handle_exception()
def serve(self):
""" serving loop. """
logging.warn('rest.server - starting')
time.sleep(1)
while not self.stop:
self.running = True
try: got = self.poll.poll(100)
except Exception, ex: handle_exception()
if got and not self.stop:
try: self.handle_request()
except Exception, ex: handle_exception()
time.sleep(0.01)
self.running = False
logging.warn('rest.server - stopping')
def entrypoint(self, request):
""" check lists whether request should be allowed. """
ip = request.ip
if not self.whitelistenable() and ip in self.blacklist():
logging.warn('rest.server - denied %s' % ip)
request.send_error(401)
return False
if self.whitelistenable() and ip not in self.whitelist():
logging.warn('rest.server - denied %s' % ip)
request.send_error(401)
return False
return True
def whitelistenable(self):
""" enable whitelist? """
return self.state['whitelistenable']
def whitelist(self):
""" return the whitelist. """
return self.state['whitelist']
def blacklist(self):
""" return the black list. """
return self.state['blacklist']
def addhandler(self, path, type, handler):
""" add a web handler """
path = unquote_plus(path)
splitted = []
for i in path.split('/'):
if i: splitted.append(i)
else: splitted.append("/")
splitted = tuple(splitted)
if not self.handlers.has_key(splitted): self.handlers[splitted] = {}
self.handlers[splitted][type] = handler
logging.info('rest.server - %s %s handler added' % (splitted[0], type))
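        # Example registration (hypothetical handler, shown for illustration only):
        #
        #   def hello_handler(server, request):
        #       return 'hello world'
        #
        #   server.addhandler('/hello', 'GET', hello_handler)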
def enable(self, what):
""" enable an path. """
try:
self.state['disable'].remove(what)
logging.info('rest.server - enabled %s' % str(what))
except ValueError: pass
def disable(self, what):
""" disable an path. """
self.state['disable'].append(what)
logging.info('rest.server - disabled %s' % str(what))
def do(self, request):
""" do a request """
path = unquote_plus(request.path.strip())
path = path.split('?')[0]
#if path.endswith('/'): path = path[:-1]
splitted = []
for i in path.split('/'):
if i: splitted.append(i)
else: splitted.append("/")
splitted = tuple(splitted)
logging.warn("rest.server - incoming - %s" % str(splitted))
for i in self.state['disable']:
if i in splitted:
logging.warn('rest.server - %s - denied disabled %s' % (request.ip, i))
request.send_error(404)
return
request.splitted = splitted
request.value = None
type = request.command
try: func = self.handlers[splitted][type]
except (KeyError, ValueError):
try:
                func = self.handlers[splitted[:-1]][type]
request.value = splitted[-1]
except (KeyError, ValueError):
logging.error("rest.server - no handler found for %s" % str(splitted))
request.send_error(404)
return
result = func(self, request)
logging.info('rest.server - %s - result: %s' % (request.ip, str(result)))
return result
def handle_error(self, request, addr):
""" log the error """
ip = request.ip
exctype, excvalue, tb = sys.exc_info()
if exctype == socket.timeout:
logging.warn('rest.server - %s - socket timeout' % (ip, ))
return
if exctype == socket.error:
logging.warn('rest.server - %s - socket error: %s' % (ip, excvalue))
return
exceptstr = exceptionmsg()
logging.warn('rest.server - %s - error %s %s => %s' % (ip, exctype, excvalue, exceptstr))
## Mixin classes
class RestServer(ThreadingMixIn, RestServerBase):
pass
class RestServerAsync(RestServerBase, dispatcher):
pass
## RestReqeustHandler class
class RestRequestHandler(BaseHTTPRequestHandler):
""" timeserver request handler class """
def setup(self):
""" called on each incoming request. """
BaseHTTPRequestHandler.setup(self)
self.ip = self.client_address[0]
self.name = self.ip
self.size = 0
def writeheader(self, type='text/plain'):
""" write headers to the client. """
self.send_response(200)
self.send_header('Content-type', '%s; charset=%s ' % (type,sys.getdefaultencoding()))
self.send_header('Server', version)
self.end_headers()
def sendresult(self):
""" dispatch a call. """
try:
result = self.server.do(self)
if not result: return
self.size = len(result)
except Exception, ex:
handle_exception()
self.send_error(501)
return
self.writeheader()
self.wfile.write(result)
self.wfile.close()
def handle_request(self):
""" handle a REST request. """
if not self.server.entrypoint(self): return
self.sendresult()
do_DELETE = do_PUT = do_GET = do_POST = handle_request
def log_request(self, code):
""" log the request """
try: ua = self.headers['user-agent']
except: ua = "-"
try: rf = self.headers['referer']
except: rf = "-"
if hasattr(self, 'path'):
logging.debug('rest.server - %s "%s %s %s" %s %s "%s" "%s"' % (self.address_string(), self.command, self.path, self.request_version, code, self.size, rf, ua))
else:
logging.debug('rest.server - %s "%s %s %s" %s %s "%s" "%s"' % (self.address_string(), self.command, "none", self.request_version, code, self.size, rf, ua))
## secure classes .. not working yet
class SecureRestServer(RestServer):
def __init__(self, server_address, HandlerClass, keyfile, certfile):
from OpenSSL import SSL
BaseServer.__init__(self, server_address, HandlerClass)
ctx = SSL.Context(SSL.SSLv23_METHOD)
ctx.set_options(SSL.OP_NO_SSLv2)
logging.warn("rest.server - loading private key from %s" % keyfile)
ctx.use_privatekey_file (keyfile)
logging.warn('rest.server - loading certificate from %s' % certfile)
ctx.use_certificate_file(certfile)
logging.info('rest.server - creating SSL socket on %s' % str(server_address))
self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
self.socket_type))
self.server_bind()
self.server_activate()
class SecureAuthRestServer(SecureRestServer):
def __init__(self, server_address, HandlerClass, chain, serverkey, servercert):
from OpenSSL import SSL
BaseServer.__init__(self, server_address, HandlerClass)
ctx = SSL.Context(SSL.SSLv23_METHOD)
logging.warn("rest.server - loading private key from %s" % serverkey)
ctx.use_privatekey_file (serverkey)
logging.warn('rest.server - loading certificate from %s' % servercert)
ctx.use_certificate_file(servercert)
logging.warn('rest.server - loading chain of certifications from %s' % chain)
ctx.set_verify_depth(2)
ctx.load_client_ca(chain)
#ctx.load_verify_locations(chain)
logging.info('rest.server - creating SSL socket on %s' % str(server_address))
callback = lambda conn,cert,errno,depth,retcode: retcode
ctx.set_verify(SSL.VERIFY_FAIL_IF_NO_PEER_CERT | SSL.VERIFY_PEER, callback)
ctx.set_session_id('jsb')
self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
self.socket_type))
self.server_bind()
self.server_activate()
class SecureRequestHandler(RestRequestHandler):
def setup(self):
self.connection = self.request._sock
self.request._sock.setblocking(1)
self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
self.wfile = socket._fileobject(self.request, "wb", self.rbufsize)
| mit | 3,100,006,748,161,896,000 | 34.128571 | 170 | 0.590077 | false |
jkettleb/iris | lib/iris/tests/unit/experimental/regrid/test__CurvilinearRegridder.py | 1 | 4309 | # (C) British Crown Copyright 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Unit tests for :class:`iris.experimental.regrid._CurvilinearRegridder`."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import mock
import numpy as np
from iris.experimental.regrid import _CurvilinearRegridder as Regridder
from iris.tests.stock import global_pp, lat_lon_cube
RESULT_DIR = ('analysis', 'regrid')
class Test___init__(tests.IrisTest):
def setUp(self):
self.ok = lat_lon_cube()
self.bad = np.ones((3, 4))
self.weights = np.ones(self.ok.shape, self.ok.dtype)
def test_bad_src_type(self):
with self.assertRaisesRegexp(TypeError, "'src_grid_cube'"):
Regridder(self.bad, self.ok, self.weights)
def test_bad_grid_type(self):
with self.assertRaisesRegexp(TypeError, "'target_grid_cube'"):
Regridder(self.ok, self.bad, self.weights)
class Test___call__(tests.IrisTest):
def setUp(self):
self.func = ('iris.experimental.regrid.'
'regrid_weighted_curvilinear_to_rectilinear')
self.ok = global_pp()
y = self.ok.coord('latitude')
x = self.ok.coord('longitude')
self.ok.remove_coord('latitude')
self.ok.remove_coord('longitude')
self.ok.add_aux_coord(y, 0)
self.ok.add_aux_coord(x, 1)
self.weights = np.ones(self.ok.shape, self.ok.dtype)
def test_same_src_as_init(self):
# Modify the names so we can tell them apart.
src_grid = self.ok.copy()
src_grid.rename('src_grid')
target_grid = self.ok.copy()
target_grid.rename('TARGET_GRID')
regridder = Regridder(src_grid, target_grid, self.weights)
with mock.patch(self.func,
return_value=mock.sentinel.regridded) as clr:
result = regridder(src_grid)
clr.assert_called_once_with(src_grid, self.weights, target_grid)
self.assertIs(result, mock.sentinel.regridded)
def test_diff_src_from_init(self):
# Modify the names so we can tell them apart.
src_grid = self.ok.copy()
src_grid.rename('SRC_GRID')
target_grid = self.ok.copy()
target_grid.rename('TARGET_GRID')
regridder = Regridder(src_grid, target_grid, self.weights)
src = self.ok.copy()
src.rename('SRC')
with mock.patch(self.func,
return_value=mock.sentinel.regridded) as clr:
result = regridder(src)
clr.assert_called_once_with(src, self.weights, target_grid)
self.assertIs(result, mock.sentinel.regridded)
class Test___call____bad_src(tests.IrisTest):
def setUp(self):
self.ok = global_pp()
y = self.ok.coord('latitude')
x = self.ok.coord('longitude')
self.ok.remove_coord('latitude')
self.ok.remove_coord('longitude')
self.ok.add_aux_coord(y, 0)
self.ok.add_aux_coord(x, 1)
weights = np.ones(self.ok.shape, self.ok.dtype)
self.regridder = Regridder(self.ok, self.ok, weights)
def test_bad_src_type(self):
with self.assertRaisesRegexp(TypeError, 'must be a Cube'):
self.regridder(np.ones((3, 4)))
def test_bad_src_shape(self):
with self.assertRaisesRegexp(ValueError,
'not defined on the same source grid'):
self.regridder(self.ok[::2, ::2])
if __name__ == '__main__':
tests.main()
| lgpl-3.0 | 5,790,575,823,600,031,000 | 35.516949 | 77 | 0.640288 | false |
lidavidm/sympy | sympy/liealgebras/type_b.py | 1 | 4610 | from __future__ import print_function, division
from sympy.core import Set, Dict, Tuple
from .cartan_type import Standard_Cartan
from sympy.matrices import eye
class TypeB(Standard_Cartan):
def __init__(self, n):
assert n >= 2
Standard_Cartan.__init__(self, "B", n)
def dimension(self):
"""
Return the dimension of the vector space
V underlying the Lie algebra
Example
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.dimension()
3
"""
return self.n
def basic_root(self, i, j):
"""
This is a method just to generate roots
        with a 1 in the ith position and a -1
        in the jth position.
"""
root = [0]*self.n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""
Every lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
In B_n the first n-1 simple roots are the same as the
roots in A_(n-1) (a 1 in the ith position, a -1 in
the (i+1)th position, and zeroes elsewhere). The n-th
simple root is the root with a 1 in the nth position
and zeroes elsewhere.
This method returns the ith simple root for the B series.
Example
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("B3")
>>> c.simple_root(2)
[0, 1, -1]
"""
n = self.n
if i < n:
return self.basic_root(i-1, i)
else:
root = [0]*self.n
root[n-1] = 1
return root
def positive_roots(self):
"""
This method generates all the positive roots of
        B_n. This is half of all of the roots of B_n;
by multiplying all the positive roots by -1 we
get the negative roots.
Example
======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
return posroots
def roots(self):
"""
        Returns the total number of roots for B_n
"""
n = self.n
return 2*(n**2)
def cartan_matrix(self):
"""
Returns the Cartan matrix for B_n.
        The Cartan matrix for a Lie algebra is
generated by assigning an ordering to the simple
roots, (alpha[1], ...., alpha[l]). Then the ijth
entry of the Cartan matrix is (<alpha[i],alpha[j]>).
Example
=======
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('B4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -2],
[ 0, 0, -1, 2]])
"""
n = self.n
m = 2* eye(n)
i = 1
while i < n-1:
m[i, i+1] = -1
m[i, i-1] = -1
i += 1
m[0, 1] = -1
m[n-2, n-1] = -2
m[n-1, n-2] = -1
return m
def basis(self):
"""
Returns the number of independent generators of B_n
"""
n = self.n
return (n**2 - n)/2
def lie_algebra(self):
"""
Returns the Lie algebra associated with B_n
"""
n = self.n
return "so(" + str(2*n) + ")"
def dynkin_diagram(self):
n = self.n
diag = "---".join("0" for i in range (1, n)) + "=>=0\n"
diag += " ".join(str(i) for i in range (1, n+1))
return diag
| bsd-3-clause | 1,039,584,278,923,336,400 | 25.959064 | 80 | 0.480477 | false |
ruslanosipov/space | lib/ui.py | 1 | 6174 | MSG_COLORS = {
0: (100, 255, 100),
1: (100, 100, 100),
2: (200, 200, 200),
3: (200, 0, 0)}
UI_COLOR = (100, 100, 100)
SECONDARY_UI_COLOR = (165, 42, 42)
class UI(object):
global MSG_COLORS
global UI_COLOR
def __init__(self):
self.view_field = ''
self.colors = {}
self.chat_log = []
self.prompt = ''
self.evt_mode_desc = 24 * ' '
self.bottom_status_bar = ''
ver = 'v0.3.1-alpha'
self.top_status_bar = ' ' * (80 - len(ver)) + ver
self.target = None
self.look_pointer = None
self.equipment = None
self.inventory = None
self.is_pilot_mode = False
self.oscillator = 0
self.mode = 'chat'
def compose(self):
self.oscillator = self.oscillator + 1 if self.oscillator < 50 else 0
top_bar = self._compose_top_status_bar()
left_pane = self._compose_view_pane()
right_pane = getattr(self, '_compose_%s_pane' % self.mode)()
bottom_bar = self._compose_bottom_status_bar()
return top_bar, left_pane, right_pane, bottom_bar
#--------------------------------------------------------------------------
# composing panes
def _compose_bottom_status_bar(self):
bottom_status_bar = self.evt_mode_desc + self.bottom_status_bar
return (bottom_status_bar, UI_COLOR)
def _compose_chat_pane(self):
pane = []
for y in xrange(0, len(self.view_field)):
if len(self.chat_log) >= y + 1:
msg, msg_type = self.chat_log[y]
pane.append([(' ' + msg, MSG_COLORS[msg_type])])
elif y == len(self.view_field) - 1:
pane.append([(' > ' + self.prompt, UI_COLOR)])
else:
pane.append([])
return pane
def _compose_equipment_pane(self):
pane = []
y = 0
for k, v in self.equipment.items():
pane.append([(' %s: %s' % (k, v), UI_COLOR)])
y += 1
n = len(self.view_field)
for y in xrange(y, n):
if y == n - 1:
pane.append([(' i', SECONDARY_UI_COLOR),
('nventory ', UI_COLOR),
('u', SECONDARY_UI_COLOR),
('nequip ', UI_COLOR),
('Q', SECONDARY_UI_COLOR),
('uit', UI_COLOR)])
else:
pane.append([])
return pane
def _compose_inventory_pane(self):
pane = []
y = 0
if not len(self.inventory):
pane.append([('Your inventory is empty...', UI_COLOR)])
y += 1
for item, qty in self.inventory.items():
if qty > 1:
item = "%s (%d)" % (item, qty)
pane.append([(item, UI_COLOR)])
y += 1
n = len(self.view_field)
for y in xrange(y, n):
if y == n - 1:
pane.append([(' d', SECONDARY_UI_COLOR),
('rop ', UI_COLOR),
('e', SECONDARY_UI_COLOR),
('quip ', UI_COLOR),
('E', SECONDARY_UI_COLOR),
('quipment ', UI_COLOR),
('Q', SECONDARY_UI_COLOR),
('uit', UI_COLOR)])
else:
pane.append([])
return pane
def _compose_top_status_bar(self):
return (self.top_status_bar, UI_COLOR)
def _compose_view_pane(self):
pane = []
for y, line in enumerate(self.view_field):
l = []
for x, char in enumerate(line):
char, color = self._draw_element(x, y, char)
if len(l) and l[-1][1] == color:
l[-1][0] += char
else:
l.append([char, color])
pane.append(l)
return pane
def _draw_element(self, x, y, char):
if (x, y) in self.colors.keys():
color = self.colors[(x, y)]
else:
color = self.default_colors[char]
if self.oscillator < 25:
if (x, y) == self.target:
char = 'x'
color = self.default_colors[char]
elif (x, y) == self.look_pointer:
char = 'l'
color = self.default_colors[char]
return char, color
#--------------------------------------------------------------------------
# accessors
def get_equipment(self):
return self.equipment
def get_inventory(self):
return self.inventory
def get_prompt(self):
return self.prompt
def set_bottom_status_bar(self, bottom_status_bar):
self.bottom_status_bar = bottom_status_bar
def set_chat_log(self, chat_log):
self.chat_log = chat_log
def set_colors(self, colors):
self.colors = colors
def set_default_colors(self, int_colors, ext_colors):
self.default_colors = self.int_colors = int_colors
self.ext_colors = ext_colors
def set_pilot_mode(self):
if self.is_pilot_mode:
self.default_colors = self.int_colors
self.is_pilot_mode = False
else:
self.default_colors = self.ext_colors
self.is_pilot_mode = True
def set_evt_mode_desc(self, evt_mode_desc):
self.evt_mode_desc = evt_mode_desc + (24 - len(evt_mode_desc)) * ' '
def set_equipment(self, equipment):
self.equipment = equipment
def set_inventory(self, inventory):
self.inventory = inventory
def set_look_pointer(self, look_pointer):
self.look_pointer = look_pointer
self.oscillator = 0
def set_mode(self, mode='chat'):
self.mode = mode
def set_prompt(self, prompt):
self.prompt = prompt
def set_target(self, target):
self.target = target
self.oscillator = 0
def set_top_status_bar(self, top_status_bar):
self.top_status_bar = top_status_bar
def set_view_field(self, view_field):
self.view_field = view_field
| gpl-2.0 | 5,297,271,675,530,443,000 | 30.181818 | 79 | 0.479268 | false |
ActiveState/code | recipes/Python/231507_Interval_Test_Case/recipe-231507.py | 1 | 1227 | import unittest
class IntervalTestCase( unittest.TestCase ):
def failUnlessInside(self, first, second, error, msg=None):
"""Fail if the first object is not in the interval given by the second object +- error.
"""
if (first > second + error) or (first < second - error):
raise self.failureException, \
(msg or '%s != %s (+-%s)' % (`first`, `second`, `error`))
def failIfInside(self, first, second, error, msg=None):
"""Fail if the first object is in the interval given by the second object +- error.
"""
if (first <= second + error) and (first >= second - error):
raise self.failureException, \
(msg or '%s == %s (+-%s)' % (`first`, `second`, `error`))
assertInside = failUnlessInside
assertNotInside = failIfInside
class IntegerArithmenticTestCase( IntervalTestCase ):
def testAdd(self): ## test method names begin 'test*'
self.assertInside((1 + 2), 3.3, 0.5)
self.assertInside(0 + 1, 1.1, 0.01)
def testMultiply(self):
self.assertNotInside((0 * 10), .1, .05)
self.assertNotInside((5 * 8), 40.1, .2)
if __name__ == '__main__':
unittest.main()
| mit | 5,054,456,564,763,109,000 | 37.34375 | 95 | 0.581092 | false |
zepto/musio | musio/espeak_file.py | 1 | 9322 | #!/usr/bin/env python
# vim: sw=4:ts=4:sts=4:fdm=indent:fdl=0:
# -*- coding: UTF8 -*-
#
# A TTS module using the espeak library.
# Copyright (C) 2012 Josiah Gordon <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" A TTS module using the espeak library.
"""
from functools import wraps as functools_wraps
from sys import stderr as sys_stderr
from .io_base import AudioIO, io_wrapper
from .io_util import msg_out
from .import_util import LazyImport
# from .espeak import _espeak
_espeak = LazyImport('espeak._espeak', globals(), locals(), ['_espeak'], 1)
def issupported(filename, *args):
""" issupported(filename) -> Returns True if file is supported else False.
"""
import mimetypes
# Initialize mimetypes.
mimetypes.init()
# Get the mime type of filename.
mimetype, encoding = mimetypes.guess_type(filename)
    # If no mimetype was found then filename is not supported.
if not mimetype:
return False
    # Files containing text are supported.
return True if 'text' in mimetype else False
__supported_dict = {
'ext': ['.txt'],
'issupported': issupported,
'handler': 'EspeakFile',
# 'default': True,
'dependencies': {
'ctypes': ['espeak'],
'python': []
}
}
class EspeakFile(AudioIO):
""" Espeak wrapper for text to speech synthesis
"""
# Valid bit depths.
_valid_depth = (16,)
# Only reading is supported
_supported_modes = 'r'
def __init__(self, filename: str, mode: str='r', voice: str='en-us',
**kwargs):
""" Espeak tts object.
"""
# Initialize espeak and get the sample rate.
output = _espeak.AUDIO_OUTPUT_RETRIEVAL
rate = self._err_check(_espeak.espeak_Initialize(output, 0, None,
0))
super(EspeakFile, self).__init__(filename, 'r', 16, rate, 1)
# Make the buffer big to avoid underruns.
self._buffer_size = 16384
self._voice = voice
self.voice = voice
self._position = 0
self._data_buffer = b''
self._speaking = False
self._done = False
# Set the retrieval callback
self._espeak_synth_callback = _espeak.t_espeak_callback(self)
_espeak.espeak_SetSynthCallback(self._espeak_synth_callback)
self._closed = False
def _open(self):
""" _open() -> Open the classes file and set it up for read/write
access.
"""
# Get the file and length
text = ''
with open(self._filename, 'r') as txt_file:
text = txt_file.read()
text = text.strip().encode() + b'\0'
text_length = len(text)
# Speak the file
self._err_check(_espeak.espeak_Synth(text, text_length, 0,
_espeak.POS_CHARACTER, 0,
_espeak.espeakCHARS_UTF8,
None, None))
def __repr__(self):
""" __repr__ -> Returns a python expression to recreate this instance.
"""
repr_str = "filename='%(_filename)s', mode='%(_mode)s', voice='%(_voice)s'" % self
return '%s(%s)' % (self.__class__.__name__, repr_str)
def __call__(self, wav, numsamples, events):
""" Make the class callable so it can be called as the espeak synth
callback.
"""
# Stop if the end of the synthesis is reached.
if not wav:
self._done = True
self._speaking = False
return 1
# Append the data to the buffer.
self._data_buffer += _espeak.string_at(wav, numsamples *
_espeak.sizeof(_espeak.c_short))
# Update length
self._length = len(self._data_buffer)
# Return value 0 means to keep playing 1 means to stop.
return 0 if self._speaking else 1
def _err_check(self, ret_val):
""" Checks the 'ret_val' for error status (<0) and prints and error
message. Returns 'ret_val' for the calling function to use.
"""
try:
assert(ret_val >= 0)
except Exception as err:
msg_out("There was and error %s %s" % (err, ret_val),
file=sys_stderr)
return ret_val
def _get_position(self) -> int:
""" Returns the current position.
"""
return self._position
def _set_position(self, position: int):
""" Change the position of playback.
"""
if position <= self._length:
self._position = position
@property
def range(self):
""" The current inflection range.
"""
return _espeak.espeak_GetParameter(_espeak.espeakRANGE, 1)
@range.setter
def range(self, value):
""" Set the inflection range.
"""
self._err_check(_espeak.espeak_SetParameter(_espeak.espeakRANGE,
int(value), 0))
@property
def pitch(self):
""" The current pitch.
"""
return _espeak.espeak_GetParameter(_espeak.espeakPITCH, 1)
@pitch.setter
def pitch(self, value):
""" Set the pitch.
"""
self._err_check(_espeak.espeak_SetParameter(_espeak.espeakPITCH,
int(value), 0))
@property
def volume(self):
""" The current volume.
"""
return _espeak.espeak_GetParameter(_espeak.espeakVOLUME, 1)
@volume.setter
def volume(self, value):
""" Set the pitch.
"""
self._err_check(_espeak.espeak_SetParameter(_espeak.espeakVOLUME,
int(value), 0))
@property
def speed(self):
""" The current rate.
"""
return _espeak.espeak_GetParameter(_espeak.espeakRATE, 1)
@speed.setter
def speed(self, value):
""" Set the rate.
"""
self._err_check(_espeak.espeak_SetParameter(_espeak.espeakRATE,
int(value), 0))
@property
def voice(self):
""" The current voice.
"""
voice = _espeak.espeak_GetCurrentVoice()
return voice.contents.languages[1:].decode()
@voice.setter
def voice(self, value):
""" Set the espeak voice.
"""
self._voice = value
if not isinstance(value, bytes):
value = value.encode()
self._err_check(_espeak.espeak_SetVoiceByName(value))
@property
def isspeaking(self):
""" Is it speaking.
"""
return self._speaking
def list_voices(self):
""" Print a list of available voices.
"""
voices = _espeak.espeak_ListVoices(None)
print("%-21s %-22s %s" % ("Language", "Name", "Identifier"))
print('-'*55)
for voice in voices:
if not voice:
break
voice = voice.contents
lang = voice.languages.decode()
name = voice.name.decode()
ident = voice.identifier.decode()
print("%-22s %-22s %s" % (lang, name, ident))
def close(self):
""" Stop speaking.
"""
if not self.closed:
self._speaking = False
self._err_check(_espeak.espeak_Cancel())
self._err_check(_espeak.espeak_Terminate())
self._closed = True
@io_wrapper
def read(self, size: int) -> bytes:
""" Read from the data buffer.
"""
# Start speaking
if not self._done and not self._speaking:
self._speaking = True
self._open()
        # Data buffer for the audio read back from espeak.
        data = b''
        while len(data) < size:
            # Take whatever is buffered, up to the amount still needed, and
            # advance the read position by the length of this chunk only.
            chunk = self._data_buffer[self._position:self._position + size - len(data)]
            data += chunk
            self._position += len(chunk)
# Check if the file is finished
if self._position == self._length and self._done:
# Loop if necessary
if self._loops != -1 and self._loop_count >= self._loops:
if len(data) != 0:
# Fill data buffer until it is the requested
# size.
data += b'\x00' * (size - len(data))
break
else:
# Increment the loop counter and seek to the start
self._loop_count += 1
self.seek(0)
continue
return data
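# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# It assumes the espeak shared library is installed, that 'example.txt' is an
# existing plain-text file, and that the io_wrapper decorator keeps the plain
# read(size) signature -- all of these are assumptions, so treat this only as
# a sketch of how EspeakFile is meant to be driven.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    tts = EspeakFile('example.txt', voice='en-us')
    audio = b''
    # Pull a few buffers of 16-bit mono PCM from the synthesizer.
    for _ in range(4):
        audio += tts.read(tts._buffer_size)
    tts.close()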
| gpl-3.0 | 352,600,860,208,501,700 | 25.482955 | 90 | 0.536151 | false |
Balannen/LSMASOMM | atom3/Kernel/LayoutModule/HierarchicalLayoutModule/CrossingModule/CrossingCounter.py | 1 | 2510 | """
CrossingCounter.py
By Denis Dube, 2005
"""
def countAllCrossings(levelDictionary):
"""
Returns all the edge crossings in the graph
Input: levelDictionary where each level is a list of NodeWrapper objects
Output: # of crossings between all the layers
"""
edgeCrossings = 0
for i in range(0, len(levelDictionary) - 1):
edgeCrossings += countCrossings(levelDictionary[i], levelDictionary[i+1])
return edgeCrossings
def countCrossings(layerA, layerB, debug=False):
"""
Inputs: layerA and layerB are lists of NodeWrapper objects
Output: # of crossings between two node layers in O(|E| log |Vsmall|)
NOTE: Most other algorithms for this are O(|E| + Number of crossings)
Implementation of:
Simple and Efficient Bilayer Cross Counting
Wilhelm Barth, Michael Junger, and Petra Mutzel
GD 2002, LNCS 2528, pp. 130-141, 2002
"""
# Assumed that layerA is above layerB, so children of A are in B
# Now figure out which layer is smaller to improve running time a bit
if(len(layerA) < len(layerB)):
smallLayer = layerA
largeLayer = layerB
isParent2Child = False
else:
smallLayer = layerB
largeLayer = layerA
isParent2Child = True
# Sort the edges and come up with a sequence of edges (integer indices)
edgeSequence = []
for node in largeLayer:
tempList = []
# Get all possible nodes connected to this node
if(isParent2Child):
targetNodeList = node.children.keys()
else:
targetNodeList = node.parents.keys()
for targetNode in targetNodeList:
# Restrict ourselves to just those nodes that are in smallLayer
if(targetNode in smallLayer):
#print 'targetNode.getOrder()', targetNode, targetNode.getOrder()
tempList.append(targetNode.getOrder())
tempList.sort()
edgeSequence.extend(tempList)
# Build the accumulator tree
firstindex = 1
while(firstindex < len(smallLayer)):
firstindex *= 2
treesize = (2 * firstindex) - 1
firstindex -= 1
tree = dict() # Heh, python dictionaries are so abused :)
for i in range(0, treesize):
tree[i] = 0
# Count the crossings
crosscount = 0
for k in range(0, len(edgeSequence)):
index = edgeSequence[k] + firstindex
tree[index] += 1
while(index > 0):
if(index % 2):
crosscount += tree[index + 1]
index = (index - 1) / 2
tree[index] += 1
return crosscount
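# ---------------------------------------------------------------------------
# Minimal illustration (added; not part of the original module).  _StubNode is
# a hypothetical stand-in for NodeWrapper that provides only the pieces of the
# interface countCrossings() actually touches: 'children', 'parents' and
# getOrder().
# ---------------------------------------------------------------------------
class _StubNode:
  def __init__(self, order):
    self._order = order
    self.children = dict()
    self.parents = dict()
  def getOrder(self):
    return self._order

def _demoCountCrossings():
  a0, a1 = _StubNode(0), _StubNode(1)
  b0, b1 = _StubNode(0), _StubNode(1)
  # Two edges that cross each other: a0 -> b1 and a1 -> b0.
  a0.children[b1] = 1; b1.parents[a0] = 1
  a1.children[b0] = 1; b0.parents[a1] = 1
  return countCrossings([a0, a1], [b0, b1])

if __name__ == '__main__':
  print _demoCountCrossings()   # expected: 1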
| gpl-3.0 | 5,428,156,959,330,971,000 | 27.904762 | 77 | 0.652988 | false |
fastcoinproject/fastcoin | qa/rpc-tests/mempool_spendcoinbase.py | 1 | 2777 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test spending coinbase transactions.
# The coinbase transaction in block N can appear in block
# N+100... so is valid in the mempool when the best block
# height is N+99.
# This test makes sure coinbase spends that will be mature
# in the next block are accepted into the memory pool,
# but less mature coinbase spends are NOT.
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
b = [ self.nodes[0].getblockhash(n) for n in range(101, 103) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises(JSONRPCException, self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].setgenerate(True, 1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| mit | -5,688,630,582,393,530,000 | 38.112676 | 91 | 0.672668 | false |
koralarts/django-dynamic-settings | setup.py | 1 | 1400 | from setuptools import setup, find_packages
VERSION = "1.1.1"
LONG_DESCRIPTION = """
=================================
django-dynamic-settings
=================================
Django Dynamic Settings is a small module that allows you to create settings that can be edited
using the Django admin dashboard.
"""
setup(
name='django-dynamic-settings',
version=VERSION,
url='https://github.com/koralarts/django-dynamic-settings',
download_url='https://github.com/koralarts/django-dynamic-settings/tarball/v1.1.1.tar.gz',
description='Small module that allows you to generate dynamic settings that can be edited inside the Django admin dashboard',
long_description=LONG_DESCRIPTION,
author='Karl Castillo',
author_email='[email protected]',
maintainer='Karl Castillo',
maintainer_email='[email protected]',
include_package_data=True,
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'License :: OSI Approved :: MIT',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7'
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: Site Management'
],
keywords=['django','settings','utility'],
packages=find_packages(),
)
| mit | 6,508,445,999,156,883,000 | 35.842105 | 129 | 0.641429 | false |
NLeSC/embodied-emotions-scripts | embem/spellingnormalization/normalize_dataset.py | 1 | 3306 | """Create multilabel data set with normalized spelling.
The input consists of a directory of text files containing the dataset in
historic spelling.
The data set consists of:
<sentence id>\t<sentence>\tEmotie_Liefde (embodied emotions labels separated by
_)
<sentence id>\t<sentence>\tNone ('None' if no words were tagged)
Usage: python normalize_dataset.py <input dir> <hist2modern dict> <output dir>
"""
import argparse
import codecs
import os
from collections import Counter
import json
import unicodedata
import string
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_dir', help='the name of the directory '
'containing text files that should be normalized.')
parser.add_argument('hist2modern', help='json file containing historic2'
'modern mapping (json object)')
parser.add_argument('output_dir', help='the directory where the '
'normalized data files should be saved.')
args = parser.parse_args()
input_dir = args.input_dir
output_dir = args.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# load hist2modern dictionary
with codecs.open(args.hist2modern, 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
replacements = Counter()
num_words = 0
num_replaced = 0
text_files = [fi for fi in os.listdir(input_dir) if fi.endswith('.txt')]
for text_file in text_files:
print text_file
in_file = os.path.join(input_dir, text_file)
out_file = os.path.join(output_dir, text_file)
with codecs.open(in_file, 'rb', 'utf-8') as f:
lines = f.readlines()
with codecs.open(out_file, 'wb', 'utf-8') as f:
for line in lines:
parts = line.split('\t')
words = parts[1].split(' ')
new_words = []
for w in words:
if w not in string.punctuation:
num_words += 1
wo = w.lower()
                        if wo in hist2modern:
new_words.append(hist2modern[wo])
num_replaced += 1
replacements[wo] += 1
else:
new_words.append(w)
# replace accented characters by unaccented ones
s = unicodedata.normalize('NFKD', ' '.join(new_words)) \
.encode('ascii', 'ignore')
f.write(u'{}\t{}\t{}'.format(parts[0], s, parts[2]))
# print number of replacements
print 'total words\t{}\ntotal replaced\t{}'.format(num_words, num_replaced)
for replaced, freq in replacements.most_common():
print '{}\t{}\t{}'.format(replaced.encode('utf-8'),
hist2modern[replaced].encode('utf-8'),
freq)
| apache-2.0 | 9,219,120,687,807,462,000 | 35.32967 | 79 | 0.569268 | false |
daweiwu/meta-iotqa-1 | lib/oeqa/runtime/sensor/test_light_vcnl4000.py | 1 | 2628 | """
@file test_light_vcnl4000.py
"""
##
# @addtogroup soletta sensor
# @brief This is sensor test based on soletta app
# @brief test sensor vcnl4000 on Galileo/MinnowMax/Edison
##
import os
import time
from oeqa.utils.helper import shell_cmd
from oeqa.oetest import oeRuntimeTest
from EnvirSetup import EnvirSetup
from oeqa.utils.decorators import tag
@tag(TestType="FVT", FeatureID="IOTOS-757")
class TestLightVcnl4000(oeRuntimeTest):
"""
@class TestLightVcnl4000
"""
def setUp(self):
'''Generate test app on target
@fn setUp
@param self
@return'''
print 'start!\n'
#connect sensor and DUT through board
#shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py vcnl4000")
envir = EnvirSetup(self.target)
envir.envirSetup("vcnl4000","light")
def tearDown(self):
'''unload vcnl4000 driver
@fn tearDown
@param self
@return'''
(status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name")
if "Minnow" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x13 >i2c-1/delete_device")
if "Galileo" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x13 >i2c-0/delete_device")
if "BODEGA" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x13 >i2c-6/delete_device")
def test_Light_VCNL4000(self):
'''Execute the test app and verify sensor data
        @fn test_Light_VCNL4000
@param self
@return'''
print 'start reading data!'
(status, output) = self.target.run(
"chmod 777 /opt/apps/test_light_vcnl4000.fbp")
(status, output) = self.target.run(
"cd /opt/apps; ./test_light_vcnl4000.fbp >re.log")
error = output
(status, output) = self.target.run(
"cp /opt/apps/re.log /home/root/vcnl4000.log")
#verification of target sensor data
(status, output) = self.target.run("cat /opt/apps/re.log|grep float")
print output + "\n"
self.assertEqual(status, 0, msg="Error messages: %s" % error)
#make sure sensor data is valid
(status, output) = self.target.run("cat /opt/apps/re.log|grep ' 0.000000'")
self.assertEqual(status, 1, msg="Error messages: %s" % output)
| mit | -1,183,319,564,476,822,800 | 36.014085 | 88 | 0.570396 | false |
shub0/algorithm-data-structure | python/bulls_cows.py | 1 | 1816 | '''
You are playing the following Bulls and Cows game with your friend: You write down a number and ask your friend to guess what the number is. Each time your friend makes a guess, you provide a hint that indicates how many digits in said guess match your secret number exactly in both digit and position (called "bulls") and how many digits match the secret number but locate in the wrong position (called "cows"). Your friend will use successive guesses and hints to eventually derive the secret number.
For example:
Secret number: "1807"
Friend's guess: "7810"
Hint: 1 bull and 3 cows. (The bull is 8, the cows are 0, 1 and 7.)
Write a function to return a hint according to the secret number and friend's guess, use A to indicate the bulls and B to indicate the cows. In the above example, your function should return "1A3B".
Please note that both secret number and friend's guess may contain duplicate digits, for example:
Secret number: "1123"
Friend's guess: "0111"
In this case, the 1st 1 in friend's guess is a bull, the 2nd or 3rd 1 is a cow, and your function should return "1A1B".
You may assume that the secret number and your friend's guess only contain digits, and their lengths are always equal.
'''
class Solution(object):
def getHint(self, secret, guess):
"""
:type secret: str
:type guess: str
:rtype: str
"""
        setA = [0] * 10
setB = [0] * 10
A = 0
size = len(secret)
for index in range(size):
if secret[index] == guess[index]:
A += 1
else:
setA[int(secret[index])] += 1
setB[int(guess[index])] += 1
B = sum([ min(setA[index], setB[index]) for index in range(10) ])
return "%dA%dB" % (A, B)
| bsd-3-clause | -4,210,370,346,375,803,000 | 46.789474 | 502 | 0.660793 | false |
messense/wechatpy | wechatpy/work/replies.py | 1 | 1503 | # -*- coding: utf-8 -*-
from wechatpy import replies
from wechatpy.fields import IntegerField
REPLY_TYPES = {}
def register_reply(reply_type):
def register(cls):
REPLY_TYPES[reply_type] = cls
return cls
return register
@register_reply('text')
class TextReply(replies.TextReply):
agent = IntegerField('AgentID', 0)
@register_reply('image')
class ImageReply(replies.ImageReply):
agent = IntegerField('AgentID', 0)
@register_reply('voice')
class VoiceReply(replies.VoiceReply):
agent = IntegerField('AgentID', 0)
@register_reply('video')
class VideoReply(replies.VideoReply):
agent = IntegerField('AgentID', 0)
@register_reply('news')
class ArticlesReply(replies.ArticlesReply):
agent = IntegerField('AgentID', 0)
def create_reply(reply, message=None, render=False):
r = None
if isinstance(reply, replies.BaseReply):
r = reply
if message:
r.source = message.target
r.target = message.source
r.agent = message.agent
elif isinstance(reply, str):
r = TextReply(
message=message,
content=reply
)
elif isinstance(reply, (tuple, list)):
if len(reply) > 10:
raise AttributeError("Can't add more than 10 articles"
" in an ArticlesReply")
r = ArticlesReply(
message=message,
articles=reply
)
if r and render:
return r.render()
return r
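# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# 'incoming' stands for a parsed incoming work message carrying 'source',
# 'target' and 'agent' attributes; it is assumed here, not defined above.
#
#   reply = create_reply('Hello!', message=incoming)             # TextReply, agent copied over
#   xml = create_reply('Hello!', message=incoming, render=True)  # serialized XML string
#   news = create_reply([article1, article2], message=incoming)  # ArticlesReply
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Minimal demonstration without an incoming message; source/target/agent
    # would normally be filled in from the parsed incoming message.
    demo = create_reply('Hello!')
    print(demo.content)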
| mit | -2,669,429,921,473,059,000 | 21.772727 | 66 | 0.616101 | false |