[Dataset excerpt: Python source files with per-record metadata (repo_name, path, copies, size, content, license, hash, line statistics, autogenerated flag). Each file below is introduced by a header comment giving its repo, path and license.]

# ===== googleads/google-ads-python :: google/ads/googleads/v6/resources/types/customer_client_link.py (apache-2.0) =====
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v6.enums.types import manager_link_status
__protobuf__ = proto.module(
package="google.ads.googleads.v6.resources",
marshal="google.ads.googleads.v6",
manifest={"CustomerClientLink",},
)
class CustomerClientLink(proto.Message):
r"""Represents customer client link relationship.
Attributes:
resource_name (str):
Immutable. Name of the resource. CustomerClientLink resource
names have the form:
``customers/{customer_id}/customerClientLinks/{client_customer_id}~{manager_link_id}``
client_customer (str):
Immutable. The client customer linked to this
customer.
manager_link_id (int):
            Output only. Uniquely identifies a
            customer client link. Read only.
status (google.ads.googleads.v6.enums.types.ManagerLinkStatusEnum.ManagerLinkStatus):
This is the status of the link between client
and manager.
hidden (bool):
The visibility of the link. Users can choose
whether or not to see hidden links in the Google
Ads UI. Default value is false
"""
resource_name = proto.Field(proto.STRING, number=1)
client_customer = proto.Field(proto.STRING, number=7, optional=True)
manager_link_id = proto.Field(proto.INT64, number=8, optional=True)
status = proto.Field(
proto.ENUM,
number=5,
enum=manager_link_status.ManagerLinkStatusEnum.ManagerLinkStatus,
)
hidden = proto.Field(proto.BOOL, number=9, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
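
Added usage note (not part of the generated file above): proto-plus message
classes accept field values as keyword arguments, and enum fields take values
from the generated enum type. The IDs below are invented, and the ACTIVE enum
member is assumed to exist in this API version; manager_link_id is output-only
and therefore never set by the caller.

    from google.ads.googleads.v6.enums.types import manager_link_status
    from google.ads.googleads.v6.resources.types import customer_client_link

    link = customer_client_link.CustomerClientLink(
        resource_name="customers/123/customerClientLinks/456~789",
        client_customer="customers/456",
        status=manager_link_status.ManagerLinkStatusEnum.ManagerLinkStatus.ACTIVE,
        hidden=False,
    )
    print(link.resource_name)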

# ===== max-ionov/rucoref :: tools/train-test-split.py (lgpl-3.0) =====
#!/usr/bin/python2
# -*- coding: utf-8 -*-
import argparse
import logging
import sys
import re
import codecs
from sklearn import cross_validation
# temporary measure while there is no package installation
sys.path.append('/media/max/Extension/Projects/Coreference/rucoref')
from anaphoralib.corpora import rueval
from anaphoralib.tagsets import multeast
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('texts', help='CoNLL-like file with tokenized RuCoref corpus')
parser.add_argument('gs', help='CoNLL-like file with RuCoref annotations')
parser.add_argument('--test-size', '-s',
default=0.3,
help='the proportion of a test subcorpus',
type=float)
parser.add_argument('--random-state', help='random state for the pseudo-random number generation',
type=int,
default=None)
parser.add_argument('-v', help='More output',
dest='verbose',
action='store_true')
args = parser.parse_args()
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG if args.verbose else logging.INFO)
rucoref = rueval.RuCorefCorpus(multeast, rueval)
logging.info(u'Loading RuCoref texts from {}, GS from {}'.format(args.texts, args.gs))
logging.debug('Loading texts...')
rucoref.load_texts(args.texts)
logging.debug('Loading GS...')
rucoref.load_gs(args.gs)
doc_split = cross_validation.ShuffleSplit(len(rucoref.doc_ids),
n_iter=1,
test_size=args.test_size,
random_state=args.random_state)
train_set = [rucoref.doc_ids[i] for i in sorted(list(doc_split)[0][0])]
test_set = [rucoref.doc_ids[i] for i in sorted(list(doc_split)[0][1])]
logging.debug('Train set ({}): {}'.format(len(train_set), ', '.join(str(i) for i in train_set)))
logging.debug('Test set ({}): {}'.format(len(test_set), ', '.join(str(i) for i in test_set)))
rx_txt = re.compile('\\.txt$')
end_suffix_test = '.test.txt'
end_suffix_train = '.train.txt'
out_texts_train = rx_txt.sub(end_suffix_train, args.texts)
out_gs_train = rx_txt.sub(end_suffix_train, args.gs)
out_texts_test = rx_txt.sub(end_suffix_test, args.texts)
out_gs_test = rx_txt.sub(end_suffix_test, args.gs)
filenames = (('texts', args.texts, out_texts_train, out_texts_test),
('GS', args.gs, out_gs_train, out_gs_test))
for name, inp_filename, out_filename_train, out_filename_test in filenames:
logging.info('Saving train and test {}'.format(name))
out_file_train = codecs.open(out_filename_train, 'w', encoding='utf-8')
out_file_test = codecs.open(out_filename_test, 'w', encoding='utf-8')
with codecs.open(inp_filename, encoding='utf-8') as inp_file:
# writing headers
line = inp_file.readline()
out_file_test.write(line)
out_file_train.write(line)
for line in inp_file:
doc_id = int(line.split('\t')[0])
if doc_id in train_set:
out_file_train.write(line)
else:
out_file_test.write(line)

# ===== geomf/omf-fork :: omf/solvers/gridlabd/__init__.py (gpl-2.0) =====
# Portions Copyright (C) 2015 Intel Corporation
''' Code for running Gridlab and getting results into pythonic data structures. '''
import sys
import os
import subprocess
import platform
import re
import datetime
import shutil
import traceback
import math
import time
import tempfile
import json
from os.path import join as pJoin
from copy import deepcopy
# Locational variables so we don't have to rely on OMF being in the system
# path.
_myDir = os.path.dirname(os.path.abspath(__file__))
_omfDir = os.path.dirname(os.path.dirname(_myDir))
sys.path.append(_omfDir)
# OMF imports.
from omf import feeder
import logging
def _addGldToPath():
''' Figure out what platform we're on and choose a suitable Gridlab binary.
Returns full path to binary as result. '''
# Do we have a version of GridlabD available?
if 0 == subprocess.call(["gridlabd"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
# There's a system-level install of Gridlab, so use it:
return "gridlabd"
else:
# No system-level version of Gridlab available, so add ours to the
# path.
enviro = os.environ
if sys.platform == 'win32' or sys.platform == 'cygwin':
if platform.machine().endswith('64'):
binary = _myDir + "\\win64\\gridlabd.exe"
enviro['GRIDLABD'] = _myDir + "\\win64"
enviro['GLPATH'] = _myDir + "\\win64\\"
else:
binary = _myDir + "\\win32\\gridlabd.exe"
enviro['GRIDLABD'] = _myDir + "\\win32"
enviro['GLPATH'] = _myDir + "\\win32\\"
return binary
elif sys.platform == 'darwin':
# Implement me, maybe.
pass
elif sys.platform == 'linux2':
binary = _myDir + "/linx64/gridlabd.bin"
enviro['GRIDLABD'] = _myDir + "/linx64"
enviro['GLPATH'] = _myDir + "/linx64"
# Uncomment the following line if we ever get all the linux libraries bundled. Hard!
# enviro['LD_LIBRARY_PATH'] = enviro['LD_LIBRARY_PATH'] + ':' + solverRoot + "/linx64"
return binary
else:
# Platform not supported, so just return the standard binary and
# pray it works:
return "gridlabd"
logger = logging.getLogger(__name__)
def runInFilesystem(feederTree, attachments=[], keepFiles=False, workDir=None, glmName=None):
''' Execute gridlab in the local filesystem. Return a nice dictionary of results. '''
logger.info(
'Running GridLab-D for %d feeders (working dir=%s)', len(feederTree), workDir)
try:
binaryName = "gridlabd"
# Create a running directory and fill it, unless we've specified where
# we're running.
        madeTempDir = False
        if not workDir:
            workDir = tempfile.mkdtemp()
            madeTempDir = True
            print "gridlabD runInFilesystem with no specified workDir. Working in", workDir
# Need to zero out lat/lon data on copy because it frequently breaks
# Gridlab.
localTree = deepcopy(feederTree)
for key in localTree.keys():
try:
del localTree[key]["latitude"]
del localTree[key]["longitude"]
except:
pass # No lat lons.
# Write attachments and glm.
for attach in attachments:
with open(pJoin(workDir, attach), 'w') as attachFile:
attachFile.write(attachments[attach])
glmString = feeder.sortedWrite(localTree)
if not glmName:
glmName = "main." + \
datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + ".glm"
with open(pJoin(workDir, glmName), 'w') as glmFile:
glmFile.write(glmString)
logger.debug('Wrote GLM file: %s', glmName)
# RUN GRIDLABD IN FILESYSTEM (EXPENSIVE!)
with open(pJoin(workDir, 'stdout.txt'), 'w') as stdout, open(pJoin(workDir, 'stderr.txt'), 'w') as stderr, open(pJoin(workDir, 'PID.txt'), 'w') as pidFile:
            # MAYBEFIX: turn stderr WARNINGS back on once we figure out how
            # to suppress the 500MB of lines gridlabd wants to write...
logger.info(
'Running <%s -w %s> in <%s>', binaryName, glmName, workDir)
proc = subprocess.Popen(
[binaryName, '-w', glmName], cwd=workDir, stdout=stdout, stderr=stderr)
pidFile.write(str(proc.pid))
logger.info('Launched gridlabd with pid=%d', proc.pid)
returnCode = proc.wait()
logger.info('gridlabd finished with exit code=%d', returnCode)
# Build raw JSON output.
rawOut = anaDataTree(workDir, lambda x: True)
with open(pJoin(workDir, 'stderr.txt'), 'r') as stderrFile:
rawOut['stderr'] = stderrFile.read().strip()
with open(pJoin(workDir, 'stdout.txt'), 'r') as stdoutFile:
rawOut['stdout'] = stdoutFile.read().strip()
logger.info('GridlabD STDOUT:\n%s', rawOut['stdout'])
logger.info('GridlabD STDERR:\n%s', rawOut['stderr'])
# Delete the folder and return.
        if not keepFiles and madeTempDir:
            # NOTE: if a working directory was specified, don't just blow it
            # away; only remove the temp dir we created ourselves.
for attempt in range(5):
try:
shutil.rmtree(workDir)
break
except OSError:
                    # HACK: if we don't sleep, windows intermittently fails to delete things and an exception is thrown.
                    # Probably because dropbox is monkeying around in these folders
                    # on my dev machine. Disabled for now since it works when
                    # dropbox is off.
time.sleep(2)
return rawOut
except:
with open(pJoin(workDir, "stderr.txt"), "a+") as stderrFile:
traceback.print_exc(file=stderrFile)
return {}
def _strClean(x):
''' Helper function that translates csv values to reasonable floats (or header values to strings). '''
if x == 'OPEN':
return 1.0
elif x == 'CLOSED':
return 0.0
    elif x == '-1.#IND':
        return 0.0
    # Look for strings of the type '+32.0+68.32d':
    if x.endswith('d'):
matches = re.findall(
'^([+-]?\d+\.?\d*e?[+-]?\d+)[+-](\d+\.?\d*e?[+-]?\d*)d$', x)
if len(matches) == 0:
return 0.0
else:
floatConv = map(float, matches[0])
squares = map(lambda x: x**2, floatConv)
return math.sqrt(sum(squares))
elif re.findall('^([+-]?\d+\.?\d*e?[+-]?\d*)$', x) != []:
matches = re.findall('([+-]?\d+\.?\d*e?[+-]?\d*)', x)
if len(matches) == 0:
return 0.0
else:
try:
return float(matches[0])
except:
return 0.0 # Hack for crazy WTF occasional Gridlab output.
else:
return x
def csvToArray(fileName):
''' Take a Gridlab-export csv filename, return a list of timeseries vectors.'''
with open(fileName) as openfile:
data = openfile.read()
lines = data.splitlines()
array = map(lambda x: x.split(','), lines)
cleanArray = [map(_strClean, x) for x in array]
# Magic number 8 is the number of header rows in each GridlabD csv.
arrayNoHeaders = cleanArray[8:]
# Drop the timestamp column:
return arrayNoHeaders
def _seriesTranspose(theArray):
''' Transpose every matrix that's a value in a dictionary. Yikes. '''
return {i[0]: list(i)[1:] for i in zip(*theArray)}
def anaDataTree(studyPath, fileNameTest):
''' Take a study and put all its data into a nested object {fileName:{metricName:[...]}} '''
data = {}
csvFiles = os.listdir(studyPath)
for cName in csvFiles:
if fileNameTest(cName) and cName.endswith('.csv'):
arr = csvToArray(studyPath + '/' + cName)
data[cName] = _seriesTranspose(arr)
return data
def _tests():
print "Full path to Gridlab executable we're using:", _addGldToPath()
print "Testing string cleaning."
strTestCases = [("+954.877", 954.877),
("+2.18351e+006", 2183510.0),
("+7244.99+1.20333e-005d", 7244.99),
# ("+7244.99+120d", 7245.98372204), # Fails due to float rounding but should pass.
("+3.76184", 3.76184),
("1", 1.0),
("-32.4", -32.4),
("+7200+0d", 7200.0),
("+175020+003133", 0.0)]
for (string, result) in strTestCases:
assert _strClean(
string) == result, "A _strClean operation failed on: " + string
# Get a test feeder and test climate.
print "Testing GridlabD solver."
with open(pJoin(_omfDir, "data", "Feeder", "public", "Simple Market System.json"), "r") as feederFile:
feederJson = json.load(feederFile)
with open(pJoin(_omfDir, "data", "Climate", "AL-HUNTSVILLE.tmy2"), "r") as climateFile:
tmyStr = climateFile.read()
# Add climate in.
feederJson["attachments"]["climate.tmy2"] = tmyStr
testStudy = runInFilesystem(feederJson["tree"], feederJson["attachments"])
assert testStudy != {}, "Gridlab run failed and we got blank output."
print "GridlabD standard error:", testStudy['stderr']
print "GridlabD standard output:", testStudy['stdout']
if __name__ == '__main__':
_tests()
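
Added worked example: _strClean() treats a trailing 'd' as a rectangular
complex reading ('<real><imag>d') and collapses it to its magnitude. A
standalone sketch of that branch, reusing the module's regex (which drops the
sign of the imaginary part, harmless because only the magnitude is kept):

    import math
    import re

    def magnitude(value):
        # split '<real>[+-]<imag>d' into components, return sqrt(re^2 + im^2)
        parts = re.findall(
            '^([+-]?\d+\.?\d*e?[+-]?\d+)[+-](\d+\.?\d*e?[+-]?\d*)d$', value)
        real, imag = map(float, parts[0])
        return math.sqrt(real ** 2 + imag ** 2)

    print(magnitude('+7244.99+120d'))  # ~7245.98, the commented test case above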

# ===== TardigradeX/Space-Race-RX :: backend/Commands_util.py (mit) =====
from Commands import Commands, Targets, Defaults, Payloads
dd = Defaults.DELIMETER
dt = Defaults.TARGET_DELIMETER
dn = Defaults.NONE
""" COMMANDS TO BE SENT BY ANY """
def createLogin(targetType, roomid = dn ):
playerId = dn
payload = dn
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.LOGIN, target, payload])
return(msg)
def createLoginResponse(targetType, roomid, playerId):
target = dt.join([targetType, roomid, playerId])
payload = Payloads.SIGNUP;
msg = dd.join([Commands.LOGIN, target, payload])
return(msg)
def createPlayerJoined(roomid, playerId):
targetType = Targets.MASTER
payload = Payloads.JOINED
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.LOGIN, target, payload])
return(msg)
def createAnswer(targetType, roomid, playerId, payload):
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.ANSWER, target, payload])
return(msg)
def createMessage(source, targetType, roomid, playerId, payload):
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.MESSAGE, target, payload])
return(msg)
def createLogout(roomid, playerId):
    targetType = Targets.MASTER
    payload = dn
target = dt.join([targetType, roomid, playerId])
msg = dd.join([Commands.LOGOUT, target, payload])
return(msg)
""" COMMANDS CREATED BY CONTROLLER ONLY """
def createGameCommand(command, targetType, roomid = Defaults.NONE, playerId = Defaults.NONE):
target = dt.join([targetType, roomid, playerId])
payload = dn
msg = dd.join([command, target, payload])
return(msg)
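
Added illustration: the Commands module imported at the top is not shown here,
so the delimiter values below are invented stand-ins. The point is the wire
format every helper builds: a command, a target triple, and a payload.

    dd, dt, dn = '|', ',', 'NONE'  # assumed stand-ins for the Defaults values

    target = dt.join(['MASTER', 'room42', dn])
    print(dd.join(['LOGIN', target, dn]))  # LOGIN|MASTER,room42,NONE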

# ===== bennylope/django-organizations :: src/organizations/views/default.py (bsd-2-clause) =====
# -*- coding: utf-8 -*-
from organizations.models import Organization
from organizations.views.base import ViewFactory
from organizations.views.mixins import AdminRequiredMixin
from organizations.views.mixins import MembershipRequiredMixin
from organizations.views.mixins import OwnerRequiredMixin
bases = ViewFactory(Organization)
class OrganizationList(bases.OrganizationList):
pass
class OrganizationCreate(bases.OrganizationCreate):
"""
Allows any user to create a new organization.
"""
pass
class OrganizationDetail(MembershipRequiredMixin, bases.OrganizationDetail):
pass
class OrganizationUpdate(AdminRequiredMixin, bases.OrganizationUpdate):
pass
class OrganizationDelete(OwnerRequiredMixin, bases.OrganizationDelete):
pass
class OrganizationUserList(MembershipRequiredMixin, bases.OrganizationUserList):
pass
class OrganizationUserDetail(AdminRequiredMixin, bases.OrganizationUserDetail):
pass
class OrganizationUserUpdate(AdminRequiredMixin, bases.OrganizationUserUpdate):
pass
class OrganizationUserCreate(AdminRequiredMixin, bases.OrganizationUserCreate):
pass
class OrganizationUserRemind(AdminRequiredMixin, bases.OrganizationUserRemind):
pass
class OrganizationUserDelete(AdminRequiredMixin, bases.OrganizationUserDelete):
pass
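
Added sketch of the pattern above, in plain Python rather than Django's real
classes: the factory supplies concrete base views, and each mixin prepends an
access check via the MRO before delegating with super().

    class BaseDetail(object):
        def dispatch(self, request):
            return 'detail for %s' % request

    class AdminRequiredMixin(object):
        def dispatch(self, request):
            # the check runs first because the mixin precedes the base class
            if not getattr(request, 'is_admin', False):
                return 'forbidden'
            return super(AdminRequiredMixin, self).dispatch(request)

    class GuardedDetail(AdminRequiredMixin, BaseDetail):
        pass

    print(GuardedDetail().dispatch('anonymous'))  # forbidden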

# ===== tomjon/piSpecMon :: spectrum/ams_worker.py (gpl-2.0) =====
""" Define Worker process, for scanning the spectrum using a Keysight sensor.
"""
import sys
from spectrum.process import Process
from spectrum.common import log, parse_config, now
from pyams import Sensor
class Worker(Process):
""" Process implementation for spectrum scanning using a Keysight sensor.
"""
def __init__(self, data_store):
super(Worker, self).__init__(data_store, 'ams')
def get_capabilities(self):
return {'antenna': [{'value': 0, 'label': 'Antenna 1'}, {'value': 1, 'label': 'Antenna 2'}, {'value': 2, 'label': 'Test Signal'}, {'value': 3, 'label': 'Terminated'}],
'preamp': [{'value': 0, 'label': 'Off'}, {'value': 1, 'label': 'On'}],
'attenuation': [{'value': 0.0, 'label': 'Off'}, {'value': 10.0, 'label': '10dB'}, {'value': 20.0, 'label': '20dB'}],
'window': [{'value': 0, 'label': 'Hann'}, {'value': 1, 'label': 'Gauss Top'}, {'value': 2, 'label': 'Flat Top'}, {'value': 3, 'label': 'Uniform'}, {'value': 4, 'label': 'Unknown'}]}
def iterator(self, config, initial_count):
""" Scan the spectrum, storing data through the config object, and yield status.
"""
        #FIXME the 'ams' parameter could be replaced by self.worker (or whatever) - do it automatically in the parent?
frange = parse_config(config.values, 'ams')[0] #FIXME assumes only a range
self.status.clear()
yield
        values = config.values['ams'] #FIXME the 'ams' parameter could be replaced by self.worker (or whatever) - do it automatically in the parent?
with Sensor(values['address'], values['port']) as sensor:
debug = 'debug' in sys.argv
# go half a channel either side of the range (hf is half channel span)
hf = frange[2] * 0.5
minF = frange[0] - hf
maxF = frange[1] + hf
for sweep_idx, df, amps in sensor.iter_sweep(minF, maxF, **values['scan']):
# channelise the amplitudes at channel frequencies
c_amps = []
bf = minF + frange[2]
max_amp = None
for i in xrange(len(amps)):
f = minF + i * df
amp = int(amps[i])
max_amp = max(max_amp, amp)
if f > bf: # map [-120 to -95] to [0, 100]
c_amps.append(min(127, max(-128, max_amp))) #(max_amp + 120.0) * 4.0)))
bf += frange[2]
max_amp = None
time_0 = now()
self.status['sweep'] = {'timestamp': time_0}
self.status['sweep']['sweep_n'] = initial_count + sweep_idx #FIXME this sweep_n mechanism must go into process.py
freq_n = max(xrange(len(c_amps)), key=c_amps.__getitem__)
self.status['sweep']['peaks'] = [{'freq_n': freq_n, 'strength': c_amps[freq_n]}]
config.write_spectrum(self.prefix, time_0, c_amps)
yield
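
Added standalone sketch of the channelisation loop above: fine-grained sweep
amplitudes are collapsed to one max-hold value per channel of width cw, with
each value clamped to the signed 8-bit range before storage.

    def channelise(amps, min_f, df, cw):
        out, boundary, peak = [], min_f + cw, None
        for i, amp in enumerate(amps):
            f = min_f + i * df
            peak = amp if peak is None else max(peak, amp)
            if f > boundary:  # channel finished, emit its max-hold value
                out.append(min(127, max(-128, peak)))
                boundary += cw
                peak = None
        return out

    print(channelise([10, 20, 15, -5, 0, 30], 0.0, 1.0, 2.0))  # [20, 30]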

# ===== sheeshmohsin/mozioproj :: mozio/settings.py (mit) =====
"""
Django settings for mozio project.
Generated by 'django-admin startproject' using Django 1.9.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')@hh8efot31g^d)b$p(wy4d37gih!9c2q+*efe4v#jj1f#gza$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.gis',
'tastypie',
'tastypie_swagger',
'units',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mozio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mozio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'geology',
'USER': 'geouser',
'PASSWORD': 'geopassword',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Caching
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': 'localhost:6379',
},
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
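
Added smoke test (assumes redis is reachable on localhost:6379 and Django has
been initialised with these settings): the CACHES block above routes Django's
backend-agnostic cache API through the django-redis-cache backend.

    from django.core.cache import cache

    cache.set('greeting', 'hello', timeout=30)
    assert cache.get('greeting') == 'hello'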

# ===== ianmiell/shutit-distro :: python3/python3.py (gpl-2.0) =====
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class python3(ShutItModule):
def build(self, shutit):
shutit.send('mkdir /tmp/build/python')
shutit.send('cd /tmp/build/python')
shutit.send('wget -qO- https://www.python.org/ftp/python/3.4.2/Python-3.4.2.tar.xz | xz -d | tar -xf -')
shutit.send('cd Python-*')
shutit.send('./configure --prefix=/usr --enable-shared --with-system-expat --with-system-ffi --enable-unicode=ucs4 --without-ensurepip')
shutit.send('make')
shutit.send('make install',check_exit=False) # why? seems ok
shutit.send('chmod -v 755 /usr/lib/libpython3.4m.so')
shutit.send('chmod -v 755 /usr/lib/libpython3.so')
return True
#def get_config(self, shutit):
# shutit.get_config(self.module_id,'item','default')
# return True
def finalize(self, shutit):
shutit.send('rm -rf /tmp/build/python')
return True
#def remove(self, shutit):
# return True
#def test(self, shutit):
# return True
def module():
return python3(
'shutit.tk.sd.python3.python3', 158844782.002553,
description='',
maintainer='',
depends=['shutit.tk.sd.libffi.libffi','shutit.tk.sd.sqlite.sqlite','shutit.tk.sd.make_certs.make_certs']
)

# ===== Pikecillo/genna :: external/4Suite-XML-1.0.2/test/Lib/test_time.py (gpl-2.0) =====
import time, calendar
from Ft.Lib import Time
def utcTupleToLocal8601(utctuple):
loc_tuple = time.localtime(calendar.timegm(utctuple))
if loc_tuple[8] == 1:
offset_secs = time.altzone
else:
offset_secs = time.timezone
if offset_secs == 0:
offset_str = 'Z'
else:
offset_str = '%+03d:%02d' % (-offset_secs / 3600, abs(offset_secs) % 60)
return time.strftime('%Y-%m-%dT%H:%M:%S' + offset_str, loc_tuple)
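
# Worked example (added note, not part of the original suite): for a zone at
# UTC-07:00 in winter, time.timezone is 25200 (seconds west of UTC), so the
# helper formats the offset as:
_example_offset = '%+03d:%02d' % (-25200 / 3600, abs(25200) % 60)  # '-07:00'
# Caveat: 'abs(offset_secs) % 60' is 0 for every whole-hour zone but does not
# recover the minutes of half-hour zones such as +05:30; the tests below only
# assume whole-hour offsets, so that edge is never exercised.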
def test_instance(tester):
tester.startGroup("Test DateTime Instance")
tester.startGroup("Test Seconds")
tester.startTest('10s + 5ms')
d = Time.DT(0,0,0,0,0,10,5,0,"",0,0)
tester.compare(10,d.second())
tester.compare(5,d.milliSecond())
tester.testDone()
tester.startTest('10s + 5001ms')
d = Time.DT(0,0,0,0,0,10,5001,0,"",0,0)
tester.compare(15,d.second())
tester.compare(1,d.milliSecond())
tester.testDone()
tester.groupDone()
tester.startGroup("Test Minutes")
tester.startTest("1m 0s")
d = Time.DT(0,0,0,0,1,0,0,0,"",0,0)
tester.compare(1,d.minute())
tester.compare(1,d.minute(local=1))
tester.testDone()
tester.startTest("1m 0s, offset 20m")
d = Time.DT(0,0,0,0,1,0,0,0,"",0,20)
tester.compare(1,d.minute())
tester.compare(21,d.minute(local=1))
tester.testDone()
tester.startTest("1m 65s, offset 20m")
d = Time.DT(0,0,0,0,1,65,0,0,"",0,20)
tester.compare(2,d.minute())
tester.compare(22,d.minute(local=1))
tester.compare(5,d.second())
tester.testDone()
tester.groupDone()
tester.startGroup("Test Hour")
tester.startTest("1h 0m")
d = Time.DT(0,0,0,1,0,0,0,0,"",0,0)
tester.compare(1,d.hour())
tester.compare(1,d.hour(local=1))
tester.testDone()
tester.startTest("1h 0m, offset -2h")
d = Time.DT(0,0,0,1,0,0,0,0,"",-2,0)
tester.compare(1,d.hour())
tester.compare(23,d.hour(local=1))
tester.testDone()
tester.startTest("10h 125m, offset -15h 65m")
d = Time.DT(0,0,0,10,125,0,0,0,"",-15,65)
tester.compare(12,d.hour())
tester.compare(22,d.hour(local=1))
tester.compare(5,d.minute())
tester.compare(10,d.minute(local=1))
tester.testDone()
tester.groupDone()
tester.startGroup("Time Zones")
tester.startTest("0h 0m, offset -6h, summer")
d = Time.DT(0,0,0,0,0,0,0,1,"",-6,0)
tester.compare("MDT",d.tzName())
tester.compare(-6,d.tzHourOffset())
tester.compare(0,d.tzMinuteOffset())
tester.compare(18,d.hour(local=1))
tester.compare(0,d.hour())
tester.testDone()
tester.startTest("0h 0m, offset -6h, winter")
d = Time.DT(0,0,0,0,0,0,0,0,"",-6,0)
tester.compare("CST",d.tzName())
tester.compare(-6,d.tzHourOffset())
tester.compare(0,d.tzMinuteOffset())
tester.compare(18,d.hour(local=1))
tester.compare(0,d.hour())
tester.testDone()
tester.startTest("0h 0m, offset -7h, summer")
d = Time.DT(0,0,0,0,0,0,0,1,"",-7,0)
tester.compare("PDT",d.tzName())
tester.compare(-7,d.tzHourOffset())
tester.compare(0,d.tzMinuteOffset())
tester.compare(17,d.hour(local=1))
tester.compare(0,d.hour())
tester.testDone()
tester.startTest("0h 0m, offset -7h, winter")
d = Time.DT(0,0,0,0,0,0,0,0,"",-7,0)
tester.compare("MST",d.tzName())
tester.compare(-7,d.tzHourOffset())
tester.compare(0,d.tzMinuteOffset())
tester.compare(17,d.hour(local=1))
tester.compare(0,d.hour())
tester.testDone()
tester.groupDone()
tester.startGroup("Test Date")
tester.startTest("Y2001, M1, D1")
d = Time.DT(2001,1,1,0,0,0,0,0,"",0,0)
tester.compare(2001,d.year())
tester.compare(1,d.month())
tester.compare(1,d.day())
tester.compare(1,d.day(local=1))
tester.testDone()
tester.startTest("Y2001, M2, D1, 1h, offset -2h")
d = Time.DT(2001,2,1,1,0,0,0,0,"",-2,0)
tester.compare(2001,d.year())
tester.compare(2,d.month())
tester.compare(1,d.month(local=1))
tester.compare(1,d.day())
tester.compare(31,d.day(local=1))
tester.compare(23,d.hour(local=1))
tester.testDone()
tester.startTest("Y2001, M2, D1, 33h")
d = Time.DT(2001,2,1,33,0,0,0,0,"",0,0)
tester.compare(2001,d.year())
tester.compare(2,d.month())
tester.compare(2,d.day())
tester.compare(9,d.hour())
tester.testDone()
tester.startTest("Y2000, M2, D30")
d = Time.DT(2000,2,30,00,0,0,0,0,"",0,0)
tester.compare(2000,d.year())
tester.compare(3,d.month())
tester.compare(1,d.day())
tester.testDone()
tester.startTest("Y2001, M2, D30")
d = Time.DT(2001,2,30,00,0,0,0,0,"",0,0)
tester.compare(2001,d.year())
tester.compare(3,d.month())
tester.compare(2,d.day())
tester.testDone()
tester.groupDone()
tester.groupDone()
def test_iso(tester):
tester.startGroup("ISO Time Parser")
for i,h,m,s,ms in [("T232050",23,20,50,0),
("23:20:50",23,20,50,0),
("T23:20:50",23,20,50,0),
("T2320",23,20,0,0),
("T23:20",23,20,0,0),
("23:20",23,20,0,0),
("T23",23,0,0,0),
("T232050,5",23,20,50,500),
("T232050.5",23,20,50,500),
("T23:20:50,5",23,20,50,500),
("T23:20:50.5",23,20,50,500),
("23:20:50,5",23,20,50,500),
("23:20:50.5",23,20,50,500),
("T2320,9",23,20,54,0),
("T2320.9",23,20,54,0),
("T23:20,9",23,20,54,0),
("T23:20.9",23,20,54,0),
("23:20,9",23,20,54,0),
("23:20.9",23,20,54,0),
("T23,3",23,18,0,0),
("T23.3",23,18,0,0),
("T-2050",None,20,50,0),
("T-20:50",None,20,50,0),
("T-20",None,20,0,0),
("T--50",None,None,50,0),
("T11,3",11,18,0,0),
("T11.3",11,18,0,0),
("T-20,9",None,20,54,0),
("T-20.9",None,20,54,0),
("T-2050,5",None,20,50,500),
("T-2050.5",None,20,50,500),
("T-20:50,5",None,20,50,500),
("T-20:50.5",None,20,50,500),
("T--50,5",None,None,50,500),
("T--50.5",None,None,50,500),
("T000000",0,0,0,0),
("T00:00:00",0,0,0,0),
("T240000",0,0,0,0),
("T24:00:00",0,0,0,0),
]:
tester.startTest(i)
d = Time.FromISO8601(i)
if h is None:
h = time.localtime()[3]
if m is None:
m = time.localtime()[4]
tester.compare(h,d.hour())
tester.compare(m,d.minute())
tester.compare(s,d.second())
tester.compare(ms,d.milliSecond())
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Time and TZ Parser")
for i,h,m,s,tzh,tzm,lh,lm in [("232030Z",23,20,30,0,0,23,20),
("T232030Z",23,20,30,0,0,23,20),
("23:20:30Z",23,20,30,0,0,23,20),
("T23:20:30Z",23,20,30,0,0,23,20),
("2320Z",23,20,0,0,0,23,20),
("23:20Z",23,20,0,0,0,23,20),
("T2320Z",23,20,0,0,0,23,20),
("T23:20Z",23,20,0,0,0,23,20),
("23Z",23,0,0,0,0,23,0),
("T23Z",23,0,0,0,0,23,0),
("T152746+0130",13,57,46,1,30,15,27),
("T152746+01",14,27,46,1,00,15,27),
("T15:27:46+01:30",13,57,46,1,30,15,27),
("T15:27:46+01",14,27,46,1,00,15,27),
("152746+0130",13,57,46,1,30,15,27),
("152746+01",14,27,46,1,00,15,27),
("15:27:46+01:30",13,57,46,1,30,15,27),
("15:27:46+01",14,27,46,1,00,15,27),
("T152746-0530",20,57,46,-5,-30,15,27),
("152746-0530",20,57,46,-5,-30,15,27),
("T15:27:46-05:30",20,57,46,-5,-30,15,27),
("15:27:46-05:30",20,57,46,-5,-30,15,27),
("T152746-05",20,27,46,-5,0,15,27),
("152746-05",20,27,46,-5,0,15,27),
("T15:27:46-05",20,27,46,-5,0,15,27),
("15:27:46-05",20,27,46,-5,0,15,27),
]:
tester.startTest(i)
d = Time.FromISO8601(i)
tester.compare(h,d.hour())
tester.compare(m,d.minute())
tester.compare(s,d.second())
#tester.compare(tzh,d.tzHourOffset())
#tester.compare(tzm,d.tzMinuteOffset())
#tester.compare(lh,d.hour(local=1))
#tester.compare(lm,d.minute(local=1))
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Date Parser")
for i,y,m,d in [("19850412",1985,4,12),
("1985-04-12",1985,4,12),
("1985-04",1985,4,1),
("1985",1985,1,1),
("1900",1900,1,1),
("850412",2085,04,12),
("85-04-12",2085,04,12),
("-8504",2085,04,1),
("-85-04",2085,04,1),
("-85",2085,01,1),
("--0412",None,04,12),
("--04-12",None,04,12),
("--04",None,04,1),
("---12",None,None,12),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
now = time.localtime()
if y is None:
y = now[0]
if m is None:
m = now[1]
tester.compare(y,dt.year())
tester.compare(m,dt.month())
tester.compare(d,dt.day())
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Ordinal Date Parser")
for i,y,m,d in [("1985102",1985,4,12),
("1985-102",1985,4,12),
("85102",2085,04,12),
("85-102",2085,04,12),
(calendar.isleap(time.localtime()[0])
and "-103" or "-102",None,04,12),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
now = time.localtime()
if y is None:
y = now[0]
if m is None:
m = now[1]
tester.compare(y,dt.year())
tester.compare(m,dt.month())
tester.compare(d,dt.day())
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Week Date Parser")
for i,y,m,d in [("1985W155",1985,4,12),
("1985-W15-5",1985,4,12),
("1985W15",1985,4,8),
("1985-W15",1985,4,8),
("85W155",2085,04,13),
("85-W15-5",2085,04,13),
("85W15",2085,04,9),
("85-W15",2085,04,9),
("-5W155",2005,04,15),
("-5-W15-5",2005,04,15),
# date of week 15, day 5 varies from year to year
# ("-W155",None,04,13),
# ("-W15-5",None,04,13),
# ("-W15",None,04,9),
# ("-W15",None,04,9),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
now = time.localtime()
if y is None:
y = now[0]
if m is None:
m = now[1]
tester.compare(y,dt.year())
tester.compare(m,dt.month())
tester.compare(d,dt.day())
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Combined Date Parser")
for i,y,m,d,h,min,s,ms,tzh,tzm,ld,lh,lm in [("19850412T101530",1985,4,12,10,15,30,0,0,0,12,10,15),
("19850412T1015",1985,4,12,10,15,0,0,0,0,12,10,15),
("19850412T10",1985,4,12,10,0,0,0,0,0,12,10,0),
("1985-04-12T10:15:30",1985,4,12,10,15,30,0,0,0,12,10,15),
("1985-04-12T10:15",1985,4,12,10,15,0,0,0,0,12,10,15),
("1985-04-12T10",1985,4,12,10,0,0,0,0,0,12,10,0),
("1985102T23:50:30",1985,4,12,23,50,30,0,0,0,12,23,50),
("1985102T23:50",1985,4,12,23,50,0,0,0,0,12,23,50),
("1985102T23",1985,4,12,23,0,0,0,0,0,12,23,0),
("1985-102T23:50:30",1985,4,12,23,50,30,0,0,0,12,23,50),
("1985-102T23:50",1985,4,12,23,50,0,0,0,0,12,23,50),
("1985-102T23",1985,4,12,23,0,0,0,0,0,12,23,0),
("1985W155T235030",1985,4,12,23,50,30,0,0,0,12,23,50),
("1985W155T2350",1985,4,12,23,50,0,0,0,0,12,23,50),
("1985W155T23",1985,4,12,23,0,0,0,0,0,12,23,0),
("1985-W15-5T23:50:30",1985,4,12,23,50,30,0,0,0,12,23,50),
("1985-W15-5T23:50",1985,4,12,23,50,0,0,0,0,12,23,50),
("1985-W15-5T23",1985,4,12,23,0,0,0,0,0,12,23,0),
#Some with TZ
("1985-04-12T10:15:30,5+03:30",1985,4,12,6,45,30,500,3,30,12,10,15),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
tester.compare(y,dt.year())
tester.compare(m,dt.month())
tester.compare(d,dt.day())
tester.compare(h,dt.hour())
tester.compare(min,dt.minute())
tester.compare(s,dt.second())
tester.compare(ms,dt.milliSecond())
tester.compare(tzh,dt.tzHourOffset())
tester.compare(tzm,dt.tzMinuteOffset())
tester.compare(ld,dt.day(local=1))
tester.compare(lh,dt.hour(local=1))
tester.compare(lm,dt.minute(local=1))
tester.testDone()
tester.groupDone()
def test_rfc822(tester):
tester.startGroup("RFC 822 Parsing")
for i,y,m,d,h,min,s,ms,tzh,tzm,ld,lh,lm in [("Thu, Jan 4 2001 09:15:39 MDT",
2001,
1,
4,
15,
15,
39,
0,
-6,
0,
4,
9,
15),
("Tue, May 18 1999 13:45:50 GMT",
1999,
5,
18,
13,
45,
50,
0,
0,
0,
18,
13,
45),
]:
tester.startTest(i)
dt = Time.FromRFC822(i)
tester.compare(y,dt.year())
tester.compare(m,dt.month())
tester.compare(d,dt.day())
tester.compare(h,dt.hour())
tester.compare(min,dt.minute())
tester.compare(s,dt.second())
tester.compare(ms,dt.milliSecond())
tester.compare(tzh,dt.tzHourOffset())
tester.compare(tzm,dt.tzMinuteOffset())
tester.compare(ld,dt.day(local=1))
tester.compare(lh,dt.hour(local=1))
tester.compare(lm,dt.minute(local=1))
tester.testDone()
tester.groupDone()
def test_serialize(tester):
tester.startGroup("ISO Time Serializer")
for i,o,ol in [("T10:30:50","T10:30:50Z","T10:30:50Z"),
("T10:30:50+0130","T09:00:50Z","T10:30:50+01:30"),
("T10:30:50,5+0130","T09:00:50,5Z","T10:30:50,5+01:30"),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
e = dt.asISO8601Time()
tester.compare(o,e)
e = dt.asISO8601Time(local=1)
tester.compare(ol,e)
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Date Serializer")
for i,o in [("20011217","2001-12-17"),
("20010133","2001-02-02"),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
e = dt.asISO8601Date()
tester.compare(o,e)
e = dt.asISO8601Date(local=1)
tester.compare(o,e)
tester.testDone()
tester.groupDone()
tester.startGroup("ISO Date Time Serializer")
for i,o,ol in [("20011217T10:30:50","2001-12-17T10:30:50Z","2001-12-17T10:30:50Z"),
("20011217T10:30:50+0130","2001-12-17T09:00:50Z","2001-12-17T10:30:50+01:30"),
]:
tester.startTest(i)
dt = Time.FromISO8601(i)
e = dt.asISO8601DateTime()
tester.compare(o,e)
e = dt.asISO8601DateTime(local=1)
tester.compare(ol,e)
tester.testDone()
tester.groupDone()
tester.startGroup("RFC822 Date Time Serializer")
for i,o,ol in [("Thu, 04 Jan 2001 09:15:39 MDT","Thu, 04 Jan 2001 15:15:39 GMT","Thu, 04 Jan 2001 09:15:39 MDT"),
("Fri, 05 Jan 2001 09:15:39 GMT","Fri, 05 Jan 2001 09:15:39 GMT","Fri, 05 Jan 2001 09:15:39 GMT"),
]:
tester.startTest(i)
dt = Time.FromRFC822(i)
e = dt.asRFC822DateTime()
tester.compare(o,e)
e = dt.asRFC822DateTime(local=1)
tester.compare(ol,e)
tester.testDone()
tester.groupDone()
def test_python_tuple(tester):
tester.startGroup("Python time tuple")
for i,o in [((2001,12,17,13,15,30,0,0,-1),"2001-12-17T13:15:30Z"),
((2000,1,33,13,15,30,0,0,-1),"2000-02-02T13:15:30Z"),
]:
tester.startTest(repr(i))
ol = utcTupleToLocal8601(i)
dt = Time.FromPythonTimeTuple(i)
e = dt.asISO8601DateTime()
tester.compare(o,e)
e = dt.asISO8601DateTime(local=1)
tester.compare(ol,e)
e = dt.asPythonTimeTuple()
tester.testDone()
tester.groupDone()
tester.startTest("Python time")
t = time.time()
dt = Time.FromPythonTime(t)
test = time.gmtime(t)
tester.compare(dt.year(),test[0])
tester.compare(dt.month(),test[1])
tester.compare(dt.day(),test[2])
tester.compare(dt.hour(),test[3])
tester.compare(dt.minute(),test[4])
tester.compare(dt.second(),test[5])
tester.testDone()
return
def Test(tester):
test_instance(tester)
test_iso(tester)
test_rfc822(tester)
test_serialize(tester)
test_python_tuple(tester)

# ===== att-comdev/drydock :: drydock_provisioner/drivers/node/maasdriver/models/boot_resource.py (apache-2.0) =====
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model for MaaS API boot_resource type."""
import drydock_provisioner.error as errors
import drydock_provisioner.drivers.node.maasdriver.models.base as model_base
class BootResource(model_base.ResourceBase):
resource_url = 'boot-resources/{resource_id}/'
fields = [
'resource_id',
'name',
'type',
'subarches',
'architecture',
]
json_fields = [
'name',
'type',
'subarches',
'architecture',
]
def __init__(self, api_client, **kwargs):
super().__init__(api_client, **kwargs)
def get_image_name(self):
"""Return the name that would be specified in a deployment.
Return None if this is not an ubuntu image, otherwise
the distro series name
"""
(os, release) = self.name.split('/')
# Only supply image names for ubuntu-based images
if os == 'ubuntu':
return release
else:
# Non-ubuntu images such as the uefi bootloader
# should never be selectable
return None
def get_kernel_name(self):
"""Return the kernel name that would be specified in a deployment."""
(_, kernel) = self.architecture.split('/')
return kernel
class BootResources(model_base.ResourceCollectionBase):
collection_url = 'boot-resources/'
collection_resource = BootResource
def __init__(self, api_client, **kwargs):
super().__init__(api_client)
def is_importing(self):
"""Check if boot resources are importing."""
url = self.interpolate_url()
self.logger.debug("Checking if boot resources are importing.")
resp = self.api_client.get(url, op='is_importing')
if resp.status_code == 200:
resp_json = resp.json()
self.logger.debug("Boot resource importing status: %s" % resp_json)
return resp_json
else:
msg = "Error checking import status of boot resources: %s - %s" % (
resp.status_code, resp.text)
self.logger.error(msg)
raise errors.DriverError(msg)
def get_available_images(self):
"""Get list of available deployable images."""
image_options = list()
for k, v in self.resources.items():
if v.get_image_name() not in image_options:
image_options.append(v.get_image_name())
return image_options
def get_available_kernels(self, image_name):
"""Get kernels available for image_name
Return list of kernel names available for
``image_name``.
:param image_name: str image_name (e.g. 'xenial')
"""
kernel_options = list()
for k, v in self.resources.items():
if (v.get_image_name() == image_name
and v.get_kernel_name() not in kernel_options):
kernel_options.append(v.get_kernel_name())
return kernel_options
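
Added walk-through with hand-built stand-ins (real BootResource objects come
from the MAAS API): this is the image/kernel mapping the two helper methods
implement, with a non-ubuntu bootloader entry correctly filtered out.

    class FakeResource(object):
        def __init__(self, name, architecture):
            self.name, self.architecture = name, architecture
        def get_image_name(self):
            os_name, release = self.name.split('/')
            return release if os_name == 'ubuntu' else None
        def get_kernel_name(self):
            return self.architecture.split('/')[1]

    resources = {1: FakeResource('ubuntu/xenial', 'amd64/ga-16.04'),
                 2: FakeResource('ubuntu/xenial', 'amd64/hwe-16.04'),
                 3: FakeResource('grub-efi/uefi', 'amd64/generic')}
    kernels = sorted(r.get_kernel_name() for r in resources.values()
                     if r.get_image_name() == 'xenial')
    print(kernels)  # ['ga-16.04', 'hwe-16.04']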

# ===== xfire/pydzen :: plugins/mem.py (gpl-2.0) =====
#
# Copyright (C) 2008 Rico Schiekel (fire at downgra dot de)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# vim:syntax=python:sw=4:ts=4:expandtab
import os
import re
import logging
from pydzen import utils
logger = logging.getLogger('plugin.mem')
RE_MEM = re.compile('^Mem:\s*(?P<total>\d+)\s+(?P<used>\d+)\s+(?P<free>\d+)\s+(?P<shared>\d+)\s+(?P<buffers>\d+)\s+(?P<cached>\d+).*$')
RE_SWAP = re.compile('^Swap:\s*(?P<total>\d+)\s+(?P<used>\d+)\s+(?P<free>\d+).*$')
def bar(used, total):
return utils.gdbar('%d %d' % (used, total), l = '%d%% ' % (100. / total * used))
@utils.cache(2)
def update():
try:
out = utils.execute('free', m = True)
lines = out.split('\n')
_mem = RE_MEM.match(lines[1]).groupdict()
_swap = RE_SWAP.match(lines[3]).groupdict()
if _mem and _swap:
mem_total = float(_mem['total'])
swap_total = float(_swap['total'])
mem_used = float(_mem['used']) - float(_mem['buffers']) - float(_mem['cached'])
swap_used = float(_swap['used'])
mem = bar(mem_used, mem_total)
swap = bar(swap_used, swap_total)
return ['Mem: %s' % mem,
'Mem: %s (%d/%d Mb) Swap: %s (%d/%d Mb)' % (mem, mem_used, mem_total, swap, swap_used, swap_total)]
except StandardError, e:
logger.warn(e)
return None
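
Added worked parse, feeding a canned line through the same pattern instead of
shelling out to free as update() does. Note the plugin expects the six-column
'free -m' layout of older procps releases.

    import re

    RE = re.compile('^Mem:\s*(?P<total>\d+)\s+(?P<used>\d+)\s+(?P<free>\d+)'
                    '\s+(?P<shared>\d+)\s+(?P<buffers>\d+)\s+(?P<cached>\d+).*$')
    line = 'Mem:          7977       6201       1776        123        456       2345'
    mem = RE.match(line).groupdict()
    used = float(mem['used']) - float(mem['buffers']) - float(mem['cached'])
    print('%d%% used' % (100. / float(mem['total']) * used))  # 42% used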

# ===== atindale/business-glossary :: tests/test_basics.py (mit) =====
import os
import unittest
from flask import current_app
from flask import url_for
from app.core import create_app
from app.models import db
from flask_sqlalchemy import SQLAlchemy
from app.main.models import TermStatus, Document, DocumentType, Term, Category, Person, Link, Location, Table, Column, Rule
from app.config import BASE_DIR
class BasicTestCase(unittest.TestCase):
def _add_term(self):
p = Person(name='Jo Black')
ts = TermStatus(status='Approved')
desc1 = """Comprehensive credit reporting (CCR) commenced on 12 March 2014 under changes to the Privacy Act."""
t = Term(name='Comprehensive Credit Reporting', abbreviation='CCR', short_description='Hello', long_description=desc1, owner=p, steward=p, status=ts)
db.session.add(p)
db.session.add(ts)
db.session.add(t)
db.session.commit()
return(t)
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
self.app.config['WTF_CSRF_ENABLED'] = False
db.create_all()
# creates a test client
self.client = self.app.test_client()
# propogate the exceptions to the test client
self.client.testing = True
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_app_exists(self):
self.assertFalse(current_app is None)
def test_app_is_testing(self):
self.assertTrue(current_app.config['TESTING'])
def test_home_status_code(self):
# send HTTP GET request to the application on specified path
result = self.client.get('/')
# asset the status code of the response
self.assertEqual(result.status_code, 200)
def test_new_term(self):
p = Person(name='Jo Black')
self.assertTrue(p.name == 'Jo Black')
self.assertTrue(str(p) == 'Jo Black')
ts = TermStatus(status='Approved')
assert ts.status == 'Approved'
assert str(ts) == 'Approved'
desc1 = """Comprehensive credit reporting commenced on 12 March 2014 under changes to the Privacy Act."""
t = Term(name='Comprehensive Credit Reporting', abbreviation='CCR', short_description='Hello', long_description=desc1, owner=p, steward=p, status=ts)
self.assertTrue(str(t) == 'Comprehensive Credit Reporting')
db.session.add(p)
db.session.add(ts)
db.session.add(t)
db.session.commit()
assert t.id == int(t.get_id())
def test_term_relationship(self):
p = Person(name='Jo Black')
self.assertTrue(p.name == 'Jo Black')
ts = TermStatus(status='Approved')
assert ts.status == 'Approved'
desc1 = """Comprehensive credit reporting commenced on 12 March 2014 under changes to the Privacy Act."""
t1 = Term(name='Comprehensive Credit Reporting', abbreviation='CCR', short_description='Hello', long_description=desc1, owner=p, steward=p, status=ts)
desc2 = """A standard jointly developed by the Australian Bureau of Statistics and Statistics New Zealand."""
t2 = Term(name='Australian and New Zealand Standard Industrial Classification', abbreviation='ANZSIC', short_description='Hello2', long_description=desc2, owner=p, steward=p, status=ts)
t1.related_terms.append(t2)
db.session.add(p)
db.session.add(ts)
db.session.add(t1)
db.session.add(t2)
db.session.commit()
assert t1.id == int(t1.get_id())
assert t2.id == int(t2.get_id())
def test_term_status(self):
ts = TermStatus(status='Approved')
db.session.add(ts)
db.session.commit()
assert ts.status == 'Approved'
def test_term_category(self):
tc = Category(name='Test', description='Test category')
db.session.add(tc)
db.session.commit()
assert tc.name == 'Test'
self.assertTrue(Category.query.filter_by(id=1).first().name == 'Test')
def test_model_repr(self):
self.assertTrue(str(Category(name='Test')) == 'Test')
self.assertTrue(str(TermStatus(status='Test')) == 'Test')
self.assertTrue(str(Link(text='Test', address="http://x.com")) == 'Test')
self.assertTrue(str(Location(name='Test')) == 'Test')
self.assertTrue(str(Link(text='Test')) == 'Test')
self.assertTrue(str(Column(name='Test')) == 'Test')
self.assertTrue(str(Rule(name='Test')) == 'Test')
self.assertTrue(str(Document(name='Test')) == 'Test')
self.assertTrue(str(DocumentType(type='Test')) == 'Test')
self.assertTrue(str(DocumentType(type='Test')) == 'Test')
self.assertTrue(str(Table(name='Test')) == 'Test')
def test_term(self):
t = self._add_term()
self.assertTrue(Term.query.filter_by(id=1).first().name == 'Comprehensive Credit Reporting')
def test_term_page(self):
t = self._add_term()
# send HTTP GET request to the application on specified path
result = self.client.get(url_for('main.show_term', selected_term=1))
# asset the status code of the response
self.assertEqual(result.status_code, 200)
self.assertTrue('Comprehensive Credit' in result.get_data(as_text=True))
def test_term_assets(self):
term = self._add_term()
l = Location(name='test location')
t = Table(name='test location', location=l)
c = Column(name='test_column', table=t)
term.columns.append(c)
db.session.add(l)
db.session.add(t)
db.session.add(c)
db.session.commit()
# send HTTP GET request to the application on specified path
response = self.client.get(url_for('main.show_assets', selected_term=1))
self.assertTrue('test_column' in response.get_data(as_text=True))
def test_term_rules(self):
term = self._add_term()
r = Rule(identifier='BR001', name='Test Rule', description='', notes='')
term.rules.append(r)
db.session.add(r)
db.session.commit()
# send HTTP GET request to the application on specified path
response = self.client.get(url_for('main.show_rules', selected_term=1))
self.assertTrue('BR001' in response.get_data(as_text=True))
response = self.client.get(url_for('main.show_rule', selected_rule=1))
self.assertTrue('BR001' in response.get_data(as_text=True))
def test_location(self):
l = Location(name='test location')
t = Table(name='test_table', location=l)
c = Column(name='test_column', table=t)
db.session.add(l)
db.session.add(t)
db.session.add(c)
db.session.commit()
# send HTTP GET request to the application on specified path
response = self.client.get(url_for('main.show_location_details', selected_location=1))
self.assertTrue('test location' in response.get_data(as_text=True))
response = self.client.get(url_for('main.show_location_tables', selected_location=1))
self.assertTrue('test_table' in response.get_data(as_text=True))
response = self.client.get(url_for('main.show_table_details', selected_table=1))
self.assertTrue('test_table' in response.get_data(as_text=True))
response = self.client.get(url_for('main.show_table_columns', selected_table=1))
self.assertTrue('test_column' in response.get_data(as_text=True))
def test_search(self):
term = self._add_term()
l = Location(name='test location')
t = Table(name='test_table', location=l)
c = Column(name='test_ccr', table=t)
term.columns.append(c)
db.session.add(l)
db.session.add(t)
db.session.add(c)
db.session.commit()
response = self.client.get(url_for('main.search'))
self.assertTrue('Search for text in the term' in response.get_data(as_text=True))
response = self.client.post(url_for('main.search'), data={'search': 'ccr'})
self.assertTrue('Comprehensive Credit Reporting' in response.get_data(as_text=True))
self.assertTrue('test_ccr' in response.get_data(as_text=True))
def test_about(self):
response = self.client.get(url_for('main.about'))
self.assertTrue('Business Glossary' in response.get_data(as_text=True))
self.assertTrue('Version' in response.get_data(as_text=True))
def test_documents(self):
term = self._add_term()
dt = DocumentType(type='Test Plan')
d = Document(name='Test Document', path='/static/css/custom.css', description='Testing', types=[dt])
term.documents.append(d)
db.session.add(dt)
db.session.add(d)
db.session.commit()
response = self.client.get(url_for('main.show_documents', selected_term=1))
self.assertTrue('Test Document' in response.get_data(as_text=True))
def test_rule_documents(self):
term = self._add_term()
dt = DocumentType(type='Test Plan')
d = Document(name='Test Document', path='/static/css/custom.css', description='Testing', types=[dt])
term.documents.append(d)
r = Rule(identifier='BR001', name='Test Rule', description='', notes='')
term.rules.append(r)
r.documents.append(d)
db.session.add(r)
db.session.add(dt)
db.session.add(d)
db.session.commit()
response = self.client.get(url_for('main.show_rule_documents', selected_rule=1))
self.assertTrue('Test Document' in response.get_data(as_text=True))
if __name__ == '__main__':
unittest.main()

# ===== Micronaet/micronaet-mx :: sale_address/address.py (agpl-3.0) =====
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import os
import sys
import logging
import openerp
import openerp.netsvc as netsvc
import openerp.addons.decimal_precision as dp
from openerp.osv import fields, osv, expression, orm
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
from openerp.tools import (DEFAULT_SERVER_DATE_FORMAT,
DEFAULT_SERVER_DATETIME_FORMAT,
DATETIME_FORMATS_MAP,
float_compare)
_logger = logging.getLogger(__name__)
class SaleOrder(orm.Model):
''' Add extra address for delivery
'''
_inherit = 'sale.order'
# Override onchange for reset address name
def onchange_partner_id(self, cr, uid, ids, part, context=None):
res = super(SaleOrder, self).onchange_partner_id(
cr, uid, ids, part, context=context)
res['value']['address_id'] = False # reset address
res['value']['invoice_id'] = False # reset address
return res
_columns = {
'address_id': fields.many2one('res.partner', 'Delivery address'),
'invoice_id': fields.many2one('res.partner', 'Invoice address'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
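
Added illustration of the contract relied on above: old-API Odoo onchange
handlers return a dict whose 'value' entry maps field names to new values,
which the client merges into the form. The override simply blanks two fields
on top of whatever the parent computed (the field names below are invented).

    res = {'value': {'partner_shipping_id': 7, 'pricelist_id': 2}}
    res['value']['address_id'] = False
    res['value']['invoice_id'] = False
    print(res['value'])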

# ===== uranix/ttpy :: tt/ksl/ksl.py (mit) =====
""" Dynamical TT-approximation """
import numpy as np
import dyn_tt
import tt
def ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000):
""" Dynamical tensor-train approximation based on projector splitting
This function performs one step of dynamical tensor-train approximation
for the equation
.. math ::
\\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
:param A: Matrix in the TT-format
:type A: matrix
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
"""
ry = y0.r.copy()
    if scheme == 'symm':
tp = 2
else:
tp = 1
# Check for dtype
y = tt.vector()
if np.iscomplex(A.tt.core).any() or np.iscomplex(y0.core).any():
dyn_tt.dyn_tt.ztt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core + 0j,
y0.core + 0j,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space)
y.core = dyn_tt.dyn_tt.zresult_core.copy()
else:
A.tt.core = np.real(A.tt.core)
y0.core = np.real(y0.core)
dyn_tt.dyn_tt.tt_ksl(
y0.d,
A.n,
A.m,
A.tt.r,
A.tt.core,
y0.core,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space
)
y.core = dyn_tt.dyn_tt.dresult_core.copy()
dyn_tt.dyn_tt.deallocate_result()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.get_ps()
return y
def diag_ksl(A, y0, tau, verb=1, scheme='symm', space=8, rmax=2000):
""" Dynamical tensor-train approximation based on projector splitting
    This function performs one step of dynamical tensor-train approximation
    with a diagonal matrix, i.e. it solves the equation
.. math ::
        \\frac{dy}{dt} = A y, \\quad y(0) = y_0
and outputs approximation for :math:`y(\\tau)`
:References:
1. Christian Lubich, Ivan Oseledets, and Bart Vandereycken.
Time integration of tensor trains. arXiv preprint 1407.2042, 2014.
http://arxiv.org/abs/1407.2042
2. Christian Lubich and Ivan V. Oseledets. A projector-splitting integrator
for dynamical low-rank approximation. BIT, 54(1):171-188, 2014.
http://dx.doi.org/10.1007/s10543-013-0454-0
    :param A: Diagonal of the matrix, given as a tensor in the TT-format
    :type A: tensor
:param y0: Initial condition in the TT-format,
:type y0: tensor
:param tau: Timestep
:type tau: float
:param scheme: The integration scheme, possible values: 'symm' -- second order, 'first' -- first order
:type scheme: str
:param space: Maximal dimension of the Krylov space for the local EXPOKIT solver.
:type space: int
:rtype: tensor
:Example:
>>> import tt
>>> import tt.ksl
>>> import numpy as np
>>> d = 8
>>> a = tt.qlaplace_dd([d, d, d])
>>> y0, ev = tt.eigb.eigb(a, tt.rand(2 , 24, 2), 1e-6, verb=0)
Solving a block eigenvalue problem
Looking for 1 eigenvalues with accuracy 1E-06
swp: 1 er = 1.1408 rmax:2
swp: 2 er = 190.01 rmax:2
swp: 3 er = 2.72582E-08 rmax:2
Total number of matvecs: 0
>>> y1 = tt.ksl.ksl(a, y0, 1e-2)
Solving a real-valued dynamical problem with tau=1E-02
>>> print tt.dot(y1, y0) / (y1.norm() * y0.norm()) - 1 #Eigenvectors should not change
0.0
"""
ry = y0.r.copy()
    if scheme == 'symm':
tp = 2
else:
tp = 1
# Check for dtype
y = tt.vector()
if np.iscomplex(A.core).any() or np.iscomplex(y0.core).any():
dyn_tt.dyn_diag_tt.ztt_diag_ksl(
y0.d,
A.n,
A.r,
A.core + 0j,
y0.core + 0j,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space)
y.core = dyn_tt.dyn_diag_tt.zresult_core.copy()
else:
A.core = np.real(A.core)
y0.core = np.real(y0.core)
dyn_tt.dyn_diag_tt.dtt_diag_ksl(
y0.d,
A.n,
A.r,
A.core,
y0.core,
ry,
tau,
rmax,
0,
10,
verb,
tp,
space)
y.core = dyn_tt.dyn_diag_tt.dresult_core.copy()
dyn_tt.dyn_diag_tt.deallocate_result()
y.d = y0.d
y.n = A.n.copy()
y.r = ry
y.get_ps()
return y
| mit | 5,739,514,568,776,435,000 | 27.255814 | 129 | 0.523457 | false |
TeamHG-Memex/agnostic | tests/test_mysql.py | 1 | 1900 | import os
import unittest
import pymysql
from tests.abstract import AbstractDatabaseTest
class TestMysql(AbstractDatabaseTest, unittest.TestCase):
''' Integration tests for MySQL '''
# Note that MySQL uses "schema" and "database" interchangeably, which leads
# to some unintuitive code in this test suite.
@property
def db_type(self):
''' The database type as a string. '''
return 'mysql'
@property
def default_db(self):
''' The database to connect when dropping/creating a test database. '''
return 'mysql'
def connect_db(self, user, password, database):
''' Return a connection to the specified database. '''
connect_args = {
'host': os.getenv('MYSQL_HOST', 'localhost'),
'user': user,
'password': password,
'database': database,
'autocommit': True
}
port = os.getenv('MYSQL_PORT', None)
if port is not None:
connect_args['port'] = int(port)
return pymysql.connect(**connect_args)
def table_columns(self, cursor, database, table_name):
''' Return a list of columns in the specified table. '''
sql = '''
SELECT column_name
FROM information_schema.columns
WHERE table_schema = %s AND table_name = %s
ORDER BY ordinal_position
'''
cursor.execute(sql, (database, table_name))
return [row[0] for row in cursor.fetchall()]
def table_exists(self, cursor, database, table_name):
''' Return true if the specified table exists. '''
table_query = '''
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_schema = %s AND table_name = %s
'''
cursor.execute(table_query, (database, table_name))
return cursor.fetchone()[0] == 1
| mit | 5,651,551,595,704,391,000 | 27.787879 | 79 | 0.586842 | false |
openSUSE/polkit-default-privs | tools/remove_duplicate_entries.py | 1 | 2188 | #!/usr/bin/python3
# vim: ts=4 et sw=4 sts=4 :
import argparse
from pkcommon import *
class DuplicateEntryRemover:
def __init__(self):
self.m_parser = argparse.ArgumentParser(
description = "Removes superfluous duplicate entries from polkit profiles or warns about conflicting ones."
)
def run(self):
self.m_args = self.m_parser.parse_args()
for profile in PROFILES:
self.m_lines_to_drop = set()
self.m_actions_seen = {}
path = getProfilePath(profile)
for entry in parseProfile(path):
self.checkDuplicate(entry)
if self.m_lines_to_drop:
self.rewriteProfile(path, self.m_lines_to_drop)
else:
print("{}: no entries removed".format(path.name.ljust(35)))
def checkDuplicate(self, entry):
seen = self.m_actions_seen.get(entry.action, None)
if not seen:
self.m_actions_seen[entry.action] = entry
else:
if entry.settings == seen.settings:
self.m_lines_to_drop.add(entry.linenr)
print("{}:{}: removing redundant entry with same settings as in line {}".format(
entry.path.name.ljust(35),
str(entry.linenr).rjust(3),
seen.linenr
))
else:
printerr("{}:{}: {}: conflicting duplicate entry ({}), previously seen in line {} ({})".format(
seen.path.name.ljust(35),
str(entry.linenr).rjust(3),
seen.action,
':'.join(entry.settings),
seen.linenr,
':'.join(seen.settings)
))
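    # Illustrative input (hypothetical profile lines, not from the shipped
    # data files) showing both branches above:
    #
    #   line 10: org.example.action  auth_admin
    #   line 42: org.example.action  auth_admin   -> redundant, line removed
    #   line 50: org.example.action  yes          -> conflict, warning printed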
def rewriteProfile(self, path, lines_to_drop):
lines = []
with open(path) as fd:
for linenr, line in enumerate(fd.readlines(), start = 1):
if linenr not in lines_to_drop:
lines.append(line)
with open(path, 'w') as fd:
fd.write(''.join(lines))
if __name__ == '__main__':
main = DuplicateEntryRemover()
main.run()
| gpl-2.0 | -189,961,837,190,509,020 | 27.789474 | 123 | 0.516453 | false |
gbanegas/HappyClient | happy/tests/models/__init__.py | 1 | 1611 | # -*- coding: utf-8 -*-
"""Unit test suite for the models of the application."""
from nose.tools import eq_
from happy.model import DBSession
from happy.tests import load_app
from happy.tests import setup_db, teardown_db
__all__ = ['ModelTest']
def setup():
"""Setup test fixture for all model tests."""
load_app()
setup_db()
def teardown():
"""Tear down test fixture for all model tests."""
teardown_db()
class ModelTest(object):
"""Base unit test case for the models."""
klass = None
attrs = {}
def setUp(self):
"""Setup test fixture for each model test method."""
try:
new_attrs = {}
new_attrs.update(self.attrs)
new_attrs.update(self.do_get_dependencies())
self.obj = self.klass(**new_attrs)
DBSession.add(self.obj)
DBSession.flush()
return self.obj
except:
DBSession.rollback()
raise
def tearDown(self):
"""Tear down test fixture for each model test method."""
DBSession.rollback()
def do_get_dependencies(self):
"""Get model test dependencies.
Use this method to pull in other objects that need to be created
for this object to be build properly.
"""
return {}
def test_create_obj(self):
"""Model objects can be created"""
pass
def test_query_obj(self):
"""Model objects can be queried"""
obj = DBSession.query(self.klass).one()
for key, value in self.attrs.items():
eq_(getattr(obj, key), value)
| apache-2.0 | 1,707,177,911,946,079,000 | 24.171875 | 72 | 0.58473 | false |
eepgwde/pyeg0 | pandas0/ch09/portfolio0.py | 1 | 2641 | import matplotlib.pyplot as plt
import pandas as pd
import pandas.io.data as web
from collections import defaultdict
names = ['AAPL', 'GOOG', 'MSFT', 'DELL', 'GS', 'MS', 'BAC', 'C']
def get_px(stock, start, end):
return web.get_data_yahoo(stock, start, end)['Adj Close']
#
px = pd.DataFrame({n: get_px(n, '1/1/2009', '6/1/2012') for n in names})
px = px.asfreq('B').fillna(method='pad')
rets = px.pct_change()
((1 + rets).cumprod() - 1).plot()
# For the portfolio construction, we'll compute momentum over a
# certain lookback, then rank in descending order and standardize:
def calc_mom(price, lookback, lag):
mom_ret = price.shift(lag).pct_change(lookback)
ranks = mom_ret.rank(axis=1, ascending=False)
demeaned = ranks - ranks.mean(axis=1)
return demeaned / demeaned.std(axis=1)
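# Illustrative sanity check of the transform above (parameters chosen here,
# not from the original example): each cross-sectional slice of the
# standardized ranks is centered at zero by construction.
mom_check = calc_mom(px, 20, lag=1).dropna()
assert mom_check.mean(axis=1).abs().max() < 1e-8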
# With this transform function in hand, we can set up a strategy
# backtesting function that computes a portfolio for a particular
# lookback and holding period (days between trading), returning the
# overall Sharpe ratio
compound = lambda x : (1 + x).prod() - 1
daily_sr = lambda x: x.mean() / x.std()
def strat_sr(prices, lb, hold):
# Compute portfolio weights
freq = '%dB' % hold
port = calc_mom(prices, lb, lag=1)
daily_rets = prices.pct_change()
# Compute portfolio returns
port = port.shift(1).resample(freq, how='first')
returns = daily_rets.resample(freq, how=compound)
port_rets = (port * returns).sum(axis=1)
return daily_sr(port_rets) * np.sqrt(252 / hold)
strat_sr(px, 70, 30)
# From there, you can evaluate the strat_sr function over a grid of
# parameters, storing them as you go in a defaultdict and finally
# putting the results in a DataFrame:
lookbacks = range(20, 90, 5)
holdings = range(20, 90, 5)
dd = defaultdict(dict)
for lb in lookbacks:
for hold in holdings:
dd[lb][hold] = strat_sr(px, lb, hold)
ddf = pd.DataFrame(dd)
ddf.index.name = 'Holding Period'
ddf.columns.name = 'Lookback Period'
# To visualize the results and get an idea of what's going on, here is
# a function that uses matplotlib to produce a heatmap with some
# adornments:
def heatmap(df, cmap=plt.cm.gray_r):
fig = plt.figure()
ax = fig.add_subplot(111)
axim = ax.imshow(df.values, cmap=cmap, interpolation='nearest')
ax.set_xlabel(df.columns.name)
ax.set_xticks(np.arange(len(df.columns)))
ax.set_xticklabels(list(df.columns))
ax.set_ylabel(df.index.name)
ax.set_yticks(np.arange(len(df.index)))
ax.set_yticklabels(list(df.index))
plt.colorbar(axim)
# Calling this function on the backtest results, we get Figure 11-3:
heatmap(ddf)
| gpl-3.0 | -4,000,118,988,380,880,000 | 30.392857 | 72 | 0.687903 | false |
himaaaatti/qtile | libqtile/widget/generic_poll_text.py | 1 | 2336 | import json
import six
from six.moves.urllib.request import urlopen, Request
from libqtile.widget import base
from libqtile.log_utils import logger
class GenPollText(base.ThreadedPollText):
"""
    A generic text widget that polls using a poll function to get the text.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('func', None, 'Poll Function'),
]
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(GenPollText.defaults)
def poll(self):
if not self.func:
return "You need a poll function"
return self.func()
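# A minimal usage sketch for a qtile config (hypothetical; ``update_interval``
# is inherited from the ThreadedPollText base class):
#
#     GenPollText(func=lambda: 'hello', update_interval=10)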
class GenPollUrl(base.ThreadedPollText):
"""
    A generic text widget that polls a URL and parses it using a parse
    function.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('url', None, 'Url'),
('data', None, 'Post Data'),
('parse', None, 'Parse Function'),
('json', True, 'Is Json?'),
('user_agent', 'Qtile', 'Set the user agent'),
('headers', {}, 'Extra Headers')
]
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(GenPollUrl.defaults)
def fetch(self, url, data=None, headers={}, is_json=True):
req = Request(url, data, headers)
res = urlopen(req)
if six.PY3:
charset = res.headers.get_content_charset()
else:
charset = res.headers.getparam('charset')
body = res.read()
if charset:
body = body.decode(charset)
if is_json:
body = json.loads(body)
return body
def poll(self):
if not self.parse or not self.url:
return "Invalid config"
data = self.data
headers = {"User-agent": self.user_agent}
if self.json:
headers['Content-Type'] = 'application/json'
if data and not isinstance(data, str):
data = json.dumps(data).encode()
headers.update(self.headers)
body = self.fetch(self.url, data, headers, self.json)
try:
text = self.parse(body)
except Exception:
logger.exception('got exception polling widget')
text = "Can't parse"
return text
| mit | -4,587,477,473,095,674,400 | 26.809524 | 77 | 0.577483 | false |
didicout/python_util | time_util.py | 1 | 4169 | # coding=utf8
"""
time util.
"""
__author__ = 'didicout <[email protected]>'
import time
import datetime
def date_str_2_stamp(date_str, millisecond=False):
if millisecond:
return int(time.mktime(time.strptime(date_str, '%Y-%m-%d'))) * 1000
else:
return int(time.mktime(time.strptime(date_str, '%Y-%m-%d')))
def time_str_2_stamp(time_str, millisecond=False):
if millisecond:
return int(time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S'))) * 1000
else:
return int(time.mktime(time.strptime(time_str, '%Y-%m-%d %H:%M:%S')))
def datetime_var_2_stamp(datetime_var, millisecond=False):
if millisecond:
return int(time.mktime(datetime_var.timetuple())) * 1000
else:
return int(time.mktime(datetime_var.timetuple()))
def time_var_2_stamp(time_var, millisecond=False):
    stamp = time.mktime(standardize_time(time_var))
    if millisecond:
        return int(stamp) * 1000
    else:
        return int(stamp)
def stamp_2_date_str(stamp, millisecond=False):
if millisecond:
stamp /= 1000
return time.strftime('%Y-%m-%d', time.localtime(stamp))
def stamp_2_datetime_str(stamp, millisecond=False):
if millisecond:
stamp /= 1000
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(stamp))
def time_str_2_date_str(time_str):
return stamp_2_date_str(time_str_2_stamp(time_str))
def datetime_2_date_str(datetime_var):
return stamp_2_date_str(datetime_var_2_stamp(datetime_var))
def time_2_date_str(time_var):
return stamp_2_date_str(time_var_2_stamp(time_var))
def time_2_datetime_str(time_var):
return stamp_2_datetime_str(time_var_2_stamp(time_var))
def time_minus_by_str(time_str1, time_str2):
return int(time.mktime(standardize_time(time_str1)) - time.mktime(standardize_time(time_str2)))
def date_range(date_str1, date_str2, step=1):
ret = []
step_seconds = 3600 * 24 * step
for i in range(date_str_2_stamp(date_str1), date_str_2_stamp(date_str2) + 1, step_seconds):
ret.append(stamp_2_date_str(i))
return ret
def get_monday_str(date_str):
datetime_var = datetime.datetime.strptime(date_str, '%Y-%m-%d')
monday = datetime_var - datetime.timedelta(days=datetime_var.weekday())
return datetime_2_date_str(monday)
def get_month_first_day_str(date_str):
datetime_var = datetime.datetime.strptime(date_str, '%Y-%m-%d')
first_day = datetime_var - datetime.timedelta(days=datetime_var.day - 1)
return datetime_2_date_str(first_day)
def get_today_str():
return datetime.date.today().strftime('%Y-%m-%d')
def get_yesterday_str():
return (datetime.date.today() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')
def get_yesterday_str_by_date_str(date_str):
stamp = date_str_2_stamp(date_str) - 24 * 3600
return stamp_2_date_str(stamp)
def get_tomorrow_str():
return (datetime.date.today() + datetime.timedelta(days=1)).strftime('%Y-%m-%d')
def day_minus_by_date_str(date_str1, date_str2):
tmp = date_str1.split('-')
a = datetime.datetime(int(tmp[0]), int(tmp[1]), int(tmp[2]))
tmp = date_str2.split('-')
b = datetime.datetime(int(tmp[0]), int(tmp[1]), int(tmp[2]))
return (a-b).days
def get_stamp_of_week(stamp, millisecond=False):
"""
    get the stamp of Monday morning of the week
    (the timestamp of 00:00:00 on the Monday of the week containing ``stamp``)
"""
date_str = stamp_2_date_str(stamp, millisecond)
monday_str = get_monday_str(date_str)
return date_str_2_stamp(monday_str, millisecond)
def get_stamp_of_month(stamp, millisecond=False):
"""
    get the stamp of the first day morning of the month
    (the timestamp of 00:00:00 on the 1st of the month containing ``stamp``)
"""
date_str = stamp_2_date_str(stamp, millisecond)
first_day = get_month_first_day_str(date_str)
return date_str_2_stamp(first_day, millisecond)
def standardize_time(time_var):
"""
avoid error when time has a time zone.
"""
return time.strptime(datetime.datetime.strftime(time_var, '%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S')
if __name__ == '__main__':
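    # A few illustrative round trips (dates chosen here, not part of the
    # original module).
    assert stamp_2_date_str(date_str_2_stamp('2014-05-01')) == '2014-05-01'
    assert get_monday_str('2014-05-01') == '2014-04-28'
    assert day_minus_by_date_str('2014-05-03', '2014-05-01') == 2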
print get_yesterday_str_by_date_str('2014-05-01') | mit | -4,047,810,320,677,002,000 | 26.797297 | 104 | 0.651106 | false |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/gui/preferences.py | 1 | 28361 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtGui, QtCore
from core import get_vistrails_application
from core.packagemanager import get_package_manager
from core.utils import InvalidPipeline
from core.utils.uxml import (named_elements,
elements_filter, enter_named_element)
from gui.configuration import (QConfigurationWidget, QGeneralConfiguration,
QThumbnailConfiguration)
from gui.module_palette import QModulePalette
from gui.pipeline_view import QPipelineView
from core.configuration import get_vistrails_persistent_configuration, \
get_vistrails_configuration
from core import debug
import os.path
##############################################################################
class QPackageConfigurationDialog(QtGui.QDialog):
def __init__(self, parent, package):
QtGui.QDialog.__init__(self, parent)
self.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self.setWindowTitle('Configuration for package "%s"' % package.name)
self._package = package
c = package.configuration
self._configuration_object = c
assert c is not None
layout = QtGui.QVBoxLayout(self)
self.setLayout(layout)
self._status_bar = QtGui.QStatusBar(self)
self._configuration_widget = QConfigurationWidget(self, c, c,
self._status_bar)
layout.addWidget(self._configuration_widget)
btns = (QtGui.QDialogButtonBox.Close |
QtGui.QDialogButtonBox.RestoreDefaults)
self._button_box = QtGui.QDialogButtonBox(btns,
QtCore.Qt.Horizontal,
self)
self.connect(self._button_box,
QtCore.SIGNAL('clicked(QAbstractButton *)'),
self.button_clicked)
self.connect(self._configuration_widget._tree.treeWidget,
QtCore.SIGNAL('configuration_changed'),
self.configuration_changed)
layout.addWidget(self._status_bar)
layout.addWidget(self._button_box)
def button_clicked(self, button):
role = self._button_box.buttonRole(button)
if role == QtGui.QDialogButtonBox.ResetRole:
txt = ("This will reset all configuration values of " +
"this package to their default values. Do you " +
"want to proceed?")
msg_box = QtGui.QMessageBox(QtGui.QMessageBox.Question,
"Really reset?", txt,
(QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No))
if msg_box.exec_() == QtGui.QMessageBox.Yes:
self.reset_configuration()
else:
assert role == QtGui.QDialogButtonBox.RejectRole
self.close_dialog()
def reset_configuration(self):
self._package.reset_configuration()
conf = self._package.configuration
self._configuration_widget.configuration_changed(conf)
def close_dialog(self):
self.done(0)
def configuration_changed(self, item, new_value):
self._package.set_persistent_configuration()
##############################################################################
class QPackagesWidget(QtGui.QWidget):
# Signals that a package should be selected after the event loop updates (to remove old references)
select_package_after_update_signal = QtCore.SIGNAL("select_package_after_update_signal")
##########################################################################
# Initialization
def __init__(self, parent, status_bar):
QtGui.QWidget.__init__(self, parent)
self._status_bar = status_bar
base_layout = QtGui.QHBoxLayout(self)
left = QtGui.QFrame(self)
right = QtGui.QFrame(self)
base_layout.addWidget(left)
base_layout.addWidget(right, 1)
######################################################################
left_layout = QtGui.QVBoxLayout(left)
left_layout.setMargin(2)
left_layout.setSpacing(2)
left_layout.addWidget(QtGui.QLabel("Disabled packages:", left))
self._available_packages_list = QtGui.QListWidget(left)
left_layout.addWidget(self._available_packages_list)
left_layout.addWidget(QtGui.QLabel("Enabled packages:", left))
self._enabled_packages_list = QtGui.QListWidget(left)
left_layout.addWidget(self._enabled_packages_list)
self.connect(self._available_packages_list,
QtCore.SIGNAL('itemSelectionChanged()'),
self.selected_available_list,
QtCore.Qt.QueuedConnection)
self.connect(self._enabled_packages_list,
QtCore.SIGNAL('itemSelectionChanged()'),
self.selected_enabled_list,
QtCore.Qt.QueuedConnection)
sm = QtGui.QAbstractItemView.SingleSelection
self._available_packages_list.setSelectionMode(sm)
self._enabled_packages_list.setSelectionMode(sm)
######################################################################
right_layout = QtGui.QVBoxLayout(right)
info_frame = QtGui.QFrame(right)
info_layout = QtGui.QVBoxLayout(info_frame)
grid_frame = QtGui.QFrame(info_frame)
grid_frame.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
info_layout.addWidget(grid_frame)
grid_layout = QtGui.QGridLayout(grid_frame)
l1 = QtGui.QLabel("Package Name:", grid_frame)
grid_layout.addWidget(l1, 0, 0)
l2 = QtGui.QLabel("Identifier:", grid_frame)
grid_layout.addWidget(l2, 1, 0)
l3 = QtGui.QLabel("Version:", grid_frame)
grid_layout.addWidget(l3, 2, 0)
l4 = QtGui.QLabel("Dependencies:", grid_frame)
grid_layout.addWidget(l4, 3, 0)
l5 = QtGui.QLabel("Reverse Dependencies:", grid_frame)
grid_layout.addWidget(l5, 4, 0)
l6 = QtGui.QLabel("Description:", grid_frame)
grid_layout.addWidget(l6, 5, 0)
self._name_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._name_label, 0, 1)
self._identifier_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._identifier_label, 1, 1)
self._version_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._version_label, 2, 1)
self._dependencies_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._dependencies_label, 3, 1)
self._reverse_dependencies_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._reverse_dependencies_label, 4, 1)
self._description_label = QtGui.QLabel("", grid_frame)
grid_layout.addWidget(self._description_label, 5, 1)
for lbl in [l1, l2, l3, l4, l5, l6,
self._name_label,
self._version_label,
self._dependencies_label,
self._identifier_label,
self._reverse_dependencies_label,
self._description_label]:
lbl.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
lbl.setWordWrap(True)
grid_layout.setRowStretch(4, 1)
grid_layout.setColumnStretch(1, 1)
right_layout.addWidget(info_frame)
self._enable_button = QtGui.QPushButton("&Enable")
self._enable_button.setEnabled(False)
self.connect(self._enable_button,
QtCore.SIGNAL("clicked()"),
self.enable_current_package)
self._disable_button = QtGui.QPushButton("&Disable")
self._disable_button.setEnabled(False)
self.connect(self._disable_button,
QtCore.SIGNAL("clicked()"),
self.disable_current_package)
self._configure_button = QtGui.QPushButton("&Configure...")
self._configure_button.setEnabled(False)
self.connect(self._configure_button,
QtCore.SIGNAL("clicked()"),
self.configure_current_package)
self._reload_button = QtGui.QPushButton("&Reload")
self._reload_button.setEnabled(False)
self.connect(self._reload_button,
QtCore.SIGNAL("clicked()"),
self.reload_current_package)
button_box = QtGui.QDialogButtonBox()
button_box.addButton(self._enable_button, QtGui.QDialogButtonBox.ActionRole)
button_box.addButton(self._disable_button, QtGui.QDialogButtonBox.ActionRole)
button_box.addButton(self._configure_button, QtGui.QDialogButtonBox.ActionRole)
button_box.addButton(self._reload_button, QtGui.QDialogButtonBox.ActionRole)
right_layout.addWidget(button_box)
self.connect(self,
self.select_package_after_update_signal,
self.select_package_after_update_slot,
QtCore.Qt.QueuedConnection)
# pm = get_package_manager()
# self.connect(pm,
# pm.reloading_package_signal,
# self.reload_current_package_finisher,
# QtCore.Qt.QueuedConnection)
app = get_vistrails_application()
app.register_notification("pm_reloading_package",
self.reload_current_package_finisher)
app.register_notification("package_added", self.package_added)
app.register_notification("package_removed", self.package_removed)
self.populate_lists()
self._current_package = None
self.erase_cache = False
def populate_lists(self):
pkg_manager = get_package_manager()
enabled_pkgs = sorted(pkg_manager.enabled_package_list())
enabled_pkg_dict = dict([(pkg.codepath, pkg) for
pkg in enabled_pkgs])
self._enabled_packages_list.clear()
for pkg in enabled_pkgs:
self._enabled_packages_list.addItem(pkg.codepath)
self._enabled_packages_list.sortItems()
available_pkg_names = [pkg for pkg in
sorted(pkg_manager.available_package_names_list())
if pkg not in enabled_pkg_dict]
self._available_packages_list.clear()
for pkg in available_pkg_names:
self._available_packages_list.addItem(pkg)
self._available_packages_list.sortItems()
##########################################################################
def enable_current_package(self):
av = self._available_packages_list
inst = self._enabled_packages_list
item = av.currentItem()
pos = av.indexFromItem(item).row()
codepath = str(item.text())
pm = get_package_manager()
dependency_graph = pm.dependency_graph()
new_deps = self._current_package.dependencies()
from core.modules.basic_modules import identifier as basic_modules_identifier
if self._current_package.identifier != basic_modules_identifier:
new_deps.append(basic_modules_identifier)
try:
pm.check_dependencies(self._current_package, new_deps)
except self._current_package.MissingDependency, e:
debug.critical("Missing dependencies", str(e))
else:
palette = QModulePalette.instance()
palette.setUpdatesEnabled(False)
try:
pm.late_enable_package(codepath)
except self._current_package.InitializationFailed, e:
debug.critical("Initialization of package '%s' failed" %
codepath, str(e))
raise
finally:
palette.setUpdatesEnabled(True)
palette.treeWidget.expandAll()
av.takeItem(pos)
inst.addItem(item)
inst.sortItems()
self.erase_cache = True
self.select_package_after_update(codepath)
self.invalidate_current_pipeline()
def disable_current_package(self):
av = self._available_packages_list
inst = self._enabled_packages_list
item = inst.currentItem()
pos = inst.indexFromItem(item).row()
codepath = str(item.text())
pm = get_package_manager()
dependency_graph = pm.dependency_graph()
identifier = pm.get_package_by_codepath(codepath).identifier
if dependency_graph.in_degree(identifier) > 0:
rev_deps = dependency_graph.inverse_adjacency_list[identifier]
debug.critical("Missing dependency",
("There are other packages that depend on this:\n %s" +
"Please disable those first.") % rev_deps)
else:
pm.late_disable_package(codepath)
inst.takeItem(pos)
av.addItem(item)
av.sortItems()
self.erase_cache = True
self.select_package_after_update(codepath)
self.invalidate_current_pipeline()
def configure_current_package(self):
dlg = QPackageConfigurationDialog(self, self._current_package)
dlg.exec_()
def reload_current_package(self):
# DISABLES the current package and all reverse dependencies
inst = self._enabled_packages_list
item = inst.currentItem()
pm = get_package_manager()
codepath = str(item.text())
palette = QModulePalette.instance()
palette.setUpdatesEnabled(False)
pm.reload_package_disable(codepath)
self.erase_cache = True
def reload_current_package_finisher(self, codepath, reverse_deps, prefix_dictionary):
# REENABLES the current package and all reverse dependencies
pm = get_package_manager()
try:
pm.reload_package_enable(reverse_deps, prefix_dictionary)
except self._current_package.InitializationFailed, e:
debug.critical("Re-initialization of package '%s' failed" %
codepath, str(e))
raise
finally:
self.populate_lists()
palette = QModulePalette.instance()
palette.setUpdatesEnabled(True)
palette.treeWidget.expandAll()
self.erase_cache = True
self.select_package_after_update(codepath)
self.invalidate_current_pipeline()
def package_added(self, codepath):
# package was added, we need to update list
av = self._available_packages_list
inst = self._enabled_packages_list
for item in av.findItems(codepath, QtCore.Qt.MatchExactly):
pos = av.indexFromItem(item).row()
av.takeItem(pos)
inst.addItem(item)
inst.sortItems()
self.erase_cache = True
self.select_package_after_update(codepath)
def package_removed(self, codepath):
# package was removed, we need to update list
av = self._available_packages_list
inst = self._enabled_packages_list
for item in inst.findItems(codepath, QtCore.Qt.MatchExactly):
pos = inst.indexFromItem(item).row()
inst.takeItem(pos)
av.addItem(item)
av.sortItems()
self.erase_cache = True
self.select_package_after_update(codepath)
def select_package_after_update(self, codepath):
# Selecting the package causes self._current_package to be set,
# which reference prevents the package from being freed, so we
# queue it to select after the event loop completes.
self.emit(self.select_package_after_update_signal, codepath)
def select_package_after_update_slot(self, codepath):
inst = self._enabled_packages_list
av = self._available_packages_list
for item in av.findItems(codepath, QtCore.Qt.MatchExactly):
av.setCurrentItem(item)
for item in inst.findItems(codepath, QtCore.Qt.MatchExactly):
inst.setCurrentItem(item)
def set_buttons_to_enabled_package(self):
self._enable_button.setEnabled(False)
assert self._current_package
pm = get_package_manager()
from core.modules.basic_modules import identifier as basic_modules_identifier
from core.modules.abstraction import identifier as abstraction_identifier
is_not_basic_modules = (self._current_package.identifier != basic_modules_identifier)
is_not_abstraction = (self._current_package.identifier != abstraction_identifier)
can_disable = (pm.can_be_disabled(self._current_package.identifier) and
is_not_basic_modules and
is_not_abstraction)
self._disable_button.setEnabled(can_disable)
if not can_disable and is_not_basic_modules and is_not_abstraction:
msg = ("Module has reverse dependencies that must\n"+
"be first disabled.")
self._disable_button.setToolTip(msg)
else:
self._disable_button.setToolTip("")
conf = self._current_package.configuration is not None
self._configure_button.setEnabled(conf)
self._reload_button.setEnabled(is_not_basic_modules)
def set_buttons_to_available_package(self):
self._configure_button.setEnabled(False)
self._disable_button.setEnabled(False)
self._enable_button.setEnabled(True)
self._reload_button.setEnabled(False)
def set_package_information(self):
"""Looks at current package and sets all labels (name,
dependencies, etc.) appropriately.
"""
assert self._current_package
p = self._current_package
try:
p.load()
except Exception, e:
msg = 'ERROR: Could not load package.'
self._name_label.setText(msg)
self._version_label.setText(msg)
self._identifier_label.setText(msg)
self._dependencies_label.setText(msg)
self._description_label.setText(msg)
self._reverse_dependencies_label.setText(msg)
debug.critical('Cannot load package', str(e))
else:
self._name_label.setText(p.name)
deps = ', '.join(str(d) for d in p.dependencies()) or \
'No package dependencies.'
try:
pm = get_package_manager()
reverse_deps = \
(', '.join(pm.reverse_dependencies(p.identifier)) or
'No reverse dependencies.')
except KeyError:
reverse_deps = ("Reverse dependencies only " +
"available for enabled packages.")
self._identifier_label.setText(p.identifier)
self._version_label.setText(p.version)
self._dependencies_label.setText(deps)
self._description_label.setText(p.description)
self._reverse_dependencies_label.setText(reverse_deps)
##########################################################################
# Signal handling
def selected_enabled_list(self):
item = self._enabled_packages_list.currentItem()
if item is None:
return # prevent back and forth looping when clearing selection
self._available_packages_list.setCurrentItem(None)
codepath = str(item.text())
pm = get_package_manager()
self._current_package = pm.get_package_by_codepath(codepath)
self.set_buttons_to_enabled_package()
self.set_package_information()
self._enabled_packages_list.setFocus()
def selected_available_list(self):
item = self._available_packages_list.currentItem()
if item is None:
return # prevent back and forth looping when clearing selection
self._enabled_packages_list.setCurrentItem(None)
codepath = str(item.text())
pm = get_package_manager()
self._current_package = pm.look_at_available_package(codepath)
self.set_buttons_to_available_package()
self.set_package_information()
self._available_packages_list.setFocus()
def invalidate_current_pipeline(self):
# Reconstruct the current pipelines from root
from core.interpreter.cached import CachedInterpreter
CachedInterpreter.flush()
def reload_view(view):
view.version_selected(view.controller.current_version,
True, from_root=True)
# def reload_tab(tab):
# scene = tab.scene()
# if scene.current_pipeline:
# scene.current_pipeline.is_valid = False
# scene.current_pipeline= \
# view.controller.vistrail.getPipeline(
# scene.current_version)
# view.controller.validate(scene.current_pipeline)
# scene.setupScene(scene.current_pipeline)
#
# for i in xrange(view.stack.count()):
# tab = view.stack.widget(i)
# if isinstance(tab, QPipelineView):
# reload_tab(tab)
# for tab in view.detached_views:
# if isinstance(tab, QPipelineView):
# reload_tab(tab)
from gui.vistrails_window import _app
for i in xrange(_app.stack.count()):
view = _app.stack.widget(i)
reload_view(view)
for view in _app.windows:
reload_view(view)
class QPreferencesDialog(QtGui.QDialog):
def __init__(self, parent):
QtGui.QDialog.__init__(self, parent)
self._status_bar = QtGui.QStatusBar(self)
self.setWindowTitle('VisTrails Preferences')
layout = QtGui.QHBoxLayout(self)
layout.setMargin(0)
layout.setSpacing(0)
self.setLayout(layout)
f = QtGui.QFrame()
layout.addWidget(f)
l = QtGui.QVBoxLayout(f)
f.setLayout(l)
self._tab_widget = QtGui.QTabWidget(f)
l.addWidget(self._tab_widget)
self._tab_widget.setSizePolicy(QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
self._general_tab = self.create_general_tab()
self._tab_widget.addTab(self._general_tab, 'General Configuration')
self._thumbs_tab = self.create_thumbs_tab()
self._tab_widget.addTab(self._thumbs_tab, 'Thumbnails Configuration')
self._packages_tab = self.create_packages_tab()
self._tab_widget.addTab(self._packages_tab, 'Module Packages')
self._configuration_tab = self.create_configuration_tab()
self._tab_widget.addTab(self._configuration_tab, 'Expert Configuration')
self._button_box = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Close,
QtCore.Qt.Horizontal,
f)
self.connect(self._tab_widget,
QtCore.SIGNAL('currentChanged(int)'),
self.tab_changed)
self.connect(self._button_box,
QtCore.SIGNAL('clicked(QAbstractButton *)'),
self.close_dialog)
self.connect(self._configuration_tab._tree.treeWidget,
QtCore.SIGNAL('configuration_changed'),
self.configuration_changed)
self.connect(self._general_tab,
QtCore.SIGNAL('configuration_changed'),
self.configuration_changed)
self.connect(self._thumbs_tab,
QtCore.SIGNAL('configuration_changed'),
self.configuration_changed)
l.addWidget(self._button_box)
l.addWidget(self._status_bar)
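    # Hypothetical usage from a parent widget (not part of this module):
    #
    #     dlg = QPreferencesDialog(main_window)
    #     dlg.exec_()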
def close_dialog(self):
self.done(0)
def create_general_tab(self):
""" create_general_tab() -> QGeneralConfiguration
"""
return QGeneralConfiguration(self,
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
def create_thumbs_tab(self):
""" create_thumbs_tab() -> QThumbnailConfiguration
"""
return QThumbnailConfiguration(self,
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
def create_configuration_tab(self):
return QConfigurationWidget(self,
get_vistrails_persistent_configuration(),
get_vistrails_configuration(),
self._status_bar)
def create_packages_tab(self):
return QPackagesWidget(self, self._status_bar)
def sizeHint(self):
return QtCore.QSize(800, 600)
def tab_changed(self, index):
""" tab_changed(index: int) -> None
Keep general and advanced configurations in sync
"""
self._configuration_tab.configuration_changed(
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
self._general_tab.update_state(
get_vistrails_persistent_configuration(),
get_vistrails_configuration())
def configuration_changed(self, item, new_value):
""" configuration_changed(item: QTreeWidgetItem *,
new_value: QString) -> None
Write the current session configuration to startup.xml.
Note: This is already happening on close to capture configuration
items that are not set in preferences. We are doing this here too, so
we guarantee the changes were saved before VisTrails crashes.
"""
from PyQt4 import QtCore
from gui.application import get_vistrails_application
get_vistrails_application().save_configuration()
| bsd-3-clause | 1,854,136,796,564,567,600 | 41.712349 | 103 | 0.587356 | false |
getsmap/smap4 | WS/email/sendEmail.py | 1 | 1254 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import smtplib for the actual sending function
import sys, os
import smtplib
import email
# Import the email modules we'll need
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
def sendEmail(fromEmail="", password=None, toEmails=[], smtp="smtp.gmail.com",\
port=25, msg=""):
server = smtplib.SMTP(smtp, port)
server.ehlo()
if password!=None:
server.starttls()
server.login(fromEmail, password)
server.sendmail(fromEmail, toEmails, msg.as_string())
server.close()
if __name__=='__main__':
pass
"""fromEmail = "[email protected]" #"[email protected]"
password = None
smtp = "mail2.malmo.se"
port = 25
toEmails = ["[email protected]"]
subject = "Testar ÅÄÖ åäö"
content = "ÅÄÖ åäö Nu testar jag skicka en länk...\n\n/Johan"
msg = MIMEText(content, "plain", "utf-8")
msg['Subject'] = subject
msg['From'] = fromEmail
msg['To'] = ";".join(toEmails)
sendEmail(fromEmail, password, \
toEmails=toEmails, msg=msg, \
smtp=smtp, port=port)"""
| apache-2.0 | 6,888,675,901,893,147,000 | 26.577778 | 79 | 0.63336 | false |
RedhawkSDR/integration-gnuhawk | components/pwr_squelch_cc/tests/test_pwr_squelch_cc.py | 1 | 4071 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in pwr_squelch_cc"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
self.assertEquals(props.has_key(expectedProp.id), True)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../pwr_squelch_cc.spd.xml") # By default tests all implementations
| gpl-3.0 | 4,106,507,131,166,885,400 | 46.894118 | 133 | 0.592238 | false |
qbilius/autoart | dots/dots.py | 1 | 1438 | import numpy as np
from PIL import Image
import scipy.ndimage
import matplotlib.pyplot as plt
def gabor(
theta=0,
gamma=1,
sigma=2,
lam=5.6,
k=10
):
# Mutch and Lowe, 2006
theta -= np.pi/2
x,y = np.meshgrid(np.arange(-k,k),np.arange(-k,k))
X = x*np.cos(theta) - y*np.sin(theta)
Y = x*np.sin(theta) + y*np.cos(theta)
g = np.exp( - (X**2 + (gamma*Y)**2) / (2*sigma**2) ) * np.cos( 2*np.pi*X/lam )
g -= np.mean(g) # mean 0
    g /= np.sum(g**2)  # normalize by the energy (sum of squares)
g[np.abs(g)<.001] = 0
return g
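# Hypothetical usage (not part of the original script): one kernel per
# orientation, mirroring the filter bank that get_edges() builds below.
#
#     demo_bank = [gabor(theta=t) for t in (0, np.pi / 4, np.pi / 2)]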
def get_edges(stim, oris, sf=1):
gabor_max = stim
edge_map = np.zeros((len(oris),)+gabor_max.shape)
for oi, ori in enumerate(oris):
gab = gabor(theta=ori, sigma=2*sf,lam=5.6*sf,k=10*sf)
edges = scipy.ndimage.correlate(gabor_max,gab)
edge_map[oi] = edges
gabor_max = np.max(edge_map, axis=0)
gabor_argmax = np.argmax(edge_map, axis=0)
return gabor_max, gabor_argmax
im = Image.open('dots_input.png').convert('L')
stim = np.asarray(im)*1.
stim = stim[50:125,50:125]
oris = np.pi/8*np.arange(8)
gabor_max, gabor_argmax = get_edges(stim, oris, sf=1)
hist, bin_edges = np.histogram(gabor_max.ravel(),bins=1)
threshold = bin_edges[-2]
inds = gabor_max>threshold
gabor_max[np.logical_not(inds)] = 0
plt.imshow(gabor_max)
plt.axis('off')
#plt.show()
plt.savefig('dots.jpg', dpi=300, format='jpg',
bbox_inches='tight', pad_inches=0)
| mit | -7,737,220,422,988,155,000 | 25.62963 | 82 | 0.609875 | false |
keishi/chromium | tools/isolate/run_test_cases_smoke_test.py | 1 | 2333 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import subprocess
import sys
import unittest
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(ROOT_DIR, 'data', 'gtest_fake'))
import gtest_fake
class TraceTestCases(unittest.TestCase):
def setUp(self):
# Make sure there's no environment variable that could do side effects.
os.environ.pop('GTEST_SHARD_INDEX', '')
os.environ.pop('GTEST_TOTAL_SHARDS', '')
def test_simple(self):
target = os.path.join(ROOT_DIR, 'data', 'gtest_fake', 'gtest_fake.py')
cmd = [
sys.executable,
os.path.join(ROOT_DIR, 'run_test_cases.py'),
'--no-dump',
target,
]
logging.debug(' '.join(cmd))
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# pylint is confused.
out, err = proc.communicate() or ('', '')
self.assertEquals(0, proc.returncode)
if sys.platform == 'win32':
out = out.replace('\r\n', '\n')
lines = out.splitlines()
expected_out_re = [
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
r'\[\d/\d\] \d\.\d\ds .+',
re.escape('Note: Google Test filter = Baz.Fail'),
r'',
] + [
re.escape(l) for l in gtest_fake.get_test_output('Baz.Fail').splitlines()
] + [
'',
] + [
re.escape(l) for l in gtest_fake.get_footer(1).splitlines()
] + [
'',
re.escape('Success: 3 75.00%'),
re.escape('Flaky: 0 0.00%'),
re.escape('Fail: 1 25.00%'),
r'\d+\.\ds Done running 4 tests with 6 executions. \d+\.\d test/s',
]
for index in range(len(expected_out_re)):
line = lines.pop(0)
self.assertTrue(
re.match('^%s$' % expected_out_re[index], line),
(index, expected_out_re[index], repr(line)))
self.assertEquals([], lines)
self.assertEquals('', err)
if __name__ == '__main__':
VERBOSE = '-v' in sys.argv
logging.basicConfig(level=logging.DEBUG if VERBOSE else logging.ERROR)
unittest.main()
| bsd-3-clause | 5,622,570,705,021,971,000 | 29.697368 | 79 | 0.571367 | false |
SUNET/eduid-common | src/eduid_common/api/msg.py | 1 | 4576 | # -*- coding: utf-8 -*-
import logging
from typing import List
import eduid_msg
from eduid_common.api.exceptions import MsgTaskFailed
from eduid_common.config.base import MsgConfigMixin
__author__ = 'lundberg'
logger = logging.getLogger(__name__)
TEMPLATES_RELATION = {
'mobile-validator': 'mobile-confirm',
'mobile-reset-password': 'mobile-reset-password',
'nin-validator': 'nin-confirm',
'nin-reset-password': 'nin-reset-password',
}
LANGUAGE_MAPPING = {
'en': 'en_US',
'sv': 'sv_SE',
}
class MsgRelay(object):
def __init__(self, config: MsgConfigMixin):
self.conf = config
eduid_msg.init_app(config.celery)
# these have to be imported _after_ eduid_msg.init_app()
from eduid_msg.tasks import get_postal_address, get_relations_to, pong, send_message, sendsms
self._get_postal_address = get_postal_address
self._get_relations_to = get_relations_to
self._send_message = send_message
self._send_sms = sendsms
self._pong = pong
@staticmethod
def get_language(lang: str) -> str:
return LANGUAGE_MAPPING.get(lang, 'en_US')
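        # e.g. get_language('sv') -> 'sv_SE'; unmapped codes such as 'fi'
        # fall back to 'en_US'.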
def get_postal_address(self, nin: str, timeout: int = 25) -> dict:
"""
:param nin: Swedish national identity number
:param timeout: Max wait time for task to finish
:return: Official name and postal address
The expected address format is:
OrderedDict([
(u'Name', OrderedDict([
(u'GivenNameMarking', u'20'),
(u'GivenName', u'personal name'),
(u'SurName', u'thesurname')
])),
(u'OfficialAddress', OrderedDict([
(u'Address2', u'StreetName 103'),
(u'PostalCode', u'74141'),
(u'City', u'STOCKHOLM')
]))
])
"""
rtask = self._get_postal_address.apply_async(args=[nin])
try:
ret = rtask.get(timeout=timeout)
if ret is not None:
return ret
raise MsgTaskFailed('No postal address returned from Navet')
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'get_postal_address task failed: {e}')
def get_relations_to(self, nin: str, relative_nin: str, timeout: int = 25) -> List[str]:
"""
Get a list of the NAVET 'Relations' type codes between a NIN and a relatives NIN.
Known codes:
M = spouse (make/maka)
B = child (barn)
FA = father
MO = mother
VF = some kind of legal guardian status. Children typically have ['B', 'VF'] it seems.
:param nin: Swedish National Identity Number
:param relative_nin: Another Swedish National Identity Number
:param timeout: Max wait time for task to finish
:return: List of codes. Empty list if the NINs are not related.
"""
rtask = self._get_relations_to.apply_async(args=[nin, relative_nin])
try:
ret = rtask.get(timeout=timeout)
if ret is not None:
return ret
            raise MsgTaskFailed('No relations returned from Navet')
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'get_relations_to task failed: {e}')
def sendsms(self, recipient: str, message: str, reference: str, timeout: int = 25) -> None:
"""
:param recipient: the recipient of the sms
:param message: message as a string (160 chars per sms)
:param reference: Audit reference to help cross reference audit log and events
:param timeout: Max wait time for task to finish
"""
logger.info(f'Trying to send SMS with reference: {reference}')
logger.debug(f'Recipient: {recipient}. Message: {message}')
rtask = self._send_sms.apply_async(args=[recipient, message, reference])
try:
res = rtask.get(timeout=timeout)
logger.info(f'SMS with reference {reference} sent. Task result: {res}')
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'sendsms task failed: {repr(e)}')
def ping(self, timeout: int = 1) -> str:
rtask = self._pong.apply_async()
try:
return rtask.get(timeout=timeout)
except Exception as e:
rtask.forget()
raise MsgTaskFailed(f'ping task failed: {repr(e)}')
| bsd-3-clause | 5,227,135,136,925,630,000 | 35.31746 | 101 | 0.577797 | false |
leethargo/geonet | geonet/degeneracy.py | 1 | 2109 | '''
Detecting degeneracy and merging zero-length edges.
'''
from geonet.network import SteinerTree, merge_pos
from geonet.geometry import distance
from geonet.constants import abstol
def degenerate_edges(tree, steiner_pos, abstol=abstol):
'''list of edges with (numerically) zero length'''
assert isinstance(tree, SteinerTree)
pos = merge_pos(tree, steiner_pos)
return [(u,v) for (u,v) in tree.get_arcs()
if (tree.is_steiner(u) or tree.is_steiner(v))
and distance(pos[u], pos[v]) <= abstol]
def is_degenerate(tree, steiner_pos, abstol=abstol):
return degenerate_edges(tree, steiner_pos, abstol) != []
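# Illustrative sketch (API details assumed, not taken from this module): a
# Steiner node sitting exactly on a terminal yields a zero-length edge that
# ``merged`` below collapses, keeping the terminal:
#
#     tree = SteinerTree(['t', 's'], [('t', 's')], {'t': (0.0, 0.0)})
#     new_tree, new_pos = merged(tree, {'s': (0.0, 0.0)})
#     # 's' disappears from new_tree and from new_pos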
def merged(tree, steiner_pos, abstol=abstol):
'''build new tree that merges all degenerate edges.
    when merging an edge, terminals are always kept; otherwise the
    lexicographically smaller node survives. returns a tree and a matching
    dict of steiner node positions.
'''
degedges = degenerate_edges(tree, steiner_pos, abstol)
# key: removed node, value: remaining node (taking over)
turn_into = {}
for u, v in degedges:
if tree.is_terminal(u) and tree.is_terminal(v):
# don't merge terminals
continue
elif tree.is_terminal(u):
# keep terminals
pass
elif tree.is_terminal(v):
# keep terminals
u, v = v, u
elif v < u:
# keep lexicographically smaller node
u, v = v, u
turn_into[v] = u
# merge nodes into transitive end-point
for v, u in turn_into.iteritems():
while u in turn_into:
u = turn_into[u]
turn_into[v] = u
# build new tree data
new_nodes = [u for u in tree.get_nodes() if u not in turn_into]
new_edges = []
for u, v in tree.get_arcs():
uu, vv = turn_into.get(u, u), turn_into.get(v, v)
if uu != vv: # remove self-loops
new_edges.append((uu, vv))
new_tree = SteinerTree(new_nodes, new_edges, tree.get_terminal_positions())
new_pos = {s:steiner_pos[s] for s in steiner_pos if s in new_nodes}
return new_tree, new_pos
| mit | 4,646,769,807,229,963,000 | 30.954545 | 79 | 0.614983 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/data/_errordata.py | 1 | 2335 |
import datetime
import os.path
import subprocess
import time
import xml.etree.ElementTree
import dsz
import ops
XALAN = os.path.join(ops.RESDIR, 'ExternalLibraries', 'java-j2se_1.6-sun', 'xalan.jar')
STYLESHEET = os.path.join(ops.DATA, 'DszErrorExtractor.xsl')
class DszCommandError(list, ):
def __init__(self, timestamp, cmdid):
self.timestamp = timestamp
self.__cmdid = cmdid
list.__init__(self)
def __str__(self):
msg = ('Error running command %d: %s\n' % (self.__cmdid, dsz.cmd.data.Get('commandmetadata::fullcommand', dsz.TYPE_STRING, cmdId=self.__cmdid)[0]))
if len(self):
for i in self:
msg += (' - %s' % i)
else:
msg += ' - No additional information available. Try viewing the logs.'
return msg
class DszCommandErrorData(object, ):
def __init__(self, type, text, timestamp):
self.type = type
self.text = text
self.timestamp = timestamp
def __str__(self):
return ('%s: %s' % (self.type, self.text))
def getLastError():
return getErrorFromCommandId(cmdid=dsz.cmd.LastId())
def getErrorFromCommandId(cmdid):
if (cmdid < 1):
return []
dataDir = os.path.join(ops.LOGDIR, 'Data')
files = []
for file in os.listdir(dataDir):
fullpath = os.path.join(dataDir, file)
if (not os.path.isfile(fullpath)):
continue
try:
if (int(file.split('-', 1)[0]) == cmdid):
files.append(fullpath)
except ValueError:
pass
errorSets = []
for file in files:
errorSets.append(_parseXML(file, cmdid))
return errorSets
def _parseXML(fullpath, cmdid):
xsltoutput = subprocess.Popen(['javaw', '-jar', XALAN, '-in', fullpath, '-xsl', STYLESHEET], stdout=subprocess.PIPE).communicate()[0]
tree = xml.etree.ElementTree.fromstring(xsltoutput)
if (not tree.get('timestamp')):
        return DszCommandError(timestamp='', cmdid=cmdid)
timestamp = datetime.datetime(*time.strptime(tree.get('timestamp'), '%Y-%m-%dT%H:%M:%S')[0:6])
errors = DszCommandError(timestamp=timestamp, cmdid=cmdid)
for error in tree:
errors.append(DszCommandErrorData(type=error.get('type'), text=unicode(error.text, 'utf_8'), timestamp=timestamp))
return errors | unlicense | -5,100,924,808,575,776,000 | 32.855072 | 155 | 0.6197 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_virtual_hubs_operations.py | 1 | 33305 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualHubsOperations:
"""VirtualHubsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> "_models.VirtualHub":
"""Retrieves the details of a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
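    # Example (illustrative sketch, not part of the generated client): with a
    # NetworkManagementClient whose `virtual_hubs` attribute is this operation
    # group, a lookup could read:
    #     hub = await client.virtual_hubs.get("my-rg", "my-hub")
    #     print(hub.name, hub.location)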
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.VirtualHub",
**kwargs
) -> "_models.VirtualHub":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'VirtualHub')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.VirtualHub",
**kwargs
) -> AsyncLROPoller["_models.VirtualHub"]:
"""Creates a VirtualHub resource if it doesn't exist else updates the existing VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to create or update VirtualHub.
:type virtual_hub_parameters: ~azure.mgmt.network.v2020_08_01.models.VirtualHub
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualHub or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.VirtualHub]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
virtual_hub_parameters=virtual_hub_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
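    # Example (illustrative sketch): driving the create/update LRO to
    # completion, assuming `hub_params` is a populated _models.VirtualHub:
    #     poller = await client.virtual_hubs.begin_create_or_update(
    #         "my-rg", "my-hub", hub_params)
    #     hub = await poller.result()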
async def update_tags(
self,
resource_group_name: str,
virtual_hub_name: str,
virtual_hub_parameters: "_models.TagsObject",
**kwargs
) -> "_models.VirtualHub":
"""Updates VirtualHub tags.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param virtual_hub_parameters: Parameters supplied to update VirtualHub tags.
:type virtual_hub_parameters: ~azure.mgmt.network.v2020_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualHub, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.VirtualHub
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualHub"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(virtual_hub_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualHub', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a VirtualHub.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}'} # type: ignore
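    # Example (illustrative sketch): awaiting deletion to completion:
    #     poller = await client.virtual_hubs.begin_delete("my-rg", "my-hub")
    #     await poller.result()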
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.ListVirtualHubsResult"]:
"""Lists all the VirtualHubs in a resource group.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs'} # type: ignore
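    # Example (illustrative sketch): `async for` over the AsyncItemPaged
    # result follows server-side next links transparently:
    #     async for hub in client.virtual_hubs.list_by_resource_group("my-rg"):
    #         print(hub.name)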
def list(
self,
**kwargs
) -> AsyncIterable["_models.ListVirtualHubsResult"]:
"""Lists all the VirtualHubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVirtualHubsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ListVirtualHubsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVirtualHubsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVirtualHubsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualHubs'} # type: ignore
async def _get_effective_virtual_hub_routes_initial(
self,
resource_group_name: str,
virtual_hub_name: str,
effective_routes_parameters: Optional["_models.EffectiveRoutesParameters"] = None,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._get_effective_virtual_hub_routes_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if effective_routes_parameters is not None:
body_content = self._serialize.body(effective_routes_parameters, 'EffectiveRoutesParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_get_effective_virtual_hub_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/effectiveRoutes'} # type: ignore
async def begin_get_effective_virtual_hub_routes(
self,
resource_group_name: str,
virtual_hub_name: str,
effective_routes_parameters: Optional["_models.EffectiveRoutesParameters"] = None,
**kwargs
) -> AsyncLROPoller[None]:
"""Gets the effective routes configured for the Virtual Hub resource or the specified resource .
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param effective_routes_parameters: Parameters supplied to get the effective routes for a
specific resource.
:type effective_routes_parameters: ~azure.mgmt.network.v2020_08_01.models.EffectiveRoutesParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_effective_virtual_hub_routes_initial(
resource_group_name=resource_group_name,
virtual_hub_name=virtual_hub_name,
effective_routes_parameters=effective_routes_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_virtual_hub_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/effectiveRoutes'} # type: ignore
| mit | 2,354,238,087,108,972,000 | 49.309668 | 223 | 0.645188 | false |
AustereCuriosity/astropy | astropy/coordinates/builtin_frames/__init__.py | 1 | 5052 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This package contains the coordinate frames actually implemented by astropy.
Users shouldn't use this module directly, but rather import from the
`astropy.coordinates` module. While it is likely to exist for the long-term,
the existence of this package and details of its organization should be
considered an implementation detail, and is not guaranteed to hold for future
versions of astropy.
Notes
-----
The builtin frame classes are all imported automatically into this package's
namespace, so there's no need to access the sub-modules directly.
To implement a new frame in Astropy, a developer should add the frame as a new
module in this package. Any "self" transformations (i.e., those that transform
from one frame to another frame of the same class) should be included in that
module. Transformation functions connecting the new frame to other frames
should be in a separate module, which should be imported in this package's
``__init__.py`` to ensure the transformations are hooked up when this package is
imported. Placing the transformation functions in separate modules avoids
circular dependencies, because they need references to the frame classes.
"""
from .baseradec import BaseRADecFrame
from .icrs import ICRS
from .fk5 import FK5
from .fk4 import FK4, FK4NoETerms
from .galactic import Galactic
from .galactocentric import Galactocentric
from .lsr import LSR, GalacticLSR
from .supergalactic import Supergalactic
from .altaz import AltAz
from .gcrs import GCRS, PrecessedGeocentric
from .cirs import CIRS
from .itrs import ITRS
from .hcrs import HCRS
from .ecliptic import (GeocentricTrueEcliptic, BarycentricTrueEcliptic,
HeliocentricTrueEcliptic, BaseEclipticFrame)
from .skyoffset import SkyOffsetFrame
# need to import transformations so that they get registered in the graph
from . import icrs_fk5_transforms
from . import fk4_fk5_transforms
from . import galactic_transforms
from . import supergalactic_transforms
from . import icrs_cirs_transforms
from . import cirs_observed_transforms
from . import intermediate_rotation_transforms
from . import ecliptic_transforms
# we define an __all__ because otherwise the transformation modules get included
__all__ = ['ICRS', 'FK5', 'FK4', 'FK4NoETerms', 'Galactic', 'Galactocentric',
'Supergalactic', 'AltAz', 'GCRS', 'CIRS', 'ITRS', 'HCRS',
'PrecessedGeocentric', 'GeocentricTrueEcliptic',
'BarycentricTrueEcliptic', 'HeliocentricTrueEcliptic',
'SkyOffsetFrame', 'GalacticLSR', 'LSR',
'BaseEclipticFrame', 'BaseRADecFrame']
def _make_transform_graph_docs():
"""
Generates a string for use with the coordinate package's docstring
to show the available transforms and coordinate systems
"""
import inspect
from textwrap import dedent
from ...extern import six
from ..baseframe import BaseCoordinateFrame, frame_transform_graph
isclass = inspect.isclass
coosys = [item for item in six.itervalues(globals())
if isclass(item) and issubclass(item, BaseCoordinateFrame)]
# currently, all of the priorities are set to 1, so we don't need to show
# then in the transform graph.
graphstr = frame_transform_graph.to_dot_graph(addnodes=coosys,
priorities=False)
docstr = """
The diagram below shows all of the coordinate systems built into the
`~astropy.coordinates` package, their aliases (useful for converting
other coordinates to them using attribute-style access) and the
pre-defined transformations between them. The user is free to
override any of these transformations by defining new transformations
between these systems, but the pre-defined transformations should be
sufficient for typical usage.
The color of an edge in the graph (i.e. the transformations between two
frames) is set by the type of transformation; the legend box defines the
mapping from transform class name to color.
.. graphviz::
"""
docstr = dedent(docstr) + ' ' + graphstr.replace('\n', '\n ')
# colors are in dictionary at the bottom of transformations.py
from ..transformations import trans_to_color
html_list_items = []
for cls, color in trans_to_color.items():
block = u"""
<li style='list-style: none;'>
<p style="font-size: 12px;line-height: 24px;font-weight: normal;color: #848484;padding: 0;margin: 0;">
<b>{0}:</b>
<span style="font-size: 24px; color: {1};"><b>➝</b></span>
</p>
</li>
""".format(cls.__name__, color)
html_list_items.append(block)
graph_legend = u"""
.. raw:: html
<ul>
{}
</ul>
""".format("\n".join(html_list_items))
docstr = docstr + dedent(graph_legend)
return docstr
_transform_graph_docs = _make_transform_graph_docs()
| bsd-3-clause | 1,821,318,041,240,630,800 | 38.76378 | 118 | 0.70396 | false |
skosukhin/spack | var/spack/repos/builtin/packages/py-wheel/package.py | 1 | 1626 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyWheel(PythonPackage):
"""A built-package format for Python."""
homepage = "https://pypi.python.org/pypi/wheel"
url = "https://pypi.io/packages/source/w/wheel/wheel-0.29.0.tar.gz"
version('0.29.0', '555a67e4507cedee23a0deb9651e452f')
version('0.26.0', '4cfc6e7e3dc7377d0164914623922a10')
depends_on('py-setuptools', type='build')
| lgpl-2.1 | -6,316,427,960,259,754,000 | 42.945946 | 78 | 0.674662 | false |
syhpoon/xyzcmd | libxyz/core/logger/loglevel.py | 1 | 1508 | #-*- coding: utf8 -*
#
# Max E. Kuznecov ~syhpoon <[email protected]> 2008
#
# This file is part of XYZCommander.
# XYZCommander is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# XYZCommander is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
# You should have received a copy of the GNU Lesser Public License
# along with XYZCommander. If not, see <http://www.gnu.org/licenses/>.
class LogLevel(object):
"""
Available log levels
"""
def __init__(self):
self._levels = {"NONE": 0,
"ERROR": 1,
"WARNING": 2,
"INFO": 4,
"DEBUG": 8,
"UNKNOWN": 16,
"PANIC": 32,
"ALL": 63,
}
self._str_levels = dict([(v, k) for k, v in self._levels.iteritems()])
for _k, _v in self._levels.iteritems():
setattr(self, _k, _v)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def str_level(self, level):
"""
Return string level representation
"""
return self._str_levels[level]
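    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Example (illustrative): the levels double as bit flags, so they can be
    # OR-ed together and tested against ALL:
    #     levels = LogLevel()
    #     levels.DEBUG                 # -> 8
    #     levels.str_level(8)          # -> "DEBUG"
    #     levels.ERROR | levels.INFO   # -> 5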
| gpl-3.0 | -8,236,682,833,509,522,000 | 32.511111 | 78 | 0.552387 | false |
RedBulli/CourseDeadlines | CourseDeadlines/settings.py | 1 | 6055 | # Django settings for CourseDeadlines project.
import os
settings_dir = os.path.dirname(__file__)
project_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0], os.path.pardir)
DEBUG = True
TASTYPIE_FULL_DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'database.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Helsinki'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
project_dir + '/static/',
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fb4)u@=p!d4py3eqh_2bm%^f(d4!u5!$1rex(9e%6u%u8(xo_!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'CourseDeadlines.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'CourseDeadlines.wsgi.application'
TEMPLATE_DIRS = (
project_dir + '/templates'
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
AUTHENTICATION_BACKENDS = (
'django_openid_auth.auth.OpenIDBackend',
'django.contrib.auth.backends.ModelBackend',
)
OPENID_CREATE_USERS = True
OPENID_UPDATE_DETAILS_FROM_SREG = True
LOGIN_URL = '/openid/login/'
LOGIN_REDIRECT_URL = '/'
OPENID_SSO_SERVER_URL = 'https://www.google.com/accounts/o8/id'
OPENID_USE_EMAIL_FOR_USERNAME = True
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'django_openid_auth',
'tastypie',
'templatetag_handlebars',
'Course',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| mit | 1,575,287,465,834,630,700 | 33.016854 | 127 | 0.688852 | false |
nict-isp/scn-openflow-driver | src/ncps_openflow/scn/plugins/middleware/interface.py | 1 | 6895 | # -*- coding: utf-8 -*-
"""
scn.plugins.middleware.interface
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright (c) 2015, National Institute of Information and Communications Technology.All rights reserved.
:license: GPL3, see LICENSE for more details.
"""
import json
from pox.core import core
from pox.lib.addresses import IPAddr
from pox.lib.revent import EventMixin
from events import (CmdResp,
InitializeReq, InitializeResp, CreateBiPathReq, CreateBiPathResp,
DeleteBiPathReq, DeleteBiPathResp, UpdatePathReq, UpdatePathResp,
OptimizeReq, OptimizeResp, PushReq, HeartBeatReq, DumpReq, DumpResp)
from utils.widgets import Transport, Peer
from utils.connection import MWTcpServer, MWUdpServer, MWTcpClient
log = core.getLogger()
def send_tcp_payload(dst_peer, payload):
"""send TCP message I/F using MWTcpClient(original TCP client for SCN).
dst_peer need for getting application port.
"""
# get registed node.
node = core.topology.getHost(dst_peer.ipaddr)
ofp = node.ofp
switch = ofp.ofs
dpid = switch.dpid
port = ofp.number
src_mac = ofp.hwAddr
src_ip = ofp.ipAddr
src = (src_mac, src_ip)
dst_mac = node.macAddr
dst_ip = dst_peer.ipaddr
dst_port = dst_peer.port
dst = (dst_mac, dst_ip, dst_port)
log.info("request : dpid=%s, port=%s,src=%s, dst=%s" % (dpid, port, src, dst))
log.debug("payload : %s" % str(payload))
tcp_client = MWTcpClient(dpid, port, src, dst, payload)
core.protocols.addClient(tcp_client)
tcp_client.start()
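# Example (illustrative sketch): given a utils.widgets.Peer `peer` describing
# the destination application, a JSON command could be sent with:
#     send_tcp_payload(peer, cmd.to_json() + MWTcpServer.DELIMITER)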
class Interface(EventMixin):
"""request and response I/F from/to node(SCN) or Switch(OFC)
"""
_eventMixin_events = [
CmdResp,
InitializeReq,
CreateBiPathReq,
UpdatePathReq,
DeleteBiPathReq,
OptimizeReq,
HeartBeatReq,
DumpReq
]
supported = {
Peer.TCP : send_tcp_payload
}
def __init__(self):
EventMixin.__init__(self)
udp_server = MWUdpServer(self.process_command, Transport.LPORT)
core.protocols.addServer(udp_server)
tcp_server = MWTcpServer(self.process_command, Transport.LPORT)
core.protocols.addServer(tcp_server,
needSend=True)
core.middleware.listenTo(self)
self.register_event_handler()
        # Register the decoder class for each input (request/response)
        # message to this OFC server: the NAME property of the message body
        # selects the corresponding class method to call.
self.decode_classes = {
# JSON CMD Name : called Class
InitializeReq.NAME : InitializeReq,
CreateBiPathReq.NAME : CreateBiPathReq,
UpdatePathReq.NAME : UpdatePathReq,
DeleteBiPathReq.NAME : DeleteBiPathReq,
OptimizeReq.NAME : OptimizeReq,
HeartBeatReq.NAME : HeartBeatReq,
DumpReq.NAME : DumpReq
}
def register_event_handler(self):
"""register handler for event raised middlewar.py
request handler is for innter domain request.
"""
for req in [InitializeReq, CreateBiPathReq, UpdatePathReq, \
DeleteBiPathReq, OptimizeReq, HeartBeatReq, DumpReq]:
core.middleware.addListenerByName(req.__name__, self.handle_request)
for resp in [InitializeResp, CreateBiPathResp, UpdatePathResp, \
DeleteBiPathResp, OptimizeResp, DumpResp, PushReq, CmdResp]:
core.middleware.addListenerByName(resp.__name__, self.handle_response)
def process_command(self, node, data):
"""input handler.
call when MWTcpServer receive payload.
@param [ScnOpenFlowHost] node input src node
@param [string] data JSON format
"""
log.debug('process_command = [%s]' % repr(data))
event = self.decode_json(data)
if not node:
            # if not node -> create a registered node instance from listen_peer
node = core.topology.getHost(IPAddr(event.dst_peer.ipaddr))
self.raiseEvent(event, node)
def decode_json(self, data):
"""decode json protocol cmd.
use reigisted class(self.decode_class)
"""
try:
kwargs = json.loads(data)
kwargs['buf'] = data
cls = self.decode_classes.get(kwargs['NAME'])
if not cls:
log.warn('Unknown Command Type')
return CmdResp()
decoded = cls.from_dict(kwargs)
if not decoded:
log.warn('No Data ? Class=%s' % str(cls))
return CmdResp()
except (TypeError, ValueError) as inst:
log.exception(inst)
log.error("Could not decode json : [%s]" % str(data))
return CmdResp()
log.info('\n--\n%s command received\n%s\n--\n' % (decoded.NAME, repr(data)))
return decoded
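    # Example (illustrative): a payload whose NAME is not registered above
    # falls through to a bare CmdResp:
    #     self.decode_json('{"NAME": "NO-SUCH-CMD"}')   # -> CmdResp()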
# ========= Handler from OFC Server(middlewar.py) raise (request/response) message =========== #
def handle_response(self, resp):
""" handler to send response.
"""
log.info("send response to node :%s" % str(resp))
self.__send_data__(resp, resp.dst_peer)
def handle_request(self, req):
""" handler to send resquest.
"""
log.info("send request to other OFC :%s" % str(req))
self.__send_data__(req, req.dst_peer)
def __send_data__(self, send_cls, dst_peer):
"""send data.
check protocol and convert data.
do send method.
"""
if not dst_peer:
log.warning('Peer is none. It might be a static service with no listen peer...')
return
if not self.__check_supported_protocol__(dst_peer.protocol):
log.warn("not supported protocol.%s" % str(dst_peer.protocol))
return
payload = None
try:
payload = send_cls.to_json() + MWTcpServer.DELIMITER
except (TypeError, ValueError) as inst:
log.exception(inst)
if not payload:
log.warn("no payload")
return
log.info('\n--\n%s: %s to\n%s\n--\n' % (send_cls.NAME, repr(payload), dst_peer))
self.__get_request_method__(dst_peer.protocol)(dst_peer, payload)
def __check_supported_protocol__(self, protocol):
"""check that protocol can use ?
"""
return self.supported.has_key(protocol)
def __get_request_method__(self, protocol):
"""get sender method.
"""
return self.supported[protocol]
def launch(**kwargs):
"""middlewareCmds launch
**kwargs is need for option args.
see __init__.py also.
"""
log.debug(kwargs)
Interface()
| gpl-3.0 | -4,565,760,055,426,813,400 | 32.009756 | 116 | 0.598493 | false |
yeming233/rally | rally/plugins/openstack/context/quotas/quotas.py | 1 | 4765 | # Copyright 2014: Dassault Systemes
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common.i18n import _
from rally.common import logging
from rally.common import validation
from rally import consts
from rally import osclients
from rally.plugins.openstack.context.quotas import cinder_quotas
from rally.plugins.openstack.context.quotas import designate_quotas
from rally.plugins.openstack.context.quotas import manila_quotas
from rally.plugins.openstack.context.quotas import neutron_quotas
from rally.plugins.openstack.context.quotas import nova_quotas
from rally.task import context
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="quotas", platform="openstack", order=300)
class Quotas(context.Context):
"""Sets OpenStack Tenants quotas."""
CONFIG_SCHEMA = {
"type": "object",
"$schema": consts.JSON_SCHEMA,
"additionalProperties": False,
"properties": {
"nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,
"cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,
"manila": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA,
"designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA,
"neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA
}
}
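    # Example (illustrative) context section of a task config; the keys must
    # satisfy the per-service schemas referenced above:
    #     "context": {
    #         "quotas": {
    #             "nova": {"instances": 20, "cores": 40},
    #             "neutron": {"network": 10, "port": 100}
    #         }
    #     }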
def __init__(self, ctx):
super(Quotas, self).__init__(ctx)
self.clients = osclients.Clients(
self.context["admin"]["credential"],
api_info=self.context["config"].get("api_versions"))
self.manager = {
"nova": nova_quotas.NovaQuotas(self.clients),
"cinder": cinder_quotas.CinderQuotas(self.clients),
"manila": manila_quotas.ManilaQuotas(self.clients),
"designate": designate_quotas.DesignateQuotas(self.clients),
"neutron": neutron_quotas.NeutronQuotas(self.clients)
}
self.original_quotas = []
def _service_has_quotas(self, service):
return len(self.config.get(service, {})) > 0
@logging.log_task_wrapper(LOG.info, _("Enter context: `quotas`"))
def setup(self):
for tenant_id in self.context["tenants"]:
for service in self.manager:
if self._service_has_quotas(service):
# NOTE(andreykurilin): in case of existing users it is
# required to restore original quotas instead of reset
# to default ones.
if "existing_users" in self.context:
self.original_quotas.append(
(service, tenant_id,
self.manager[service].get(tenant_id)))
self.manager[service].update(tenant_id,
**self.config[service])
def _restore_quotas(self):
for service, tenant_id, quotas in self.original_quotas:
try:
self.manager[service].update(tenant_id, **quotas)
except Exception as e:
LOG.warning("Failed to restore quotas for tenant %(tenant_id)s"
" in service %(service)s \n reason: %(exc)s" %
{"tenant_id": tenant_id, "service": service,
"exc": e})
def _delete_quotas(self):
for service in self.manager:
if self._service_has_quotas(service):
for tenant_id in self.context["tenants"]:
try:
self.manager[service].delete(tenant_id)
except Exception as e:
LOG.warning("Failed to remove quotas for tenant "
"%(tenant_id)s in service %(service)s "
"\n reason: %(exc)s"
% {"tenant_id": tenant_id,
"service": service, "exc": e})
@logging.log_task_wrapper(LOG.info, _("Exit context: `quotas`"))
def cleanup(self):
if self.original_quotas:
# existing users
self._restore_quotas()
else:
self._delete_quotas()
| apache-2.0 | 7,832,287,058,500,353,000 | 41.544643 | 79 | 0.590766 | false |
akkana/scripts | wpnet.py | 1 | 17033 | #!/usr/bin/env python3
# A wrapper script to make it easier to use wpa_cli to connect.
# https://wiki.archlinux.org/index.php/WPA_supplicant#Connecting_with_wpa_cli
# was very helpful.
#
# For extending this to eth0, browse /etc/dhcpcd.conf
# and /usr/share/dhcpcd/hooks/10-wpa_supplicant on raspbian,
# where dhcpcd is the master and is in charge of stopping
# and starting wpa_supplicant.
#
# Copyright 2018 by Akkana Peck: share and enjoy under the GPLv2 or later.
import subprocess
import os, sys
import argparse
import getpass
import urllib.request
import time
verbose = False
"""
To run this as a normal user, not under sudo:
edit /etc/wpa_supplicant/wpa_supplicant.conf
and add a line like:
ctrl_interface_group=adm
using whatever group you think should have network permissions.
Commands this script runs:
** Get the wireless interface:
iw dev
** Start the daemon:
wpa_supplicant -B -i $iface -c /etc/wpa_supplicant/wpa_supplicant.conf
** List known networks:
wpa_cli list_networks
** List available networks:
wpa_cli scan
wpa_cli scan_results
** Define a new SSID:
wpa_cli add_network
(prints new $ID. Then:)
NOT : wpa_cli set_network $ID
** Connect to a new open SSID:
wpa_cli set_network $ID ssid $SSID key_mgmt NONE
** Connect to a new WPA SSID:
wpa_cli set_network $ID ssid $SSID psk $PASSWORD
wpa_cli enable_network $ID
wpa_cli save_config
WORKED:
wpa_supplicant -B -i wlp2s0 -c /etc/wpa_supplicant/wpa_supplicant.conf
wpa_cli list_networks
wpa_cli scan
wpa_cli scan_results
wpa_cli add_network
wpa_cli set_network 1 (this gave an error, I think)
wpa_cli set_network 1 ssid '"LAC-Public Library"'
wpa_cli set_network 1 key_mgmt NONE
(idiot bash lost this command, probably enable?)
wpa_cli save_config
dhclient -v wlp2s0
"""
def run_as_root(cmdargs):
"""Run cmdargs inside sudo, unless we're already root.
return (stdout, stderr) as strings.
"""
if os.getpid() != 0:
cmdargs = ["sudo"] + cmdargs
if verbose:
print("\n** Run:", ' '.join(cmdargs))
proc = subprocess.Popen(cmdargs, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# proc.communicate() returns bytes, so change them to strings:
return ( b.decode() for b in proc.communicate() )
def run_cmd(cmdargs):
"""Run and return (stdout, stderr) as strings.
"""
if verbose:
print("\n** Run:", ' '.join(cmdargs))
proc = subprocess.Popen(cmdargs, shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# proc.communicate() returns bytes, so change them to strings:
return ( b.decode() for b in proc.communicate() )
def start_wpa_supplicant(iface):
# https://wiki.archlinux.org/index.php/WPA_supplicant
if is_wpa_running():
if verbose:
print("wpa_supplicant is already running")
return
args = ['sudo', 'wpa_supplicant', '-B', '-i', iface,
'-c', '/etc/wpa_supplicant/wpa_supplicant.conf']
if verbose:
print("Starting wpa_supplicant:", ' '.join(args), end='')
subprocess.call(args)
time.sleep(5)
def is_wpa_running():
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
for pid in pids:
try:
args = open(os.path.join('/proc', pid, 'cmdline'),
'rb').read().decode().split('\0')
if args[0] == 'wpa_supplicant':
return True
except IOError: # proc has already terminated
continue
return False
def start_dhcp(iface):
if verbose:
print("Starting dhcp")
# Can't use run_cmd here because the output takes time
# and the usr might want to see it, especially if it fails.
return subprocess.call(['sudo', 'dhclient', '-v', iface])
def get_available_accesspoints(iface):
aps = {}
start_wpa_supplicant(iface)
run_cmd(["wpa_cli", "scan"])
out, err = run_cmd(["wpa_cli", "scan_results"])
stdout_lines = out.split('\n')
for line in stdout_lines:
if not line or line.startswith('Selected') \
or line.startswith('bssid /'):
continue
words = line.strip().split(maxsplit=4)
# Get the ssid if it's not hidden, else use the MAC
if len(words) == 4:
ssid = '[%s]' % words[0]
else:
ssid = words[4]
aps[ssid] = { 'MAC': words[0],
'flags': words[3],
'signal': int(words[2]),
}
return aps
def get_current():
"""
<iridum>- sudo wpa_cli list_networks
Selected interface 'wlp2s0'
network id / ssid / bssid / flags
0 clink any
1 LAC-Public Library any [CURRENT]
2 CommunityLab any [DISABLED]
3 COAFreeWireless any
4 LAC-Public Library any
"""
start_wpa_supplicant(iface)
networks = {}
out, err = run_cmd(["wpa_cli", "list_networks"])
stdout_lines = out.split('\n')
for line in stdout_lines:
line = line.strip()
if line.endswith('[CURRENT]'):
words = line.split('\t')
return words[1]
return None
def get_known_networks():
start_wpa_supplicant(iface)
networks = {}
out, err = run_cmd(["wpa_cli", "list_networks"])
stdout_lines = out.split('\n')
for line in stdout_lines:
line = line.strip()
if not line:
continue
words = line.split('\t')
if words[0].isdigit():
networks[int(words[0])] = words[1]
return networks
def match_ssid(pat, ssids):
for net in ssids:
if pat in net:
return net
return None
def get_wireless_ifaces():
# For a list of all devices, ls /sys/class/net
ifaces = []
# Get a list of wireless interfaces.
# iwconfig lists wireless interfaces on stdout, wired and lo on stderr.
proc = subprocess.Popen(["iw", "dev"], shell=False,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout_lines = proc.communicate()[0].decode().split('\n')
for line in stdout_lines:
line = line.strip()
if line.startswith('Interface'):
ifaces.append(line.split()[1])
# could get MAC and ssid if appropriate
return ifaces
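# For reference, `iw dev` output looks roughly like this (abridged):
#     phy#0
#             Interface wlp2s0
#                     ifindex 3
#                     type managed
# so the parser above keeps the token following each "Interface" line.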
def show_browser_if_redirect():
"""Try to fetch a test URL. If we're redirected to some other URL
(probably a stupid login page), pop up a browser.
"""
# Alas, there's no universal page everyone can use.
# So make one on your own website, or find a trusted page,
# and put that URL in ~/.config/netscheme/testurl
testurl = None
testurlfile = os.path.expanduser("~/.config/netscheme/testurl")
if not os.path.exists(testurlfile):
print("No test URL file; not testing connection")
return
    with open(testurlfile) as tufile:
        testurl = tufile.read().strip()
    if not testurl:
        if verbose:
            print("No test URL set; not checking for redirects")
        return
    outfile = testurlfile + ".out"
    if not os.path.exists(outfile):
        print("No saved test URL output; not testing connection")
        return
    with open(outfile) as tufile:
        content_from_file = tufile.read()
content_from_web = ''
print("Trying to fetch test URL", testurl)
try:
response = urllib.request.urlopen(testurl, timeout=100)
# Were we redirected? In theory response.geturl() will tell us that,
# but in practice, it doesn't, so we have to fetch the content
# of a page and compare it to the expected value.
content_from_web = response.read().decode('utf-8')
# Lots of ways this can fail.
# e.g. ValueError, "unknown url type"
# or BadStatusLine: ''
except Exception as e:
print("Couldn't fetch test URL %s: probably redirected." % testurl, e)
content_from_web = ''
if content_from_web == content_from_file:
print("Looks like we're really connected -- no redirect")
return
print("Couldn't make a test connection -- probably redirected.")
# Don't want to run a browser as root, so figure out if we're root
# and if so, de-escalate privilege.
# os.getuid(), os.geteuid() and psutil.uids() are all zero under sudo,
# but sudo helpfully sets an env variable we can use.
orig_uid = os.getenv("SUDO_UID")
if orig_uid:
print("De-escalating back to UID", orig_uid)
orig_uid = int(orig_uid)
os.setuid(orig_uid)
print("Calling quickbrowse", testurl)
try:
subprocess.call(["quickbrowse", testurl])
except Exception as e:
print("Problem starting a browser", e)
raise e
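# Setting up the connectivity check (illustrative; these are the paths this
# script reads):
#     mkdir -p ~/.config/netscheme
#     echo 'https://example.com/testpage' > ~/.config/netscheme/testurl
#     curl -s "$(cat ~/.config/netscheme/testurl)" > ~/.config/netscheme/testurl.out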
def show_available_networks():
accesspoints = get_available_accesspoints(iface)
aps = accesspoints.keys()
known_nets = get_known_networks()
# Print the ones we have saved already:
format = "%-20s %4s %7s %s"
print(format % ("SSID", "#", "Signal", "Encryption"))
print(format % ("----", "--", "------", "----------"))
known = []
for i in sorted(known_nets):
if known_nets[i] in aps:
print(format % (known_nets[i],
i,
accesspoints[known_nets[i]]['signal'],
accesspoints[known_nets[i]]['flags']))
known.append(known_nets[i])
'''
Sample flags:
    SSID                Signal  Encryption
    ----                ------  ----------
    LAC-Wireless           -86  [WPA2-EAP-CCMP][ESS]
    Historical             -84  [WPA-PSK-TKIP][WPA2-PSK-CCMP+TKIP][ESS]
    LAC PUBLIC             -85  [ESS]
    Public-LAC             -90  [ESS]
    NMC-Main               -79  [WPA2-PSK-CCMP][ESS]
<iridum>- wpa_cli scan_results ~
Selected interface 'wlp2s0'
bssid / frequency / signal level / flags / ssid
58:bf:ea:92:ba:c0 2437 -48 [WPA2-EAP-CCMP][ESS] LAC-Wireless
    6c:70:9f:de:4d:7c 2462 -84 [WPA-PSK-TKIP][WPA2-PSK-CCMP+TKIP][ESS] Historical
58:bf:ea:92:ba:c2 2437 -56 [ESS] LAC PUBLIC
24:01:c7:3a:91:b0 2462 -64 [ESS] Public-LAC
Selected interface 'wlp2s0'
https://askubuntu.com/questions/541704/how-can-one-use-wpa-cli-to-connect-to-a-wpa-network-without-a-password
> scan
OK
CTRL-EVENT-SCAN-RESULTS
> scan_results
bssid / frequency / signal level / flags / ssid
f8:d1:11:23:c2:2f 2412 76 [ESS] BAYINET
f8:d1:11:23:c1:e9 2412 47 [ESS] BAYINET
> add_network
0
> set_network 0 ssid "Public-LAC"
OK
> set_network 0 key_mgmt NONE
OK
> enable_network 0
OK
CTRL-EVENT-SCAN-RESULTS
Trying to associate with f8:d1:11:23:c2:2f (SSID='BAYINET' freq=2412 MHz)
Association request to the driver failed
Associated with f8:d1:11:23:c2:2f
CTRL-EVENT-CONNECTED - Connection to f8:d1:11:23:c2:2f completed (auth) [id=1 id_str=]
> quit
'''
# Print the ones we don't know:
print()
for ap in aps:
if ap not in known:
print(format % (ap,
'',
accesspoints[ap]['signal'],
accesspoints[ap]['flags']))
def connect_to(to_ap):
if verbose:
print("Connecting to", to_ap)
accesspoints = get_available_accesspoints(iface)
aps = list(accesspoints.keys())
known_nets = get_known_networks()
known = [ known_nets[i] for i in known_nets ]
known_index = None
if to_ap not in aps:
# But maybe it's a number for a known network?
if to_ap.isdigit():
known_index = int(to_ap)
if known_index not in known_nets:
print("No network %d known" % known_index)
sys.exit(1)
to_ap = known_nets[known_index]
if to_ap not in aps:
print("Network %d, '%s', not visible" % (known_index,
to_ap))
sys.exit(1)
else:
matched = match_ssid(to_ap, accesspoints.keys())
if not matched:
print("'%s' isn't visible" % to_ap)
sys.exit(1)
to_ap = matched
print("Matched:", matched)
# Now to_ap is an SSID that's known.
if to_ap in known:
if verbose:
print("Great, we see", to_ap, "and we know it already")
if known_index is None:
for i in known_nets:
if known_nets[i] == to_ap:
known_index = i
break
if known_index is None:
print("Internal error, lost track of SSID %s" % to_ap)
if verbose:
print("Enabling network", to_ap)
run_cmd(["wpa_cli", "enable_network", str(known_index)])
if start_dhcp(iface):
print("DHCP failed")
else:
show_browser_if_redirect()
sys.exit(0)
# New network, hasn't been stored yet. But it is seen.
if verbose:
print(to_ap, "must be a new network")
thisap = accesspoints[to_ap]
out, err = run_cmd(["wpa_cli", "add_network"])
# The last (second) line of the output is the new network number.
# But split('\n') gives a bogus empty final line.
# To be safer, try iterating to find a line that's just a single number.
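    # For instance (sample shape based on the wpa_cli transcript earlier in
    # this file), out == "Selected interface 'wlp2s0'\n0\n" should leave
    # netnum_str == "0".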
lines = out.split('\n')
netnum_str = None
for line in lines:
if not line:
continue
words = line.split()
if len(words) == 1 and words[0].isdigit():
netnum_str = words[0]
break
if not netnum_str:
print("Unexpected output from wpa_cli add_network:")
print(out)
print("---")
sys.exit(1)
if verbose:
print("new netnum:", netnum_str)
def check_fail(out, err, errmsg=None):
if 'FAIL' in out or 'FAIL' in err:
if errmsg:
print("Error:", errmsg)
if out:
print("==== FAIL: out")
print(out)
if err:
print("==== FAIL: err")
print(err)
sys.exit(1)
if out or err:
print("SUCCESS:")
if out:
print(out)
if err:
print(err)
out, err = run_cmd(["wpa_cli", "set_network", netnum_str, "ssid",
'"%s"' % to_ap])
check_fail(out, err, "Set network")
if 'WPA' in thisap['flags'] or 'PSK' in thisap['flags']:
password = getpass.getpass("Password: ")
out, err = run_cmd(["wpa_cli", "set_network", netnum_str,
"psk", '"%s"' % password])
check_fail(out, err, "Set password")
else:
if verbose:
print("Trying to connect to %s with no password" % to_ap)
out, err = run_cmd(["wpa_cli", "set_network", netnum_str,
"key_mgmt", "NONE"])
check_fail(out, err, "Set key management")
if verbose:
print("Waiting a little ...", end='')
time.sleep(5)
if verbose:
print()
if verbose:
print("Enabling network", netnum_str)
out, err = run_cmd(["wpa_cli", "enable_network", netnum_str])
check_fail(out, err, "Enable network")
if verbose:
print("Waiting a little ...", end='')
time.sleep(5)
if verbose:
print()
if verbose:
print("Saving configuration")
out, err = run_cmd(["wpa_cli", "save_config"])
check_fail(out, err, "Save configuration")
if verbose:
print(out, err, "Saved configuration")
start_dhcp(iface)
show_browser_if_redirect()
sys.exit(0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-k', "--known", dest="known",
action="store_true", help="List known networks")
parser.add_argument('-a', "--available", dest="available",
action="store_true", help="Show available accesspoints")
parser.add_argument('connect_to', nargs='?',
help="The essid or numeric specifier to connect to")
args = parser.parse_args(sys.argv[1:])
ifaces = get_wireless_ifaces()
if not ifaces:
print("No wireless interface, sorry")
sys.exit(1)
if len(ifaces) > 1:
print("Multiple wireless interfaces:", ' '.join(get_wireless_ifaces()))
print("Using", ifaces[0])
iface = ifaces[0]
if not iface:
print("No interface!")
sys.exit(1)
if verbose:
print("Interface:", iface)
if args.available:
show_available_networks()
sys.exit(0)
if args.known:
known_nets = get_known_networks()
for i in sorted(known_nets.keys()):
print('%3d: %s' % (i, known_nets[i]))
sys.exit(0)
# If no flags specified, then we should have one arg,
# either a numeric specifier or an essid.
if not args.connect_to:
current = get_current()
if current:
print("Connected to", current)
else:
print("Not connected")
sys.exit(0)
connect_to(args.connect_to)
| gpl-2.0 | 2,445,823,374,214,298,000 | 29.634892 | 109 | 0.573358 | false |
peterwilletts24/Monsoon-Python-Scripts | geopotential/plot_geopotential_diff_from_global.py | 1 | 16864 | """
Load mean geopotential heights and plot in colour
"""
import os, sys
import matplotlib.pyplot as plt
import matplotlib.cm as mpl_cm
from mpl_toolkits.basemap import Basemap
import iris
import numpy as np
import imp
import h5py
import cartopy.crs as ccrs
import scipy.interpolate
from textwrap import wrap
model_name_convert_title = imp.load_source('util', '/home/pwille/python_scripts/model_name_convert_title.py')
def main():
def unrotate_pole(rotated_lons, rotated_lats, pole_lon, pole_lat):
"""
Convert rotated-pole lons and lats to unrotated ones.
Example::
lons, lats = unrotate_pole(grid_lons, grid_lats, pole_lon, pole_lat)
.. note:: Uses proj.4 to perform the conversion.
"""
src_proj = ccrs.RotatedGeodetic(pole_longitude=pole_lon,
pole_latitude=pole_lat)
target_proj = ccrs.Geodetic()
res = target_proj.transform_points(x=rotated_lons, y=rotated_lats,
src_crs=src_proj)
unrotated_lon = res[..., 0]
unrotated_lat = res[..., 1]
return unrotated_lon, unrotated_lat
# Set rotated pole longitude and latitude, not ideal but easier than trying to find how to get iris to tell me what it is.
plot_levels = [925, 850, 700, 500]
#plot_levels = [925]
experiment_id = 'djznw'
p_levels = [1000, 950, 925, 850, 700, 500, 400, 300, 250, 200, 150, 100, 70, 50, 30, 20, 10]
expmin1 = experiment_id[:-1]
plot_type='mean'
# for pl in plot_diags:
plot_diag='temp'
fname_h = '/projects/cascade/pwille/temp/408_pressure_levels_interp_pressure_%s_%s' % (experiment_id, plot_type)
fname_d = '/projects/cascade/pwille/temp/%s_pressure_levels_interp_%s_%s' % (plot_diag, experiment_id, plot_type)
print fname_h
print fname_d
# Height data file
with h5py.File(fname_h, 'r') as i:
mh = i['%s' % plot_type]
        mean_heights = mh[...]
print mean_heights.shape
with h5py.File(fname_d, 'r') as i:
mh = i['%s' % plot_type]
        mean_var = mh[...]
print mean_var.shape
#lon_low= 60
#lon_high = 105
#lat_low = -10
#lat_high = 30
f_oro = '/projects/cascade/pwille/moose_retrievals/%s/%s/33.pp' % (expmin1, experiment_id)
oro = iris.load_cube(f_oro)
print oro
for i, coord in enumerate (oro.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_oro = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_oro = i
fu = '/projects/cascade/pwille/moose_retrievals/%s/%s/30201_mean.pp' % (expmin1, experiment_id)
u_wind,v_wind = iris.load(fu)
# Wind may have different number of grid points so need to do this twice
lat_w = u_wind.coord('grid_latitude').points
lon_w = u_wind.coord('grid_longitude').points
p_levs = u_wind.coord('pressure').points
lat = oro.coord('grid_latitude').points
lon = oro.coord('grid_longitude').points
cs_w = u_wind.coord_system('CoordSystem')
cs = oro.coord_system('CoordSystem')
if isinstance(cs_w, iris.coord_systems.RotatedGeogCS):
print ' Wind - %s - Unrotate pole %s' % (experiment_id, cs_w)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lons_w,lats_w = unrotate_pole(lons_w,lats_w, cs_w.grid_north_pole_longitude, cs_w.grid_north_pole_latitude)
lon_w=lons_w[0]
lat_w=lats_w[:,0]
csur_w=cs_w.ellipsoid
for i, coord in enumerate (u_wind.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_uwind = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_uwind = i
u_wind.remove_coord('grid_latitude')
u_wind.remove_coord('grid_longitude')
u_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w),lat_dim_coord_uwind )
u_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w), lon_dim_coord_uwind)
v_wind.remove_coord('grid_latitude')
v_wind.remove_coord('grid_longitude')
v_wind.add_dim_coord(iris.coords.DimCoord(points=lat_w, standard_name='grid_latitude', units='degrees', coord_system=csur_w), lat_dim_coord_uwind)
v_wind.add_dim_coord(iris.coords.DimCoord(points=lon_w, standard_name='grid_longitude', units='degrees', coord_system=csur_w),lon_dim_coord_uwind )
if isinstance(cs, iris.coord_systems.RotatedGeogCS):
print ' 33.pp - %s - Unrotate pole %s' % (experiment_id, cs)
lons, lats = np.meshgrid(lon, lat)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
lon_corners, lat_corners = np.meshgrid((lon_low, lon_high), (lat_low, lat_high))
lons,lats = unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon_corner_u,lat_corner_u = unrotate_pole(lon_corners, lat_corners, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
#lon_highu,lat_highu = unrotate_pole(lon_high, lat_high, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
lon=lons[0]
lat=lats[:,0]
lon_low = lon_corner_u[0,0]
lon_high = lon_corner_u[0,1]
lat_low = lat_corner_u[0,0]
lat_high = lat_corner_u[1,0]
for i, coord in enumerate (oro.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_oro = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_oro = i
csur=cs.ellipsoid
oro.remove_coord('grid_latitude')
oro.remove_coord('grid_longitude')
oro.add_dim_coord(iris.coords.DimCoord(points=lat, standard_name='grid_latitude', units='degrees', coord_system=csur), lat_dim_coord_oro)
oro.add_dim_coord(iris.coords.DimCoord(points=lon, standard_name='grid_longitude', units='degrees', coord_system=csur), lon_dim_coord_oro)
else:
lons, lats = np.meshgrid(lon, lat)
lons_w, lats_w = np.meshgrid(lon_w, lat_w)
lon_low= np.min(lons)
lon_high = np.max(lons)
lat_low = np.min(lats)
lat_high = np.max(lats)
######## Regrid to global, and difference #######
############################################################################
## Heights
f_glob_h = '/projects/cascade/pwille/temp/408_pressure_levels_interp_pressure_djznw_%s' % (plot_type)
f_glob_d = '/projects/cascade/pwille/temp/%s_pressure_levels_interp_djznw_%s' % (plot_diag, plot_type)
with h5py.File(f_glob_h, 'r') as i:
mh = i['%s' % plot_type]
        mean_heights_global = mh[...]
with h5py.File(f_glob_d, 'r') as i:
mh = i['%s' % plot_type]
        mean_var_global = mh[...]
# Wind
fw_global = '/projects/cascade/pwille/moose_retrievals/djzn/djznw/30201_mean.pp'
fo_global = '/projects/cascade/pwille/moose_retrievals/djzn/djznw/33.pp'
u_global,v_global = iris.load(fw_global)
oro_global = iris.load_cube(fo_global)
# Unrotate global coordinates
cs_glob = u_global.coord_system('CoordSystem')
cs_glob_v = v_global.coord_system('CoordSystem')
cs_glob_oro = oro_global.coord_system('CoordSystem')
lat_g = u_global.coord('grid_latitude').points
lon_g = u_global.coord('grid_longitude').points
lat_g_oro = oro_global.coord('grid_latitude').points
lon_g_oro = oro_global.coord('grid_longitude').points
if cs_glob!=cs_glob_v:
print 'Global model u and v winds have different poles of rotation'
# Unrotate global winds
if isinstance(cs_glob, iris.coord_systems.RotatedGeogCS):
print ' Global Model - Winds - djznw - Unrotate pole %s' % cs_glob
lons_g, lats_g = np.meshgrid(lon_g, lat_g)
lons_g,lats_g = unrotate_pole(lons_g,lats_g, cs_glob.grid_north_pole_longitude, cs_glob.grid_north_pole_latitude)
lon_g=lons_g[0]
lat_g=lats_g[:,0]
for i, coord in enumerate (u_global.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_uglobal = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_uglobal = i
csur_glob=cs_glob.ellipsoid
u_global.remove_coord('grid_latitude')
u_global.remove_coord('grid_longitude')
u_global.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_uglobal)
u_global.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_uglobal)
#print u_global
v_global.remove_coord('grid_latitude')
v_global.remove_coord('grid_longitude')
v_global.add_dim_coord(iris.coords.DimCoord(points=lat_g, standard_name='grid_latitude', units='degrees', coord_system=csur_glob), lat_dim_coord_uglobal)
v_global.add_dim_coord(iris.coords.DimCoord(points=lon_g, standard_name='grid_longitude', units='degrees', coord_system=csur_glob), lon_dim_coord_uglobal)
#print v_global
# Unrotate global model
if isinstance(cs_glob_oro, iris.coord_systems.RotatedGeogCS):
print ' Global Model - Orography - djznw - Unrotate pole %s - Winds and other diagnostics may have different number of grid points' % cs_glob_oro
lons_go, lats_go = np.meshgrid(lon_g_oro, lat_g_oro)
lons_go,lats_go = unrotate_pole(lons_go,lats_go, cs_glob_oro.grid_north_pole_longitude, cs_glob_oro.grid_north_pole_latitude)
lon_g_oro=lons_go[0]
lat_g_oro=lats_go[:,0]
for i, coord in enumerate (oro_global.coords()):
if coord.standard_name=='grid_latitude':
lat_dim_coord_og = i
if coord.standard_name=='grid_longitude':
lon_dim_coord_og = i
csur_glob_oro=cs_glob_oro.ellipsoid
oro_global.remove_coord('grid_latitude')
oro_global.remove_coord('grid_longitude')
oro_global.add_dim_coord(iris.coords.DimCoord(points=lat_g_oro, standard_name='grid_latitude', units='degrees', coord_system=csur_glob_oro), lat_dim_coord_og)
oro_global.add_dim_coord(iris.coords.DimCoord(points=lon_g_oro, standard_name='grid_longitude', units='degrees', coord_system=csur_glob_oro), lon_dim_coord_og)
############## Regrid and Difference #################################
# Regrid Height and Temp/Specific humidity to global grid
h_regrid = np.empty((len(lat_g_oro), len(lon_g_oro), len(p_levels)))
v_regrid = np.empty((len(lat_g_oro), len(lon_g_oro), len(p_levels)))
for y in range(len(p_levels)):
h_regrid[:,:,y] = scipy.interpolate.griddata((lats.flatten(),lons.flatten()),mean_heights[:,:,y].flatten() , (lats_go,lons_go),method='cubic')
v_regrid[:,:,y] = scipy.interpolate.griddata((lats.flatten(),lons.flatten()),mean_var[:,:,y].flatten() , (lats_go,lons_go),method='cubic')
# Difference heights
mean_heights = np.where(np.isnan(h_regrid), np.nan, h_regrid - mean_heights_global)
#Difference temperature/specific humidity
mean_var = np.where(np.isnan(v_regrid), np.nan, v_regrid - mean_var_global)
# Difference winds
u_wind_regrid = iris.analysis.interpolate.regrid(u_wind, u_global, mode='bilinear')
v_wind_regrid = iris.analysis.interpolate.regrid(v_wind, v_global, mode='bilinear')
u_wind=u_wind_regrid-u_global
v_wind=v_wind_regrid-v_global
#######################################################################################
# 2 degree lats lon lists for wind regridding on plot
lat_wind_1deg = np.arange(lat_low,lat_high, 2)
lon_wind_1deg = np.arange(lon_low,lon_high, 2)
lons_w,lats_w = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
for p in plot_levels:
m_title = 'Height of %s-hPa level (m)' % (p)
# Set pressure height contour min/max
if p == 925:
clev_min = -24.
clev_max = 24.
elif p == 850:
clev_min = -24.
clev_max = 24.
elif p == 700:
clev_min = -24.
clev_max = 24.
elif p == 500:
clev_min = -24.
clev_max = 24.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p == 925:
clevpt_min = -3.
clevpt_max = 3.
elif p == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
#clevs_col = np.arange(clev_min, clev_max)
clevs_lin = np.linspace(clev_min, clev_max, num=24)
s = np.searchsorted(p_levels[::-1], p)
sc = np.searchsorted(p_levs, p)
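        # Index bookkeeping note: p_levels is listed high-to-low, so e.g. for
        # p == 925, np.searchsorted(p_levels[::-1], 925) gives s == 14 and the
        # slice index -(s+1) == -15, i.e. position 2 of the original
        # 17-element ordering -- the 925 hPa level.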
# Set plot contour lines for pressure levels
plt_h = mean_heights[:,:,-(s+1)]
#plt_h[plt_h==0] = np.nan
# Set plot colours for variable
plt_v = mean_var[:,:,-(s+1)]
#plt_v[plt_v==0] = np.nan
        # Set u,v for winds, linear interpolate to approx. 2 degree grid
u_interp = u_wind[sc,:,:]
v_interp = v_wind[sc,:,:]
sample_points = [('grid_latitude', lat_wind_1deg), ('grid_longitude', lon_wind_1deg)]
u = iris.analysis.interpolate.linear(u_interp, sample_points).data
v = iris.analysis.interpolate.linear(v_interp, sample_points).data
lons_w, lats_w = np.meshgrid(lon_wind_1deg, lat_wind_1deg)
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high,projection='mill')
#x, y = m(lons, lats)
x, y = m(lons_go, lats_go)
x_w, y_w = m(lons_w, lats_w)
fig=plt.figure(figsize=(8,8))
ax = fig.add_axes([0.05,0.05,0.9,0.85])
m.drawcoastlines(color='gray')
m.drawcountries(color='gray')
m.drawcoastlines(linewidth=0.5)
#m.fillcontinents(color='#CCFF99')
#m.drawparallels(np.arange(-80,81,10),labels=[1,1,0,0])
#m.drawmeridians(np.arange(0,360,10),labels=[0,0,0,1])
cs_lin = m.contour(x,y, plt_h, clevs_lin,colors='k',linewidths=0.5)
#cs_lin = m.contour(x,y, plt_h,colors='k',linewidths=0.5)
#wind = m.barbs(x_w,y_w, u, v, length=6)
if plot_diag=='temp':
cs_col = m.contourf(x,y, plt_v, np.linspace(clevpt_min, clevpt_max), cmap=plt.cm.RdBu_r, colorbar_extend='both')
#cs_col = m.contourf(x,y, plt_v, cmap=plt.cm.RdBu_r)
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%d')
cbar.set_label('K')
            plt.suptitle('Difference from Global Model (Model - Global Model) of Height, Potential Temperature and Wind Vectors at %s hPa' % (p), fontsize=10)
elif plot_diag=='sp_hum':
cs_col = m.contourf(x,y, plt_v, np.linspace(clevsh_min, clevsh_max), cmap=plt.cm.RdBu_r)
cbar = m.colorbar(cs_col,location='bottom',pad="5%", format = '%.3f')
cbar.set_label('kg/kg')
            plt.suptitle('Difference from Global Model (Model - Global Model) of Height, Specific Humidity and Wind Vectors at %s hPa' % (p), fontsize=10)
wind = m.quiver(x_w,y_w, u, v, scale=150)
qk = plt.quiverkey(wind, 0.1, 0.1, 5, '5 m/s', labelpos='W')
plt.clabel(cs_lin, fontsize=10, fmt='%d', color='black')
#plt.title('%s\n%s' % (m_title, model_name_convert_title.main(experiment_id)), fontsize=10)
plt.title('\n'.join(wrap('%s' % (model_name_convert_title.main(experiment_id)), 80)), fontsize=10)
#plt.show()
if not os.path.exists('/home/pwille/figures/%s/%s' % (experiment_id, plot_diag)): os.makedirs('/home/pwille/figures/%s/%s' % (experiment_id, plot_diag))
plt.savefig('/home/pwille/figures/%s/%s/geop_height_difference_%shPa_%s_%s.tiff' % (experiment_id, plot_diag, p, experiment_id, plot_diag), format='tiff', transparent=True)
if __name__ == '__main__':
main()
| mit | -4,436,724,991,769,473,000 | 39.057007 | 180 | 0.59968 | false |
mr-martian/potential-doodle | doodle.py | 1 | 78676 | #!/usr/bin/env python3
import re, itertools, random, copy, os
from collections import defaultdict
from subprocess import Popen, PIPE
from os.path import isfile
from types import SimpleNamespace
Globals = SimpleNamespace(path=os.path.abspath(__file__)[:-9], unknown_error=True, flat=False, partial=True, keepmeta=True, spacing=1, blob=True, treebank=[], output=None, usetreebank=False)
#path: The directory containing the program
#unknown_error: Should an error be raised when trying parse a non-existent morpheme?
#flat: Read |[XP] as [XP a b c d] rather than [XP a [Xmod b [Xbar c d]]]
#partial: Return incomplete translations
#keepmeta: Copy glosses and metadata from input to output
#spacing: Number of newlines to put between segments of output
#blob: Write all lines in one operation rather than as they are generated
#treebank: Local storage for trees to be reused by later commands
#output: Where to write output (use None for treebank or stdout)
#usetreebank: Write to treebank rather than stdout
class PatternElement:
"""Base class for elements of trees (both sentences and rule patterns)"""
CheckType = False
#by default, items of different subclasses can be equivalent
#for pattern-matching purposes
def __init__(self, ntype, props=None, loc=None):
self.ntype = ntype
self.props = props or {}
self.loc = loc or ''
#the file line that generated the object, for debugging
def __getitem__(self, key):
return self.props[key]
def __setitem__(self, key, value):
self.props[key] = value
def __contains__(self, key):
return key in self.props
def __str__(self):
return type(self).__name__ + self.ntype + str(self.props)
def __repr__(self):
return self.__str__()
def matchcondlist(self, cndls):
"""Given a list of (key, value) pairs, check that they all appear in self.props."""
return all(k in self and self[k] == v for k,v in cndls)
def getvars(self, tree, vrs):
"""Use self as a pattern and check against tree,
storing any variable values in vrs.
If tree does not match self,
a failure message is stored in vrs[' failed'].
"""
if self.CheckType and type(tree) != type(self):
vrs[' failed'] = 'type'
return vrs
if tree == None:
vrs[' failed'] = 'tree is None'
return vrs
if self.ntype and self.ntype != tree.ntype:
vrs[' failed'] = 'ntype'
return vrs
for p in self.props:
if p not in tree:
vrs[' failed'] = 'nonexistent property %s' % p
return vrs
if isinstance(self.props[p], str) or self.props[p] == None:
if self.props[p] != tree[p]:
vrs[' failed'] = 'property value mismatch'
return vrs
else:
self.props[p].getvars(tree[p], vrs)
if vrs[' failed']:
return vrs
return vrs
def putvars(self, vrs):
"""Reinsert variables (vrs) into pattern (self), inverse of getvars()."""
return self
def check(self, tree):
        return self.getvars(tree, {' failed': False})[' failed'] == False
class DataElement(PatternElement):
"""Base class for elements of sentences"""
CheckType = True
def trans(self, tr):
"""Apply a translation tr to self and return the result
extract variables from context and then from form,
apply operations and reinsert variables
"""
vrs = tr.context.getvars(self, {' failed': False})
if vrs[' failed'] or not isinstance(vrs[' '], DataElement):
if tr.debug:
print('Debugging rule failure on context for %s' % tr.name)
print(' Tree was: %s' % self)
print(' Reason was: %s' % vrs[' failed'])
print(' @ was: %s\n\n' % vrs[' '])
return []
vrs = tr.form.getvars(vrs[' '], vrs)
if vrs[' failed']:
if tr.debug:
print('Debugging rule failure on form for %s' % tr.name)
print(' Tree was: %s' % vrs[' '])
print(' Reason was: %s\n\n' % vrs[' failed'])
return []
applyrules(tr.result, vrs)
return copy.deepcopy(tr.context).putvars(vrs)
def transmulti(self, tr):
"""Apply a multirule (tr) to self and return the result
extract variables at each level and then apply operations in reverse order
"""
if tr.ntypelist and self.ntype not in tr.ntypelist:
return []
vrs = {' failed': False, ' ': self}
path = []
for l in tr.layers:
for i, f in enumerate(l):
vrs2 = f[0].getvars(vrs[' '], vrs.copy())
if not vrs2[' failed']:
vrs = vrs2
path.append(f[1:])
break
else:
return []
for result in reversed(path):
applyrules(result, vrs)
return vrs[' ']
def transform(self, pats, returnself=True):
"""Apply a set of rules to self.
If none of the rules produce output, return self if returnself is True.
Otherwise return [].
All returned nodes will either be self or self after 1 rule application.
"""
if len(pats) > 0:
nodes = []
retstr = ['[]']
for i, p in enumerate(pats):
if isinstance(p, Translation):
x = self.trans(p)
else:
x = self.transmulti(p)
s = str(x)
if s not in retstr:
nodes.append(x)
retstr.append(s)
if not nodes and returnself:
nodes = [self]
return nodes
elif returnself:
return [self]
else:
return []
###VARIABLES
class Variable(PatternElement):
"""Pattern element for extracting data"""
pattern = re.compile('^\\$?([^:?!+\\.&]*):?([^:?!+\\.&]*)\\.?([^:?!+\\.&]*)([?!+&]*)$')
def __init__(self, label, ntype=None, prop=None, opt=False, neg=False, group=False, descend=False, cond=None, loc=None):
PatternElement.__init__(self, ntype, loc=loc)
self.label = label
self.prop = prop
self.opt = opt
self.neg = neg
self.group = group
self.descend = descend
self.cond = cond
def fromstring(s):
"""Convert a string into a into a Variable object."""
m = Variable.pattern.match(s)
if m:
g = m.groups()
return Variable(g[0], g[1], g[2], '?' in g[3], '!' in g[3], '+' in g[3], '&' in g[3])
else:
print('no match with %s' % s)
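    # Illustration (derived from the regex above): fromstring('x:NP.case?')
    # yields label 'x', ntype 'NP', prop 'case', opt=True.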
def checkset(self, vrs):
"""Given a set of variable values, verify that self's conditions are met."""
if self.label not in vrs:
return self.neg or self.opt
if self.group:
return all(self.check(x) for x in vrs[self.label])
else:
return self.check(vrs[self.label])
def check(self, v):
"""Check whether an element satisfies self's conditions."""
if self.neg:
return v == None
if v == None:
return self.opt
if not PatternElement.check(self, v):
return False
if self.cond:
return self.cond.check(v)
return True
def retrieve(self, vrs):
"""Extract property values or children from a set of values."""
if self.label in vrs:
node = vrs[self.label]
if not node:
if not self.prop or self.opt:
return node
else:
raise Exception('Variable %s cannot retrieve properties from None.' % self)
if self.descend:
while True:
if node.ntype == 'conjP':
node = node.children[0]
elif node.ntype[-1] == 'P' and len(node.children) in [2,4]:
if len(node.children) == 4:
node = node.children[2]
else:
node = node.children[1].children[1].children[0]
else:
break
if self.prop:
if self.prop in node:
return node[self.prop]
elif self.opt:
return None
else:
raise Exception('Error with variable %s and node %s, property does not exist.' % (self, node))
else:
return node
elif self.opt:
return None
else:
print(vrs)
print(self.label)
raise Exception('Variable %s does not exist.' % self)
def place(self, vrs, val):
"""Insert a value into a dictionary."""
if self.label in vrs and vrs[self.label]:
if self.group:
for v in vrs[self.label]:
v.props[self.prop] = val
else:
vrs[self.label].props[self.prop] = val
def getvars(self, node, vrs):
PatternElement.getvars(self, node, vrs)
if not vrs[' failed'] and self.cond:
self.cond.getvars(node, vrs)
if node == None and (self.opt or self.neg):
vrs[' failed'] = False
if self.neg and node:
vrs[' failed'] = 'node is not None'
if not vrs[' failed']:
if self.label in vrs:
if self.group:
vrs[self.label].append(node)
else:
#perhaps overwriting the previous value is the wrong approach
#but since this hasn't yet come up in practice I'm inclined to ignore it
# -D.S. 2018-07-27
vrs[self.label] = node
else:
vrs[self.label] = [node] if self.group else node
return vrs
def putvars(self, vrs):
if self.label not in vrs:
return None
else:
return vrs[self.label]
def __str__(self):
return '$'+self.label + \
((':'+self.ntype) if self.ntype else '') + \
(('.'+self.prop) if self.prop else '') + \
('?' if self.opt else '') + \
('!' if self.neg else '') + \
('+' if self.group else '') + \
('&' if self.descend else '') + \
(('(' + str(self.cond) + ')') if self.cond else '')
def __deepcopy__(self, memo):
return self
#Variables aren't modified, so we don't care about copying them
class Unknown(Variable):
"""Variable that will match anything at all"""
count = 0
def __init__(self):
Variable.__init__(self, ' '+str(Unknown.count), opt=True)
Unknown.count += 1
def getvars(self, tree, vrs):
vrs[self.label] = tree
return vrs
def check(self, v):
return True
def __str__(self):
return '*'
###DATA STRUCTURES
class Morpheme(DataElement):
"""Word, tense, punctuation mark, or other non-structural sentence element"""
__AllMorphemes = defaultdict(lambda: defaultdict(lambda: defaultdict(lambda: None)))
def __init__(self, lang, ntype, root, props=None, isref=False, loc=None):
PatternElement.__init__(self, ntype, props, loc)
self.lang = lang
self.root = root
self.isref = isref
if not isref:
roots = [root]
if 'searchkey' in self.props:
roots.append(self.props['searchkey'])
pos = [ntype]
if 'altpos' in self.props:
pos.append(self.props['altpos'])
for p in pos:
for r in roots:
Morpheme.__AllMorphemes[lang][p][r] = self
else:
try:
Morpheme.get(lang, ntype, root, loc or '(see stacktrace)')
except:
if Globals.unknown_error:
raise
else:
f = open(Globals.path + 'missing_morphemes.txt', 'a')
f.write(str(lang) + ': ' + ntype + '=' + root + '\n')
f.close()
def __str__(self):
return self.ntype + '=' + self.root
def getref(self):
"""Create a separate Morpheme that points back to self."""
return Morpheme(self.lang, self.ntype, self.root, isref=True)
def itermorph(lang):
return Morpheme.__AllMorphemes[lang]
def tagify(self, regex=False):
"""Produce input for a morphological transducer.
If regex is True the output will be a regex to be used in parse()
"""
lang = Language.getormake(self.lang)
format = ''
tagset = []
defaults = {}
for typ in lang.tags:
if typ['ntype'] != self.ntype:
continue
if not self.matchcondlist(typ['conds']):
continue
format = typ['format']
tagset = typ['tags']
defaults = typ['defaults']
break
else:
format = '{root[0]}<%s>' % self.ntype
tagset = {}
defaults = {}
tags = {'root': self.root.split('#')[0].split(lang.tags_rootsplit)}
if 'root' in self:
tags['root'] = self['root'].split(lang.tags_rootsplit)
for tg in tagset:
if isinstance(tagset[tg], str):
if tagset[tg] in self:
t = self[tagset[tg]]
tags[tg] = '<' + t + '>' if t else ''
else:
for cs in tagset[tg]:
if self.matchcondlist(cs['conds']):
tags[tg] = cs['tag']
break
if tg not in tags:
if regex:
tags[tg] = '<[^<>]*>'
else:
tags[tg] = defaults[tg]
ret = format.format(**tags) or self.root
if regex:
ret = '\t' + ret.replace('+', '\\+')
return ret
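    # Sketch of the fallback path (assuming the language defines no matching
    # entry in lang.tags and a tags_rootsplit such as '-'): a Morpheme with
    # ntype 'n' and root 'dog' tagifies to 'dog<n>'.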
def get(lang, ntype, root, loc):
"""Retrieve elements from Morpheme.__AllMorphemes."""
if lang not in Morpheme.__AllMorphemes:
raise Exception('Error at %s: Language %s not loaded.' % (loc, lang))
else:
d = Morpheme.__AllMorphemes[lang]
if ntype not in d:
raise Exception('Error at %s: Non-existent part of speech %s' % (loc, ntype))
else:
d = d[ntype]
if root not in d:
raise Exception('Error at %s: Undefined morpheme %s=%s' % (loc, ntype, root))
else:
return d[root]
def __getitem__(self, key):
if key in self.props:
return self.props[key]
elif self.isref:
ref = Morpheme.get(self.lang, self.ntype, self.root, None)
if key in ref.props:
return ref.props[key]
else:
raise KeyError('Morpheme %s does not have property %s.' % (self, key))
def __contains__(self, key):
if key in self.props:
return True
if self.isref:
return key in Morpheme.get(self.lang, self.ntype, self.root, None).props
return False
def getvars(self, tree, vrs):
PatternElement.getvars(self, tree, vrs)
if not vrs[' failed']:
if self.lang != tree.lang or self.root != tree.root:
vrs[' failed'] = 'lang or root'
return vrs
def putvars(self, vrs):
return self
class Node(DataElement):
"""Structural element of a sentence"""
def __init__(self, ntype, children, props=None, loc=None):
PatternElement.__init__(self, ntype, props, loc)
self.children = children
self.rotate = False
def swapchildren(self, ls):
"""Return a Node with the same properties but different children."""
return Node(self.ntype, ls, self.props.copy())
def getvars(self, tree, vrs):
PatternElement.getvars(self, tree, vrs)
if not vrs[' failed']:
if len(self.children) != len(tree.children):
vrs[' failed'] = 'number of children'
return vrs
for s,t in zip(self.children, tree.children):
if s:
s.getvars(t, vrs)
elif t:
vrs[' failed'] = 'non-null child'
if vrs[' failed']:
return vrs
return vrs
def putvars(self, vrs):
ch = []
for c in self.children:
try:
a = c.putvars(vrs)
if isinstance(a, list):
ch += a
else:
ch.append(a)
except AttributeError:
ch.append(c)
return Node(self.ntype, ch, self.props.copy())
def transform(self, pats, returnself=True):
"""Apply DataElement.transform() to children and then to self."""
chs = []
for c in self.children:
if c:
chs.append(c.transform(pats, True))
else:
chs.append([c])
swap = map(lambda x: self.swapchildren(list(x)), itertools.product(*chs))
ret = list(itertools.chain.from_iterable(map(lambda x: DataElement.transform(x, pats, True), swap)))
if returnself and not ret:
ret = [self]
return ret
def __str__(self):
if isinstance(self.children, list):
s = '[' + ' '.join([str(x) for x in self.children]) + ']'
else:
s = str(self.children)
return '%s%s%s' % (self.ntype, s, str(self.props))
def debug(self, depth=0):
"""Convert self to a multi-line indented string."""
ls = [(' '*depth) + ('%s[' % self.ntype)]
for c in self.children:
if isinstance(c, Node):
                ls.append(c.debug(depth+1))
            else:
                ls.append(' '*(depth+1) + str(c))
ls.append(' '*depth + ']' + str(self.props))
return '\n'.join(ls)
def writecompile(self):
"""Convert self to a string that can be parsed back to self."""
if len(self.children) == 1 and isinstance(self.children[0], str):
return self.ntype + '=' + self.children[0]
l = [self.ntype]
for c in self.children:
if isinstance(c, Node):
l.append(c.writecompile())
elif not c:
l.append('~')
else:
l.append(str(c))
return '[' + ' '.join(l) + ']'
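    # e.g. Node('N', ['dog']).writecompile() == 'N=dog', and
    # Node('NP', [None, Node('N', ['dog'])]).writecompile() == '[NP ~ N=dog]'.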
def graph(self, name, ishead=False):
"""Convert self to a dot graph."""
ret = ''
if ishead:
ret += 'digraph {'
ret += '%s [label="%s"];' % (name, self.ntype)
for i, c in enumerate(self.children):
ret += '%s -> %s%d;' % (name, name, i)
if isinstance(c, Node):
ret += c.graph(name+str(i))
else:
ret += '%s%d [label="%s"];' % (name, i, str(c))
if ishead:
ret += '}'
return ret
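    # e.g. for n = Node('N', ['dog']), n.graph('n0', ishead=True) returns
    # 'digraph {n0 [label="N"];n0 -> n00;n00 [label="dog"];}', ready to be
    # piped to graphviz.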
def flatten(self):
"""Flatten X-bar phrases to single nodes (destructive).
Converts [XP specifier [Xmod modifier [Xbar head complement]]]
to [XP specifier modifier head complement]
"""
for c in self.children:
if isinstance(c, Node):
c.flatten()
if self.ntype[-1] == 'P':
n = self.ntype[:-1]
if len(self.children) != 2: return None
if not isinstance(self.children[1], Node): return None
m = self.children[1]
if m.ntype != n+'mod': return None
if len(m.children) != 2: return None
if not isinstance(m.children[1], Node): return None
b = m.children[1]
if b.ntype != n+'bar': return None
if len(b.children) != 2: return None
self.children = [self.children[0], m.children[0], b.children[0], b.children[1]]
def unflatten(self):
"""Transform nodes with 4 children to X-bar phrases (destructive).
Inverse of flatten()
Converts [XP specifier modifier head complement]
to [XP specifier [Xmod modifier [Xbar head complement]]]
"""
for c in self.children:
if isinstance(c, Node):
c.unflatten()
if self.ntype[-1] == 'P' and len(self.children) == 4:
ch = self.children
n = self.ntype[:-1]
self.children = [ch[0], Node(n+'mod', [ch[1], Node(n+'bar', [ch[2], ch[3]])])]
def rotated(self, lang):
"""Determine whether the children should be reversed for sentence generation."""
        return (self.ntype in Language.getormake(lang).rotate) != self.rotate
def tagify_all(self, lang):
"""Run Morpheme.tagify() on all Morphemes in a tree."""
rev = self.rotated(lang)
ret = []
for c in self.children:
if isinstance(c, Node):
                a = c.tagify_all(lang)
elif isinstance(c, Morpheme):
a = [c.tagify()]
else:
a = [c] if c else []
if rev:
ret = a + ret
else:
ret += a
return ret
def linear(self, lang):
"""Convert a tree to an ordered list of Morphemes."""
l = []
for c in self.children:
if isinstance(c, Node):
l.append(c.linear(lang))
elif c:
l.append([c])
if self.rotated(lang):
l.reverse()
r = []
for c in l:
r += c
return r
def iternest(self):
"""Iterate over all elements in a tree."""
yield self
for ch in self.children:
if isinstance(ch, Node):
yield from ch.iternest()
else:
yield ch
def roots(self):
"""Return the roots of all Morphemes in a tree."""
ret = []
for ch in self.children:
if isinstance(ch, Morpheme):
ret.append(ch.root)
elif isinstance(ch, Node):
ret += ch.roots()
return ret
def alllang(self, lang):
"""Verify that all Morphemes in a tree are in the target language."""
for n in self.iternest():
if isinstance(n, Morpheme) and n.lang != lang:
return False
return True
class UnorderedCollector(PatternElement):
"""Collection of Variables that matches the children of a Node
Matched children are associated with the first matching Variable.
Variables with .group and .opt both False will match exactly 1 child,
or the match will fail.
These are typically used to match [I] Nodes of verbal conjugations.
"""
def __init__(self, ntype, children, loc):
PatternElement.__init__(self, ntype, None, loc)
self.children = children
def getvars(self, tree, vrs):
PatternElement.getvars(self, tree, vrs)
if not vrs[' failed']:
if not isinstance(tree, Node):
vrs[' failed'] = 'UnorderedCollector only matches Nodes'
return vrs
found = set()
for c in tree.children:
if not c:
continue
for i, v in enumerate(self.children):
v.getvars(c, vrs)
if not vrs[' failed']:
found.add(i)
break
else:
vrs[' failed'] = False
else:
vrs[' failed'] = 'no matching variables found for %s' % c
break
else:
for i, v in enumerate(self.children):
if isinstance(v, Variable) and v.label not in vrs:
if v.opt:
vrs[v.label] = None
found.add(i)
else:
vrs[' failed'] = 'unmatched variable'
break
if len(found) < len(self.children):
vrs[' failed'] = 'unmatched element'
return vrs
def putvars(self, vrs):
ch = []
for v in self.children:
a = v.putvars(vrs)
if isinstance(a, list):
ch += a
else:
ch.append(a)
return Node(self.ntype, ch)
def __str__(self):
return '<%s %s>' % (self.ntype, ' '.join(str(x) for x in self.children))
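# Usage sketch (hypothetical pattern, not from any shipped language file):
# toobj('<I $t:tense $m:mood?>', lang, loc) builds an UnorderedCollector that
# matches an [I ...] node whose children each match $t or $m in any order;
# $m is optional, while $t must be matched for the pattern to succeed.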
###TRANSFORMATIONS
class Rule:
"""Base class for transformations
Rule applications are ordered by stage, starting with 0 and no guarantees
are made about ordering within a single stage.
"""
def __init__(self, langs, category='', mode='syntax', stage=0, name='', debug=False):
self.langs = langs
self.category = category
self.mode = mode
self.stage = stage
self.name = name
self.debug = debug
if self.langs[0] == self.langs[1]:
l = Language.getormake(self.langs[0])
if mode == 'linear':
l.linear[category].append(self)
elif mode == 'linear-text':
l.lineartext[category].append(self)
else:
x = len(l.movement[category])
l.movement[category].append(self)
assert(len(l.movement[category]) > x)
else:
l = LangLink.getormake(self.langs[0], self.langs[1])
if mode == 'syntax':
l.syntax.append(self)
else:
l.pats[category].append(self)
class Translation(Rule):
"""Transformation consisting of a context, form, and result
Applies result to form when form is embedded in context.
"""
def __init__(self, form, result, category, langs, context=None, mode='syntax', stage=0, name=''):
self.form = form
self.result = result
self.roots = [] #roots of all morphemes in form
if isinstance(form, Node):
self.roots = form.roots()
self.rootset = set(self.roots)
self.resultroots = []
if isinstance(result, Node):
self.resultroots = result.roots()
self.resultrootset = set(self.resultroots)
self.addedroots = self.resultrootset - self.rootset
self.context = context or Variable(' ')
Rule.__init__(self, langs, category, mode, stage, name)
def __str__(self):
return '{%s => %s}%s' % (self.form, self.result, self.roots)
def __repr__(self):
return self.__str__()
class MultiRule(Rule):
"""Multi-layer transformation
Each layer contains 1 or more forms, each of which has an associated
result and serves as a context for the next layer.
"""
def __init__(self, layers, category, langs, mode='syntax', stage=0, name=''):
self.layers = layers
self.roots = [] #roots of all morphemes in form
self.rootset = set(self.roots)
self.resultroots = []
self.resultrootset = set(self.resultroots)
self.addedroots = self.resultrootset - self.rootset
self.ntypelist = []
if all(isinstance(x[0], Node) for x in layers[0]):
self.ntypelist = [x[0].ntype for x in layers[0]]
Rule.__init__(self, langs, category, mode, stage, name)
def applyrules(rules, vrs):
"""Apply the output of a rule to a set of variables."""
putback = {}
for rule in rules:
if isinstance(rule, DataElement) or isinstance(rule, UnorderedCollector):
vrs[' '] = rule.putvars(vrs)
elif isinstance(rule, list):
if rule[0] == 'setlang':
vrs[' '].lang = rule[1]
elif rule[0] == 'setdisplay':
vrs[' '].props['display'] = rule[1]
elif rule[0] == 'set':
vrs[' '].props.update(rule[1])
elif rule[0] == 'setprop':
if isinstance(rule[2], str):
rule[1].place(vrs, rule[2])
else:
rule[1].place(vrs, rule[2].retrieve(vrs))
elif rule[0] == 'rotate':
vrs[' '].rotate = True
elif rule[0] == 'makevar':
vrs[rule[1]] = copy.deepcopy(rule[2])
elif rule[0] == 'order':
ch = []
for v in rule[2:]:
if v.label in vrs and vrs[v.label]:
ch.append(vrs[v.label])
vrs[' '] = Node(rule[1], ch)
elif rule[0] == 'node':
                vrs[' '] = toobj(*rule[1:]).putvars(vrs)
elif rule[0] == 'cond':
for op in rule[1:]:
if all(v.checkset(vrs) for v in op[0]):
applyrules(op[1:], vrs)
break
elif rule[0] == 'distribute':
src = rule[1]
dst = rule[2]
try:
val = vrs[rule[3].label][src]
except:
print(vrs[' '])
print(rule)
raise
for l in rule[4:]:
nv = None
for v in (l if isinstance(l, list) else [l]):
if v.label in vrs and vrs[v.label]:
vrs[v.label].props[dst] = val
if src in vrs[v.label]:
nv = vrs[v.label][src]
if nv:
val = nv
elif rule[0] == 'log':
print(rule[1].retrieve(vrs))
elif rule[0] == 'print':
print(rule[1])
elif rule[0] == 'pull':
putback[rule[1]] = vrs[rule[1]]
vrs[rule[1]] = None
elif rule[0] == 'replace':
putback[rule[1]] = vrs[rule[1]]
vrs[rule[1]] = vrs[rule[2]]
vrs.update(putback)
###GENERATION
class SyntaxPat:
"""Pattern for syntax tree generation
Contains a list of sets of conditions, each with an associated tree output
and an associated list of requirements that will stop generation in the
parser if unmet (intended to speed up the parser).
"""
def __init__(self, name, conds, opts, vrs, require):
self.name = name
self.conds = conds
self.opts = opts
self.vrs = vrs
self.require = require
def __str__(self):
return 'SyntaxPat(%s, %s, %s, %s)' % (self.name, self.conds, self.opts, self.vrs)
def __repr__(self):
return self.__str__()
class Language:
"""Collection of language-wide per-language settings"""
__alllangs = {}
def __init__(self, lang):
#Metadata
self.name = ''
self.names = {}
self.creator = ''
#General
self.lang = lang
self.syntax = {}
self.rotate = []
self.syntaxstart = None
#Movement
self.movement = defaultdict(list)
self.linear = defaultdict(list)
self.lineartext = defaultdict(list)
#Transducer
self.lexc = ''
self.lexc_lexicons = []
self.tags = []
self.tags_rootsplit = '' #for cases where it's easiest to have tags between parts of the root
self.morph_mode = '' #hfst or lttoolbox
self.capitalize = False
Language.__alllangs[lang] = self
def isloaded(lang):
"""Check whether a language has been loaded from its data file."""
return lang in Language.__alllangs
def getormake(lang):
"""Return the associate Language object, loading from file if needed."""
if lang in Language.__alllangs:
return Language.__alllangs[lang]
else:
return loadlang(lang)
def getpats(self):
"""Return a dictionary of patterns for sentence generation."""
r = {}
r.update(self.syntax)
for k, v in Morpheme.itermorph(self.lang).items():
r[k] = list(v.values())
return r
def movefind(self, roots):
"""Return a list of movement rules, sorted by stage."""
s = set(roots + [''])
ret = defaultdict(list)
for r in s:
for p in self.movement[r]:
if p.rootset < s:
ret[p.stage].append(p)
return [ret[k] for k in sorted(ret.keys())]
def domovement(self, sen):
"""Retrieve and apply movement rules to a sentence."""
pats = self.movefind(sen.roots())
tr = [sen]
for p in pats:
ntr = []
for s in tr:
ntr += s.transform(p, False)
tr = ntr or tr
return tr
def totext(self, sen):
"""Generate the default surface form of a sentence."""
return dolinear(self.domovement(sen)[0], self.lang)
def allnames():
"""Return the names of all loaded languages."""
return [(x, Language.__alllangs[x].name) for x in sorted(Language.__alllangs.keys())]
def iterlex(self):
"""Iterate over all Morphemes in this language."""
dct = Morpheme.itermorph(self.lang)
for ntype in dct:
for root in dct[ntype]:
yield dct[ntype][root]
class LangLink:
"""Container for translations for a language pair in a particular direction"""
__alllinks = {}
def __init__(self, fromlang, tolang):
self.fromlang = fromlang
self.tolang = tolang
self.syntax = []
self.pats = defaultdict(list)
LangLink.__alllinks['%s-%s' % (fromlang, tolang)] = self
def find(self, _roots):
"""Retrieve all rules applicable to a set of roots."""
roots = _roots + ['']
s = set(roots)
ret = defaultdict(list)
for r in roots:
for p in self.pats[r]:
if p.rootset < s:
ret[p.stage].append(p)
return [ret[k] for k in sorted(ret.keys())]
def getormake(fromlang, tolang):
"""Retrieve a LangLink, loading from file if needed."""
s = '%s-%s' % (fromlang, tolang)
if s in LangLink.__alllinks:
return LangLink.__alllinks[s]
else:
return loadtrans(fromlang, tolang)
def translate(self, sen):
"""Translate a sentence."""
pats = self.find(sen.roots())
tr = [sen]
for p in pats:
ntr = []
for s in tr:
ntr += s.transform(p, False)
if ntr:
tr = ntr
return tr
def run(prog, *args, data=None):
"""Launch an external program, pass data to it and return its output."""
proc = Popen([prog] + list(args), stdin=PIPE, stdout=PIPE, universal_newlines=True)
if data:
return proc.communicate(data)[0]
def transduce(data, lang, gen=True):
"""Pass data to a transducer.
gen=True for generation, gen=False for parsing
"""
mode = Language.getormake(lang).morph_mode
if mode not in ['hfst', 'lttoolbox']:
raise Exception('Unknown morphology mode %s' % mode)
path = Globals.path + 'langs/%d/.generated/' % lang
path += ('gen' if gen else 'parse') + ('hfst' if mode == 'hfst' else 'bin')
if gen:
data = '\n'.join(data) if mode == 'hfst' else '^'+('$\n^'.join(data))+'$'
if mode == 'hfst':
result = run('hfst-lookup', '-q', '-b', '0', '-i', path, data=data)
return [x.split('\t')[1] for x in result.strip().split('\n\n')]
else:
result = run('lt-proc', '-g', path, data=data)
return [x[1:] if x[0] == '~' else x for x in result.split('\n')]
else:
if mode == 'hfst':
result = run('hfst-proc', '-x', '-w', path, data=data+'\n').split('\n\n')
resplus = run('hfst-proc', '-x', '-w', path, data=data.replace(' ', '+')+'\n')
return result + [x for x in resplus.split('\n\n') if '+' in x]
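# Illustrative call (tag string and language id are made up; real paths come
# from Globals.path and the language's morph_mode): transduce(['dog<n>'], 3)
# would feed the tags to hfst-lookup or lt-proc and return the surface forms.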
def dolinear(sen, _lang):
"""Apply rules that manipulate adjacent Morphemes rather than trees."""
lin = sen.linear(_lang)
lang = Language.getormake(_lang)
for i, m in enumerate(lin):
for pat in lang.linear[m.root]:
if not pat.form.check(m):
continue
if isinstance(pat.context, list):
for d, p in pat.context:
if i+d < 0 or i+d >= len(lin):
break
if p.check(lin[i+d]):
break
else:
for d, r in pat.result:
if r == 'inaudible':
lin[i+d]['audible'] = 'false'
elif isinstance(r, list) and r[0] == 'display':
lin[i+d]['display'] = r[1]
else:
lin[i+d] = r
lintxt = transduce([x.tagify() for x in lin], _lang)
for i, m in enumerate(lin):
for pat in lang.lineartext[m.root]:
if isinstance(pat.context, list):
for d, p in pat.context:
if i+d < 0 or i+d >= len(lintxt):
break
if isinstance(p, str) and lintxt[i+d] != p:
break
if not isinstance(p, str) and not p.match(lintxt[i+d]):
break
else:
lintxt[i] = pat.result
final = []
for i, m in enumerate(lin):
if 'audible' in m and m['audible'] == 'false':
continue
elif 'display' in m:
final.append(m['display'])
else:
final.append(lintxt[i])
ret = ' '.join(final).replace('+', ' ').replace('- -', '').replace('- ', '').replace(' -', '')
if lang.capitalize:
for i, c in enumerate(ret):
if c.isalpha():
ret = ret[:i] + ret[i].capitalize() + ret[i+1:]
break
return ret
###PARSING
def tokenize(s):
"""Tokenize a string."""
ret = []
add = False
digraph = False
for c in s:
if c in '[]<>$(){}=@~*':
if digraph:
ret[-1] += c
digraph = False
else:
ret.append(c)
add = False
elif c == '|':
if digraph:
ret[-1] += c
else:
ret.append(c)
digraph = True
elif c.isspace():
add = False
digraph = False
elif add:
ret[-1] += c
else:
ret.append(c)
add = True
return ret
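# e.g. tokenize('[NP $x:N ~]') == ['[', 'NP', '$', 'x:N', '~', ']'] --
# bracket/operator characters are split off, everything else is grouped
# into words, and '|[' is kept together as a digraph.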
def toobj(s, lang, loc):
"""Parse a string into language lang from original source loc."""
assert(isinstance(lang, int))
Language.getormake(lang)
rest = tokenize(s)
def destring():
nonlocal rest
cur = rest.pop(0)
def ok(th):
return th[0] not in '[]<>$(){}=@|~*'
if cur == '~':
return None
elif cur == '*':
return Unknown()
elif cur == '@':
return Variable(' ', loc=loc)
elif cur == '$': #Variable
ret = Variable.fromstring(rest.pop(0))
if not ret:
raise ParseError('Badly formed variable at %s' % loc)
ret.loc = loc
if rest and rest[0] == '{':
ret.props.update(destring())
if rest and rest[0] == '(':
rest.pop(0)
if len(rest) >= 2 and rest[1] == ')':
ret[rest.pop(0)] = Unknown()
rest.pop(0)
elif len(rest) >= 4 and ok(rest[0]) and rest[1] == '=' and ok(rest[2]) and rest[3] == ')':
ret[rest[0]] = rest[2]
rest = rest[4:]
else:
if rest[0] == '%': rest.pop(0) #@TODO total hack
# later go through and switch to {} for properties and have () be only .cond
ret.cond = destring()
if rest[0] != ')':
raise ParseError('Badly formed variable condition on line %s (remainder was %s).' % (loc, rest))
rest.pop(0)
return ret
elif cur == '[': #Syntax
ntype = rest.pop(0)
ch = []
while rest[0] != ']':
ch.append(destring())
d = {}
rest.pop(0)
if rest and rest[0] == '{':
d = destring()
return Node(ntype, ch, d, loc=loc)
elif cur == '|[': #xbar Sytnax
if rest[0][0] == '?':
rest = ['?', rest[0][1:]] + rest[1:]
if rest[0] not in '*?$~':
rest = ['~'] + rest
mode = rest.pop(0)
name = rest.pop(0)[:-1]
spots = ['spec', 'mod', 'head', 'comp']
sub = {'*': [Unknown(), Unknown(), Unknown(), Unknown()],
'?': [Variable(name+s, opt=True, loc=loc) for s in spots],
'$': [Variable(name+s, loc=loc) for s in spots],
'~': [None, None, None, None]}[mode]
ch = []
while rest and rest[0] != ']':
ch.append(destring())
if rest:
rest.pop(0)
else:
raise ParseError('Syntax mode is missing closing bracket at %s' % loc)
if len(ch) == 0: #nothing
ch.insert(0, sub[2]) #insert head
if len(ch) == 1: #just head
ch.insert(1, sub[3]) #insert comp
if len(ch) == 2: #head and comp
ch.insert(0, sub[0]) #insert spec
if len(ch) == 3: #spec, head, and comp
ch.insert(1, sub[1]) #insert mod
d = {}
if rest and rest[0] == '{':
d = destring()
if Globals.flat:
return Node(name+'P', ch, d, loc=loc)
else:
bar = Node(name+'bar', ch[2:], loc=loc)
mod = Node(name+'mod', [ch[1], bar], loc=loc)
return Node(name+'P', [ch[0], mod], d, loc=loc)
elif cur == '<': #UnorderedCollector
ntype = rest.pop(0)
ch = []
while rest and rest[0] != '>':
ch.append(destring())
if not rest:
raise ParseError('Incomplete Unordered Collector, missing > at %s' % loc)
rest.pop(0)
return UnorderedCollector(ntype, ch, loc=loc)
elif cur == '{': #props pattern
d = {}
while rest[0] != '}':
p = rest.pop(0)
assert(rest.pop(0) == '=')
d[p] = rest.pop(0)
rest.pop(0)
return d
else:
if rest[0] == '=': #Morpheme
pos = cur
root = rest.pop(1)
rest.pop(0)
d = {}
if rest and rest[0] == '{':
d = destring()
return Morpheme(lang, pos, root, isref=True, props=d, loc=loc)
else:
rest = ['$', ':'+cur] + rest
return destring()
try:
ret = destring()
except:
print('original line: %s' % s)
print('problem on line %s, add more checks, unparsed remainder was %s' % (loc, rest))
raise
if rest != []:
print('problem on line %s, unparsed remainder was %s' % (loc, rest))
assert(rest == [])
return ret
###FILES
class ParseError(Exception):
pass
class ParseLine:
"""Line from a data file, has label, arguments, value, and children"""
def __init__(self, num, label, args=None, val=None, children=None):
self.num = num
self.label = label
self.args = args or []
self.arg = '; '.join(self.args)
self.val = val or ''
self.vals = [val] if val else []
self.children = children or []
def fromstring(fstr, num):
"""Parse a line (without leading whitespace).
Allowed formats:
label (arg1; arg2): value
label: value
label (arg1; arg2)
label
"""
i = 0
r = ParseLine(num, '', [], '', [])
while i < len(fstr) and fstr[i] not in ' :(':
r.label += fstr[i]
i += 1
r.label = r.label.strip()
while i < len(fstr) and fstr[i] == ' ':
i += 1
p = 0
if i < len(fstr)-1 and fstr[i] == '(':
i += 1
s = ''
while fstr[i] != ')' or p != 0:
s += fstr[i]
if fstr[i] == '(':
p += 1
if fstr[i] == ')':
p -= 1
i += 1
i += 1
r.args = [x.strip() for x in s.split(';') if not x.isspace()]
r.arg = s.strip()
if i < len(fstr)-1 and fstr[i] == ':':
i += 2
r.val = fstr[i:].strip()
r.vals = [x.strip() for x in r.val.split(';') if not x.isspace()]
i = len(fstr)
if i != len(fstr):
raise ParseError('Something is wrong with line %s.\nString was "%s", position: %d' % (num, fstr, i))
else:
return r
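    # e.g. ParseLine.fromstring('rule (a=b): do it', 7) yields label 'rule',
    # args ['a=b'], and val 'do it'.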
def fromfile(fname):
"""Parse a file and return a list of ParseLines."""
r = ParseLine(-1, '', [], '', [])
depth = 0
with open(fname) as f:
for i, l in enumerate(f):
if l.isspace() or l.lstrip()[0] == '#':
continue
                while not l.startswith('  '*depth):
depth -= 1
lobj = ParseLine.fromstring(l.rstrip()[depth*2:], 'line %s of %s' % (i+1, fname))
at = r
for d in range(depth):
at = at.children[-1]
at.children.append(lobj)
depth += 1
return r.children
def tofilestr(self, indent):
"""Convert self back to a string."""
        r = '  '*indent + '%s' % self.label
if self.args:
r += ' (' + '; '.join(self.args) + ')'
if self.vals:
r += ': ' + '; '.join(self.vals)
r += '\n'
for c in self.children:
r += c.tofilestr(indent+1)
return r
def tofile(self, fname):
"""Convert self to string and write to a file."""
f = open(fname, 'w')
f.write(self.tofilestr(0))
f.close()
def __str__(self):
        return '%s %s (%s): %s\n' % (self.num, self.label, '; '.join(self.args), self.val) + ''.join([str(x) for x in self.children])
def __getitem__(self, key):
"""Iterates of children that have label == key."""
for ch in self.children:
if ch.label == key:
yield ch
def __contains__(self, key):
for ch in self.children:
if ch.label == key:
return True
return False
def child_vals(self, key):
"""Iterate over values of self[key]."""
for ch in self[key]:
yield ch.val
def first(self, key):
"""Return first child with label == key."""
for ch in self.children:
if ch.label == key:
return ch
def firstval(self, key):
"""Return value of first child with label == key."""
return self.first(key).val
def fvo(self, key, lang, default=None):
"""Parse the value of the first child with label == key.
Use default if no value is found.
"""
f = self.first(key)
if f:
return toobj(f.val, lang, f.num)
elif default:
return toobj(default, lang, self.num)
else:
raise ParseError('Line %s does not have required child %s.' % (self.num, key))
def avo(self, key, lang, default=None): #all val objects
"""Parse the values of all children with label == key.
Use default if none are found.
"""
c = 0
for ch in self.children:
if ch.label == key:
c += 1
yield toobj(ch.val, lang, ch.num)
if c == 0:
if default:
yield toobj(default, lang, self.num)
else:
raise ParseError('Line %s does not have required child(ren) %s.' % (self.num, key))
def condlist(ch):
"""Parse the argument of a ParseLine.
Transforms "(a=b; c=d)" into [['a', 'b'], ['c', 'd']].
"""
ret = []
for s in ch.args:
k,v = s.split('=')
ret.append([k.strip(), v.strip()])
return ret
def readresult(node, lang):
"""Read the results section of a rule definition."""
ret = []
def mkvar(_s, loc):
if '$' in _s or '@' in _s:
s = _s.replace('@', ' ')
else:
            s = '$ .'+_s
r = Variable.fromstring(s)
if r == None:
raise ParseError('Cannot interpret variable %s on line %s.' % (_s, loc))
return r
for ch in node.children:
if ch.label == 'result':
ret.append(toobj(ch.val, lang, ch.num))
elif ch.label == 'setprop':
ret.append(['setprop', mkvar(ch.arg, ch.num), mkvar(ch.val, ch.num)])
elif ch.label == 'setval':
ret.append(['setprop', mkvar(ch.arg, ch.num), ch.val])
elif ch.label == 'setdisplay':
ret.append(['setdisplay', ch.val])
elif ch.label == 'setprops':
d = {}
for prop in ch.children:
d[prop.label] = prop.val
ret.append(['set', d])
elif ch.label == 'blank':
ret.append(['setdisplay', ''])
elif ch.label == 'set':
ret.append(['set', dict(condlist(ch))])
elif ch.label == 'rotate':
ret.append(['rotate'])
elif ch.label == 'cond':
com = ['cond']
for op in ch.children:
if op.label == 'option':
com.append([[toobj(x, lang, op.num) for x in op.args]] + readresult(op, lang))
ret.append(com)
elif ch.label == 'if':
ret.append(['cond', [[toobj(x, lang, ch.num) for x in ch.args]] + readresult(ch, lang)])
elif ch.label == 'distribute':
ret.append(['distribute'] + ch.args + [toobj(x, lang, ch.num) for x in ch.vals])
elif ch.label == 'order':
ret.append(['order', ch.arg] + [toobj(x, lang, ch.num) for x in ch.vals])
elif ch.label == 'log':
ret.append(['log', toobj(ch.val, lang, ch.num)])
elif ch.label == 'print':
ret.append(['print', ch.val])
elif ch.label == 'makevar':
ret.append(['makevar', ch.arg, toobj(ch.val, lang, ch.num)])
elif ch.label == 'pull':
ret.append(['pull', ch.val])
elif ch.label == 'replace':
ret.append(['replace', ch.arg, ch.val])
return ret
def readrule(node, lfrom, _lto, mode, category, _stage):
"""Read a rule definition."""
if 'samelang' in node:
lto = lfrom
else:
lto = _lto
if 'stage' in node:
stage = int(node.firstval('stage'))
elif node.arg:
stage = int(node.arg)
else:
stage = _stage
if node.label == 'rule':
con = node.fvo('context', lfrom, '@')
form = node.fvo('form', lfrom, '@')
res = readresult(node, lto)
return Translation(form, res, category, [lfrom, lto], context=con, mode=mode, stage=stage, name=node.val)
elif node.label == 'multirule':
layers = []
for ly in node.children:
if ly.val and 'form' not in ly and ly.label[-1] == '~':
ly.label = ly.label[:-1]
ly.children.append(ParseLine(ly.num, 'result', [], ly.val, []))
if ly.val and 'form' not in ly:
ly.children = [ParseLine(ly.num, 'form', [], ly.val, ly.children)]
if ly.label == 'layer?':
ly.children.append(ParseLine(-1, 'form', [], '@', [ParseLine(-1, 'result', [], '@', [])]))
ly.label = 'layer'
if ly.label != 'layer':
continue
for p in ly['form~']:
p.label = 'form'
p.children.append(ParseLine(p.num, 'result', [], p.val, []))
l = []
for p in ly['form']:
op = [toobj(p.val, lfrom, p.num)]
op += readresult(p, lfrom)
l.append(op)
layers.append(l)
return MultiRule(layers, category, [lfrom, lto], mode=mode, stage=stage, name=node.val)
elif node.label == 'linear':
pass
elif node.label == 'linear-text':
pass
def loadlexicon(lang):
"""Read a lexicon file."""
rootslist = ParseLine.fromfile(Globals.path + 'langs/%s/lexicon.txt' % lang)
defaults = defaultdict(lambda: defaultdict(dict))
if rootslist[0].label == 'defaults':
for pat in rootslist.pop(0).children:
defaults[pat.label][pat.val] = {ch.label:ch.val for ch in pat.children}
for root in rootslist:
m = Morpheme(lang, root.arg, root.label, isref=False, props=defaults[root.arg][root.val].copy())
if 'output' not in m.props:
m.props['output'] = []
for p in root.children:
if p.label in ['rule', 'multirule']:
readrule(p, lang, lang, 'lex', root.label, 1)
elif p.label == 'output':
o = [p.arg, p.val, '#']
if '=' in p.arg:
o[0] = condlist(p)
if 'lexicon' in p:
o[2] = p.firstval('lexicon')
m.props['output'].append(o)
elif p.label == 'linear':
con = []
res = []
if p.val:
res.append([0, toobj(p.val, lang, p.num)])
for ch in p.children:
                try:
                    idx = int(ch.label)
                except ValueError:
                    continue
con.append([idx, toobj(ch.val, lang, ch.num)])
if 'inaudible' in ch:
res.append([idx, 'inaudible'])
elif 'to' in ch:
res.append([idx, ch.fvo('to', lang)])
elif 'display' in ch:
res.append([idx, ['display', ch.firstval('display')]])
Translation(m.getref(), res, root.label, [lang, lang], context=con, mode='linear')
elif p.label == 'linear-text':
con = []
for ch in p.children:
if ch.label.isnumeric() or (ch.label[0] == '-' and ch.label[1:].isnumeric()):
if ch.val[0] == '/' and ch.val[-1] == '/':
con.append([int(ch.label), re.compile(ch.val[1:-1])])
else:
con.append([int(ch.label), ch.val])
Translation(m, p.val, root.label, [lang, lang], context=con, mode='linear-text')
else:
m.props[p.label] = p.val
for pos in root['altpos']:
p2 = m.props.copy()
for l in pos.children:
p2[l.label] = l.val
Morpheme(lang, pos.val, m.root, props=p2, isref=False)
def loadlang(lang):
"""Read a language file."""
things = ParseLine.fromfile(Globals.path + 'langs/%s/lang.txt' % lang)
ret = Language(lang)
loadlexicon(lang)
for th in things:
if th.label == 'syntax':
for ch in th.children:
if ch.label == 'start-with':
ret.syntaxstart = ch.val
elif ch.label == 'node-types':
for ty in ch.children:
vrs = [toobj(s, lang, ty.num) for s in ty.child_vals('variable')]
if not list(ty['option']):
ty.children = [ParseLine(-1, 'option', [], '', ty.children)]
conds = []
ops = []
require = []
for op in ty['option']:
if 'xbar' in op:
line = op.first('xbar')
nodes = line.vals
if len(nodes) != 4:
                                raise ParseError('Wrong number of nodes given to xbar on line %s, expected 4, got %s' % (line.num, len(nodes)))
xargs = []
for s, arg in zip(nodes, ['spec', 'mod', 'head', 'comp']):
if s[0] == '$' or s == '~':
xargs.append(s)
else:
xargs.append('$%s:%s'%(arg,s))
node = toobj('|[%s %s]' % (ty.label, ' '.join(xargs)), lang, line.num)
else:
st = op.first('structure')
node = toobj(st.val, lang, st.num)
conds.append([toobj(x, lang, op.num) for x in op.args])
ops.append(node)
req = []
for r in op['require']:
req.append(r.val)
require.append(req)
ret.syntax[ty.label] = SyntaxPat(ty.label, conds, ops, vrs, require)
if th.label == 'transform':
for ch in th.children:
if ch.label == 'rotate':
ret.rotate.append(ch.val)
else:
readrule(ch, lang, lang, 'syntax', '', 0)
if th.label == 'metadata':
if 'creator' in th:
ret.creator = th.firstval('creator')
if 'name' in th:
for ch in th.first('name').children:
if ch.label == 'local':
ret.name = ch.val
ret.names[lang] = ch.val
else:
ret.names[int(ch.label)] = ch.val
if th.label == 'lexc':
ret.morph_mode = th.val
for ch in th.children:
if ch.label == 'split-root':
ret.tags_rootsplit = ch.val
continue
elif ch.label == 'capitalize-first-letter':
ret.capitalize = True
continue
cases = []
if 'lexicon' not in ch:
cases = [ch]
else:
cases = ch.first('lexicon').children
for cs in cases:
ap = {'ntype': ch.label, 'conds': condlist(cs)}
if 'bland' in cs:
ap['lexicon-in'] = ch.label + 'Root'
ap['lexicon-to'] = ch.label + 'Infl'
ap['bland'] = cs.firstval('bland')
ch.children.append(ParseLine(-1, 'format', '', '{root[0]}'+ap['bland'], []))
else:
ap['lexicon-in'] = cs.firstval('in')
ap['lexicon-to'] = cs.firstval('to')
ap['bland'] = False
if 'regex-match' in cs:
ap['regex'] = [cs.firstval('regex-match'), cs.firstval('regex-replace')]
ret.lexc_lexicons.append(ap)
tags = {}
defaults = {}
ls = ch.first('tags').children if 'tags' in ch else []
for tg in ls:
if tg.val:
tags[tg.label] = tg.val
else:
tags[tg.label] = []
for cs in tg['case']:
tags[tg.label].append({'conds': condlist(cs), 'tag': cs.val})
defaults[tg.label] = tg.firstval('default')
if defaults[tg.label] == '_':
defaults[tg.label] = ''
ret.tags.append({'format': ch.firstval('format'), 'tags': tags, 'ntype': ch.label, 'conds': condlist(ch), 'defaults': defaults})
return ret
def loadtrans(lfrom, lto):
"""Read a translation file."""
fname = Globals.path + 'langs/%s/translate/%s.txt' % (lfrom, lto)
ret = LangLink(lfrom, lto)
if isfile(fname):
trans = ParseLine.fromfile(fname)
if trans and trans[0].label != 'stage':
trans = [ParseLine(-1, 'stage', [], '', trans)]
for i, stage in enumerate(trans):
for lex in stage.children:
if lex.label in ['rule', 'multirule']:
readrule(lex, lfrom, lto, 'lex', '', i)
else:
m = toobj(lex.label, lfrom, lex.num)
if lex.val:
for g in lex.vals:
d = toobj(g, lto, lex.num)
Translation(m, [d], category=m.root, langs=[lfrom, lto], mode='lex', stage=i)
for tr in lex.children:
readrule(tr, lfrom, lto, 'lex', m.root, i)
return ret
def loadlangset(langs):
"""Given a set of languages, load them and all associated translation files."""
loaded = []
for l in langs:
if l not in loaded and l != 0:
loadlang(l)
loaded.append(l)
for lf in loaded:
for lt in loaded:
loadtrans(lf, lt)
def addmissing():
"""Add entries for everything in missing_morphemes.txt to the relevant
lexicon files.
"""
f = open('missing_morphemes.txt')
lns = list(set(f.readlines()))
lns.sort()
lang = ''
for _line in lns:
line = _line.strip()
if not line: continue
s = line.split()
l = s[0][:-1]
p,r = s[1].split('=')
if l != lang:
f.close()
f = open(Globals.path + 'langs/%s/lexicon.txt' % l, 'a')
f.write('\n\n#Generated from missing_morphemes.txt\n')
lang = l
print('Writing to langs/%s/lexicon.txt' % l)
f.write('%s (%s)\n' % (r,p))
f.close()
f = open('missing_morphemes.txt', 'w')
f.write('\n')
f.close()
def filltrans(lfrom, lto):
"""Generate empty translation rules for any words in source language
which do not have translation rules to the target language.
"""
Language.getormake(lfrom)
Language.getormake(lto)
LangLink.getormake(lfrom, lto)
fname = Globals.path + 'langs/%s/translate/%s.txt' % (lfrom, lto)
have = []
out = '#Automatically generated from langs/%s/lexicon.txt\n' % lfrom
joinby = '\n'
if isfile(fname):
pl = ParseLine.fromfile(fname)
for l in pl:
if l.label == 'stage':
have += [x.label for x in l.children]
else:
have.append(l.label)
out = '\n\n' + out + 'stage\n '
joinby += ' '
morphdict = Morpheme.itermorph(lfrom)
foundany = False
for pos in sorted(morphdict.keys()):
for root in sorted(morphdict[pos].keys()):
s = pos + '=' + root
if s not in have:
out += s + ': ~' + joinby
foundany = True
if foundany:
f = open(fname, 'a')
f.write(out)
f.close()
class Sentence:
def __init__(self, lang, name, trees, gloss):
self.lang = lang
self.name = name
self.trees = trees
self.gloss = gloss
def fromparseline(pl, lang):
trees = {'':None}
if pl.val:
trees[''] = toobj(pl.val, lang, pl.num)
for l in pl.children:
if l.label != 'gloss':
trees[l.label] = toobj(l.val, lang, l.num)
g = pl.first('gloss')
return Sentence(lang, pl.label, trees, g.val if g else '')
def toparseline(self):
ret = ParseLine(0, self.name, [], None, [])
if self.gloss:
ret.children.append(ParseLine(0, 'gloss', [], self.gloss, []))
for k in sorted(self.trees.keys()):
if not k:
ret.val = self.trees[k].writecompile()
else:
ret.children.append(ParseLine(0, k, [], self.trees[k].writecompile(), []))
return ret
def translate(self, tlang):
ret = Sentence(self.lang, self.name, {}, self.gloss if Globals.keepmeta else '')
if not self.trees:
return ret
tr = LangLink.getormake(self.lang, tlang)
for k in self.trees:
if not self.trees[k]:
continue
#if a sentence doesn't have a tree it will show up as None
for i, s in enumerate(tr.translate(self.trees[k])):
if Globals.partial or s.alllang(tlang):
ret.trees[k+'-'+str(i) if k else str(i)] = s
return ret
def totext(self):
lang = Language.getormake(self.lang)
for k in sorted(self.trees.keys()):
#this should default to tree ''
if self.trees[k]:
return lang.totext(self.trees[k])
return ''
def graph(self):
for k in sorted(self.trees.keys()):
self.trees[k].flatten()
f = open(Globals.path + 'test/%s-%s.dot' % (self.name, k), 'w')
f.write(self.trees[k].graph('n', True))
f.close()
yield '<h3>%s</h3>' % (k or '(default)'), '%s-%s.dot' % (self.name, k)
def readfile(fname):
"""Read in a .pdtxt file, return the Language and a list of Sentences."""
pl = ParseLine.fromfile(fname)
lang = int(pl[0].firstval('lang'))
Language.getormake(lang)
return lang, [Sentence.fromparseline(l, lang) for l in pl[1:]]
def graphtext(infile, outfile):
"""Use GraphViz to generate a set of images for the trees of a document."""
gls = []
f = open(outfile, 'w')
f.write('<html><head></head><body>\n')
for s in readfile(infile)[1]:
f.write('<h1>%s</h1>\n' % s.name)
for h3, fn in s.graph():
f.write('%s<img src="%s.svg"></img>\n' % (h3, fn))
gls.append('test/' + fn)
f.write('</body></html>')
f.close()
run('dot', '-Tsvg', '-O', *gls)
def translatefile(infile, outfile, tlang):
"""Read in a .pdtxt file, translate it, and write it out to another file."""
pl = ParseLine.fromfile(infile)
flang = int(pl[0].firstval('lang'))
if isinstance(outfile, str):
f = open(outfile, 'w')
else:
f = outfile
if Globals.keepmeta:
meta = pl[0]
for x in meta.children:
if x.label == 'lang':
x.vals = [str(tlang)]
else:
meta = ParseLine(0, 'metadata', children=[ParseLine(1, 'lang', val=str(tlang))])
f.write(meta.tofilestr(0))
for l in pl[1:]:
f.write(Sentence.fromparseline(l, flang).translate(tlang).toparseline().tofilestr(0))
if isinstance(outfile, str):
f.close()
class GeneratorError(Exception):
pass
def gen(pats, tree, depth, setvars):
"""Generate a random sentence."""
if isinstance(tree, Node):
r = copy.copy(tree)
rc = []
for c in copy.deepcopy(r.children):
rc.append(gen(pats, c, depth+1, setvars))
r.children = rc
return r
elif isinstance(tree, list):
return random.choice(tree)
elif isinstance(tree, Variable):
if not tree.opt or random.randint(1,100) < 10/depth:
if tree.label in setvars:
return setvars[tree.label]
else:
newtree = pats[tree.ntype]
if isinstance(newtree, list):
newtree = random.choice(newtree)
return gen(pats, newtree, depth+1, setvars)
elif isinstance(tree, SyntaxPat):
vrs = {}
for v in tree.vrs:
vrs[v.label] = gen(pats, v, depth, {})
il = []
for i, cl in enumerate(tree.conds):
for c in cl:
if not c.checkset(vrs):
break
else:
il.append(i)
if not il:
raise GeneratorError("None of the conditions for generation rule '%s' could be satisfied." % tree.name)
return gen(pats, tree.opts[random.choice(il)], depth, vrs)
else:
return tree
def make(lang):
"""Generate a random sentence. Wrapper for gen()"""
p = lang.getpats()
return gen(p, p[lang.syntaxstart], 1, {})
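# Usage sketch (the language id is illustrative and assumes that language has
# been loaded):
#   tree = make(Language.getormake(1))
# yields a random tree rooted at that language's syntaxstart pattern.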
class LimitList:
"""List wrapper for tracking which Morphemes in it are in use"""
def __init__(self, few, many):
self.few = few
self.many = many
def each(self):
for i, x in enumerate(self.few):
yield x, LimitList(self.few[:i]+self.few[i+1:], self.many)
for x in self.many:
yield x, self
def __len__(self):
return len(self.few)+len(self.many)
def __str__(self):
return str(self.few + self.many)
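# Illustrative behaviour (m1, m2, filler are hypothetical morphemes): items in
# "few" are consumed as they are used, while items in "many" can repeat.
#   ll = LimitList([m1, m2], [filler])
#   for morpheme, remaining in ll.each():
#       pass  # "remaining" excludes morpheme whenever it came from "few"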
def makeall(words):
"""Generate all possible trees containing a particular set of Morphemes."""
if not words:
return []
lang = Language.getormake(words[0].lang)
pats = lang.getpats()
for k in pats:
if isinstance(pats[k], list):
many = [x for x in pats[k] if 'audible' in x and x['audible'] == 'false']
few = [x for x in words if x.ntype == k]
pats[k] = LimitList(few, many)
def product(ls):
if len(ls) == 0:
yield ()
else:
for x in genall(*ls[0]):
for y in product(ls[1:]):
yield (x,) + y
def genall(tree, setvars):
nonlocal pats
if isinstance(tree, Node):
for ch in product([[c, setvars] for c in tree.children]):
yield tree.swapchildren(ch)
elif isinstance(tree, list):
yield from tree
elif isinstance(tree, Variable):
if tree.label in setvars:
yield setvars[tree.label]
elif isinstance(pats[tree.ntype], LimitList):
old = pats[tree.ntype]
for r, l in old.each():
pats[tree.ntype] = l
yield r
pats[tree.ntype] = old
else:
yield from genall(pats[tree.ntype], setvars)
if tree.opt:
yield None
elif isinstance(tree, SyntaxPat):
idx = []
for i, req in enumerate(tree.require):
if all(len(pats[x]) > 0 for x in req):
idx.append(i)
if idx:
labs = [v.label for v in tree.vrs]
for vrs in product([[v, {}] for v in tree.vrs]):
dct = dict(zip(labs, vrs))
for i in idx:
if all(c.checkset(dct) for c in tree.conds[i]):
for x in genall(tree.opts[i], dct):
yield x
else:
yield tree
return genall(pats[lang.syntaxstart], {})
def parse(lang, num, text):
"""Attempt to a parse a sentence by generating possible corresponding trees."""
ret = Sentence(lang, str(num), {}, text)
tags = transduce(text, lang, False)
w = []
for m in Target.iterlex():
r = re.compile(m.tagify(True))
for t in tags:
if r.search(t):
w.append(m)
ln = Language.getormake(lang)
n = 0
for x in makeall(w):
if ln.totext(x) == text:
n += 1
ret.trees[str(n)] = x
return ret
def trans(sen, flang, tlang):
"""Translate a sentence."""
tr = LangLink.getormake(flang, tlang).translate(sen)
ret = []
for s in tr:
if Globals.partial or s.alllang(tlang):
ret.append(s)
return ret
if __name__ == '__main__':
import argparse, sys
parser = argparse.ArgumentParser(description='Generate, translate, and parse sentences.')
def writelines(lines, where):
        j = ('\n' * Globals.spacing) or ' '
if where:
f = open(where[0], 'w')
if Globals.blob:
f.write(j.join(lines) + '\n')
else:
for l in lines:
f.write(l + j)
f.close()
elif Globals.blob:
print(j.join(lines))
else:
for l in lines:
print(l, end=j)
    def readline(lang, src):
        # Placeholder: reading input by language/source is not implemented yet.
        if src.isnumeric():
            pass
STDINLINE = 1
class TranslateAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if len(values) == 0:
print('Translate single tree: -t L1 [SRC] L2 [DEST]\n' + \
' read a tree from SRC (leave blank for stdin)\n' + \
' translate from L1 to L2\n' + \
' output to DEST (leave blank for stdout)\n' + \
'Translate .pdtxt file: -t SRC LANG [DEST]\n' + \
' translate contents of file SRC to LANG\n' + \
' output to DEST (leave blank for stdout)')
elif values[0].isnumeric():
flang = int(values.pop(0))
if values[0].isnumeric():
line = sys.stdin.readline()
global STDINLINE
where = 'standard input line %s' % STDINLINE
STDINLINE += 1
else:
where = values.pop(0)
f = open(where)
where += ' line 1'
line = f.readline()
f.close()
tree = toobj(line, flang, where)
            tr = trans(tree, flang, int(values.pop(0)))
writelines((t.writecompile() for t in tr), values)
else:
if len(values) >= 3:
translatefile(values[0], values[2], int(values[1]))
else:
translatefile(values[0], sys.stdout, int(values[1]))
class GenerateAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
lang = Language.getormake(int(values.pop(0)))
sen = make(lang)
writelines([sen.writecompile()], values)
class ParseAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values[0].isnumeric():
lines = [sys.stdin.readline()]
else:
f = open(values.pop(0))
lines = f.readlines()
f.close()
lang = int(values.pop(0))
if values:
out = open(values[0], 'w')
else:
out = sys.stdout
for i, t in enumerate(lines):
l = t.strip()
if l:
out.write(parse(lang, i+1, l).toparseline().tofilestr(0))
if values:
out.close()
class DisplayAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values[0].isnumeric() or (len(values) > 1 and values[1].isnumeric()):
if values[0].isnumeric():
line = sys.stdin.readline()
global STDINLINE
where = 'standard input line %s' % STDINLINE
STDINLINE += 1
else:
where = values.pop(0)
f = open(where)
where += ' line 1'
line = f.readline()
f.close()
lang = int(values.pop(0))
if values:
f = open(values[0], 'w')
else:
f = sys.stdout
txt = Language.getormake(lang).totext(toobj(line, lang, where))
f.write(txt + '\n')
if values:
f.close()
else:
lines = readfile(values.pop(0))[1]
writelines((l.totext() for l in lines), values)
class BlankAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if values:
filltrans(int(values[0]), int(values[1]))
else:
addmissing()
class SetGlobal(argparse.Action):
def __init__(self, *args, **kwargs):
self.todo = kwargs['todo']
del kwargs['todo']
kwargs['nargs'] = 0
argparse.Action.__init__(self, *args, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
global Globals
Globals.__dict__[self.todo[0]] = self.todo[1]
parser.add_argument('-t', '--translate', type=str, nargs='*', action=TranslateAction, metavar='ARG', help="Translate trees (run 'doodle.py -t' for detailed help)")
parser.add_argument('-g', '--generate', type=str, nargs='+', action=GenerateAction, metavar=('LANG', 'DEST'), help='Randomly generate a tree in LANG and output to DEST or stdout')
parser.add_argument('-p', '--parse', type=str, nargs='+', action=ParseAction, metavar=('[SRC] LANG', 'DEST'), help='Attempt to parse SRC or next line of std into trees in LANG, output to DEST or stdout')
parser.add_argument('-d', '--display', type=str, nargs='+', action=DisplayAction, metavar=('SRC [LANG]', 'DEST'), help='Get trees from SRC or stdin, convert to text and output to DEST or stdout')
parser.add_argument('-F', '--flatten', action=SetGlobal, todo=('flat', True), help='Start flattening phrases into single nodes')
parser.add_argument('-DF', '--dont-flatten', action=SetGlobal, todo=('flat', False), help='Stop flattening phrases')
parser.add_argument('-U', '--use-unknown', action=SetGlobal, todo=('unknown_error', False), help='Begin logging unknown morphemes to missing_morphemes.txt, don\'t error')
parser.add_argument('-am', '--add-missing', nargs=0, action=BlankAction, help='Append everything in missing_morphemes.txt to the relevant lexicon files')
parser.add_argument('-ft', '--fill-trans', nargs=2, action=BlankAction, metavar=('LANG1', 'LANG2'), help='Add blank entries in translation file from LANG1 to LANG2 for any morpheme not already listed')
args = parser.parse_args()
| mit | 781,176,970,670,820,000 | 39.28469 | 207 | 0.488751 | false |
dmerejkowsky/qibuild | python/qitoolchain/feed.py | 1 | 3967 | ## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" Toolchain feeds
"""
import os
import sys
import hashlib
import urlparse
from xml.etree import ElementTree
from qisys import ui
import qisys
import qisys.archive
import qisys.remote
import qisys.version
import qibuild.config
import qitoolchain
def is_url(location):
""" Check that a given location is an URL """
return "://" in location
def raise_parse_error(package_tree, feed, message):
""" Raise a nice pasing error about the given
package_tree element.
"""
as_str = ElementTree.tostring(package_tree)
mess = "Error when parsing feed: '%s'\n" % feed
mess += "Could not parse:\t%s\n" % as_str
mess += message
raise Exception(mess)
def tree_from_feed(feed_location):
""" Returns an ElementTree object from an
feed location
"""
fp = None
tree = None
try:
if os.path.exists(feed_location):
fp = open(feed_location, "r")
else:
if is_url(feed_location):
fp = qisys.remote.open_remote_location(feed_location)
else:
raise Exception("Feed location is not an existing path nor an url")
tree = ElementTree.ElementTree()
tree.parse(fp)
except Exception:
ui.error("Could not parse", feed_location)
raise
finally:
if fp:
fp.close()
return tree
class ToolchainFeedParser:
""" A class to handle feed parsing
"""
def __init__(self):
self.packages = list()
# A dict name -> version used to only keep the latest
# version
self.blacklist = list()
self._versions = dict()
def get_packages(self):
""" Get the parsed packages """
        res = [x for x in self.packages if x.name not in self.blacklist]
return res
def append_package(self, package_tree):
""" Add a package to self.packages.
If an older version of the package exists,
replace by the new version
"""
version = package_tree.get("version")
name = package_tree.get("name")
names = self._versions.keys()
if name not in names:
self._versions[name] = version
self.packages.append(qitoolchain.qipackage.from_xml(package_tree))
else:
if version is None:
# if version not defined, don't keep it
return
prev_version = self._versions[name]
if prev_version and qisys.version.compare(prev_version, version) > 0:
return
else:
self.packages = [x for x in self.packages if x.name != name]
self.packages.append(qitoolchain.qipackage.from_xml(package_tree))
self._versions[name] = version
def parse(self, feed):
""" Recursively parse the feed, filling the self.packages
"""
tree = tree_from_feed(feed)
package_trees = tree.findall("package")
package_trees.extend(tree.findall("svn_package"))
for package_tree in package_trees:
package_tree.set("feed", feed)
self.append_package(package_tree)
feeds = tree.findall("feed")
for feed_tree in feeds:
feed_url = feed_tree.get("url")
if feed_url:
# feed_url can be relative to feed:
if not "://" in feed_url:
feed_url = urlparse.urljoin(feed, feed_url)
self.parse(feed_url)
select_tree = tree.find("select")
if select_tree is not None:
blacklist_trees = select_tree.findall("blacklist")
for blacklist_tree in blacklist_trees:
name = blacklist_tree.get("name")
if name:
self.blacklist.append(name)
| bsd-3-clause | -1,921,144,499,196,732,200 | 29.05303 | 83 | 0.586589 | false |
sertansenturk/tomato | setup.py | 1 | 6202 | import configparser
import os
import re
import subprocess
import zipfile
from io import BytesIO
from urllib.request import urlopen
from setuptools import find_packages, setup
HERE = os.path.abspath(os.path.dirname(__file__))
TOMATO_DIR = "src"
def get_version():
""" Read version from __init__.py
Raises:
ValueError: if __init__ is not read, or __version__ is not in __init__
Returns:
str -- value of __version__ as defined in __init__.py
"""
    version_file = os.path.join(HERE, TOMATO_DIR, "tomato", "__init__.py")
    with open(version_file) as f:
        init_contents = f.read().strip()
    exp = r"^__version__ = ['\"]([^'\"]*)['\"]"
    mo = re.search(exp, init_contents, re.M)
    if mo:
        return mo.group(1)
    raise ValueError("Unable to find version string in %s." % (version_file,))
def get_long_description():
"""Get the long description from the README file
Returns:
str -- the README content in the markdown format
"""
try:
with open(os.path.join(HERE, "README.md"), encoding="utf-8") as f:
return f.read()
except FileNotFoundError: # not necessary, e.g. in Docker
return ""
class BinarySetup:
@classmethod
def setup(cls):
"""Downloads compiled binaries for the OS from the relevant git repos
Raises:
OSError: if the OS is not supported.
"""
bin_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
TOMATO_DIR,
"tomato",
"bin"
)
# find os
sys_os = cls._get_os()
# read configuration file
config_file = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
TOMATO_DIR,
"tomato",
"config",
"bin.cfg",
)
config = configparser.ConfigParser()
config.optionxform = str
config.read(config_file)
# Download binaries
for bin_name, bin_url in config.items(sys_os):
bin_path = os.path.join(bin_folder, bin_name)
cls._download_binary(bin_path, bin_url, sys_os)
@staticmethod
def _get_os():
process_out = (
subprocess.check_output(["uname"]).lower().decode("utf-8")
)
if any(ss in process_out for ss in ["darwin", "macosx"]):
sys_os = "macosx"
elif "linux" in process_out:
sys_os = "linux"
else:
raise OSError("Unsupported OS.")
return sys_os
@staticmethod
def _download_binary(fpath, bin_url, sys_os):
response = urlopen(bin_url)
if fpath.endswith(".zip"): # binary in zip
with zipfile.ZipFile(BytesIO(response.read())) as z:
z.extractall(os.path.dirname(fpath))
if sys_os == "macosx": # mac executables are in .app
fpath = os.path.splitext(fpath)[0] + ".app"
else: # remove the zip extension
fpath = os.path.splitext(fpath)[0]
else: # binary itself
with open(fpath, "wb") as fp:
fp.write(response.read())
# make the binary executable
subprocess.call(["chmod -R +x " + fpath], shell=True)
print("downloaded %s to %s" % (bin_url, fpath))
# download binaries in advance so they are detected as package data during
# installation
BinarySetup.setup()
setup(
name="tomato",
version=get_version(),
author="Sertan Senturk",
author_email="contact AT sertansenturk DOT com",
maintainer="Sertan Senturk",
maintainer_email="contact AT sertansenturk DOT com",
url="https://github.com/sertansenturk/tomato",
description="Turkish-Ottoman Makam (M)usic Analysis TOolbox",
long_description=get_long_description(),
long_description_content_type="text/markdown",
download_url=(
"https://github.com/sertansenturk/tomato.git"
if "dev" in get_version()
else "https://github.com/sertansenturk/tomato/releases/tag/"
"v{0:s}".format(get_version())
),
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: GNU Affero General Public License v3 or "
"later (AGPLv3+)",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
"Topic :: Scientific/Engineering :: Information Analysis",
],
platforms="Linux",
license="agpl 3.0",
keywords=(
"music-scores analysis tomato audio-recordings lilypond tonic "
"makam-music score music-information-retrieval "
"computational-analysis"
),
packages=find_packages(TOMATO_DIR),
package_dir={"": TOMATO_DIR},
include_package_data=True,
python_requires=">=3.5,<3.8",
install_requires=[
"numpy>=1.9.0", # numerical operations
"scipy>=0.17.0", # temporary mat file saving for MCR binary inputs
"pandas>=0.18.0,<=0.24.2", # tabular data processing
"matplotlib>=1.5.1,<=3.0.3", # plotting
"json_tricks>=3.12.1", # saving json files with classes and numpy
"eyeD3>=0.7.5,<=0.8.11", # reading metadata embedded in recordings
"python-Levenshtein>=0.12.0", # semiotic structure labeling
"networkx>=1.11", # semiotic structure labeling clique computation
"lxml>=3.6.0", # musicxml conversion
"musicbrainzngs>=0.6", # metadata crawling from musicbrainz
"essentia>=2.1b5;platform_system=='Linux'", # audio signal processing
],
extras_require={
"development": [
"black",
"flake8",
"pylint",
"pylint-fail-under",
"pytest",
"rope",
"tox"
],
"demo": ["jupyter"],
},
)
| agpl-3.0 | -656,132,530,625,944,800 | 31.134715 | 78 | 0.575298 | false |
onyedikilo/tacotron | prepro.py | 1 | 1722 | # -*- coding: utf-8 -*-
#!/usr/bin/python2
'''
By kyubyong park. [email protected].
https://www.github.com/kyubyong/tacotron
'''
import numpy as np
import librosa
from hyperparams import Hyperparams as hp
import glob
import re
import os
import csv
import codecs
def load_vocab():
vocab = "E abcdefghijklmnopqrstuvwxyz'" # E: Empty
char2idx = {char:idx for idx, char in enumerate(vocab)}
idx2char = {idx:char for idx, char in enumerate(vocab)}
return char2idx, idx2char
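# Illustration (derived from the vocab string above):
#   char2idx maps 'E' -> 0, ' ' -> 1, 'a' -> 2, ..., "'" -> 28,
#   and idx2char is the inverse mapping.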
def create_train_data():
# Load vocabulary
char2idx, idx2char = load_vocab()
texts, sound_files = [], []
reader = csv.reader(codecs.open(hp.text_file, 'rb', 'utf-8'))
for row in reader:
sound_fname, text, duration = row
sound_file = hp.sound_fpath + "/" + sound_fname + ".wav"
text = re.sub(r"[^ a-z']", "", text.strip().lower())
if (len(text) <= hp.max_len) and (1. < float(duration) <= hp.max_duration):
texts.append(np.array([char2idx[char] for char in text], np.int32).tostring())
sound_files.append(sound_file)
return texts, sound_files
def load_train_data():
"""We train on the whole data but the last mini-batch."""
texts, sound_files = create_train_data()
return texts[:-hp.batch_size], sound_files[:-hp.batch_size]
def load_eval_data():
"""We evaluate on the last mini-batch."""
texts, _ = create_train_data()
texts = texts[-hp.batch_size:]
X = np.zeros(shape=[hp.batch_size, hp.max_len], dtype=np.int32)
for i, text in enumerate(texts):
_text = np.fromstring(text, np.int32) # byte to int
X[i, :len(_text)] = _text
return X
| apache-2.0 | -1,971,481,082,590,631,700 | 28.689655 | 90 | 0.613821 | false |
hroyrh/svt | applications_scalability/websockets_perf/test_scripts/v_user.py | 1 | 2067 | from websocket import create_connection
from ConfigParser import SafeConfigParser
import ssl
import gevent
import time
import json
class Transaction(object):
def __init__(self, varfile='ose_vars.cfg'):
"""
Gets instantiated once only
"""
parser = SafeConfigParser()
parser.read(varfile)
self.ose_server = parser.get('wss', 'ose_server')
self.ose_project = parser.get('wss', 'ose_project')
self.ose_resver = parser.get('wss', 'ose_resver')
self.ose_token = parser.get('wss', 'ose_token')
self.custom_timers = {}
def run(self):
"""
Each thread runs this method independently
"""
url = 'wss://{}/api/v1/namespaces/{}/events?watch={}&resourceVersion={}&access_token={}'.format(self.ose_server,
self.ose_project,
'true',
self.ose_resver,
self.ose_token)
start = time.time()
# Ignore self signed certificates
ws = create_connection(url, sslopt={"cert_reqs": ssl.CERT_NONE})
self.ws = ws
def _receive():
while True:
res = ws.recv()
start_at = time.time()
data = json.loads(res)
print(res, data)
end_at = time.time()
                response_time = int((end_at - start_at))  # computed but currently unused
gevent.spawn(_receive)
def on_quit(self):
self.ws.close()
if __name__ == '__main__':
trans = Transaction()
trans.run()
| apache-2.0 | -8,251,251,100,229,687,000 | 34.637931 | 159 | 0.400581 | false |
ztane/jaspyx | jaspyx/visitor/function.py | 1 | 2365 | from __future__ import absolute_import, division, print_function
import ast
from jaspyx.ast_util import ast_call, ast_load
from jaspyx.context.function import FunctionContext
from jaspyx.visitor import BaseVisitor
from jaspyx.compat import get_arg_id
class Function(BaseVisitor):
def visit_FunctionDef(self, node):
if node.name:
self.stack[-1].scope.declare(node.name)
args = [get_arg_id(arg) for arg in node.args.args]
if node.args.kwarg is not None:
raise Exception('**kwargs not supported')
func = FunctionContext(self.stack[-1], args)
self.push(func)
# Emit vararg
if node.args.vararg is not None:
self.visit(
ast.Assign(
[ast.Name(node.args.vararg, ast.Store())],
ast_call(
ast_load('Array.prototype.slice.call'),
ast.Name('arguments', ast.Load()),
ast.Num(len(args)),
)
)
)
# Emit default arguments
def_args = node.args.defaults
for arg_name, arg_val in zip(args[-len(def_args):], def_args):
self.block([
ast.If(
ast.Compare(
ast_call(
ast.Name('type', ast.Load()),
ast.Name(arg_name, ast.Load()),
),
[ast.Eq(), ],
[ast.Str('undefined'), ],
),
[
ast.Assign(
[ast.Name(arg_name, ast.Store())],
arg_val
),
],
[],
)
])
# Emit function body
self.block(node.body)
body = ast_call(
ast_load('JS'),
ast.Str(str(self.stack.pop())),
)
for decorator in node.decorator_list:
body = ast_call(
decorator,
body
)
if not node.name:
self.visit(body)
else:
self.visit(
ast.Assign(
[ast_load(node.name)],
body,
)
)
| mit | -2,808,383,720,302,337,500 | 28.936709 | 70 | 0.421564 | false |
jacobian/valor | valor/link.py | 1 | 6044 | import re
import six
import json
import requests
from .model import model_factory
from .utils import is_ref, python_attr
PARAMETER_REGEX = re.compile(r'\{\([%\/a-zA-Z0-9_-]*\)\}')
class Link(object):
def __init__(self, schema, session, url, link_schema):
self._schema = schema
self._session = session
self._url = url
self._link = link_schema
self._name = python_attr(link_schema['title'])
def __call__(self, *args, **kwargs):
# Prepare a request object. We do this instead of using
# session.request() so that we can re-use the prepared request further
# down if the response is paginated.
request = requests.Request(
method = self._link['method'],
url = self.interpolate_args(args),
data = self.construct_body(kwargs)
)
request = self._session.prepare_request(request)
# FIXME: verify SSL - don't want to just to verify=True because that
# makes testing hard, but it should be true by default and overridable
# by passing in a different session. Not sure how to make that work
# though.
response = self._session.send(request)
# FIXME: are we 100% sure the response is always JSON?
response_body = response.json()
        # Handle 206 (partial content) by paginating.
# See https://devcenter.heroku.com/articles/platform-api-reference#ranges
if response.status_code == 206:
next_range = response.headers['Next-Range']
while next_range:
request.headers['range'] = next_range
response = self._session.send(request)
response_body.extend(response.json())
next_range = response.headers.get('Next-Range', None)
# FIXME: if-none-match???
elif response.status_code not in (200, 201, 202):
response.raise_for_status()
# targetSchema is the schema for the object(s) returned by the API call.
# It can either be an array, in which case the schema is actually
# link.targetSchema.items, or it can be a dict in which case the
# targetSchema itself is the schema.
model_schema = self._link['targetSchema']
if model_schema.get('type') == ['array']:
target_type = 'multi'
model_schema = model_schema['items']
else:
target_type = 'single'
# If the target schema was a ref, resolve it.
if is_ref(model_schema):
model_schema = self._schema.resolve_ref(model_schema['$ref'])
# If the target schema has patternProperties, the response is a plain
# old dict, so just return that. I'm not sure if this is the right way
# of handling this; we may want Model to understand patternProperties
# instead.
if 'patternProperties' in model_schema:
return response_body
# Create a Model subclass representing the expected return object.
# FIXME: this feels super jank for a name, but is there a better way?
name = model_schema['title'].split('-', 1)[-1]
name = re.sub(r'[^\w]', '', name)
        # Python 3 expects text class names; Python 2 expects bytes. No way to
        # work around it without version checking.
if six.PY2:
name = name.encode('ascii', 'ignore')
cls = model_factory(name, self._schema, model_schema)
if target_type == 'multi':
return [cls(**i) for i in response_body]
else:
return cls(**response_body)
def interpolate_args(self, args):
"""
Interpolate arguments into the link's URL.
"""
# This doesn't really validate the definition refs embedded in the URL
# patterns, but in practice that doesn't seem to matter much.
num_expected_args = len(PARAMETER_REGEX.findall(self._url))
if num_expected_args != len(args):
raise TypeError("%s() takes exactly %s arguments (%s given)" % (self._name, num_expected_args, len(args)))
# I can't figure out how to get the match number in a re.sub() callback,
# so sub one at a time. This feels inelegant, but I can't find a better
# option, so (shrug).
url = self._url
for i, arg in enumerate(args):
url = PARAMETER_REGEX.sub(format_path_parameter(arg), url, count=1)
return url
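    # Example (hypothetical link): a URL template such as
    #   /apps/{(%23%2Fdefinitions%2Fapp%2Fdefinitions%2Fidentity)}
    # called with args ('myapp',) interpolates to /apps/myapp.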
def construct_body(self, kwargs):
"""
Construct a request body based on given arguments.
"""
# This does do some light validation on the *keys* of the body params,
# but doesn't validate the contents of the body. I'm not sure if this
# will prove to matter in practice or not.
if 'schema' not in self._link:
if kwargs:
raise TypeError("%s() got unexpected keyword arguments: %s" % (self._name, kwargs.keys()))
return None
# If we've got patternProperties, then this API takes arbitrary params,
# so just punt on any sort of validation.
if 'patternProperties' in self._link['schema']:
return json.dumps(kwargs)
given_keys = set(kwargs.keys())
possible_keys = set(self._link['schema']['properties'].keys())
required_keys = set(self._link['schema'].get('required', []))
if required_keys - given_keys:
raise TypeError("%s() missing required arguments: %s")
if given_keys - possible_keys:
raise TypeError("%s() got unepected keyword arguments: %s" % (self._name, list(given_keys - possible_keys)))
# Is that really all?
return json.dumps(kwargs)
def format_path_parameter(val):
"""
Format a path paramater.
Basically: convert to string, with a special rule for datetime objects.
"""
if hasattr(val, 'identity'):
val = val.identity()
if hasattr(val, 'strftime'):
val = val.strftime('%Y-%m-%dT%H:%M:%SZ')
return six.text_type(val)
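# For example (inputs are illustrative):
#   format_path_parameter(datetime(2016, 1, 2)) -> '2016-01-02T00:00:00Z'
#   format_path_parameter(42) -> '42'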
| bsd-3-clause | 8,497,193,895,521,270,000 | 39.02649 | 120 | 0.604732 | false |
kevinpetersavage/BOUT-dev | examples/MMS/GBS/circle.py | 3 | 3721 | # Generates an input mesh for circular, large aspect-ratio
# simulations:
#
# o Constant magnetic field
# o Curvature output as a 3D logB variable
# o Z is poloidal direction
# o Y is parallel (toroidal)
#
# NOTE: This reverses the standard BOUT/BOUT++ convention
# so here Bt and Bp are reversed
#
from __future__ import division
from __future__ import print_function
from builtins import range
from numpy import zeros, ndarray, pi, cos, sin, outer, linspace,sqrt
from boututils import DataFile # Wrapper around NetCDF4 libraries
def generate(nx, ny,
R = 2.0, r=0.2, # Major & minor radius
dr=0.05, # Radial width of domain
Bt=1.0, # Toroidal magnetic field
q=5.0, # Safety factor
mxg=2,
file="circle.nc"
):
# q = rBt / RBp
Bp = r*Bt / (R*q)
# Minor radius as function of x. Choose so boundary
# is half-way between grid points
h = dr / (nx - 2.*mxg) # Grid spacing in r
rminor = linspace(r - 0.5*dr - (mxg-0.5)*h,
r + 0.5*dr + (mxg-0.5)*h,
nx)
# mesh spacing in x and y
dx = ndarray([nx,ny])
dx[:,:] = r*Bt*h # NOTE: dx is toroidal flux
dy = ndarray([nx,ny])
dy[:,:] = 2.*pi / ny
# LogB = log(1/(1+r/R cos(theta))) =(approx) -(r/R)*cos(theta)
logB = zeros([nx, ny, 3]) # (constant, n=1 real, n=1 imag)
# At y = 0, Rmaj = R + r*cos(theta)
logB[:,0,1] = -(rminor/R)
# Moving in y, phase shift by (toroidal angle) / q
for y in range(1,ny):
dtheta = y * 2.*pi / ny / q # Change in poloidal angle
logB[:,y,1] = -(rminor/R)*cos(dtheta)
logB[:,y,2] = -(rminor/R)*sin(dtheta)
# Shift angle from one end of y to the other
ShiftAngle = ndarray([nx])
ShiftAngle[:] = 2.*pi / q
Rxy = ndarray([nx,ny])
Rxy[:,:] = r # NOTE : opposite to standard BOUT convention
Btxy = ndarray([nx,ny])
Btxy[:,:] = Bp
Bpxy = ndarray([nx,ny])
Bpxy[:,:] = Bt
Bxy = ndarray([nx,ny])
Bxy[:,:] = sqrt(Bt**2 + Bp**2)
hthe = ndarray([nx,ny])
hthe[:,:] = R
print("Writing to file '"+file+"'")
f = DataFile()
f.open(file, create=True)
# Mesh size
f.write("nx", nx)
f.write("ny", ny)
# Mesh spacing
f.write("dx", dx)
f.write("dy", dy)
# Metric components
f.write("Rxy", Rxy)
f.write("Btxy", Btxy)
f.write("Bpxy", Bpxy)
f.write("Bxy", Bxy)
f.write("hthe", hthe)
# Shift
f.write("ShiftAngle", ShiftAngle);
# Curvature
f.write("logB", logB)
# Input parameters
f.write("R", R)
f.write("r", r)
f.write("dr", dr)
f.write("Bt", Bt)
f.write("q", q)
f.write("mxg", mxg)
f.close()
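# A minimal usage sketch (grid sizes and file name are illustrative):
#   generate(68, 64, R=2.0, r=0.2, dr=0.05, Bt=1.0, q=5.0, file="circle.nc")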
def coordinates(nx, ny, nz,
R = 2.0, r=0.2, # Major & minor radius
dr=0.05, # Radial width of domain
Bt=1.0, # Toroidal magnetic field
q=5.0, # Safety factor
mxg=2
):
"""
Returns coordinates (R,Z) as a pair of arrays
"""
h = dr / (nx - 2.*mxg) # Grid spacing in r
rminor = linspace(r - 0.5*dr - (mxg-0.5)*h,
r + 0.5*dr + (mxg-0.5)*h,
nx)
print("Grid spacing: Lx = %e, Lz = %e" % (h, 2.*pi*r/nz))
Rxyz = ndarray([nx, ny, nz])
Zxyz = ndarray([nx, ny, nz])
for y in range(0,ny):
dtheta = y * 2.*pi / ny / q # Change in poloidal angle
theta = linspace(0,2.*pi, nz, endpoint=False) + dtheta
Rxyz[:,y,:] = R + outer(rminor, cos(theta))
Zxyz[:,y,:] = outer(rminor, sin(theta))
return Rxyz, Zxyz
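# Example (grid sizes are illustrative):
#   Rxyz, Zxyz = coordinates(68, 16, 64)
# returns the major-radius and height of every grid point, e.g. for plotting.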
| gpl-3.0 | -8,111,180,562,648,416,000 | 24.312925 | 68 | 0.515453 | false |
gurneyalex/odoo | addons/l10n_it_edi/models/ir_mail_server.py | 4 | 18207 | # -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import zipfile
import io
import re
import logging
import email
import dateutil
import pytz
import base64
try:
from xmlrpc import client as xmlrpclib
except ImportError:
import xmlrpclib
from lxml import etree
from datetime import datetime
from odoo import api, fields, models, tools, _
from odoo.exceptions import ValidationError, UserError
_logger = logging.getLogger(__name__)
class FetchmailServer(models.Model):
_name = 'fetchmail.server'
_inherit = 'fetchmail.server'
l10n_it_is_pec = fields.Boolean('PEC server', help="If PEC Server, only mail from '[email protected]' will be processed.")
l10n_it_last_uid = fields.Integer(string='Last message UID', default=1)
@api.constrains('l10n_it_is_pec', 'server_type')
def _check_pec(self):
for record in self:
if record.l10n_it_is_pec and record.server_type != 'imap':
raise ValidationError(_("PEC mail server must be of type IMAP."))
def fetch_mail(self):
""" WARNING: meant for cron usage only - will commit() after each email! """
MailThread = self.env['mail.thread']
for server in self.filtered(lambda s: s.l10n_it_is_pec):
_logger.info('start checking for new emails on %s PEC server %s', server.server_type, server.name)
count, failed = 0, 0
imap_server = None
try:
imap_server = server.connect()
imap_server.select()
result, data = imap_server.uid('search', None, '(FROM "@pec.fatturapa.it")', '(UID %s:*)' % (server.l10n_it_last_uid))
new_max_uid = server.l10n_it_last_uid
for uid in data[0].split():
if int(uid) <= server.l10n_it_last_uid:
                            # We always get at least 1 message. If there is nothing new, we receive the newest already-processed one.
continue
result, data = imap_server.uid('fetch', uid, '(RFC822)')
if not data[0]:
continue
message = data[0][1]
# To leave the mail in the state in which they were.
if "Seen" not in data[1].decode("utf-8"):
imap_server.uid('STORE', uid, '+FLAGS', '\\Seen')
else:
imap_server.uid('STORE', uid, '-FLAGS', '\\Seen')
# See details in message_process() in mail_thread.py
if isinstance(message, xmlrpclib.Binary):
message = bytes(message.data)
if isinstance(message, str):
message = message.encode('utf-8')
msg_txt = email.message_from_bytes(message)
try:
self._attachment_invoice(msg_txt)
new_max_uid = max(new_max_uid, int(uid))
except Exception:
_logger.info('Failed to process mail from %s server %s.', server.server_type, server.name, exc_info=True)
failed += 1
self._cr.commit()
count += 1
server.write({'l10n_it_last_uid': new_max_uid})
_logger.info("Fetched %d email(s) on %s server %s; %d succeeded, %d failed.", count, server.server_type, server.name, (count - failed), failed)
except Exception:
_logger.info("General failure when trying to fetch mail from %s server %s.", server.server_type, server.name, exc_info=True)
finally:
if imap_server:
imap_server.close()
imap_server.logout()
server.write({'date': fields.Datetime.now()})
return super(FetchmailServer, self.filtered(lambda s: not s.l10n_it_is_pec)).fetch_mail()
def _attachment_invoice(self, msg_txt):
parsed_values = self.env['mail.thread']._message_parse_extract_payload(msg_txt)
body, attachments = parsed_values['body'], parsed_values['attachments']
from_address = tools.decode_smtp_header(msg_txt.get('from'))
for attachment in attachments:
split_attachment = attachment.fname.rpartition('.')
if len(split_attachment) < 3:
_logger.info('E-invoice filename not compliant: %s', attachment.fname)
continue
attachment_name = split_attachment[0]
attachment_ext = split_attachment[2]
split_underscore = attachment_name.rsplit('_', 2)
if len(split_underscore) < 2:
_logger.info('E-invoice filename not compliant: %s', attachment.fname)
continue
if attachment_ext != 'zip':
if split_underscore[1] in ['RC', 'NS', 'MC', 'MT', 'EC', 'SE', 'NE', 'DT']:
# we have a receipt
self._message_receipt_invoice(split_underscore[1], attachment)
elif re.search("([A-Z]{2}[A-Za-z0-9]{2,28}_[A-Za-z0-9]{0,5}.(xml.p7m|xml))", attachment.fname):
# we have a new E-invoice
self._create_invoice_from_mail(attachment.content, attachment.fname, from_address)
else:
if split_underscore[1] == 'AT':
                    # "Attestazione di avvenuta trasmissione della fattura con impossibilità di recapito": certification that the invoice was transmitted but could not be delivered to the addressee
self._message_AT_invoice(attachment)
else:
_logger.info('New E-invoice in zip file: %s', attachment.fname)
self._create_invoice_from_mail_with_zip(attachment, from_address)
def _create_invoice_from_mail(self, att_content, att_name, from_address):
if self.env['account.move'].search([('l10n_it_einvoice_name', '=', att_name)], limit=1):
# invoice already exist
_logger.info('E-invoice already exist: %s', att_name)
return
invoice_attachment = self.env['ir.attachment'].create({
'name': att_name,
'datas': base64.encodestring(att_content),
'type': 'binary',
})
try:
tree = etree.fromstring(att_content)
except Exception:
            raise UserError(_('The xml file is badly formatted: {}').format(att_name))
invoice = self.env['account.move']._import_xml_invoice(tree)
invoice.l10n_it_send_state = "new"
invoice.source_email = from_address
self._cr.commit()
_logger.info('New E-invoice: %s', att_name)
def _create_invoice_from_mail_with_zip(self, attachment_zip, from_address):
with zipfile.ZipFile(io.BytesIO(attachment_zip.content)) as z:
for att_name in z.namelist():
if self.env['account.move'].search([('l10n_it_einvoice_name', '=', att_name)], limit=1):
# invoice already exist
_logger.info('E-invoice in zip file (%s) already exist: %s', attachment_zip.fname, att_name)
continue
att_content = z.open(att_name).read()
self._create_invoice_from_mail(att_content, att_name, from_address)
def _message_AT_invoice(self, attachment_zip):
with zipfile.ZipFile(io.BytesIO(attachment_zip.content)) as z:
for attachment_name in z.namelist():
split_name_attachment = attachment_name.rpartition('.')
if len(split_name_attachment) < 3:
continue
split_underscore = split_name_attachment[0].rsplit('_', 2)
if len(split_underscore) < 2:
continue
if split_underscore[1] == 'AT':
attachment = z.open(attachment_name).read()
_logger.info('New AT receipt for: %s', split_underscore[0])
try:
tree = etree.fromstring(attachment)
except:
_logger.info('Error in decoding new receipt file: %s', attachment_name)
return
elements = tree.xpath('//NomeFile')
if elements and elements[0].text:
filename = elements[0].text
else:
return
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename)])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', filename)
return
related_invoice.l10n_it_send_state = 'failed_delivery'
info = self._return_multi_line_xml(tree, ['//IdentificativoSdI', '//DataOraRicezione', '//MessageId', '//PecMessageId', '//Note'])
related_invoice.message_post(
body=(_("ES certify that it has received the invoice and that the file \
could not be delivered to the addressee. <br/>%s") % (info))
)
def _message_receipt_invoice(self, receipt_type, attachment):
try:
tree = etree.fromstring(attachment.content)
except:
_logger.info('Error in decoding new receipt file: %s', attachment.fname)
return {}
elements = tree.xpath('//NomeFile')
if elements and elements[0].text:
filename = elements[0].text
else:
return {}
if receipt_type == 'RC':
# Delivery receipt
# This is the receipt sent by the ES to the transmitting subject to communicate
# delivery of the file to the addressee
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'sent')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
related_invoice.l10n_it_send_state = 'delivered'
info = self._return_multi_line_xml(tree, ['//IdentificativoSdI', '//DataOraRicezione', '//DataOraConsegna', '//Note'])
related_invoice.message_post(
body=(_("E-Invoice is delivery to the destinatory:<br/>%s") % (info))
)
elif receipt_type == 'NS':
# Rejection notice
# This is the receipt sent by the ES to the transmitting subject if one or more of
# the checks carried out by the ES on the file received do not have a successful result.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'sent')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
related_invoice.l10n_it_send_state = 'invalid'
error = self._return_error_xml(tree)
related_invoice.message_post(
body=(_("Errors in the E-Invoice :<br/>%s") % (error))
)
activity_vals = {
'activity_type_id': self.env.ref('mail.mail_activity_data_todo').id,
'invoice_user_id': related_invoice.invoice_user_id.id if related_invoice.invoice_user_id else self.env.user.id
}
related_invoice.activity_schedule(summary='Rejection notice', **activity_vals)
elif receipt_type == 'MC':
# Failed delivery notice
# This is the receipt sent by the ES to the transmitting subject if the file is not
# delivered to the addressee.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'sent')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
info = self._return_multi_line_xml(tree, [
'//IdentificativoSdI',
'//DataOraRicezione',
'//Descrizione',
'//MessageId',
'//Note'])
related_invoice.message_post(
body=(_("The E-invoice is not delivered to the addressee. The Exchange System is\
unable to deliver the file to the Public Administration. The Exchange System will\
contact the PA to report the problem and request that they provide a solution. \
During the following 15 days, the Exchange System will try to forward the FatturaPA\
                    file to the Administration in question again. More information:<br/>%s") % (info))
)
elif receipt_type == 'NE':
# Outcome notice
# This is the receipt sent by the ES to the invoice sender to communicate the result
# (acceptance or refusal of the invoice) of the checks carried out on the document by
# the addressee.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename),
('l10n_it_send_state', '=', 'delivered')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
elements = tree.xpath('//Esito')
if elements and elements[0].text:
if elements[0].text == 'EC01':
related_invoice.l10n_it_send_state = 'delivered_accepted'
elif elements[0].text == 'EC02':
related_invoice.l10n_it_send_state = 'delivered_refused'
info = self._return_multi_line_xml(tree,
['//Esito',
'//Descrizione',
'//IdentificativoSdI',
'//DataOraRicezione',
'//DataOraConsegna',
'//Note'
])
related_invoice.message_post(
body=(_("Outcome notice: %s<br/>%s") % (related_invoice.l10n_it_send_state, info))
)
if related_invoice.l10n_it_send_state == 'delivered_refused':
activity_vals = {
'activity_type_id': self.env.ref('mail.mail_activity_data_todo').id,
'invoice_user_id': related_invoice.invoice_user_id.id if related_invoice.invoice_user_id else self.env.user.id
}
related_invoice.activity_schedule(summary='Outcome notice: Refused', **activity_vals)
# elif receipt_type == 'MT':
# Metadata file
# This is the file sent by the ES to the addressee together with the invoice file,
# containing the main reference data of the file useful for processing, including
# the IdentificativoSDI.
# Useless for Odoo
elif receipt_type == 'DT':
# Deadline passed notice
# This is the receipt sent by the ES to both the invoice sender and the invoice
# addressee to communicate the expiry of the maximum term for communication of
# acceptance/refusal.
related_invoice = self.env['account.move'].search([
('l10n_it_einvoice_name', '=', filename), ('l10n_it_send_state', '=', 'delivered')])
if not related_invoice:
_logger.info('Error: invoice not found for receipt file: %s', attachment.fname)
return
related_invoice.l10n_it_send_state = 'delivered_expired'
info = self._return_multi_line_xml(tree, [
'//Descrizione',
'//IdentificativoSdI',
'//Note'])
related_invoice.message_post(
body=(_("Expiration of the maximum term for communication of acceptance/refusal:\
%s<br/>%s") % (filename, info))
)
def _return_multi_line_xml(self, tree, element_tags):
output_str = "<ul>"
for element_tag in element_tags:
elements = tree.xpath(element_tag)
if not elements:
continue
for element in elements:
if element.text:
text = " ".join(element.text.split())
output_str += "<li>%s: %s</li>" % (element.tag, text)
return output_str + "</ul>"
def _return_error_xml(self, tree):
output_str = "<ul>"
elements = tree.xpath('//Errore')
if not elements:
return
for element in elements:
descrizione = " ".join(element[1].text.split())
if descrizione:
output_str += "<li>Errore %s: %s</li>" % (element[0].text, descrizione)
return output_str + "</ul>"
class IrMailServer(models.Model):
_name = "ir.mail_server"
_inherit = "ir.mail_server"
def build_email(self, email_from, email_to, subject, body, email_cc=None, email_bcc=None, reply_to=False,
attachments=None, message_id=None, references=None, object_id=False, subtype='plain', headers=None,
body_alternative=None, subtype_alternative='plain'):
if self.env.context.get('wo_bounce_return_path') and headers:
headers['Return-Path'] = email_from
return super(IrMailServer, self).build_email(email_from, email_to, subject, body, email_cc=email_cc, email_bcc=email_bcc, reply_to=reply_to,
attachments=attachments, message_id=message_id, references=references, object_id=object_id, subtype=subtype, headers=headers,
body_alternative=body_alternative, subtype_alternative=subtype_alternative)
| agpl-3.0 | -5,329,390,525,778,321,000 | 47.420213 | 159 | 0.543502 | false |
viktorTarasov/PyKMIP | docs/conf.py | 1 | 11284 | # -*- coding: utf-8 -*-
#
# PyKMIP documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 25 17:12:29 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyKMIP'
copyright = u'2016, JHUAPL'
author = u'JHUAPL'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.4'
# The full version, including alpha/beta/rc tags.
release = u'0.4.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyKMIPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyKMIP.tex', u'PyKMIP Documentation',
u'JHUAPL', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pykmip', u'PyKMIP Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyKMIP', u'PyKMIP Documentation',
author, 'PyKMIP', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| apache-2.0 | -7,127,595,137,265,836,000 | 30.431755 | 79 | 0.705601 | false |
MaxInGaussian/GomPlex | applications/starter/train.py | 1 | 4448 | import numpy as np
import numpy.random as npr
import pandas as pd
import gc
import os
from sys import path
path.append("../../")
from GomPlex import *
print('Loading data ...')
p, t = 0.1, 0.3
iter_tol = 30
ratio = 0.3
cv_folds = 3
score_rerun = 20
model_path = 'best.pkl'
plot_error = False
metric = Metric('mae')
train = pd.read_csv('train2016.csv')
prop = pd.read_csv('prop2016.csv')
print('Preprocessing data ...')
missing = (prop.isnull().sum(axis=0)/prop.shape[0]).reset_index()
missing.columns = ['column', 'missing_p']
drop_cols = missing.ix[missing['missing_p']>0.8]['column'].values.tolist()
cnt_cols = [col for col in prop.columns if 'cnt' in col]
prop[cnt_cols] = prop[cnt_cols].fillna(value=0)
yr_cols = [col for col in prop.columns if 'year' in col]
filter_cols = []
for col in cnt_cols+yr_cols:
if(np.unique(prop[col]).shape[0]<50):
filter_cols.append(col)
prop[col] = prop[col].astype('category')
else:
prop[col] -= prop[col].min()
prop[yr_cols] = prop[yr_cols].fillna(value=0)
df_data = pd.get_dummies(prop[filter_cols])
prop['taxdelinquencyflag'] = pd.get_dummies(prop['taxdelinquencyflag'])['Y']
cat_cols = [col for col in prop.columns if 'id' in col]
cat_cols.remove('parcelid')
prop[cat_cols] = prop[cat_cols].fillna(value=0)
filter_cat_cols = []
for col in cat_cols:
if(np.unique(prop[col]).shape[0]<50):
filter_cat_cols.append(col)
prop[col] = prop[col].astype('category')
cat_data = pd.get_dummies(prop[filter_cat_cols])
df_data = pd.concat([df_data, cat_data], axis=1)
num_cols = [col for col in prop.columns if col not in cat_cols+filter_cols]
df_data = pd.concat([df_data, prop[num_cols]], axis=1)
dates = pd.to_datetime(pd.Series(train['transactiondate'].tolist()))
df_data['date'] = ((dates-dates.min()).astype('timedelta64[D]').astype(int))
df_data = df_data.fillna(value=0)
print('Generating training data ...')
df_train = train.merge(df_data, how='left', on='parcelid')
y_train = df_train['logerror'].values
y_train = np.sign(y_train)*(np.abs(y_train)**p)
y_train_r = np.maximum(0, y_train)
y_train_i = np.maximum(0, -y_train)
y_train = (y_train_r+1j*y_train_i)[:, None]
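# Note (added): the signed power transform above compresses heavy-tailed
# log-errors, and the two rectified halves are packed into a single complex
# target (real part = positive errors, imaginary part = negative errors) so
# the complex-valued GP models both signs jointly; predictions are unpacked
# below as y_test.real - y_test.imag.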
X_train = df_train.drop(['parcelid', 'logerror', 'transactiondate', 'propertyzoningdesc', 'propertycountylandusecode', 'censustractandblock',
'rawcensustractandblock'], axis=1)
columns = X_train.columns
print(X_train.shape, y_train.shape)
print("Start training ...")
split = int(X_train.shape[0]*(1-t))
X_train, y_train, X_valid, y_valid =\
X_train[:split], y_train[:split], X_train[split:], y_train[split:]
X_train = X_train.as_matrix()
X_valid = X_valid.as_matrix()
print("Generating testing data ...")
sample = pd.read_csv('sample.csv')
sample['parcelid'] = sample['ParcelId']
df_test = sample.merge(df_data, on='parcelid', how='left')
X_test = df_test[columns]
X_test = X_test.as_matrix()
result = pd.read_csv('sample.csv')
del df_train, df_test, df_data; gc.collect()
print(' Gathered %d Training Examples.'%(X_train.shape[0]))
print(' Gathered %d Testing Examples.'%(X_test.shape[0]))
print(' Done.')
while True:
print('# Training GomPlex')
gp = GomPlex(npr.randint(int(np.log(X_train.shape[0]))*3)+8, True)
gp.fit(X_train, y_train, cost_type=metric.metric,
iter_tol=iter_tol, cv_folds=cv_folds, plot=plot_error)
print(' Done.')
print('# Choosing GomPlex Models')
score = metric.eval(y_valid, *gp.predict(X_valid))
print(' new score = %.3f'%(score))
if(not os.path.exists(model_path)):
gp.save(model_path)
else:
best_gp = GomPlex().load(model_path).fit(X_train, y_train)
best_score = metric.eval(y_valid, *best_gp.predict(X_valid))
print(' best score = %.3f'%(best_score))
if(score > best_score):
gp.save(model_path)
backup_path = 'save_models/%s_%.6f.pkl'%(
best_gp.hashed_name, best_score)
best_gp.save(backup_path)
print(' Found New Model!')
print("Start prediction ...")
test_dates = [288, 319, 349, 653, 684, 714]
for i, test_date in enumerate(test_dates):
X_test[:, -1] = test_date
y_test = gp.predict(X_test)[0].ravel()
result[result.columns[i+1]] = y_test.real-y_test.imag
print("Start write result ...")
result.to_csv(gp.hashed_name+'.csv', index=False, float_format='%.6f')
| bsd-3-clause | 1,739,478,253,687,472,000 | 31 | 141 | 0.63152 | false |
genome/flow-workflow | flow_workflow/parallel_id.py | 1 | 1987 | from collections import OrderedDict
import json
import logging
LOG = logging.getLogger(__name__)
class ParallelIdentifier(object):
def __init__(self, parallel_id=[]):
self._entries = OrderedDict([(int(op_id), int(par_idx))
for op_id, par_idx in parallel_id])
@property
def index(self):
if self._entries:
return self._entries.values()[-1]
def refers_to(self, operation):
return int(operation.operation_id) in self._entries
@property
def _parent_entries(self):
parent_entries = OrderedDict(self._entries)
parent_entries.popitem()
return parent_entries
@property
def parent_identifier(self):
return ParallelIdentifier(self._parent_entries.iteritems())
def _child_entries(self, operation_id, parallel_idx):
if int(operation_id) in self._entries:
raise ValueError('operation_id already in ParallelIdentifier '
'op_id (%r) in %r' % (operation_id, self._entries))
child_entries = OrderedDict(self._entries)
child_entries[int(operation_id)] = int(parallel_idx)
return child_entries
def child_identifier(self, operation_id, parallel_idx):
return ParallelIdentifier(self._child_entries(
operation_id, parallel_idx).iteritems())
@property
def stack_iterator(self):
current_id = self
while len(current_id):
yield current_id
current_id = current_id.parent_identifier
yield current_id
def __iter__(self):
return self._entries.iteritems()
def __len__(self):
return len(self._entries)
def __repr__(self):
return 'ParallelIdentifier(%r)' % list(self)
def __cmp__(self, other):
return cmp(self._entries, other._entries)
def serialize(self):
return json.dumps(list(self))
@classmethod
def deserialize(cls, data='[]'):
return cls(json.loads(data))
| agpl-3.0 | -4,437,954,520,125,725,700 | 26.985915 | 74 | 0.618017 | false |
garaud/puppetmaster | host.py | 1 | 17187 | # Copyright (C) 2010 INRIA - EDF R&D
# Authors: Damien Garaud
#
# This file is part of the PuppetMaster project. It provides facilities to
# deal with computations over a Linux network.
#
# This script is free; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
"""
Provides the class ``Host``, designed to manage a remote host.
author Damien Garaud
"""
import os
import sys
import commands
import socket
import popen2
import subprocess
########
# HOST #
########
## It may be written in a temporary SSH configuration file.
__sshconfig__ = """
Host *
ForwardAgent yes
HashKnownHosts yes
ForwardX11Trusted yes
StrictHostKeyChecking no
NoHostAuthenticationForLocalhost yes
"""
class Host:
"""Dedicated to host management."""
def __init__(self, host = socket.gethostname(),
forced_ssh_config = False):
"""The constructor.
Initializes the attributes and checks the SSH connection to the host.
``host`` The name of the host.
        ``forced_ssh_config`` Whether to use the PuppetMaster SSH
        configuration (True or False). See the variable ``__sshconfig__``.
"""
## Name of the host.
self.name = ""
## The number of processors.
self.Nprocessor = 0
## The total memory.
self.total_memory = 0
## Connection failed?
self.connection = False
## The default SSH command.
self.ssh = "ssh "
if forced_ssh_config:
if not os.path.isfile('/tmp/ssh-config-puppet'):
ssh_file = open('/tmp/ssh-config-puppet', 'w')
ssh_file.writelines(__sshconfig__)
ssh_file.close()
self.ssh = "ssh -F /tmp/ssh-config-puppet "
# Checks type argument.
self.CheckArgument(host)
# Checks SSH connection. Changed the SSH command if necessary.
try:
self.CheckSSH()
except SystemError:
if not os.path.isfile('/tmp/ssh-config-puppet'):
ssh_file = open('/tmp/ssh-config-puppet', 'w')
ssh_file.writelines(__sshconfig__)
ssh_file.close()
self.ssh = "ssh -F /tmp/ssh-config-puppet "
try:
self.CheckSSH()
except SystemError:
self.connection = False
if self.connection:
# Gets the number of processors.
self.GetProcessorNumber()
# Get the total memory.
self.GetTotalMemory()
def __del__(self):
"""The destructor.
Deletes the temporary SSH configuration file.
"""
try:
if os.path.isfile('/tmp/ssh-config-puppet'):
os.remove('/tmp/ssh-config-puppet')
except:
pass
def CheckArgument(self, host):
"""
Checks the argument::
host # The name of the host.
"""
# Argument is a string.
if isinstance(host, str):
self.name = host
# Argument is a tuple or a list (hostname, Nprocessor).
elif isinstance(host, tuple) or isinstance(host, list):
self.CheckArgumentTupleList(host)
else:
raise ValueError, "The argument must be the host name (str)" \
+ ", a tuple (hostname, Ncpu) or a list [hostname, Ncpu]."
# If the host name is empty.
if len(self.name) == 0:
raise ValueError, "The name of host is empty."
def CheckArgumentTupleList(self, host):
"""Checks the length and the content of the tuple/list ``host``.
"""
# Length of tuple or list must be 2.
if len(host) != 2:
raise ValueError, "The length of the tuple/list must be 2."
# You must have ('hostname', CPU_number).
if isinstance(host[0], str):
self.name = host[0]
else:
raise ValueError, "The first element must be " \
+ "the host name (str)."
# Number of cores must be a positive integer.
if not isinstance(host[-1], int) or host[-1] < 0:
raise ValueError, "The number of processors must be " \
+ "an integer strictly positive."
else:
self.Nprocessor = host[-1]
def CheckSSH(self):
"""Checks the SSH connection.
        Launches a simple command via SSH ('pwd'). The command must
        return no error and produce a single line of output.
"""
if self.name == socket.gethostname():
self.connection = True
else:
# SSH 'pwd' test.
command_name = self.ssh + self.name + " pwd 2> /dev/null"
status, out = commands.getstatusoutput(command_name)
if status != 0 or len(out) == 0:
self.connection = False
raise SystemError
else:
self.connection = True
def GetProcessorNumber(self):
"""Returns the number of processors.
@return An integer.
"""
if self.Nprocessor != 0:
return self.Nprocessor
else:
command_name = " cat /proc/cpuinfo | grep ^processor | wc -l"
# If the host is the localhost.
if self.name == socket.gethostname():
status, out = commands.getstatusoutput(command_name)
                # The command must return a zero status and a single
                # line.
if status != 0 or len(out.split('\n')) != 1:
print("The command '%s' returns the status '%i'" %
(command_name, status))
print("with the message:")
print("%s" % out)
sys.exit(0)
self.Nprocessor = int(out)
else:
try:
self.CheckSSH()
except SystemError:
pass
if self.connection:
command_name = self.ssh + self.name + " 2>/dev/null"\
+ command_name
status, out = commands.getstatusoutput(command_name)
self.Nprocessor = int(out)
return self.Nprocessor
def GetTotalMemory(self):
"""Returns the total memory (kB by default).
@return A integer.
"""
if self.total_memory != 0:
return self.total_memory
else:
command_name = " cat /proc/meminfo | grep ^MemTotal " \
+ "| cut -d : -f 2"
# If the host is the localhost.
if self.name == socket.gethostname():
status, out = commands.getstatusoutput(command_name)
                # The command must return a zero status and a single
                # line.
if status != 0 or len(out.split('\n')) != 1:
print("The command '%s' returns the status '%i'" %
(command_name, status))
print("with the message:")
print("%s" % out)
sys.exit(0)
self.total_memory = int(out.split()[0])
return self.total_memory
else:
try:
self.CheckSSH()
except SystemError:
pass
if self.connection:
command_name = self.ssh + self.name + " 2>/dev/null" \
+ command_name
status, out = commands.getstatusoutput(command_name)
self.total_memory = int(out.split()[0])
return self.total_memory
def GetUptime(self):
"""Returns the system load averages for the past (1, 5 and 15
minutes).
@return A list of floats or a string if the connection failed.
"""
command_name = " uptime"
# If the host is the localhost.
if self.name == socket.gethostname():
status, out = commands.getstatusoutput(command_name)
            # The command must return a zero status and a single
            # line.
if status != 0 or len(out.split('\n')) != 1:
print("The command '%s' returns the status '%i'" %
(command_name, status))
print("with the message:")
print("%s" % out)
sys.exit(0)
try:
out = out.split()
out = [float(x.strip(",")) for x in out[-3:]]
except:
# Connection failed?
out = "off"
return out
else:
try:
self.CheckSSH()
except SystemError:
return "off"
if self.connection:
command_name = self.ssh + self.name + " 2>/dev/null" \
+ command_name
status, out = commands.getstatusoutput(command_name)
try:
out = out.split()
out = [float(x.strip(",")) for x in out[-3:]]
except:
# Connection failed?
out = "off"
return out
def GetUsedMemory(self):
"""Returns the used memory (kB by default).
@return An integer or a string if the connection failed.
"""
command_name = " free | cut -d : -f 2"
# If the host is the localhost.
if self.name == socket.gethostname():
status, out = commands.getstatusoutput(command_name)
            # The command must return a zero status.
if status != 0:
print("The command '%s' returns the status '%i'" %
(command_name, status))
print("with the message:")
print("%s" % out)
sys.exit(0)
try:
out = out.split('\n')[2]
out = int(out.split()[0])
except:
# Connection failed?
out = "off"
return out
else:
try:
self.CheckSSH()
except SystemError:
return "off"
if self.connection:
command_name = self.ssh + self.name + " 2>/dev/null" \
+ command_name
status, out = commands.getstatusoutput(command_name)
try:
out = out.split('\n')[2]
out = int(out.split()[0])
except:
# Connection failed?
out = "off"
return out
def LaunchInt(self, command):
"""Launches a command in interactive mode (using os.system).
\param command The name of the command.
@return The status of the command.
"""
if self.name == socket.gethostname():
return os.system(command)
else:
return os.system(self.ssh + self.name + ' ' + command)
def LaunchFG(self, command):
"""Launches a command in the foreground.
\param command The name of the command.
@return The output and the status of the command in a tuple.
"""
if self.name == socket.gethostname():
return commands.getstatusoutput(command)
else:
return commands.getstatusoutput(self.ssh + self.name
+ ' ' + command)
def LaunchBG(self, command):
"""Launches a command in the background and returns a Popen4 object.
\param command The name of the command.
@return A Popen4 object.
"""
# Output is redirected.
command = "( " + command + "; ) &> /dev/null"
if self.name == socket.gethostname():
return popen2.Popen4(command)
else:
return popen2.Popen4(self.ssh + self.name +
" \"" + command + "\"")
def LaunchSubProcess(self, command, out_option = None):
"""Launches a command in the background with the module 'subprocess'.
        The standard output and error can be retrieved with the
        'subprocess.Popen.communicate()' method once the process has terminated.
\param command The name of the command.
        \param out_option A string.
         - None: writes the standard output to '/dev/null'.
         - 'pipe': writes the standard output to 'subprocess.PIPE'.
         - 'file': writes the standard output to a temporary file such as
           '/tmp/puppet-hostname-XXXXXX'.
@return A 'subprocess.Popen' instance or (file object,
subprocess.Popen) when the 'out_option' is set to 'file'.
"""
import tempfile
# Checks 'out_option'.
if out_option not in [None, 'pipe', 'file']:
out_option = None
# If host is the local host.
if self.name == socket.gethostname():
if out_option == 'pipe':
return subprocess.Popen([command], shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
elif out_option == 'file':
filename = tempfile.mkstemp(prefix = 'puppet-'
+ self.name + '-')[1]
outfile = open(filename, 'w+')
return outfile, subprocess.Popen([command], shell = True,
stdout = outfile,
stderr = subprocess.STDOUT)
else:
return subprocess.Popen([command], shell = True,
stdout = os.open(os.devnull,
os.O_RDWR),
stderr = subprocess.PIPE)
else:
if out_option == 'pipe':
return subprocess.Popen([self.ssh + self.name
+ ' ' + command],
shell = True,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE)
elif out_option == 'file':
filename = tempfile.mkstemp(prefix = 'puppet-'
+ self.name + '-')[1]
outfile = open(filename, 'w+')
return outfile, subprocess.Popen([self.ssh + self.name
+ ' ' + command],
shell = True,
stdout = outfile,
stderr = subprocess.STDOUT)
else:
return subprocess.Popen([self.ssh + self.name
+ ' ' + command],
shell = True,
stdout = os.open(os.devnull,
os.O_RDWR),
stderr = subprocess.PIPE)
def LaunchWait(self, command, ltime, wait = 0.1):
"""Launches a command in the background and waits for its output for a
given time after which the process is killed.
\param command The name of the command.
\param ltime The limit time.
\param wait The waiting time.
@return The output and the status of the command in a tuple.
"""
import time
# Launches a subprocess.
if self.name == socket.gethostname():
subproc = subprocess.Popen([command], shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
else:
subproc = subprocess.Popen([self.ssh + self.name + ' ' + command],
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Limit time and waiting time.
current_time = time.time()
        while subproc.poll() is None and time.time() - current_time < ltime:
time.sleep(wait)
# If the process is not done, try to kill it.
        if subproc.poll() is None:
status = 1
try:
os.kill(subproc.pid, 9)
except:
pass
else:
status = subproc.poll()
# Returns the output.
if status == 0:
return (status, subproc.communicate()[0])
else:
std_output = subproc.communicate()
return (status, std_output[0] + std_output[1])
| gpl-2.0 | -1,263,255,637,777,537,300 | 36.526201 | 78 | 0.493396 | false |
jaredhoney/pyrad | pyrad/client.py | 1 | 6822 | # client.py
#
# Copyright 2002-2007 Wichert Akkerman <[email protected]>
__docformat__ = "epytext en"
import select
import socket
import time
import six
from pyrad import host
from pyrad import packet
class Timeout(Exception):
"""Simple exception class which is raised when a timeout occurs
while waiting for a RADIUS server to respond."""
class Client(host.Host):
"""Basic RADIUS client.
This class implements a basic RADIUS client. It can send requests
to a RADIUS server, taking care of timeouts and retries, and
validate its replies.
:ivar retries: number of times to retry sending a RADIUS request
:type retries: integer
:ivar timeout: number of seconds to wait for an answer
:type timeout: integer
"""
def __init__(self, server, authport=1812, acctport=1813,
coaport=3799, discport=1700, secret=six.b(''), dict=None):
"""Constructor.
:param server: hostname or IP address of RADIUS server
:type server: string
:param authport: port to use for authentication packets
:type authport: integer
:param acctport: port to use for accounting packets
:type acctport: integer
:param coaport: port to use for CoA packets
:type coaport: integer
        :param discport: port to use for disconnect packets
        :type discport: integer
:param secret: RADIUS secret
:type secret: string
:param dict: RADIUS dictionary
:type dict: pyrad.dictionary.Dictionary
"""
host.Host.__init__(self, authport, acctport, coaport, discport, dict)
self.server = server
self.secret = secret
self._socket = None
self.retries = 3
self.timeout = 5
def bind(self, addr):
"""Bind socket to an address.
Binding the socket used for communicating to an address can be
        useful when working on a machine with multiple addresses.
:param addr: network address (hostname or IP) and port to bind to
:type addr: host,port tuple
"""
self._CloseSocket()
self._SocketOpen()
self._socket.bind(addr)
def _SocketOpen(self):
if not self._socket:
self._socket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
def _CloseSocket(self):
if self._socket:
self._socket.close()
self._socket = None
def CreateAuthPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateAuthPacket(self, secret=self.secret, **args)
def CreateAcctPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateAcctPacket(self, secret=self.secret, **args)
def CreateCoAPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateCoAPacket(self, secret=self.secret, **args)
def CreateDiscPacket(self, **args):
"""Create a new RADIUS packet.
This utility function creates a new RADIUS packet which can
be used to communicate with the RADIUS server this client
talks to. This is initializing the new packet with the
dictionary and secret used for the client.
:return: a new empty packet instance
:rtype: pyrad.packet.Packet
"""
return host.Host.CreateDiscPacket(self, secret=self.secret, **args)
def _SendPacket(self, pkt, port):
"""Send a packet to a RADIUS server.
:param pkt: the packet to send
:type pkt: pyrad.packet.Packet
:param port: UDP port to send packet to
:type port: integer
:return: the reply packet received
:rtype: pyrad.packet.Packet
:raise Timeout: RADIUS server does not reply
"""
self._SocketOpen()
for attempt in range(self.retries):
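            # Retransmitted accounting requests bump Acct-Delay-Time by the
            # time already spent waiting (per RFC 2866), so the server can
            # reconstruct when the accounting event actually occurred.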
if attempt and pkt.code == packet.AccountingRequest:
if "Acct-Delay-Time" in pkt:
pkt["Acct-Delay-Time"] = \
pkt["Acct-Delay-Time"][0] + self.timeout
else:
pkt["Acct-Delay-Time"] = self.timeout
self._socket.sendto(pkt.RequestPacket(), (self.server, port))
now = time.time()
waitto = now + self.timeout
while now < waitto:
ready = select.select([self._socket], [], [],
(waitto - now))
if ready[0]:
rawreply = self._socket.recv(4096)
else:
now = time.time()
continue
try:
reply = pkt.CreateReply(packet=rawreply)
if pkt.VerifyReply(reply, rawreply):
return reply
except packet.PacketError:
pass
now = time.time()
raise Timeout
def SendPacket(self, pkt):
"""Send a packet to a RADIUS server.
:param pkt: the packet to send
:type pkt: pyrad.packet.Packet
:return: the reply packet received
:rtype: pyrad.packet.Packet
:raise Timeout: RADIUS server does not reply
"""
if isinstance(pkt, packet.AuthPacket):
return self._SendPacket(pkt, self.authport)
elif isinstance(pkt, packet.CoAPacket):
return self._SendPacket(pkt, self.coaport)
elif isinstance(pkt, packet.DiscPacket):
return self._SendPacket(pkt, self.discport)
else:
return self._SendPacket(pkt, self.acctport)
| bsd-3-clause | 2,550,055,677,371,712,000 | 34.34715 | 77 | 0.595133 | false |
Swappsco/koalixerp | crm_core/admin.py | 1 | 6050 |
import reversion
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from crm_core.models import (UserExtension, Customer, Invoice, PurchaseOrder,
Quote, Supplier, HTMLFile, TemplateSet,
CustomerBillingCycle, CustomerGroup, Contract,
Unit, TaxRate, UnitTransform, CompanyContactData)
# Define an inline admin descriptor
# which acts a bit like a singleton
class CRMUserProfileInline(admin.TabularInline):
model = UserExtension
can_delete = False
extra = 1
max_num = 1
verbose_name_plural = _('User Profile Extensions')
# Define a new User admin
class NewUserAdmin(UserAdmin):
inlines = (CRMUserProfileInline,)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
class CustomerBillingCycleAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (u'id', 'name', 'days_to_payment')
search_fields = ('name',)
admin.site.register(CustomerBillingCycle, CustomerBillingCycleAdmin)
class CustomerGroupAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (u'id', 'name')
search_fields = ('name',)
admin.site.register(CustomerGroup, CustomerGroupAdmin)
class CustomerAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'prefix',
'name',
'firstname',
'default_currency',
'billingcycle',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
list_filter = (
'billingcycle',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
raw_id_fields = ('ismemberof',)
search_fields = ('name',)
exclude = ('lastmodifiedby',)
admin.site.register(Customer, CustomerAdmin)
class SupplierAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'prefix',
'name',
'default_currency',
'direct_shipment_to_customers',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
list_filter = (
'direct_shipment_to_customers',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
search_fields = ('name',)
admin.site.register(Supplier, SupplierAdmin)
class ContractAdmin(admin.ModelAdmin):
list_display = (
u'id',
'state',
'default_customer',
'default_supplier',
'description',
'default_currency',
'staff',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
list_filter = (
'default_customer',
'default_supplier',
'staff',
'dateofcreation',
'lastmodification',
'lastmodifiedby',
)
admin.site.register(Contract, ContractAdmin)
class PurchaseOrderAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'contract',
'customer',
'validuntil',
'discount',
'staff',
'lastmodifiedby',
)
list_filter = (
'validuntil',
'contract',
'customer',
'staff',
'lastmodifiedby',
)
admin.site.register(PurchaseOrder, PurchaseOrderAdmin)
class QuoteAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'contract',
'customer',
'validuntil',
'discount',
'staff',
'lastmodifiedby',
)
list_filter = (
'validuntil',
'contract',
'customer',
'staff',
'lastmodifiedby',
)
admin.site.register(Quote, QuoteAdmin)
class InvoiceAdmin(reversion.VersionAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'contract',
'customer',
'payableuntil',
'discount',
'staff',
'lastmodifiedby',
)
list_filter = (
'payableuntil',
'contract',
'customer',
'staff',
'lastmodifiedby',
)
admin.site.register(Invoice, InvoiceAdmin)
class UnitAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'shortname',
'description',
'fractionof',
'factor',
)
list_filter = ('fractionof',)
admin.site.register(Unit, UnitAdmin)
class TaxRateAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'name',
'taxrate_in_percent',
)
search_fields = ('name',)
admin.site.register(TaxRate, TaxRateAdmin)
class UnitTransformAdmin(admin.ModelAdmin):
list_display = (u'id', 'from_unit', 'to_unit', 'product', 'factor')
list_filter = ('from_unit', 'to_unit', 'product')
admin.site.register(UnitTransform, UnitTransformAdmin)
class HTMLFileAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (u'id', 'title', 'file')
admin.site.register(HTMLFile, HTMLFileAdmin)
class TemplateSetAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
u'id',
'invoice_html_file',
'quote_html_file',
'purchaseorder_html_file',
)
list_filter = (
'invoice_html_file',
'quote_html_file',
'purchaseorder_html_file',
)
admin.site.register(TemplateSet, TemplateSetAdmin)
class CompanyContactDataAdmin(admin.ModelAdmin):
change_list_template = 'smuggler/change_list.html'
list_display = (
'name',
)
admin.site.register(CompanyContactData, CompanyContactDataAdmin)
| bsd-3-clause | -2,307,273,416,514,383,000 | 22.003802 | 78 | 0.614711 | false |
nearlg/greenPi | relays/relays/log.py | 1 | 2044 | #!/usr/bin/env python
import time
import json
from datetime import datetime
class Log:
fileName = "/home/pi/.greenPi/relays/log.json"
#fileName = os.environ['HOME'] + "/.greenPi/relays/log.json"
@staticmethod
def writeLog(key, cycleName, numRelays, mode, seconds=None):
log = Log.getLog()
strNumRelays = '%s' % ' '.join(map(str, numRelays))
state = "on" if mode else "off"
dicc = {"date": time.strftime('%b %d %Y %H:%M:%S'),
"key": key, "cycleName": cycleName, "numRelays": strNumRelays,
"mode": state}
if seconds is not None and seconds > 0:
dicc["lapsedSeconds"] = seconds
log.append(dicc)
with open(Log.fileName, 'w') as outfile:
json.dump(log, outfile)
@staticmethod
def getLog():
try:
with open(Log.fileName, "r") as data_file:
return json.load(data_file)
except:
Log.resetLog()
return []
@staticmethod
def resetLog():
f = open(Log.fileName, 'w')
f.write("[]")
f.close()
@staticmethod
def getLastLog():
log = Log.getLog()
lenLog = len(log)
if lenLog > 0:
return log[lenLog - 1]
return []
@staticmethod
def readLastLog():
lastLog = Log.getLastLog()
if len(lastLog) > 3:
date = datetime.strptime(lastLog["date"], '%b %d %Y %H:%M:%S')
seconds = (datetime.now() - date).total_seconds()
seconds = int(round(seconds))
finalLog = {"lapsedSeconds": seconds}
for item in lastLog:
if item == "date":
continue
elif item == "lapsedSeconds":
finalLog[item] += lastLog[item]
elif item == "mode":
finalLog[item] = True if lastLog[item] == "on" else False
else:
finalLog[item] = lastLog[item]
return finalLog
return {}
| gpl-3.0 | 5,454,230,350,684,585,000 | 29.507463 | 77 | 0.51272 | false |
AntSharesSDK/antshares-python | sdk/AntShares/Network/RemoteNode.py | 1 | 2537 | # -*- coding:utf-8 -*-
"""
Description:
Remote Node, use to broadcast tx
Usage:
from AntShares.Network.RemoteNode import RemoteNode
"""
#from AntShares.Network.RPC.RpcClient import RpcClient
from RPC.RpcClient import RpcClient
class RemoteNode(object):
"""docstring for RemoteNode"""
def __init__(self, url="http://localhost:20332/"):
super(RemoteNode, self).__init__()
self.rpc = RpcClient(url)
def sendRawTransaction(self, tx):
"""
Send Transaction
"""
return self.rpc.call(method="sendrawtransaction",
params=[tx])
def getBestBlockhash(self):
"""
Get Best BlockHash from chain
"""
return self.rpc.call(method="getbestblockhash",
params=[]).get("result", "")
def getBlock(self, hint, verbose=1):
"""
Get Block from chain with hash or index
hint : blockhash or index
Verbose: 0-Simple, 1-Verbose
"""
if verbose not in (0, 1):
            raise ValueError, 'verbose should be 0 or 1.'
return self.rpc.call(method="getblock",params=[hint, verbose])
def getBlockCount(self):
"""
Get Block Count from chain
"""
return self.rpc.call(method="getblockcount",
params=[]).get('result', 0)
def getBlockHash(self, index):
"""
Get BlockHash from chain by index
"""
return self.rpc.call(method="getblockhash",
params=[index]).get('result', '')
def getConnectionCount(self):
"""
Get Connection Count from chain
"""
return self.rpc.call(method="getconnectioncount",
params=[]).get('result', 0)
def getRawMemPool(self):
"""
Get Uncomfirmed tx in Memory Pool
"""
return self.rpc.call(method="getrawmempool",
params=[])
def getRawTransaction(self, txid, verbose=0):
"""
        Get confirmed tx from chain
Verbose: 0-Simple, 1-Verbose
"""
if verbose not in (0, 1):
            raise ValueError, 'verbose should be 0 or 1.'
return self.rpc.call(method="getrawtransaction",
params=[txid, verbose])
def getTxOut(self, txid, n=0):
"""
Get Tx Output from chain
"""
return self.rpc.call(method="gettxout",
params=[txid, n])
| apache-2.0 | 1,516,765,743,379,418,400 | 27.829545 | 70 | 0.53449 | false |
freundTech/deepl-cli | test/translator.py | 1 | 1489 | import unittest
import requests
import deepl
paragraph_text = """This is a text with multiple paragraphs. This is still the first one.
This is the second one.
This is the third paragraph."""
paragraph_list = [
'This is a text with multiple paragraphs. This is still the first one.',
'This is the second one.',
'This is the third paragraph.'
]
sentence_list = [
'This is a text with multiple paragraphs.',
'This is still the first one.',
'This is the second one.',
'This is the third paragraph.'
]
class TestOfflineMethods(unittest.TestCase):
def test_split_paragraphs(self):
self.assertListEqual(deepl.translator._split_paragraphs(paragraph_text), paragraph_list)
@unittest.skip("Not yet implemented")
def test_insert_translation(self):
pass
class TestOnlineMethods(unittest.TestCase):
def setUp(self):
try:
requests.get("https://www.deepl.com/jsonrpc")
        except requests.exceptions.ConnectionError:
self.skipTest("Can't contact deepl API. Skipping online tests")
def test_split_sentences(self):
self.assertListEqual(deepl.translator._request_split_sentences(paragraph_list, "EN", ["EN"]),
sentence_list)
def test_translate(self):
self.assertListEqual(
deepl.translator._request_translate(["This is a test"], "EN", "DE", ["EN", "DE"])["translations"],
["Das ist ein Test"])
if __name__ == '__main__':
unittest.main()
| mit | -7,055,180,190,798,253,000 | 28.196078 | 110 | 0.650772 | false |
nelsonmonteiro/django-sage-api | sage_api/models.py | 1 | 9916 | from __future__ import unicode_literals
try:
    from urllib import urlencode, quote
    import urlparse
except ImportError:
    from urllib.parse import urlencode, quote
    import urllib.parse as urlparse
import json
import pytz
import datetime
import base64
import requests
import hashlib
import hmac
from collections import OrderedDict
from uuid import uuid4
from django.db import models
from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.core.exceptions import PermissionDenied
from .settings import SageSettings
sage_settings = SageSettings()
@python_2_unicode_compatible
class Sage(models.Model):
"""
Model to connect and save tokens from SAGE API related with a specific user.
"""
class Meta:
verbose_name = 'Sage account'
verbose_name_plural = 'Sage accounts'
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='sage')
access_token_key = models.CharField(max_length=2048, blank=True, null=True)
access_token_type = models.CharField(max_length=20)
access_token_expires_on = models.DateTimeField(null=True, blank=True)
refresh_token = models.CharField(max_length=200, blank=True, null=True)
refresh_token_expires_on = models.DateTimeField(null=True, blank=True)
def __str__(self):
return '%s' % self.user
@classmethod
def get_authorization_url(cls, user):
"""
Return the link to use for oAuth authentication.
"""
state_code, created = AuthStateCode.objects.get_or_create(user=user, defaults={'code': uuid4()})
params = {
'client_id': sage_settings.CLIENT_ID,
'response_type': 'code',
'state': state_code.code,
'redirect_uri': sage_settings.AUTH_REDIRECT_URL,
'scope': sage_settings.SCOPE,
}
return '%s?%s' % (sage_settings.AUTH_URL, urlencode(params))
@classmethod
def create_for_user(cls, user, auth_code, state_code):
"""
        Create a Sage model for a user and generate the first access token.
        Verifies that the state code is valid to guard against forged requests.
"""
try:
state_code = AuthStateCode.objects.get(user=user, code=state_code)
state_code.delete()
sage_auth, created = cls.objects.get_or_create(user=user)
sage_auth.__get_access_token(auth_code)
except AuthStateCode.DoesNotExist:
raise PermissionDenied('State code is invalid for this user')
def __set_access_token(self, response):
"""
        Saves the access_token JSON response fields in the database for later use.
"""
if not ('error' in response):
now = datetime.datetime.now(tz=pytz.utc)
self.access_token_key = response['access_token']
self.access_token_type = response['token_type']
self.access_token_expires_on = now + datetime.timedelta(seconds=response['expires_in'])
self.refresh_token = response['refresh_token']
self.refresh_token_expires_on = now + datetime.timedelta(seconds=response['refresh_token_expires_in'])
self.save()
def __get_access_token(self, code):
"""
Make an API call to get the access_token from the authorization_code.
"""
params = urlencode({
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': sage_settings.AUTH_REDIRECT_URL,
})
authorization = base64.b64encode('%s:%s' % (sage_settings.CLIENT_ID, sage_settings.SECRET_KEY))
request = requests.post(sage_settings.ACCESS_TOKEN_URL, params, headers={
'Authorization': 'Basic %s' % authorization,
'ContentType': 'application/x-www-form-urlencoded;charset=UTF-8',
})
self.__set_access_token(request.json())
def __refresh_access_token(self):
"""
Make an API call to renew the access_token.
"""
params = urlencode({
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token,
})
authorization = base64.b64encode('%s:%s' % (sage_settings.CLIENT_ID, sage_settings.SECRET_KEY))
request = requests.post(sage_settings.ACCESS_TOKEN_URL, params, headers={
'Authorization': 'Basic %s' % authorization,
'ContentType': 'application/x-www-form-urlencoded;charset=UTF-8',
})
self.__set_access_token(request.json())
@property
def access_token(self):
"""
Return a valid access_token.
"""
now = datetime.datetime.now(tz=pytz.utc)
if self.access_token_expires_on < now:
if self.refresh_token_expires_on > now:
self.__refresh_access_token()
else:
return None
return self.access_token_key
def __get_signature(self, url, params, data, method, nonce):
"""
Return the signature to put in the API request's headers.
"""
if method in ['POST', 'PUT']:
params['body'] = base64.b64encode(json.dumps(data))
ordered_params = OrderedDict(sorted(params.items()))
encoded_params = quote(urlencode(ordered_params), safe='')
raw_string = '%s&%s&%s&%s' % (method, quote(url.lower(), safe=''), encoded_params, nonce)
signing_key = '%s&%s' % (quote(sage_settings.SIGNING_KEY, safe=''), quote(self.access_token, safe=''))
signature = hmac.new(signing_key, raw_string, hashlib.sha1).digest().encode('base64').rstrip('\n')
return signature
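    # Note (added): the base string above is "METHOD&url&params&nonce" with
    # percent-encoded, alphabetically ordered parameters (plus a base64 copy
    # of the body for POST/PUT requests), HMAC-SHA1 signed with
    # "signing_key&access_token" -- an OAuth1-style request signature.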
def __get_headers(self, url, params, data, method, site_id=None, company_id=None):
"""
Return the API request's headers already with signature.
"""
nonce = str(uuid4().hex)
return {
'Authorization': '%s %s' % (self.access_token_type.capitalize(), self.access_token),
'ocp-apim-subscription-key': sage_settings.SUBSCRIPTION_KEY,
'X-Site': site_id or '',
'X-Company': company_id or '',
'X-Signature': self.__get_signature(url, params, data, method, nonce),
'X-Nonce': nonce,
'Accept': 'application/json',
'Content-Type': 'application/json',
}
@staticmethod
def __get_absolute_url(relative_url):
"""
Return the absolute url for a API call.
"""
return urlparse.urljoin(sage_settings.API_URL, relative_url)
@staticmethod
def __clean_response(response):
if response.status_code != 200:
error_msg = """
STATUS_CODE:
%(status_code)s
URL:
%(url)s
REQUEST HEADERS:
%(request_headers)s
REQUEST BODY:
%(request_body)s
RESPONSE HEADERS:
%(response_headers)s
RESPONSE BODY:
%(response_body)s
""" % {
'status_code': response.status_code,
'url': response.request.url,
'request_headers': response.request.headers,
'request_body': response.request.body,
'response_headers': response.headers,
'response_body': response.content,
}
raise Exception(error_msg)
return response.json()
def api_get(self, relative_url, params=None, site_id=None, company_id=None):
"""
Make an API GET request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
headers = self.__get_headers(url, params or {}, {}, 'GET', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.get(url, headers=headers)
return self.__clean_response(response)
def api_post(self, relative_url, params=None, data=None, site_id=None, company_id=None):
"""
Make an API POST request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
data = data or {}
headers = self.__get_headers(url, params, data, 'POST', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.post(url, json.dumps(data), headers=headers)
return self.__clean_response(response)
def api_put(self, relative_url, params=None, data=None, site_id=None, company_id=None):
"""
Make an API PUT request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
data = data or {}
headers = self.__get_headers(url, params or {}, data, 'PUT', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.put(url, json.dumps(data), headers=headers)
return self.__clean_response(response)
def api_delete(self, relative_url, params=None, site_id=None, company_id=None):
"""
Make an API DELETE request.
"""
url = self.__get_absolute_url(relative_url)
params = params or {}
headers = self.__get_headers(url, params, {}, 'DELETE', site_id, company_id)
if params:
url = '%s?%s' % (url, urlencode(params))
response = requests.delete(url, headers=headers)
return self.__clean_response(response)
def get_sites(self):
return self.api_get('accounts/v1/sites')
@python_2_unicode_compatible
class AuthStateCode(models.Model):
"""
Model to save a random code for an user to prevent external attacks.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, related_name='sage_state_code')
code = models.CharField(max_length=50)
def __str__(self):
return '%s' % self.user
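# --- Usage sketch (editor addition): the view context and "request" object
# are hypothetical; shows the intended OAuth round-trip and a signed call.
#
#     # 1) send the user to Sage:
#     url = Sage.get_authorization_url(request.user)
#     # 2) in the redirect view, exchange code + state for tokens:
#     Sage.create_for_user(request.user, request.GET['code'],
#                          request.GET['state'])
#     # 3) make signed API calls; tokens refresh automatically on expiry:
#     sites = request.user.sage.get_sites()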
| mit | 354,929,042,206,438,900 | 35.725926 | 114 | 0.590056 | false |
MoonRaker/cons2-python | docs/sphnix/source/conf.py | 1 | 10217 | # -*- coding: utf-8 -*-
#
# cons2_Python documentation build configuration file, created by
# sphinx-quickstart on Fri Nov 7 15:56:33 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Mock modules so RTD works
try:
from mock import Mock as MagicMock
except ImportError:
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
# MOCK_MODULES = []
if sys.platform.startswith('linux'):
for mod_name in [
'win32com',
'win32com.client',
]:
sys.modules[mod_name] = MagicMock()
# sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import pandas as pd
pd.show_versions()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../sphinxext'))
sys.path.insert(0, os.path.abspath('../../../'))
# -- General configuration ------------------------------------------------
# turns off numpydoc autosummary warnings
numpydoc_show_class_members = False
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
'numpydoc',
'sphinx.ext.autosummary',
# 'IPython.sphinxext.ipython_directive',
# 'IPython.sphinxext.ipython_console_highlighting',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'cons2-python'
copyright = u'Derek Groenendyk'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import cons2
# The short X.Y version.
version = '%s' % (4.0)
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'cons2_Pythondoc'
# A workaround for the responsive tables always having annoying scrollbars.
def setup(app):
app.add_stylesheet("no_scrollbars.css")
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'cons2_Python.tex', u'cons2\\_Python Documentation',
u'Derek Groenendyk', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# extlinks alias
extlinks = {'issue': ('https://github.com/cons2/cons2-python/issues/%s',
'GH'),
'wiki': ('https://github.com/cons2/cons2-python/wiki/%s',
'wiki '),
'doi': ('http://dx.doi.org/%s', 'DOI: ')}
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'cons2_python', u'cons2_Python Documentation',
[u'Derek Groenendyk'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'cons2_Python', u'cons2_Python Documentation',
u'Derek Groenendyk', 'cons2_Python', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3.5/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
| gpl-3.0 | -1,782,417,155,217,439,200 | 30.244648 | 79 | 0.695899 | false |
Pica4x6/numina | numina/array/wavecal/tests/test_arccalibration.py | 1 | 19435 | from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
from numpy.polynomial import polynomial
from ..arccalibration import gen_triplets_master
from ..arccalibration import arccalibration_direct
from ..arccalibration import fit_solution
try:
import matplotlib
HAVE_PLOTS = True
except ImportError:
HAVE_PLOTS = False
# -----------------------------------------------------------------------------
def simulate_master_table(my_seed, wv_ini_master, wv_end_master, nlines_master,
ldebug=False, lpause=False):
"""Generates a simulated master table of wavelengths.
The location of the lines follows a random uniform distribution
between `wv_ini_master` and `wv_end_master`. The total number of generated
lines is `nlines_master`. The seed for random number generation can be
re-initialized with `my_seed`.
Parameters
----------
my_seed : int
Seed to re-initialize random number generation.
wv_ini_master : float
Minimum wavelength in master table.
wv_end_master : float
Maximum wavelength in master table.
nlines_master : int
Total number of lines in master table.
ldebug : bool
If True intermediate results are displayed.
Returns
-------
wv_master : 1d numpy array, float
Array with wavelengths corresponding to the master table (Angstroms).
"""
if my_seed is not None:
np.random.seed(my_seed)
if wv_end_master < wv_ini_master:
raise ValueError('wv_ini_master=' + str(wv_ini_master) +
' must be <= wv_end_master=' + str(wv_end_master))
wv_master = np.random.uniform(low=wv_ini_master,
high=wv_end_master,
size=nlines_master)
wv_master.sort() # in-place sort
if ldebug:
print('>>> Master table:')
for val in zip(range(nlines_master), wv_master):
print(val)
if lpause:
raw_input('press <RETURN> to continue...')
return wv_master
# -----------------------------------------------------------------------------
def simulate_arc(wv_ini_master, wv_end_master, wv_master,
wv_ini_arc, wv_end_arc, naxis1_arc,
prob_line_master_in_arc,
delta_xpos_min_arc, delta_lambda, error_xpos_arc,
poly_degree, fraction_unknown_lines,
ldebug=False, lplot=False, lpause=False):
"""Generates simulated input for arc calibration.
Parameters
----------
wv_ini_master : float
Minimum wavelength in master table.
wv_end_master : float
Maximum wavelength in master table.
wv_master : 1d numpy array, float
Array with wavelengths corresponding to the master table (Angstroms).
wv_ini_arc : float
Minimum wavelength in arc spectrum.
wv_end_arc : float
Maximum wavelength in arc spectrum.
naxis1_arc : int
NAXIS1 of arc spectrum.
prob_line_master_in_arc : float
Probability that a master table line appears in the arc spectrum.
delta_xpos_min_arc : float
Minimum distance (pixels) between lines in arc spectrum.
delta_lambda : float
Maximum deviation (Angstroms) from linearity in arc calibration
polynomial.
error_xpos_arc : float
Standard deviation (pixels) employed to introduce noise in the arc
spectrum lines. The initial lines are shifted from their original
        location following a Normal distribution with mean equal to zero and
sigma equal to this parameter.
poly_degree : int
Polynomial degree corresponding to the original wavelength calibration
function.
fraction_unknown_lines : float
Fraction of lines that on average will be unknown (i.e., lines that
appear in the arc spectrum that are not present in the master table).
ldebug : bool
If True intermediate results are displayed.
lplot : bool
If True intermediate plots are displayed.
Returns
-------
nlines_arc : int
Number of arc lines
xpos_arc : 1d numpy array, float
Location of arc lines (pixels).
crval1_arc : float
CRVAL1 for arc spectrum (linear approximation).
cdelt1_arc : float
CDELT1 for arc spectrum (linear approximation).
c0_arc, c1_arc, c2_arc : floats
Coefficients of the second order polynomial.
ipos_wv_arc : 1d numpy array, int
        Index of the master table line corresponding to each arc line. Unknown
lines (i.e. those that are not present in the master table) are
assigned to -1.
coeff_original : 1d numpy array, float
Polynomial coefficients ordered from low to high, corresponding to the
fit to the arc lines before the inclusion of unknown lines.
"""
if (wv_ini_arc < wv_ini_master) or (wv_ini_arc > wv_end_master):
print('wv_ini_master:', wv_ini_master)
print('wv_end_master:', wv_end_master)
print('wv_ini_arc...:', wv_ini_arc)
raise ValueError('wavelength_ini_arc outside valid range')
if (wv_end_arc < wv_ini_master) or (wv_end_arc > wv_end_master):
print('wv_ini_master:', wv_ini_master)
print('wv_end_master:', wv_end_master)
print('wv_end_arc...:', wv_end_arc)
raise ValueError('wavelength_ini_arc outside valid range')
if wv_end_arc < wv_ini_arc:
raise ValueError('wv_ini_arc=' + str(wv_ini_arc) +
' must be <= wv_end_arc=' + str(wv_end_arc))
# ---
nlines_master = wv_master.size
crval1_arc = wv_ini_arc
cdelt1_arc = (wv_end_arc - wv_ini_arc) / float(naxis1_arc - 1)
crpix1_arc = 1.0
if ldebug:
print('>>> CRVAL1, CDELT1, CRPIX1....:', crval1_arc, cdelt1_arc,
crpix1_arc)
# ---
# The arc lines constitute a subset of the master lines in the considered
# wavelength range.
i1_master = np.searchsorted(wv_master, wv_ini_arc)
i2_master = np.searchsorted(wv_master, wv_end_arc)
nlines_temp = i2_master - i1_master
nlines_arc_ini = int(round(nlines_temp * prob_line_master_in_arc))
ipos_wv_arc_ini = np.random.choice(range(i1_master, i2_master),
size=nlines_arc_ini,
replace=False)
ipos_wv_arc_ini.sort() # in-place sort
wv_arc_ini = wv_master[ipos_wv_arc_ini]
if ldebug:
print('>>> Number of master lines in arc region.:', nlines_temp)
print('>>> Initial number of arc lines..........:', nlines_arc_ini)
print('>>> Initial selection of master list lines for arc:')
print(ipos_wv_arc_ini)
# Remove too close lines.
ipos_wv_arc = np.copy(ipos_wv_arc_ini[0:1])
wv_arc = np.copy(wv_arc_ini[0:1])
i_last = 0
for i in range(1, nlines_arc_ini):
delta_xpos = (wv_arc_ini[i] - wv_arc_ini[i_last]) / cdelt1_arc
if delta_xpos > delta_xpos_min_arc:
ipos_wv_arc = np.append(ipos_wv_arc, ipos_wv_arc_ini[i])
wv_arc = np.append(wv_arc, wv_arc_ini[i])
i_last = i
else:
if ldebug:
print('--> skipping line #', i, '. Too close to line #',
i_last)
nlines_arc = len(wv_arc)
if ldebug:
print('>>> Intermediate number of arc lines.....:', nlines_arc)
print('>>> Intermediate selection of master list lines for arc:')
print(ipos_wv_arc)
# Generate pixel location of the arc lines.
if delta_lambda == 0.0:
# linear solution
xpos_arc = (wv_arc - crval1_arc) / cdelt1_arc + 1.0
c0_arc = wv_ini_arc
c1_arc = cdelt1_arc
c2_arc = 0.0
else:
# polynomial solution
c0_arc = wv_ini_arc
c1_arc = (wv_end_arc - wv_ini_arc - 4 * delta_lambda) / float(
naxis1_arc - 1)
c2_arc = 4 * delta_lambda / float(naxis1_arc - 1) ** 2
xpos_arc = (
-c1_arc + np.sqrt(c1_arc ** 2 - 4 * c2_arc * (c0_arc - wv_arc)))
xpos_arc /= 2 * c2_arc
xpos_arc += 1 # convert from 0,...,(NAXIS1-1) to 1,...,NAXIS1
# Introduce noise in arc line positions.
if error_xpos_arc > 0:
xpos_arc += np.random.normal(loc=0.0,
scale=error_xpos_arc,
size=nlines_arc)
# Check that the order of the lines has not been modified.
xpos_arc_copy = np.copy(xpos_arc)
xpos_arc_copy.sort() # in-place sort
if sum(xpos_arc == xpos_arc_copy) != len(xpos_arc):
raise ValueError(
'FATAL ERROR: arc line switch after introducing noise')
if lplot and HAVE_PLOTS:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([1, naxis1_arc])
ax.set_ylim([wv_ini_arc, wv_end_arc])
ax.plot(xpos_arc, wv_arc, 'ro')
xp = np.array([1, naxis1_arc])
yp = np.array([wv_ini_arc, wv_end_arc])
ax.plot(xp, yp, 'b-')
xp = np.arange(1, naxis1_arc + 1)
yp = c0_arc + c1_arc * (xp - 1) + c2_arc * (xp - 1) ** 2
ax.plot(xp, yp, 'g:')
ax.set_xlabel('pixel position in arc spectrum')
ax.set_ylabel('wavelength (Angstrom)')
plt.show(block=False)
if lpause:
raw_input('press <RETURN> to continue...')
# Unweighted polynomial fit.
coeff_original = polynomial.polyfit(xpos_arc, wv_arc, poly_degree)
poly_original = polynomial.Polynomial(coeff_original)
if lplot and HAVE_PLOTS:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([1, naxis1_arc])
if delta_lambda == 0.0:
if error_xpos_arc > 0:
ax.set_ylim([-4 * error_xpos_arc * cdelt1_arc,
4 * error_xpos_arc * cdelt1_arc])
else:
ax.set_ylim([-1.1, 1.1])
else:
ax.set_ylim([-delta_lambda * 1.5, delta_lambda * 1.5])
yp = wv_arc - (crval1_arc + (xpos_arc - 1) * cdelt1_arc)
ax.plot(xpos_arc, yp, 'ro')
xp = np.array([1, naxis1_arc])
yp = np.array([0, 0])
ax.plot(xp, yp, 'b-')
xp = np.arange(1, naxis1_arc + 1)
yp = c0_arc + c1_arc * (xp - 1) + c2_arc * (xp - 1) ** 2
yp -= crval1_arc + cdelt1_arc * (xp - 1)
ax.plot(xp, yp, 'g:')
yp = poly_original(xp)
yp -= crval1_arc + cdelt1_arc * (xp - 1)
ax.plot(xp, yp, 'm-')
ax.set_xlabel('pixel position in arc spectrum')
ax.set_ylabel('residuals (Angstrom)')
plt.show(block=False)
if lpause:
raw_input('press <RETURN> to continue...')
# ---
# Include unknown lines (lines that do not appear in the master table).
nunknown_lines = int(round(fraction_unknown_lines * float(nlines_arc)))
if ldebug:
print('>>> Number of unknown arc lines..........:', nunknown_lines)
for i in range(nunknown_lines):
iiter = 0
while True:
iiter += 1
if iiter > 1000:
raise ValueError('iiter > 1000 while adding unknown lines')
xpos_dum = np.random.uniform(low=1.0,
high=float(naxis1_arc),
size=1)
isort = np.searchsorted(xpos_arc, xpos_dum)
newlineok = False
if isort == 0:
dxpos1 = abs(xpos_arc[isort] - xpos_dum)
if dxpos1 > delta_xpos_min_arc:
newlineok = True
elif isort == nlines_arc:
dxpos2 = abs(xpos_arc[isort - 1] - xpos_dum)
if dxpos2 > delta_xpos_min_arc:
newlineok = True
else:
dxpos1 = abs(xpos_arc[isort] - xpos_dum)
dxpos2 = abs(xpos_arc[isort - 1] - xpos_dum)
if (dxpos1 > delta_xpos_min_arc) and \
(dxpos2 > delta_xpos_min_arc):
newlineok = True
if newlineok:
xpos_arc = np.insert(xpos_arc, isort, xpos_dum)
ipos_wv_arc = np.insert(ipos_wv_arc, isort, -1)
nlines_arc += 1
if ldebug:
print('--> adding unknown line at pixel:', xpos_dum)
break
if ldebug:
print('>>> Final number of arc lines............:', nlines_arc)
for val in zip(range(nlines_arc), ipos_wv_arc, xpos_arc):
print(val)
if lpause:
raw_input('press <RETURN> to continue...')
if lplot and HAVE_PLOTS:
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_ylim([0.0, 3.0])
ax.vlines(wv_master, 0.0, 1.0)
ax.vlines(wv_arc, 1.0, 2.0, colors='r', linestyle=':')
ax.vlines(wv_ini_arc, 0.0, 3.0, colors='m', linewidth=3.0)
ax.vlines(wv_end_arc, 0.0, 3.0, colors='m', linewidth=3.0)
ax.set_xlabel('wavelength')
axbis = ax.twiny()
axbis.vlines(xpos_arc, 2.0, 3.0, colors='g')
xmin_xpos_master = (wv_ini_master - crval1_arc) / cdelt1_arc + 1.0
xmax_xpos_master = (wv_end_master - crval1_arc) / cdelt1_arc + 1.0
axbis.set_xlim([xmin_xpos_master, xmax_xpos_master])
axbis.set_xlabel('pixel position in arc spectrum')
plt.show(block=False)
if lpause:
raw_input('press <RETURN> to continue...')
return nlines_arc, xpos_arc, crval1_arc, cdelt1_arc, \
c0_arc, c1_arc, c2_arc, \
ipos_wv_arc, coeff_original
# -----------------------------------------------------------------------------
def execute_arccalibration(my_seed=432, wv_ini_master=3000, wv_end_master=7000,
nlines_master=120,
wv_ini_arc=4000, wv_end_arc=5000, naxis1_arc=1024,
prob_line_master_in_arc=0.80,
delta_xpos_min_arc=4.0,
delta_lambda=5.0, error_xpos_arc=0.3,
poly_degree=2, fraction_unknown_lines=0.20,
wv_ini_search=None, wv_end_search=None,
times_sigma_r=3.0, frac_triplets_for_sum=0.50,
times_sigma_theil_sen=10.0, poly_degree_wfit=2,
times_sigma_polfilt=10.0, times_sigma_inclusion=5.0,
ldebug=False, lplot=False, lpause=False):
"""Execute a particular arc calibration simulation.
This function simulates a master list, generates a simulated arc, and
    carries out its wavelength calibration.
Parameters
----------
my_seed : int
Seed to re-initialize random number generation.
wv_ini_master : float
Minimum wavelength in master table.
wv_end_master : float
Maximum wavelength in master table.
nlines_master : int
Total number of lines in master table.
ldebug : bool
If True intermediate results are displayed.
Returns
-------
coeff : 1d numpy array, float
Coefficients of the polynomial fit.
crval1_approx : float
Approximate CRVAL1 value.
cdetl1_approx : float
Approximate CDELT1 value.
"""
wv_master = simulate_master_table(my_seed, wv_ini_master, wv_end_master,
nlines_master,
ldebug=ldebug, lpause=lpause)
ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \
gen_triplets_master(wv_master)
nlines_arc, xpos_arc, crval1_arc, cdelt1_arc, \
c0_arc, c1_arc, c2_arc, ipos_wv_arc, coeff_original = \
simulate_arc(wv_ini_master, wv_end_master, wv_master,
wv_ini_arc, wv_end_arc, naxis1_arc,
prob_line_master_in_arc,
delta_xpos_min_arc, delta_lambda, error_xpos_arc,
poly_degree, fraction_unknown_lines,
ldebug=ldebug, lplot=lplot, lpause=lpause)
if wv_ini_search is None:
wv_ini_search = wv_ini_master - 0.1 * (wv_end_master - wv_ini_master)
if wv_end_search is None:
wv_end_search = wv_end_master + 0.1 * (wv_end_master - wv_ini_master)
solution = arccalibration_direct(wv_master,
ntriplets_master,
ratios_master_sorted,
triplets_master_sorted_list,
xpos_arc,
naxis1_arc,
wv_ini_search, wv_end_search,
error_xpos_arc,
times_sigma_r,
frac_triplets_for_sum,
times_sigma_theil_sen,
poly_degree_wfit,
times_sigma_polfilt,
times_sigma_inclusion)
coeff, crval1_approx, cdelt1_approx = fit_solution(wv_master,
xpos_arc,
solution,
poly_degree_wfit,
weighted=False)
return coeff, crval1_approx, cdelt1_approx
# -----------------------------------------------------------------------------
def test__execute_notebook_example(ldebug=False, lplot=False, lpause=False):
"""Test the explanation of the ipython notebook example."""
coeff, crval1_approx, cdelt1_approx = \
execute_arccalibration(ldebug=ldebug, lplot=lplot, lpause=lpause)
coeff_expected = np.array([3.99875794e+03, 9.59950578e-01, 1.72739867e-05])
assert np.allclose(coeff, coeff_expected)
assert np.allclose(crval1_approx, 3996.42717772)
assert np.allclose(cdelt1_approx, 0.978303317095)
print("TEST: test__execute_notebook_example... OK")
# @pytest.mark.xfail
def test__execute_simple_case(ldebug=False, lplot=False, lpause=False):
"""Test the explanation of the ipython notebook example."""
coeff, crval1_approx, cdelt1_approx = \
execute_arccalibration(nlines_master=15,
error_xpos_arc=0.3,
wv_ini_arc=3000, wv_end_arc=7000,
prob_line_master_in_arc=1.0,
fraction_unknown_lines=0.0,
frac_triplets_for_sum=0.5,
ldebug=ldebug, lplot=lplot, lpause=lpause)
coeff_expected = np.array([2.99467778e+03, 3.89781863e+00, 1.22960881e-05])
assert np.allclose(coeff, coeff_expected)
assert np.allclose(crval1_approx, 2995.4384155)
assert np.allclose(cdelt1_approx, 3.91231531392)
print("TEST: test__execute_simple_case... OK")
# -----------------------------------------------------------------------------
if __name__ == '__main__':
# test__execute_notebook_example(ldebug=True, lplot=True, lpause=True)
test__execute_simple_case(ldebug=True, lplot=False, lpause=False)
| gpl-3.0 | -7,052,297,969,942,801,000 | 39.072165 | 79 | 0.545151 | false |
scoin/redis-py-datamapper | redislist.py | 1 | 1913 | class RedisList:
import redis
r = redis.StrictRedis(host='localhost', port=6379, db=0)
def __init__(self, key):
self.key = key
def append(self, *values):
self.r.rpush(self.key, *values)
def unshift(self, *values):
self.r.lpush(self.key, *values)
def insert(self, pivot, value):
self.r.linsert(self.key, 'before', pivot, value)
def pop(self):
return self.r.rpop(self.key).decode()
def shift(self):
return self.r.lpop(self.key).decode()
def sort(self):
return [w.decode() for w in self.r.sort(self.key, alpha = True)]
def clear(self):
self.r.delete(self.key)
def __len__(self):
return self.r.llen(self.key)
    def __getitem__(self, index):
        if(type(index) == int):
            if(index >= len(self)): raise IndexError('Out of Range')
            return self.r.lindex(self.key, index).decode()
        elif(type(index) == slice):
            start = index.start or 0
            # use an explicit None check so an explicit stop of 0 is honored
            stop = index.stop if index.stop is not None else len(self)
            # LRANGE uses an inclusive end index, hence the stop - 1
            return [w.decode() for w in self.r.lrange(self.key, start, stop - 1)]
    def __setitem__(self, index, value):
        if(type(index) == int):
            if(index >= len(self)): raise IndexError('Out of Range')
            self.r.lset(self.key, index, value)
        elif(type(index) == slice):
            if(type(value) != tuple and type(value) != list): raise TypeError('Assignment must be iterable')
            start = index.start or 0
            stop = index.stop if index.stop is not None else len(self)
            if (stop - start) != len(value): raise TypeError("Incorrect number of arguments")
            pipe = self.r.pipeline()
            # reuse the same start/stop as the length check above, so a full
            # slice assignment also covers the last element
            for vindex, rindex in enumerate(range(start, stop)):
                pipe.lset(self.key, rindex, value[vindex])
            pipe.execute()
def __repr__(self):
return "RedisList(" + str([w.decode() for w in self.r.lrange(self.key, 0, -1)]) + ")"
| mit | 1,374,646,387,132,393,700 | 35.788462 | 111 | 0.573968 | false |
chippey/gaffer | python/GafferSceneUI/OutputsUI.py | 1 | 8592 | ##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import re
import IECore
import Gaffer
import GafferUI
import GafferScene
import GafferSceneUI
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferScene.Outputs,
"description",
"""
Defines the image outputs to be created by the renderer. Arbitrary
outputs can be defined within the UI and also via the
`Outputs::addOutput()` API. Commonly used outputs may also
be predefined at startup via a config file - see
$GAFFER_ROOT/startup/gui/outputs.py for an example.
""",
plugs = {
"outputs" : [
"description",
"""
The outputs defined by this node.
""",
"plugValueWidget:type", "GafferSceneUI.OutputsUI.OutputsPlugValueWidget",
],
"outputs.*.parameters.quantize.value" : [
"description",
"""
The bit depth of the image.
""",
"preset:8 bit", IECore.IntVectorData( [ 0, 255, 0, 255 ] ),
"preset:16 bit", IECore.IntVectorData( [ 0, 65535, 0, 65535 ] ),
"preset:Float", IECore.IntVectorData( [ 0, 0, 0, 0 ] ),
"plugValueWidget:type", "GafferUI.PresetsPlugValueWidget",
],
"outputs.*.fileName" : [
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"pathPlugValueWidget:bookmarks", "image",
"pathPlugValueWidget:leaf", True,
],
"outputs.*.active" : [
"boolPlugValueWidget:displayMode", "switch",
],
}
)
##########################################################################
# Custom PlugValueWidgets for listing outputs
##########################################################################
class OutputsPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug ) :
column = GafferUI.ListContainer( spacing = 6 )
GafferUI.PlugValueWidget.__init__( self, column, plug )
with column :
# this will take care of laying out our list of outputs, as
# each output is represented as a child plug of the main plug.
GafferUI.PlugLayout( plug )
# now we just need a little footer with a button for adding new outputs
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 ) :
GafferUI.MenuButton(
image="plus.png", hasFrame=False, menu = GafferUI.Menu( Gaffer.WeakMethod( self.__addMenuDefinition ) )
)
GafferUI.Spacer( IECore.V2i( 1 ), maximumSize = IECore.V2i( 100000, 1 ), parenting = { "expand" : True } )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
pass
def __addMenuDefinition( self ) :
node = self.getPlug().node()
currentNames = set( [ output["name"].getValue() for output in node["outputs"].children() ] )
m = IECore.MenuDefinition()
registeredOutputs = node.registeredOutputs()
for name in registeredOutputs :
menuPath = name
if not menuPath.startswith( "/" ) :
menuPath = "/" + menuPath
m.append(
menuPath,
{
"command" : IECore.curry( node.addOutput, name ),
"active" : name not in currentNames
}
)
if len( registeredOutputs ) :
m.append( "/BlankDivider", { "divider" : True } )
m.append( "/Blank", { "command" : IECore.curry( node.addOutput, "", IECore.Display( "", "", "" ) ) } )
return m
# A widget for representing an individual output.
class _ChildPlugWidget( GafferUI.PlugValueWidget ) :
def __init__( self, childPlug ) :
column = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing=4 )
GafferUI.PlugValueWidget.__init__( self, column, childPlug )
with column :
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing=4 ) as header :
collapseButton = GafferUI.Button( image = "collapsibleArrowRight.png", hasFrame=False )
collapseButton.__clickedConnection = collapseButton.clickedSignal().connect( Gaffer.WeakMethod( self.__collapseButtonClicked ) )
GafferUI.PlugValueWidget.create( childPlug["active"] )
self.__label = GafferUI.Label( self.__namePlug().getValue() )
GafferUI.Spacer( IECore.V2i( 1 ), maximumSize = IECore.V2i( 100000, 1 ), parenting = { "expand" : True } )
self.__deleteButton = GafferUI.Button( image = "delete.png", hasFrame=False )
self.__deleteButton.__clickedConnection = self.__deleteButton.clickedSignal().connect( Gaffer.WeakMethod( self.__deleteButtonClicked ) )
self.__deleteButton.setVisible( False )
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing= 4 ) as self.__detailsColumn :
GafferUI.PlugWidget( self.__namePlug() )
GafferUI.PlugWidget( self.__fileNamePlug() )
GafferUI.PlugWidget( childPlug["type"] )
GafferUI.PlugWidget( childPlug["data"] )
GafferUI.CompoundDataPlugValueWidget( childPlug["parameters"] )
GafferUI.Divider( GafferUI.Divider.Orientation.Horizontal )
self.__detailsColumn.setVisible( False )
self.__enterConnection = header.enterSignal().connect( Gaffer.WeakMethod( self.__enter ) )
self.__leaveConnection = header.leaveSignal().connect( Gaffer.WeakMethod( self.__leave ) )
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
with self.getContext() :
enabled = self.getPlug()["active"].getValue()
self.__label.setEnabled( enabled )
self.__detailsColumn.setEnabled( enabled )
self.__label.setText( self.__namePlug().getValue() )
def __namePlug( self ) :
plug = self.getPlug()
# backwards compatibility with old plug layout
return plug.getChild( "label" ) or plug.getChild( "name" )
def __fileNamePlug( self ) :
plug = self.getPlug()
# backwards compatibility with old plug layout
return plug.getChild( "fileName" ) or plug.getChild( "name" )
def __enter( self, widget ) :
self.__deleteButton.setVisible( True )
def __leave( self, widget ) :
self.__deleteButton.setVisible( False )
def __collapseButtonClicked( self, button ) :
visible = not self.__detailsColumn.getVisible()
self.__detailsColumn.setVisible( visible )
button.setImage( "collapsibleArrowDown.png" if visible else "collapsibleArrowRight.png" )
def __deleteButtonClicked( self, button ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
self.getPlug().parent().removeChild( self.getPlug() )
## \todo This regex is an interesting case to be considered during the string matching unification for #707. Once that
# is done, intuitively we want to use an "outputs.*" glob expression, but because the "*" will match anything
# at all, including ".", it will match the children of what we want too. We might want to prevent wildcards from
# matching "." when we come to use them in this context.
GafferUI.PlugValueWidget.registerCreator( GafferScene.Outputs, re.compile( "outputs\.[^\.]+$" ), _ChildPlugWidget )
| bsd-3-clause | -2,263,930,681,211,126,500 | 31.91954 | 140 | 0.670507 | false |
chewse/djangorestframework-signed-permissions | signedpermissions/permissions.py | 1 | 2401 | # -*- coding: utf-8 -*-
from django.core import signing
from rest_framework import permissions
from .signing import unsign_filters_and_actions
class SignedPermission(permissions.BasePermission):
"""
Allow access to a particular set of filters if the sign is valid.
This permission allows access to sets of items based on json encoded
filters. It takes these filters and applies to them to the proper queryset
use **kwargs expansion, or in the case of a create (POST), it checks the
POST data.
"""
def has_permission(self, request, view):
"""Check list and create permissions based on sign and filters."""
if view.suffix == 'Instance':
return True
filter_and_actions = self._get_filter_and_actions(
request.query_params.get('sign'),
view.action,
'{}.{}'.format(
view.queryset.model._meta.app_label,
view.queryset.model._meta.model_name
)
)
if not filter_and_actions:
return False
if request.method == 'POST':
for key, value in request.data.iteritems():
# Do unicode conversion because value will always be a
# string
if (key in filter_and_actions['filters'] and not
unicode(filter_and_actions['filters'][key]) == unicode(value)):
return False
return True
def has_object_permission(self, request, view, obj=None):
"""Check object permissions based on filters."""
filter_and_actions = self._get_filter_and_actions(
request.query_params.get('sign'),
view.action,
'{}.{}'.format(obj._meta.app_label, obj._meta.model_name))
if not filter_and_actions:
return False
qs = view.queryset.filter(**filter_and_actions['filters'])
return qs.filter(id=obj.id).exists()
@staticmethod
def _get_filter_and_actions(sign, action, dotted_model_name):
try:
filters_and_actions = unsign_filters_and_actions(
sign,
dotted_model_name
)
except signing.BadSignature:
return {}
for filtered_action in filters_and_actions:
if action in filtered_action['actions']:
return filtered_action
return {}
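# Illustrative sketch (not part of the original module): the unsigned payload
# is expected to be a list of dicts, each carrying the filter kwargs and the
# view actions they authorize. The field names below are made up for the
# example.
#
#   filters_and_actions = [
#       {
#           'filters': {'store_id': 42},      # applied via queryset.filter(**filters)
#           'actions': ['list', 'retrieve'],  # compared against view.action
#       },
#   ]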
| mit | -497,435,760,355,794,200 | 35.378788 | 87 | 0.588088 | false |
zmughal/xerox-parc-uplib-mirror | win32/stopStartUpLibServices.py | 1 | 3546 | #
# This file is part of the "UpLib 1.7.11" release.
# Copyright (C) 2003-2011 Palo Alto Research Center, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
import sys, os, string, traceback, time
import win32serviceutil
import win32service
import win32event
# Stop all uplib services
def stopUplibServices():
try:
manH = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
sList = win32service.EnumServicesStatus(manH,win32service.SERVICE_WIN32,win32service.SERVICE_ACTIVE)
for svc in sList:
name = svc[0]
if (name.lower().startswith("uplib")):
serveH = win32service.OpenService(manH,name,win32service.SERVICE_ALL_ACCESS)
if (win32service.QueryServiceStatus(serveH)[1] == win32service.SERVICE_RUNNING):
win32service.ControlService(serveH, win32service.SERVICE_CONTROL_STOP)
while (win32service.QueryServiceStatus(serveH)[1] != win32service.SERVICE_STOPPED):
time.sleep(5)
win32service.CloseServiceHandle(serveH)
win32service.CloseServiceHandle(manH)
except:
t, v, b = sys.exc_info()
sys.stderr.write("Problem Stopping UpLib Services. %s."% string.join(traceback.format_exception(t, v, b)))
# Start all uplib services
def startUplibServices():
try:
manH = win32service.OpenSCManager(None,None,win32service.SC_MANAGER_ALL_ACCESS)
sList = win32service.EnumServicesStatus(manH,win32service.SERVICE_WIN32,win32service.SERVICE_INACTIVE)
for svc in sList:
name = svc[0]
if (name.lower().startswith("uplib")):
serveH = win32service.OpenService(manH,name,win32service.SERVICE_ALL_ACCESS)
if (win32service.QueryServiceStatus(serveH)[1] == win32service.SERVICE_STOPPED and win32service.QueryServiceConfig(serveH)[1] == win32service.SERVICE_AUTO_START):
win32service.StartService(serveH, None)
win32service.CloseServiceHandle(serveH)
win32service.CloseServiceHandle(manH)
except:
t, v, b = sys.exc_info()
sys.stderr.write("Problem Starting UpLib Services. %s."% string.join(traceback.format_exception(t, v, b)))
if __name__ == "__main__":
usage = False
if (len(sys.argv) == 2):
if (sys.argv[1].lower() == "stop"):
stopUplibServices()
elif (sys.argv[1].lower() == "start"):
startUplibServices()
else:
usage = True
else:
usage = True
if (usage):
print "Usage: "+sys.argv[0]+" OPTION"
print "Where OPTION includes:"
print "stop - Stop All UpLib services"
print "start - Start All UpLib services"
| gpl-2.0 | -4,299,353,383,845,718,000 | 39.905882 | 178 | 0.64608 | false |
mozman/ezdxf | examples/tiled_window_setup.py | 1 | 2522 | # Purpose: tiled window model space setup for AutoCAD
# Copyright (c) 2018 Manfred Moitzi
# License: MIT License
import ezdxf
FILENAME = r'C:\Users\manfred\Desktop\Outbox\tiled_windows_R2000.dxf'
# FILENAME = 'tiled_windows_R2000.dxf'
def draw_raster(doc):
marker = doc.blocks.new(name='MARKER')
attribs = {'color': 2}
marker.add_line((-1, 0), (1, 0), dxfattribs=attribs)
marker.add_line((0, -1), (0, 1), dxfattribs=attribs)
marker.add_circle((0, 0), .4, dxfattribs=attribs)
marker.add_attdef('XPOS', (0.5, -1.0), dxfattribs={'height': 0.25, 'color': 4})
marker.add_attdef('YPOS', (0.5, -1.5), dxfattribs={'height': 0.25, 'color': 4})
modelspace = doc.modelspace()
for x in range(10):
for y in range(10):
xcoord = x * 10
ycoord = y * 10
values = {
'XPOS': f"x = {xcoord}",
'YPOS': f"y = {ycoord}",
}
modelspace.add_auto_blockref('MARKER', (xcoord, ycoord), values)
def setup_active_viewport(doc):
# delete '*Active' viewport configuration
doc.viewports.delete_config('*ACTIVE')
# the available display area in AutoCAD has the virtual lower-left corner (0, 0) and the virtual upper-right corner
# (1, 1)
# first viewport, uses the left half of the screen
viewport = doc.viewports.new('*ACTIVE')
viewport.dxf.lower_left = (0, 0)
viewport.dxf.upper_right = (.5, 1)
viewport.dxf.target = (0, 0, 0) # target point defines the origin of the DCS, this is the default value
viewport.dxf.center = (40, 30) # move this location (in DCS) to the center of the viewport
viewport.dxf.height = 15 # height of viewport in drawing units, this parameter works
viewport.dxf.aspect_ratio = 1.0 # aspect ratio of viewport (x/y)
# second viewport, uses the right half of the screen
viewport = doc.viewports.new('*ACTIVE')
viewport.dxf.lower_left = (.5, 0)
viewport.dxf.upper_right = (1, 1)
viewport.dxf.target = (60, 20, 0) # target point defines the origin of the DCS
viewport.dxf.center = (0, 0) # move this location (in DCS, model space = 60, 20) to the center of the viewport
viewport.dxf.height = 15 # height of viewport in drawing units, this parameter works
viewport.dxf.aspect_ratio = 2.0 # aspect ratio of viewport (x/y)
if __name__ == '__main__':
doc = ezdxf.new('R2000')
draw_raster(doc)
setup_active_viewport(doc)
doc.saveas(FILENAME)
print(f"DXF file '{FILENAME}' created.")
| mit | -3,377,911,553,956,720,600 | 39.031746 | 119 | 0.635607 | false |
xorpaul/check_mk | web/htdocs/html_mod_python.py | 1 | 13727 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from mod_python import Cookie, util, apache
import htmllib
import os, time, config, weblib, re
import defaults
import livestatus
class html_mod_python(htmllib.html):
def __init__(self, req, fields):
# All URIs end in .py. We strip away the .py and get the
# name of the page.
self.myfile = req.uri.split("/")[-1][:-3]
self.req = req
htmllib.html.__init__(self)
self.user = req.user
if fields:
self.fields = fields
else:
self.fields = util.FieldStorage(self.req, keep_blank_values = 1)
self.read_get_vars()
self.read_cookies()
# Install magic "live" object that connects to livestatus
# on-the-fly
def __getattr__(self, varname):
if varname not in [ "live", "site_status" ]:
raise AttributeError("html instance has no attribute '%s'" % varname)
connect_to_livestatus()
if varname == "live":
return self.live
else:
return self.site_status
def load_help_visible(self):
try:
self.help_visible = config.load_user_file("help", False) # cache for later usage
except:
pass
def is_ssl_request(self):
return self.req.headers_in.get('X-Forwarded-Proto') == 'https'
def set_cookie(self, varname, value, expires = None):
# httponly tells the browser not to make this cookie available to Javascript
c = Cookie.Cookie(varname, value, path='/', httponly=True)
if self.is_ssl_request():
c.secure = True
if expires is not None:
c.expires = expires
if not self.req.headers_out.has_key("Set-Cookie"):
self.req.headers_out.add("Cache-Control", 'no-cache="set-cookie"')
self.req.err_headers_out.add("Cache-Control", 'no-cache="set-cookie"')
self.req.headers_out.add("Set-Cookie", str(c))
self.req.err_headers_out.add("Set-Cookie", str(c))
def del_cookie(self, varname):
self.set_cookie(varname, '', time.time() - 60)
def read_cookies(self):
self.cookies = Cookie.get_cookies(self.req)
def read_get_vars(self):
self.parse_field_storage(self.fields)
def lowlevel_write(self, text):
if self.io_error:
return
try:
if self.buffering:
self.req.write(text, 0)
else:
self.req.write(text)
except IOError, e:
# Catch writing problems to client, prevent additional writes
self.io_error = True
self.log('%s' % e)
def get_button_counts(self):
return config.load_user_file("buttoncounts", {})
def top_heading(self, title):
if type(self.user) == str:
login_text = "<b>%s</b> (%s" % (config.user_id, "+".join(config.user_role_ids))
if self.enable_debug:
if config.get_language():
login_text += "/%s" % config.get_language()
login_text += ')'
else:
login_text = _("not logged in")
self.top_heading_left(title)
self.write('<td style="min-width:240px" class=right><span id=headinfo></span>%s ' % login_text)
if config.pagetitle_date_format:
self.write(' <b id=headerdate format="%s"></b>' % config.pagetitle_date_format)
self.write(' <b id=headertime></b>')
self.javascript('update_header_timer()')
self.top_heading_right()
def omd_mode(self):
# Load mod_python env into regular environment
for k, v in self.req.subprocess_env.items():
os.environ[k] = v
omd_mode = None
omd_site = None
if 'OMD_SITE' in os.environ:
omd_site = os.environ['OMD_SITE']
omd_mode = 'shared'
if omd_site == self.apache_user():
omd_mode = 'own'
return (omd_mode, omd_site)
def log(self, *args):
from lib import logger, LOG_NOTICE
for arg in args:
if type(arg) in (str, unicode):
text = arg
else:
text = repr(arg)
logger(LOG_NOTICE, text)
def http_redirect(self, url):
self.set_http_header('Location', url)
raise apache.SERVER_RETURN, apache.HTTP_MOVED_TEMPORARILY
# Needs to set both, headers_out and err_headers_out to be sure to send
# the header on all responses
def set_http_header(self, key, val):
self.req.headers_out.add(key, val)
self.req.err_headers_out.add(key, val)
def check_limit(self, rows, limit):
count = len(rows)
if limit != None and count >= limit + 1:
text = _("Your query produced more than %d results. ") % limit
if self.var("limit", "soft") == "soft" and config.may("general.ignore_soft_limit"):
text += '<a href="%s">%s</a>' % \
(self.makeuri([("limit", "hard")]), _('Repeat query and allow more results.'))
elif self.var("limit") == "hard" and config.may("general.ignore_hard_limit"):
text += '<a href="%s">%s</a>' % \
(self.makeuri([("limit", "none")]), _('Repeat query without limit.'))
self.show_warning(text)
del rows[limit:]
return False
return True
def load_transids(self, lock = False):
return config.load_user_file("transids", [], lock)
def save_transids(self, used_ids, unlock = False):
if config.user_id:
config.save_user_file("transids", used_ids, unlock)
def save_tree_states(self):
config.save_user_file("treestates", self.treestates)
def load_tree_states(self):
if self.id is not self.treestates_for_id:
self.treestates = config.load_user_file("treestates", {})
self.treestates_for_id = self.id
def add_custom_style_sheet(self):
for css in self.plugin_stylesheets():
self.write('<link rel="stylesheet" type="text/css" href="css/%s">\n' % css)
if config.custom_style_sheet:
self.write('<link rel="stylesheet" type="text/css" href="%s">\n' % config.custom_style_sheet)
def plugin_stylesheets(self):
global plugin_stylesheets
try:
return plugin_stylesheets
except:
plugins_paths = [ defaults.web_dir + "/htdocs/css" ]
if defaults.omd_root:
plugins_paths.append(defaults.omd_root + "/local/share/check_mk/web/htdocs/css")
plugin_stylesheets = set([])
for dir in plugins_paths:
if os.path.exists(dir):
for fn in os.listdir(dir):
if fn.endswith(".css"):
plugin_stylesheets.add(fn)
return plugin_stylesheets
# Build up a connection to livestatus.
# Note: this functions was previously in index.py. But now it
# moved to html_mod_python, since we do not want to connect to
# livestatus always but just when it is needed.
def connect_to_livestatus():
html.site_status = {}
# site_status keeps a dictionary for each site with the following
# keys:
# "state" --> "online", "disabled", "down", "unreach", "dead" or "waiting"
# "exception" --> An error exception in case of down, unreach, dead or waiting
# "status_host_state" --> host state of status host (0, 1, 2 or None)
# "livestatus_version" --> Version of sites livestatus if "online"
# "program_version" --> Version of Nagios if "online"
    # If there is only one site (non-multisite), then
    # the user cannot enable/disable sites.
if config.is_multisite():
# do not contact those sites the user has disabled.
# Also honor HTML-variables for switching off sites
# right now. This is generally done by the variable
# _site_switch=sitename1:on,sitename2:off,...
if config.may("sidesnap.sitestatus"):
switch_var = html.var("_site_switch")
if switch_var:
for info in switch_var.split(","):
sitename, onoff = info.split(":")
d = config.user_siteconf.get(sitename, {})
if onoff == "on":
d["disabled"] = False
else:
d["disabled"] = True
config.user_siteconf[sitename] = d
config.save_site_config()
# Make lists of enabled and disabled sites
enabled_sites = {}
disabled_sites = {}
for sitename, site in config.allsites().items():
siteconf = config.user_siteconf.get(sitename, {})
# Convert livestatus-proxy links into UNIX socket
s = site["socket"]
if type(s) == tuple and s[0] == "proxy":
site["socket"] = "unix:" + defaults.livestatus_unix_socket + "proxy/" + sitename
site["cache"] = s[1].get("cache", True)
else:
site["cache"] = False
if siteconf.get("disabled", False):
html.site_status[sitename] = { "state" : "disabled", "site" : site }
disabled_sites[sitename] = site
else:
html.site_status[sitename] = { "state" : "dead", "site" : site }
enabled_sites[sitename] = site
html.live = livestatus.MultiSiteConnection(enabled_sites, disabled_sites)
# Fetch status of sites by querying the version of Nagios and livestatus
# This may be cached by a proxy for up to the next configuration reload.
html.live.set_prepend_site(True)
for sitename, v1, v2, ps, num_hosts, num_services in html.live.query(
"GET status\n"
"Cache: reload\n"
"Columns: livestatus_version program_version program_start num_hosts num_services"):
html.site_status[sitename].update({
"state" : "online",
"livestatus_version": v1,
"program_version" : v2,
"program_start" : ps,
"num_hosts" : num_hosts,
"num_services" : num_services,
})
html.live.set_prepend_site(False)
# Get exceptions in case of dead sites
for sitename, deadinfo in html.live.dead_sites().items():
html.site_status[sitename]["exception"] = deadinfo["exception"]
shs = deadinfo.get("status_host_state")
html.site_status[sitename]["status_host_state"] = shs
if shs == None:
statename = "dead"
else:
statename = { 1:"down", 2:"unreach", 3:"waiting", }.get(shs, "unknown")
html.site_status[sitename]["state"] = statename
else:
html.live = livestatus.SingleSiteConnection("unix:" + defaults.livestatus_unix_socket)
html.live.set_timeout(3) # default timeout is 3 seconds
html.site_status = { '': { "state" : "dead", "site" : config.site('') } }
v1, v2, ps = html.live.query_row("GET status\nColumns: livestatus_version program_version program_start")
html.site_status[''].update({ "state" : "online", "livestatus_version": v1, "program_version" : v2, "program_start" : ps })
    # If Multisite is restricted to data the user is a nagios contact for,
# we need to set an AuthUser: header for livestatus
use_livestatus_auth = True
if html.output_format == 'html':
if config.may("general.see_all") and not config.user.get("force_authuser"):
use_livestatus_auth = False
else:
if config.may("general.see_all") and not config.user.get("force_authuser_webservice"):
use_livestatus_auth = False
if use_livestatus_auth == True:
html.live.set_auth_user('read', config.user_id)
html.live.set_auth_user('action', config.user_id)
# May the user see all objects in BI aggregations or only some?
if not config.may("bi.see_all"):
html.live.set_auth_user('bi', config.user_id)
# Default auth domain is read. Please set to None to switch off authorization
html.live.set_auth_domain('read')
| gpl-2.0 | -2,736,627,898,988,684,300 | 40.098802 | 131 | 0.554528 | false |
godiard/pathagar | books/management/commands/addbooks.py | 1 | 3644 | # Copyright (C) 2010, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from django.core.management.base import BaseCommand, CommandError
from django.core.files import File
from django.db.utils import IntegrityError
import sys
import os
import csv
import json
from optparse import make_option
from books.models import Book, Status
class Command(BaseCommand):
help = "Adds a book collection (via a CSV file)"
args = 'Absolute path to CSV file'
option_list = BaseCommand.option_list + (
make_option('--json',
action='store_true',
dest='is_json_format',
default=False,
help='The file is in JSON format'),
)
def _handle_csv(self, csvpath):
"""
Store books from a file in CSV format.
WARN: does not handle tags
"""
csvfile = open(csvpath)
dialect = csv.Sniffer().sniff(csvfile.read(1024))
csvfile.seek(0)
reader = csv.reader(csvfile, dialect)
#TODO: Figure out if this is a valid CSV file
for row in reader:
path = row[0]
title = row[1]
author = row[2]
summary = row[3]
f = open(path)
book = Book(book_file = File(f), a_title = title, a_author = author, a_summary = summary)
book.save()
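    # An illustrative CSV row for _handle_csv (columns inferred from the row
    # indices above; the path is hypothetical):
    #
    #   /srv/books/alice.epub,Alice in Wonderland,Lewis Carroll,A summary.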
def _handle_json(self, jsonpath):
"""
Store books from a file in JSON format.
"""
jsonfile = open(jsonpath)
data_list = json.loads(jsonfile.read())
for d in data_list:
# Get a Django File from the given path:
f = open(d['book_path'])
d['book_file'] = File(f)
del d['book_path']
if d.has_key('cover_path'):
f_cover = open(d['cover_path'])
d['cover_img'] = File(f_cover)
del d['cover_path']
if d.has_key('a_status'):
d['a_status'] = Status.objects.get(status = d['a_status'])
tags = d['tags']
del d['tags']
book = Book(**d)
try:
book.save() # must save item to generate Book.id before creating tags
[book.tags.add(tag) for tag in tags]
book.save() # save again after tags are generated
except IntegrityError as e:
if str(e) == "column file_sha256sum is not unique":
print "The book (", d['book_file'], ") was not saved because the file already exsists in the database."
else:
raise CommandError('Error adding file %s: %s' % (d['book_file'], sys.exc_info()[1]))
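    # An illustrative JSON input for _handle_json (keys inferred from the
    # code above; all paths and values are hypothetical):
    #
    #   [{"book_path": "/srv/books/alice.epub",
    #     "cover_path": "/srv/covers/alice.jpg",
    #     "a_title": "Alice in Wonderland",
    #     "a_status": "Published",
    #     "tags": ["fiction", "classic"]}]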
def handle(self, filepath='', *args, **options):
if not os.path.exists(filepath):
raise CommandError("%r is not a valid path" % filepath)
if options['is_json_format']:
self._handle_json(filepath)
else:
self._handle_csv(filepath)
| gpl-2.0 | -4,907,735,612,702,819,000 | 31.828829 | 123 | 0.585071 | false |
manastech/de-bee | index.py | 1 | 3273 | from google.appengine.ext import webapp
from google.appengine.api import users
from google.appengine.ext.webapp import template
from model import Membership
from util import membershipsOfUser
from util import descriptionOfBalanceInGroup
from util import descriptionOfTotalBalance
from comparators import compareMembershipsByGroupNick
from i18n import getDefaultLanguage
from i18n import getLanguage
from i18n import addMasterKeys
from i18n import _
import os
class IndexHandler(webapp.RequestHandler):
def get(self):
user = users.get_current_user()
if user:
lang = getLanguage(self, user)
userMemberships = membershipsOfUser(user)
userMemberships.sort(cmp = compareMembershipsByGroupNick)
hasUserMemberships = len(userMemberships) > 0
if hasUserMemberships:
group = userMemberships[0].group
else:
group = 0
debts = self.getDebts(user, userMemberships, lang)
message = self.request.get('msg')
hasMessage = len(message) > 0
model = {
'username': user.nickname(),
'signout_url': users.create_logout_url("/"),
'debts': debts,
'hasUserMemberships': hasUserMemberships,
'userMemberships': userMemberships,
'group': group,
'hasMessage': hasMessage,
'message': message,
# i18n
'DontBelong': _("You don't belong to any group. You can create your own and invite your friends.", lang),
'Name': _('Name', lang),
'YouOweNobody': _('You owe nobody, and nobody owes you. Hurray!', lang),
'GoToGroup': _('Go to group', lang),
'SelectGroup': _('select group', lang),
'CreateGroup': _('Create Group', lang),
}
addMasterKeys(model, lang)
path = os.path.join(os.path.dirname(__file__), 'dashboard.html')
self.response.out.write(template.render(path, model))
else:
lang = getDefaultLanguage(self)
model = {
'loginurl': users.create_login_url("/"),
# i18n
'introduction': _('introduction', lang),
}
addMasterKeys(model, lang)
path = os.path.join(os.path.dirname(__file__), 'introduction.html')
self.response.out.write(template.render(path, model))
def getDebts(self, user, memberships, lang):
total = 0
items = []
for m in memberships:
if abs(m.balance) <= 1e-07:
continue
link = '/group?group=%s' % m.group.key()
total += m.balance
items.append({
'isOweToSelf' : m.balance > 0.0,
'desc': descriptionOfBalanceInGroup(m, link, lang)
})
return {
'isZero': abs(total) <= 1e-07,
'isOweToSelf' : total > 0.0,
'items' : items,
'desc': descriptionOfTotalBalance(total, lang),
'hasMoreThanOneItem' : len(items) > 1,
} | mit | 6,012,039,414,046,494,000 | 31.418367 | 117 | 0.545066 | false |
steinwurf/bongo | bongo/settings.py | 1 | 3513 | #! /usr/bin/env python
# encoding: utf-8
"""
Django settings for bongo project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
import os
from config import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Application definition
INSTALLED_APPS = (
'django.contrib.messages',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'file_server',
'utils',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
)
ROOT_URLCONF = 'bongo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'bongo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Cache
# https://docs.djangoproject.com/en/1.3/ref/settings/#std:setting-CACHES
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
# 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
# 'LOCATION': '/var/tmp/bongo_cache',
}
}
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'CET'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = '/var/www/bongo/static/'
STATIC_URL = '/static/'
AUTHENTICATION_BACKENDS = (
'social.backends.github.GithubTeamOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
STATICFILES_DIRS = (
# Twitter Bootstrap stuff
os.path.join(BASE_DIR, "bootstrap/dist"),
os.path.join(BASE_DIR, "bootstrap/assets")
)
SOCIAL_AUTH_GITHUB_TEAM_SCOPE = ['read:org']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
SOCIAL_AUTH_LOGIN_URL = '/'
| bsd-3-clause | 921,367,275,572,019,300 | 27.330645 | 75 | 0.674637 | false |
ESOedX/edx-platform | lms/djangoapps/course_api/blocks/tests/test_api.py | 1 | 11065 | """
Tests for Blocks api.py
"""
from __future__ import absolute_import
from itertools import product
import ddt
import six
from django.test.client import RequestFactory
from mock import patch
from openedx.core.djangoapps.content.block_structure.api import clear_course_from_cache
from openedx.core.djangoapps.content.block_structure.config import STORAGE_BACKING_FOR_CACHE, waffle
from openedx.core.djangoapps.waffle_utils.testutils import override_waffle_flag
from student.tests.factories import UserFactory
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory, check_mongo_calls
from xmodule.modulestore.tests.sample_courses import BlockInfo
from ..api import get_blocks
from ..toggles import ENABLE_VIDEO_URL_REWRITE
class TestGetBlocks(SharedModuleStoreTestCase):
"""
Tests for the get_blocks function
"""
@classmethod
def setUpClass(cls):
super(TestGetBlocks, cls).setUpClass()
with cls.store.default_store(ModuleStoreEnum.Type.split):
cls.course = SampleCourseFactory.create()
# hide the html block
cls.html_block = cls.store.get_item(cls.course.id.make_usage_key('html', 'html_x1a_1'))
cls.html_block.visible_to_staff_only = True
cls.store.update_item(cls.html_block, ModuleStoreEnum.UserID.test)
def setUp(self):
super(TestGetBlocks, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def test_basic(self):
blocks = get_blocks(self.request, self.course.location, self.user)
self.assertEquals(blocks['root'], six.text_type(self.course.location))
# subtract for (1) the orphaned course About block and (2) the hidden Html block
self.assertEquals(len(blocks['blocks']), len(self.store.get_items(self.course.id)) - 2)
self.assertNotIn(six.text_type(self.html_block.location), blocks['blocks'])
def test_no_user(self):
blocks = get_blocks(self.request, self.course.location)
self.assertIn(six.text_type(self.html_block.location), blocks['blocks'])
def test_access_before_api_transformer_order(self):
"""
Tests the order of transformers: access checks are made before the api
transformer is applied.
"""
blocks = get_blocks(self.request, self.course.location, self.user, nav_depth=5, requested_fields=['nav_depth'])
vertical_block = self.store.get_item(self.course.id.make_usage_key('vertical', 'vertical_x1a'))
problem_block = self.store.get_item(self.course.id.make_usage_key('problem', 'problem_x1a_1'))
vertical_descendants = blocks['blocks'][six.text_type(vertical_block.location)]['descendants']
self.assertIn(six.text_type(problem_block.location), vertical_descendants)
self.assertNotIn(six.text_type(self.html_block.location), vertical_descendants)
def test_sub_structure(self):
sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
blocks = get_blocks(self.request, sequential_block.location, self.user)
self.assertEquals(blocks['root'], six.text_type(sequential_block.location))
self.assertEquals(len(blocks['blocks']), 5)
for block_type, block_name, is_inside_of_structure in (
('vertical', 'vertical_y1a', True),
('problem', 'problem_y1a_1', True),
('chapter', 'chapter_y', False),
('sequential', 'sequential_x1', False),
):
block = self.store.get_item(self.course.id.make_usage_key(block_type, block_name))
if is_inside_of_structure:
self.assertIn(six.text_type(block.location), blocks['blocks'])
else:
self.assertNotIn(six.text_type(block.location), blocks['blocks'])
def test_filtering_by_block_types(self):
sequential_block = self.store.get_item(self.course.id.make_usage_key('sequential', 'sequential_y1'))
# not filtered blocks
blocks = get_blocks(self.request, sequential_block.location, self.user, requested_fields=['type'])
self.assertEquals(len(blocks['blocks']), 5)
found_not_problem = False
for block in six.itervalues(blocks['blocks']):
if block['type'] != 'problem':
found_not_problem = True
self.assertTrue(found_not_problem)
# filtered blocks
blocks = get_blocks(self.request, sequential_block.location, self.user,
block_types_filter=['problem'], requested_fields=['type'])
self.assertEquals(len(blocks['blocks']), 3)
for block in six.itervalues(blocks['blocks']):
self.assertEqual(block['type'], 'problem')
# TODO: Remove this class after REVE-52 lands and old-mobile-app traffic falls to < 5% of mobile traffic
@ddt.ddt
class TestGetBlocksMobileHack(SharedModuleStoreTestCase):
"""
Tests that requests from the mobile app don't receive empty containers.
"""
@classmethod
def setUpClass(cls):
super(TestGetBlocksMobileHack, cls).setUpClass()
with cls.store.default_store(ModuleStoreEnum.Type.split):
cls.course = SampleCourseFactory.create(
block_info_tree=[
BlockInfo('empty_chapter', 'chapter', {}, [
BlockInfo('empty_sequential', 'sequential', {}, [
BlockInfo('empty_vertical', 'vertical', {}, []),
]),
]),
BlockInfo('full_chapter', 'chapter', {}, [
BlockInfo('full_sequential', 'sequential', {}, [
BlockInfo('full_vertical', 'vertical', {}, [
BlockInfo('html', 'html', {}, []),
BlockInfo('sample_video', 'video', {}, [])
]),
]),
])
]
)
def setUp(self):
super(TestGetBlocksMobileHack, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
@ddt.data(
*product([True, False], ['chapter', 'sequential', 'vertical'])
)
@ddt.unpack
def test_empty_containers(self, is_mobile, container_type):
with patch('lms.djangoapps.course_api.blocks.api.is_request_from_mobile_app', return_value=is_mobile):
blocks = get_blocks(self.request, self.course.location)
full_container_key = self.course.id.make_usage_key(container_type, 'full_{}'.format(container_type))
self.assertIn(str(full_container_key), blocks['blocks'])
empty_container_key = self.course.id.make_usage_key(container_type, 'empty_{}'.format(container_type))
assert_containment = self.assertNotIn if is_mobile else self.assertIn
assert_containment(str(empty_container_key), blocks['blocks'])
@patch('xmodule.video_module.VideoBlock.student_view_data')
@ddt.data(
True, False
)
def test_video_urls_rewrite(self, waffle_flag_value, video_data_patch):
"""
Verify the video blocks returned have their URL re-written for
encoded videos.
"""
video_data_patch.return_value = {
'encoded_videos': {
'hls': {
'url': 'https://xyz123.cloudfront.net/XYZ123ABC.mp4',
'file_size': 0
},
'mobile_low': {
'url': 'https://1234abcd.cloudfront.net/ABCD1234abcd.mp4',
'file_size': 0
}
}
}
with override_waffle_flag(ENABLE_VIDEO_URL_REWRITE, waffle_flag_value):
blocks = get_blocks(
self.request, self.course.location, requested_fields=['student_view_data'], student_view_data=['video']
)
video_block_key = str(self.course.id.make_usage_key('video', 'sample_video'))
video_block_data = blocks['blocks'][video_block_key]
for video_data in six.itervalues(video_block_data['student_view_data']['encoded_videos']):
if waffle_flag_value:
self.assertNotIn('cloudfront', video_data['url'])
else:
self.assertIn('cloudfront', video_data['url'])
@ddt.ddt
class TestGetBlocksQueryCountsBase(SharedModuleStoreTestCase):
"""
Base for the get_blocks tests.
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
super(TestGetBlocksQueryCountsBase, self).setUp()
self.user = UserFactory.create()
self.request = RequestFactory().get("/dummy")
self.request.user = self.user
def _create_course(self, store_type):
"""
Creates the sample course in the given store type.
"""
with self.store.default_store(store_type):
return SampleCourseFactory.create()
def _get_blocks(self, course, expected_mongo_queries, expected_sql_queries):
"""
Verifies the number of expected queries when calling
get_blocks on the given course.
"""
with check_mongo_calls(expected_mongo_queries):
with self.assertNumQueries(expected_sql_queries):
get_blocks(self.request, course.location, self.user)
@ddt.ddt
class TestGetBlocksQueryCounts(TestGetBlocksQueryCountsBase):
"""
Tests query counts for the get_blocks function.
"""
@ddt.data(
*product(
(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split),
(True, False),
)
)
@ddt.unpack
def test_query_counts_cached(self, store_type, with_storage_backing):
with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
course = self._create_course(store_type)
self._get_blocks(
course,
expected_mongo_queries=0,
expected_sql_queries=14 if with_storage_backing else 13,
)
@ddt.data(
*product(
((ModuleStoreEnum.Type.mongo, 5), (ModuleStoreEnum.Type.split, 3)),
(True, False),
)
)
@ddt.unpack
def test_query_counts_uncached(self, store_type_tuple, with_storage_backing):
store_type, expected_mongo_queries = store_type_tuple
with waffle().override(STORAGE_BACKING_FOR_CACHE, active=with_storage_backing):
course = self._create_course(store_type)
clear_course_from_cache(course.id)
if with_storage_backing:
num_sql_queries = 24
else:
num_sql_queries = 14
self._get_blocks(
course,
expected_mongo_queries,
expected_sql_queries=num_sql_queries,
)
| agpl-3.0 | 8,709,970,598,036,249,000 | 39.680147 | 119 | 0.615906 | false |
dedeco/cnddh-denuncias | cnddh/models.py | 1 | 25670 | # coding=latin-1
from database import db
from sqlalchemy.orm import relationship, backref, with_polymorphic
from sqlalchemy import Sequence, ForeignKey, UniqueConstraint
from cnddh.uploadsets import anexos_upload
import datetime
class Denuncia(db.Model):
__tablename__ = 'denuncias'
id = db.Column(db.Integer, Sequence('denuncias_id_seq'), primary_key=True)
numero = db.Column(db.Integer, unique=True, nullable=False)
dtcriacao = db.Column(db.DateTime, unique=False, nullable=False)
dtdenuncia = db.Column(db.DateTime, unique=False, nullable=False)
status_id = db.Column(db.Integer, ForeignKey('status.id'), nullable=False)
status = relationship("Status")
tipofonte_id = db.Column(db.Integer, ForeignKey('tipofontes.id'),nullable=False)
fonte = db.Column(db.String(240), unique=False, nullable=False)
protocolo = db.Column(db.Integer)
resumo = db.Column(db.String(1024), unique=False, nullable=False)
descricao = db.Column(db.String(8192), unique=False, nullable=False)
observacao = db.Column(db.String(8192), unique=False)
tipolocal = db.Column(db.String(240), unique=False, nullable=False)
endereco = db.Column(db.String(240), unique=False, nullable=False)
num = db.Column(db.String(60), unique=False)
complemento = db.Column(db.String(240), unique=False)
referencia = db.Column(db.String(240), unique=False)
bairro = db.Column(db.String(240), unique=False)
cidade = db.Column(db.String(60), unique=False, nullable=False)
cep = db.Column(db.String(60), unique=False, nullable=False)
estado = db.Column(db.String(2), unique=False, nullable=False)
pais = db.Column(db.String(60), unique=False, nullable=False)
vitimas = relationship("Vitima", backref="denuncia")
suspeitos = relationship("Suspeito", backref="denuncia")
violacoes = relationship("Violacao", backref="denuncia")
anexos = relationship("Anexo", backref="denuncia")
historico = relationship("Historico", backref="denuncia",order_by="asc(Historico.dtcriacao)")
def __init__(self, numero):
self.numero = numero
self.status_id = 1
self.dtcriacao = datetime.datetime.today()
class Vitima(db.Model):
__tablename__ = 'vitimas'
id = db.Column(db.Integer, Sequence('vitimas_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
tipovitima_id = db.Column(db.Integer, ForeignKey('tipovitimas.id'), nullable=False)
qtdevitimas = db.Column(db.Integer, unique=False, nullable=False, default=1)
nomenaoidentificado = db.Column(db.String(1), unique=False)
nome = db.Column(db.String(100), unique=False, nullable=False)
idade = db.Column(db.Integer, unique=False, nullable=False, default=0)
sexo = db.Column(db.String(20), unique=False, nullable=False)
cor = db.Column(db.String(20), unique=False, nullable=False)
violacoes = relationship("Violacao", backref="vitima")
tipovitima = relationship("TipoVitima")
def __init__(self, tipovitima_id):
self.tipovitima_id = tipovitima_id
class TipoVitima(db.Model):
__tablename__ = 'tipovitimas'
id = db.Column(db.Integer, Sequence('tipovitimas_id_seq'), primary_key=True)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, tipo, legenda):
self.tipo = tipo
self.legenda = legenda
class Suspeito(db.Model):
__tablename__ = 'suspeitos'
id = db.Column(db.Integer, Sequence('suspeitos_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
tiposuspeito_id =db.Column(db.Integer, ForeignKey('tiposuspeitos.id'), nullable=False)
qtdesuspeitos = db.Column(db.Integer, unique=False, nullable=False, default=1)
nomeinstituicao = db.Column(db.String(255), unique=False, nullable=False)
nomenaoidentificado = db.Column(db.String(1), unique=False)
nome = db.Column(db.String(255), unique=False, nullable=False)
idade = db.Column(db.Integer, unique=False, nullable=False, default=0)
sexo = db.Column(db.String(20), unique=False, nullable=False)
cor = db.Column(db.String(20), unique=False, nullable=False)
violacoes = relationship("Violacao", backref="suspeito")
tiposuspeito = relationship("TipoSuspeito")
def __init__(self, tiposuspeito_id):
self.tiposuspeito_id = tiposuspeito_id
class TipoSuspeito(db.Model):
__tablename__ = 'tiposuspeitos'
id = db.Column(db.Integer, Sequence('tiposuspeitos_id_seq'), primary_key=True)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
instituicao = db.Column(db.String(255), unique=False, nullable=False)
classificacao = db.Column(db.String(255), unique=False, nullable=False)
def __init__(self, tipo, legenda, instituicao, classificacao):
self.tipo = tipo
self.legenda = legenda
self.instituicao = instituicao
self.classificacao = classificacao
class TipoViolacao(db.Model):
__tablename__ = 'tipoviolacoes'
id = db.Column(db.Integer, Sequence('tipoviolacoes_id_seq'), primary_key=True)
macrocategoria = db.Column(db.String(255), unique=False, nullable=False)
microcategoria = db.Column(db.String(255), unique=False, nullable=False)
violacoes = relationship("Violacao", backref="tipoviolacao")
def __init__(self, macrocategoria, microcategoria):
self.macrocategoria = macrocategoria
self.microcategoria = microcategoria
class Violacao(db.Model):
__tablename__ = 'violacoes'
id = db.Column(db.Integer, Sequence('violacoes_id_seq'), primary_key=True)
tipoviolacoes_id = db.Column(db.Integer, ForeignKey('tipoviolacoes.id'), nullable=False)
tipo = db.Column(db.String(20))
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
vitima_id = db.Column(db.Integer, ForeignKey('vitimas.id'), nullable=False)
suspeito_id = db.Column(db.Integer, ForeignKey('suspeitos.id'), nullable=False)
__table_args__ = (UniqueConstraint('tipoviolacoes_id', 'denuncia_id', 'vitima_id', 'suspeito_id', name='uix_violacao'),)
def __init__(self, denuncia_id, tipoviolacoes_id, suspeito_id, vitima_id):
self.denuncia_id = denuncia_id
self.tipoviolacoes_id = tipoviolacoes_id
self.suspeito_id = suspeito_id
self.vitima_id = vitima_id
__mapper_args__ = {
'polymorphic_on':tipo,
'polymorphic_identity':'violacoes',
'with_polymorphic':'*'
}
class Homicidio(Violacao):
__tablename__ = 'homicidios'
id = db.Column(db.Integer, ForeignKey('violacoes.id'),primary_key=True)
rco = db.Column(db.String(100), unique=False)
bo = db.Column(db.String(100), unique=False)
ip = db.Column(db.String(100), unique=False)
reds = db.Column(db.String(100), unique=False)
dtfato = db.Column(db.DateTime, unique=False)
prfato = db.Column(db.String(20), unique=False)
situacao = db.Column(db.String(20), unique=False)
obs = db.Column(db.String(255), unique=False)
arquivo = db.Column(db.String(255), unique=False)
meiosutilizados = relationship("HomicidioMeioUtilizado",cascade="all,delete")
__mapper_args__ = {'polymorphic_identity':'homicidios'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
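# Example (editor's sketch, assumes an initialised session): because Violacao
# uses joined-table inheritance, a single query can load Homicidio subrows via
# with_polymorphic (imported at the top of this module):
#
#     todas = with_polymorphic(Violacao, [Homicidio])
#     # session.query(todas).filter(Violacao.denuncia_id == 1).all()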
class TipoMeioUtilizado(db.Model):
__tablename__ = 'tipomeioutilizados'
id = db.Column(db.Integer, Sequence('tipomeioutilizados_id_seq'), primary_key=True)
meio = db.Column(db.String(255), unique=False, nullable=False)
def __init__(self, meio):
self.meio = meio
class HomicidioMeioUtilizado(db.Model):
__tablename__ = 'homicidiomeioutilizado'
homicidio_id = db.Column(db.Integer, ForeignKey('homicidios.id'), primary_key=True)
tipomeioutilizado_id = db.Column(db.Integer, ForeignKey('tipomeioutilizados.id'), primary_key=True)
__table_args__ = (UniqueConstraint('homicidio_id', 'tipomeioutilizado_id', name='uix_meioutilizado'),)
def __init__(self, homicidio_id, tipomeioutilizado_id):
self.homicidio_id = homicidio_id
self.tipomeioutilizado_id = tipomeioutilizado_id
class TipoFonte(db.Model):
__tablename__ = 'tipofontes'
id = db.Column(db.Integer, Sequence('tipofontes_id_seq'), primary_key=True)
tipofonte = db.Column(db.String(255), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
exemplo = db.Column(db.String(255), unique=False)
def __init__(self, tipofonte, legenda, exemplo):
self.tipofonte = tipofonte
self.legenda = legenda
self.exemplo = exemplo
class Status(db.Model):
__tablename__ = 'status'
id = db.Column(db.Integer, Sequence('status_id_seq'), primary_key=True)
status = db.Column(db.String(255), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, status, legenda):
self.status = status
self.legenda = legenda
class Acao(db.Model):
__tablename__ = 'acoes'
id = db.Column(db.Integer, Sequence('acoes_id_seq'), primary_key=True)
acao = db.Column(db.String(255), unique=False, nullable=False,)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, acao, legenda):
self.acao = acao
self.legenda = legenda
class Historico(db.Model):
__tablename__ = 'historico'
id = db.Column(db.Integer, Sequence('historico_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
acao_id = db.Column(db.Integer, ForeignKey('acoes.id'), nullable=False)
dtcriacao = db.Column(db.DateTime, unique=False, nullable=False)
motivo = db.Column(db.String(1024), unique=False)
acao = relationship("Acao")
encaminhamento = relationship("Encaminhamento", backref="historico", lazy='joined',cascade="all,delete")
def __init__(self, denuncia_id):
self.denuncia_id = denuncia_id
self.dtcriacao = datetime.datetime.today()
class TipoEncaminhamento(db.Model):
__tablename__ = 'tipoencaminhamentos'
id = db.Column(db.Integer, Sequence('tipoencaminhamentos_id_seq'), primary_key=True)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, tipo):
self.tipo = tipo
class Orgao(db.Model):
__tablename__ = 'orgaos'
id = db.Column(db.Integer, Sequence('orgaos_id_seq'), primary_key=True)
orgao = db.Column(db.String(200), unique=False, nullable=False)
def __init__(self, orgao):
self.orgao = orgao
class Encaminhamento(db.Model):
__tablename__ = 'encaminhamentos'
id = db.Column(db.Integer, Sequence('encaminhamentos_id_seq'), primary_key=True)
historico_id = db.Column(db.Integer, ForeignKey('historico.id'), nullable=False)
orgao_id = db.Column(db.Integer, ForeignKey('orgaos.id'), nullable=False)
tipo_id = db.Column(db.Integer, ForeignKey('tipoencaminhamentos.id'), nullable=False)
dtenvio = db.Column(db.DateTime, unique=False, nullable=False)
dtlimite = db.Column(db.DateTime, unique=False)
dtretorno = db.Column(db.DateTime, unique=False)
dtcriacao = db.Column(db.DateTime, unique=False, nullable=False)
tipo = db.Column(db.String(20))
orgao = relationship("Orgao")
tipo_encaminhamento = relationship("TipoEncaminhamento")
retorno = relationship("Retorno",cascade="all,delete")
__mapper_args__ = {
'polymorphic_on':tipo,
'polymorphic_identity':'encaminhamentos',
'with_polymorphic':'*'
}
def __init__(self, historico_id):
self.historico_id = historico_id
        self.dtcriacao = datetime.datetime.today()
class Oficio(Encaminhamento):
__tablename__ = 'oficios'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
numero = db.Column(db.String(255), nullable=False)
assunto = db.Column(db.String(255), nullable=False)
obs = db.Column(db.String(255), nullable=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'oficios'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Telefonema(Encaminhamento):
__tablename__ = 'telefonemas'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
numero = db.Column(db.String(255), unique=False, nullable=False)
destinatario = db.Column(db.String(255), nullable=True, unique=False)
obs = db.Column(db.String(255), nullable=True, unique=False)
__mapper_args__ = {'polymorphic_identity':'telefonemas'}
class Reuniao(Encaminhamento):
__tablename__ = 'reunioes'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
pauta = db.Column(db.String(255), nullable=False, unique=False)
participantes = db.Column(db.String(4000), nullable=False, unique=False)
obs = db.Column(db.String(255), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'reunioes'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Email(Encaminhamento):
__tablename__ = 'emails'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
para = db.Column(db.String(255), nullable=False, unique=False)
de = db.Column(db.String(255), nullable=False, unique=False)
assunto = db.Column(db.String(255), nullable=False, unique=False)
texto = db.Column(db.String(4000), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'emails'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Generico(Encaminhamento):
__tablename__ = 'genericos'
id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'),primary_key=True)
obs = db.Column(db.String(255), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
__mapper_args__ = {'polymorphic_identity':'genericos'}
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Retorno(db.Model):
__tablename__ = 'retornos'
id = db.Column(db.Integer, Sequence('retornos_id_seq'), primary_key=True)
encaminhamento_id = db.Column(db.Integer, ForeignKey('encaminhamentos.id'), nullable=False)
descricao = db.Column(db.String(255), nullable=False, unique=False)
dtretorno = db.Column(db.Date, unique=False)
dtcriacao = db.Column(db.DateTime, unique=False)
tiporetorno_id = db.Column(db.Integer, ForeignKey('tiporetornos.id'), nullable=False)
tipo = db.Column(db.String(80))
tiporetorno = relationship("TipoRetorno")
arquivo = db.Column(db.String(255), unique=False)
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
__mapper_args__ = {
'polymorphic_on':tipo,
'polymorphic_identity':'retornos',
'with_polymorphic':'*'
}
def __init__(self, encaminhamento_id):
self.encaminhamento_id = encaminhamento_id
        self.dtcriacao = datetime.datetime.today()
class RetornoGenerico(Retorno):
__tablename__ = 'retornogenerico'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
observacao = db.Column(db.String(255), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornogenerico'}
class RetornoPessoasassistidas(Retorno):
__tablename__ = 'retornopessoasassistidas'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
tipoassistencia = db.Column(db.String(255), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornopessoasassistidas'}
class RetornoInquerito(Retorno):
__tablename__ = 'retornoinquerito'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
ip = db.Column(db.String(100), unique=False)
situacao = db.Column(db.String(20), unique=False)
motivo = db.Column(db.String(80), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornoinquerito'}
class RetornoProcesso(Retorno):
__tablename__ = 'retornoprocesso'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
np = db.Column(db.String(100), unique=False)
situacao = db.Column(db.String(20), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornoprocesso'}
class RetornoBO(Retorno):
__tablename__ = 'retornobo'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
bo = db.Column(db.String(100), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornobo'}
class RetornoRCO(Retorno):
__tablename__ = 'retornorco'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
rco = db.Column(db.String(100), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornorco'}
class RetornoREDS(Retorno):
__tablename__ = 'retornoreds'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
reds = db.Column(db.String(100), unique=False)
__mapper_args__ = {'polymorphic_identity':'retornoreds'}
class RetornoPoliticaPSR(Retorno):
__tablename__ = 'retornopoliticapsr'
id = db.Column(db.Integer, ForeignKey('retornos.id'),primary_key=True)
tipopolitica = db.Column(db.String(255), nullable=False, unique=False)
__mapper_args__ = {'polymorphic_identity':'retornopoliticapsr'}
class TipoRetorno(db.Model):
__tablename__ = 'tiporetornos'
id = db.Column(db.Integer, Sequence('tiporetorno_id_seq'), primary_key=True)
nome = db.Column(db.String(255), unique=False, nullable=False)
tipo = db.Column(db.String(100), unique=False, nullable=False)
legenda = db.Column(db.String(255), unique=False)
def __init__(self, nome, tipo, legenda):
self.nome = nome
self.tipo = tipo
self.legenda = legenda
class Anexo(db.Model):
__tablename__ = 'anexos'
id = db.Column(db.Integer, Sequence('anexos_id_seq'), primary_key=True)
denuncia_id = db.Column(db.Integer, ForeignKey('denuncias.id'), nullable=False)
descricaoanexo = db.Column(db.String(255), nullable=False, unique=False)
arquivo = db.Column(db.String(255), unique=False)
def __init__(self, denuncia_id):
self.denuncia_id = denuncia_id
def _get_url(self):
if self.arquivo:
return anexos_upload.url(self.arquivo)
else:
return None
url = property(_get_url)
class Cidade(db.Model):
__tablename__ = 'cidades'
id = db.Column(db.Integer, primary_key=True)
estado = db.Column(db.String(2), nullable=False)
cidade = db.Column(db.String(200), nullable=False)
def __init__(self, estado, cidade):
self.estado = estado
self.cidade = cidade
class TipoLocal(db.Model):
__tablename__ = 'tipolocais'
id = db.Column(db.Integer, Sequence('tipolocais_id_seq'), primary_key=True)
local = db.Column(db.String(100), unique=False, nullable=False)
def __init__(self, local):
self.local = local
class Usuario(db.Model):
__tablename__ = 'usuarios'
id = db.Column(db.Integer, Sequence('usuarios_id_seq'), primary_key=True)
login = db.Column(db.String(16), nullable=False, unique=True, index=True)
nome = db.Column(db.String(80), nullable=False, unique=False)
ddd = db.Column(db.String(2), nullable=False, unique=False)
telefone = db.Column(db.String(10), nullable=False, unique=False)
senhahash = db.Column(db.String(80), nullable=False, unique=False)
email = db.Column(db.String(200), nullable=False, unique=True, index=True)
dtregistro = db.Column(db.DateTime, nullable=False, unique=False)
dtultlogin = db.Column(db.DateTime, nullable=True, unique=False)
permissoes = relationship("PermissaoUsuario", backref="usuario")
perfis = relationship("PerfilUsuario", backref="usuario")
__table_args__ = (UniqueConstraint('login', 'email', name='uix_usuario'),)
def __init__(self, login, nome, ddd, telefone, senhahash, email):
self.login = login
self.nome = nome
self.ddd = ddd
self.telefone = telefone
self.senhahash = senhahash
self.email = email
self.dtregistro = datetime.datetime.today()
def checa_permissao(self, permissao):
        for p in self.permissoes:
            if permissao == p.permissao.nome:
                return True
for pf in self.perfis:
for pp in pf.perfil.permissoesperfis:
if permissao == pp.permissao.nome:
return True
return False
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
return '<Usuário %r>' % (self.login)
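# Example (editor's sketch, hypothetical data): checa_permissao() first looks
# for a permission granted directly to the user, then for one inherited
# through any of the user's perfis:
#
#     usuario = Usuario('jdoe', 'John Doe', '31', '33334444', senha, email)
#     if usuario.checa_permissao('editar_denuncia'):
#         pass  # grant access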
class Permissao(db.Model):
__tablename__ = 'permissoes'
id = db.Column(db.Integer, Sequence('permissoes_id_seq'), primary_key=True)
nome = db.Column(db.String(80), nullable=False, unique=True)
descricao = db.Column(db.String(255), nullable=False, unique=False)
permissoesusuario = relationship("PermissaoUsuario", backref="permissao")
permissoesperfis = relationship("PermissaoPerfil", backref="permissao")
class PermissaoUsuario(db.Model):
__tablename__ = 'permissoesusuarios'
id = db.Column(db.Integer, Sequence('permissoesusuarios_id_seq'), primary_key=True)
usuario_id = db.Column(db.Integer, ForeignKey('usuarios.id'), nullable=False)
permissao_id = db.Column(db.Integer, ForeignKey('permissoes.id'), nullable=False)
tipo = db.Column(db.Integer)
__table_args__ = (UniqueConstraint('usuario_id', 'permissao_id', name='uix_permmissao_usuario'),)
class Perfil(db.Model):
__tablename__ = 'perfis'
id = db.Column(db.Integer, Sequence('perfis_id_seq'), primary_key=True)
nome = db.Column(db.String(80), nullable=False, unique=True)
descricao = db.Column(db.String(255), nullable=False, unique=False)
permissoesperfis = relationship("PermissaoPerfil", backref="perfil")
perfisusuarios = relationship("PerfilUsuario", backref="perfil")
class PermissaoPerfil(db.Model):
__tablename__ = 'permissoesperfis'
id = db.Column(db.Integer, Sequence('permissoesperfis_id_seq'), primary_key=True)
permissao_id = db.Column(db.Integer, ForeignKey('permissoes.id'), nullable=False)
perfil_id = db.Column(db.Integer, ForeignKey('perfis.id'), nullable=False)
tipo = db.Column(db.Integer)
__table_args__ = (UniqueConstraint('permissao_id', 'perfil_id', name='uix_permissaoperfil'),)
class PerfilUsuario(db.Model):
__tablename__ = 'perfisusuarios'
id = db.Column(db.Integer, Sequence('permissoesusuarios_id_seq'), primary_key=True)
perfil_id = db.Column(db.Integer, ForeignKey('perfis.id'), unique=False)
usuario_id = db.Column(db.Integer, ForeignKey('usuarios.id'), unique=False)
tipo = db.Column(db.Integer)
__table_args__ = (UniqueConstraint('perfil_id', 'usuario_id', name='uix_perfisusuario'),) | apache-2.0 | -2,305,448,814,111,855,600 | 38.36478 | 124 | 0.628321 | false |
yanikou19/pymatgen | pymatgen/io/abinitio/eos.py | 1 | 10838 | # coding: utf-8
"""Tools to compute equations of states with different models."""
from __future__ import unicode_literals, division, print_function
import collections
import numpy as np
import pymatgen.core.units as units
from pymatgen.core.units import FloatWithUnit
import logging
logger = logging.getLogger(__file__)
__all__ = [
"EOS",
]
def quadratic(V, a, b, c):
"""Quadratic fit"""
return a*V**2 + b*V + c
def murnaghan(V, E0, B0, B1, V0):
"""From PRB 28,5480 (1983)"""
E = E0 + B0*V/B1*(((V0/V)**B1)/(B1-1)+1) - V0*B0/(B1-1)
return E
def birch(V, E0, B0, B1, V0):
"""
From Intermetallic compounds: Principles and Practice, Vol. I: Principles
Chapter 9 pages 195-210 by M. Mehl. B. Klein, D. Papaconstantopoulos paper downloaded from Web
case where n=0
"""
E = (E0
+ 9.0/8.0*B0*V0*((V0/V)**(2.0/3.0) - 1.0)**2
+ 9.0/16.0*B0*V0*(B1-4.)*((V0/V)**(2.0/3.0) - 1.0)**3)
return E
def birch_murnaghan(V, E0, B0, B1, V0):
"""BirchMurnaghan equation from PRB 70, 224107"""
eta = (V/V0)**(1./3.)
E = E0 + 9.*B0*V0/16.*(eta**2-1)**2*(6 + B1*(eta**2-1.) - 4.*eta**2)
return E
def pourier_tarantola(V, E0, B0, B1, V0):
"""Pourier-Tarantola equation from PRB 70, 224107"""
eta = (V/V0)**(1./3.)
squiggle = -3.*np.log(eta)
E = E0 + B0*V0*squiggle**2/6.*(3. + squiggle*(B1 - 2))
return E
def vinet(V, E0, B0, B1, V0):
    """Vinet equation from PRB 70, 224107"""
eta = (V/V0)**(1./3.)
E = (E0 + 2.*B0*V0/(B1-1.)**2
* (2. - (5. +3.*B1*(eta-1.)-3.*eta)*np.exp(-3.*(B1-1.)*(eta-1.)/2.)))
return E
def deltafactor_polyfit(volumes, energies):
"""
This is the routine used to compute V0, B0, B1 in the deltafactor code.
Taken from deltafactor/eosfit.py
"""
fitdata = np.polyfit(volumes**(-2./3.), energies, 3, full=True)
ssr = fitdata[1]
sst = np.sum((energies - np.average(energies))**2.)
residuals0 = ssr/sst
deriv0 = np.poly1d(fitdata[0])
deriv1 = np.polyder(deriv0, 1)
deriv2 = np.polyder(deriv1, 1)
deriv3 = np.polyder(deriv2, 1)
v0 = 0
x = 0
for x in np.roots(deriv1):
if x > 0 and deriv2(x) > 0:
v0 = x**(-3./2.)
break
else:
raise EOSError("No minimum could be found")
derivV2 = 4./9. * x**5. * deriv2(x)
derivV3 = (-20./9. * x**(13./2.) * deriv2(x) - 8./27. * x**(15./2.) * deriv3(x))
b0 = derivV2 / x**(3./2.)
b1 = -1 - x**(-3./2.) * derivV3 / derivV2
#print('deltafactor polyfit:')
#print('e0, b0, b1, v0')
#print(fitdata[0], b0, b1, v0)
n = collections.namedtuple("DeltaFitResults", "v0 b0 b1 poly1d")
return n(v0, b0, b1, fitdata[0])
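# Example (editor's sketch): a quick self-check of the fitting machinery on
# synthetic Murnaghan data; the parameter values are illustrative only.
#
#     volumes = np.linspace(55.0, 70.0, 15)
#     energies = murnaghan(volumes, E0=-10.0, B0=0.5, B1=4.0, V0=62.0)
#     fit = EOS.Murnaghan().fit(volumes, energies)
#     print(fit.v0, fit.b0_GPa)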
class EOSError(Exception):
"""Exceptions raised by EOS."""
class EOS(object):
"""
Fit equation of state for bulk systems.
    The following models are supported::
murnaghan
PRB 28, 5480 (1983)
birch
Intermetallic compounds: Principles and Practice, Vol I: Principles. pages 195-210
birchmurnaghan
PRB 70, 224107
pouriertarantola
PRB 70, 224107
vinet
PRB 70, 224107
Use::
eos = EOS(eos_name='murnaghan')
fit = eos.fit(volumes, energies)
print(fit)
fit.plot()
"""
Error = EOSError
#: Models available.
MODELS = {
"quadratic": quadratic,
"murnaghan": murnaghan,
"birch": birch,
"birch_murnaghan": birch_murnaghan,
"pourier_tarantola": pourier_tarantola,
"vinet": vinet,
"deltafactor": deltafactor_polyfit,
}
def __init__(self, eos_name='murnaghan'):
self._eos_name = eos_name
self._func = self.MODELS[eos_name]
@staticmethod
def Quadratic():
return EOS(eos_name="quadratic")
@staticmethod
def Murnaghan():
return EOS(eos_name='murnaghan')
@staticmethod
def Birch():
return EOS(eos_name='birch')
@staticmethod
def Birch_Murnaghan():
return EOS(eos_name='birch_murnaghan')
@staticmethod
def Pourier_Tarantola():
return EOS(eos_name='pourier_tarantola')
@staticmethod
def Vinet():
return EOS(eos_name='vinet')
@staticmethod
def DeltaFactor():
return EOS(eos_name='deltafactor')
def fit(self, volumes, energies, vol_unit="ang^3", ene_unit="eV"):
"""
Fit energies [eV] as function of volumes [Angstrom**3].
        Returns an `EOS_Fit` instance that gives access to the optimal volume,
        the minimum energy, and the bulk modulus.
        Notice that the unit for the bulk modulus is eV/Angstrom^3.
"""
# Convert volumes to Ang**3 and energies to eV (if needed).
volumes = units.ArrayWithUnit(volumes, vol_unit).to("ang^3")
energies = units.EnergyArray(energies, ene_unit).to("eV")
return EOS_Fit(volumes, energies, self._func, self._eos_name)
class EOS_Fit(object):
"""Performs the fit of E(V) and provides method to access the results of the fit."""
def __init__(self, volumes, energies, func, eos_name):
"""
args:
            volumes: list of volumes in Angstrom^3
            energies: list of energies in eV
            func: callable fit function
            eos_name: name of the EOS model
"""
self.volumes = np.array(volumes)
self.energies = np.array(energies)
assert len(self.volumes) == len(self.energies)
self.func = func
self.eos_name = eos_name
self.exceptions = []
self.ierr = 0
if eos_name == "deltafactor":
try:
results = deltafactor_polyfit(self.volumes, self.energies)
self.e0 = None
self.v0 = results.v0
self.b0 = results.b0
self.b1 = results.b1
self.p0 = results.poly1d
self.eos_params = results.poly1d
except EOSError as exc:
self.ierr = 1
logger.critical(str(exc))
self.exceptions.append(exc)
raise
elif eos_name == "quadratic":
# Quadratic fit
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.v0 = v0 = -b/(2*a)
self.e0 = a*v0**2 + b*v0 + c
self.b0 = 2*a*v0
self.b1 = np.inf
self.p0 = [a, b, c]
self.eos_params = [a, b, c]
vmin, vmax = self.volumes.min(), self.volumes.max()
            if not (vmin < v0 < vmax):
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes.')
logger.critical(str(exc))
self.exceptions.append(exc)
else:
# Objective function that will be minimized
def objective(pars, x, y):
return y - self.func(x, *pars)
# Quadratic fit to get an initial guess for the parameters
a, b, c = np.polyfit(self.volumes, self.energies, 2)
v0 = -b/(2*a)
e0 = a*v0**2 + b*v0 + c
b0 = 2*a*v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = self.volumes.min(), self.volumes.max()
            if not (vmin < v0 < vmax):
                exc = EOSError('The minimum volume of a fitted parabola is not in the input volumes.')
logger.critical(str(exc))
self.exceptions.append(exc)
# Initial guesses for the parameters
self.p0 = [e0, b0, b1, v0]
from scipy.optimize import leastsq
self.eos_params, self.ierr = leastsq(objective, self.p0, args=(self.volumes, self.energies))
if self.ierr not in [1, 2, 3, 4]:
exc = EOSError("Optimal parameters not found")
logger.critical(str(exc))
self.exceptions.append(exc)
raise exc
self.e0 = self.eos_params[0]
self.b0 = self.eos_params[1]
self.b1 = self.eos_params[2]
self.v0 = self.eos_params[3]
print('EOS_fit:', func)
print('e0, b0, b1, v0')
print(self.eos_params)
def __str__(self):
lines = []
app = lines.append
app("Equation of State: %s" % self.name)
app("Minimum volume = %1.2f Ang^3" % self.v0)
app("Bulk modulus = %1.2f eV/Ang^3 = %1.2f GPa, b1 = %1.2f" % (self.b0, self.b0_GPa, self.b1))
return "\n".join(lines)
@property
def name(self):
return self.func.__name__
@property
def b0_GPa(self):
return FloatWithUnit(self.b0, "eV ang^-3").to("GPa")
def plot(self, ax=None, **kwargs):
"""
Uses Matplotlib to plot the energy curve.
Args:
ax:
Axis object. If ax is None, a new figure is produced.
show:
True to show the figure
savefig:
'abc.png' or 'abc.eps' to save the figure to a file.
Returns:
Matplotlib figure.
"""
import matplotlib.pyplot as plt
vmin, vmax = self.volumes.min(), self.volumes.max()
emin, emax = self.energies.min(), self.energies.max()
vmin, vmax = (vmin - 0.01 * abs(vmin), vmax + 0.01 * abs(vmax))
emin, emax = (emin - 0.01 * abs(emin), emax + 0.01 * abs(emax))
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
else:
fig = plt.gcf()
lines, legends = [], []
# Plot input data.
line, = ax.plot(self.volumes, self.energies, "ro")
lines.append(line)
legends.append("Input Data")
# Plot EOS.
vfit = np.linspace(vmin, vmax, 100)
if self.eos_name == "deltafactor":
xx = vfit**(-2./3.)
line, = ax.plot(vfit, np.polyval(self.eos_params, xx), "b-")
else:
line, = ax.plot(vfit, self.func(vfit, *self.eos_params), "b-")
lines.append(line)
legends.append(self.name + ' fit')
# Set xticks and labels.
ax.grid(True)
ax.set_xlabel("Volume $\AA^3$")
ax.set_ylabel("Energy (eV)")
ax.legend(lines, legends, loc='upper right', shadow=True)
# Add text with fit parameters.
text = []; app = text.append
app("Min Volume = %1.2f $\AA^3$" % self.v0)
app("Bulk modulus = %1.2f eV/$\AA^3$ = %1.2f GPa" % (self.b0, self.b0_GPa))
app("B1 = %1.2f" % self.b1)
fig.text(0.4, 0.5, "\n".join(text), transform=ax.transAxes)
if kwargs.pop("show", True):
plt.show()
savefig = kwargs.pop("savefig", None)
if savefig is not None:
fig.savefig(savefig)
return fig
| mit | 272,119,726,745,626,140 | 27.150649 | 104 | 0.537738 | false |
salspaugh/queryutils | queryutils/csvparser.py | 1 | 6137 | import csv
import dateutil.parser
import os
import splparser.parser
from user import *
from query import *
from logging import getLogger as get_logger
from os import path
from splparser.exceptions import SPLSyntaxError, TerminatingSPLSyntaxError
BYTES_IN_MB = 1048576
LIMIT = 2000*BYTES_IN_MB
logger = get_logger("queryutils")
def get_users_from_file(filename, users):
"""Populate the users dictionary with users and their queris from the given file.
:param filename: The .csv file containing user queries
:type filename: str
:param users: The user dict into which to place the users
:type users: dict
:rtype: None
"""
logger.debug("Reading from file:" + filename)
first = True
with open(filename) as datafile:
reader = csv.DictReader(datafile)
for row in reader:
logger.debug("Attempting to read row.")
# Get basic user information.
username = row.get('user', None)
if username is not None:
username = unicode(username.decode("utf-8"))
case = row.get('case_id', None)
if case is not None:
case = unicode(case.decode("utf-8"))
# Check if we've seen this user before.
user = None
userhash = None
if username is not None and case is not None:
userhash = ".".join([username, case])
user = users.get(userhash, None)
elif username is not None and case is None:
userhash = username
user = users.get(userhash, None)
else:
userhash = ""
user = users.get(userhash, None)
if user is None:
user = User(username)
users[userhash] = user
user.case_id = case
# Get basic query information.
timestamp = row.get('_time', None)
if timestamp is not None:
timestamp = float(dateutil.parser.parse(timestamp).strftime('%s.%f'))
querystring = row.get('search', None)
if querystring is not None:
querystring = unicode(querystring.decode("utf-8")).strip()
# Tie the query and the user together.
query = Query(querystring, timestamp)
user.queries.append(query)
query.user = user
# Get additional query information and add it to the query.
runtime = row.get('runtime', None)
if runtime is None:
runtime = row.get('total_run_time', None)
if runtime is not None:
try:
runtime = float(runtime.decode("utf-8"))
except:
runtime = None
query.execution_time = runtime
search_et = row.get('search_et', None)
if search_et is not None:
try:
search_et = float(search_et.decode("utf-8"))
except:
search_et = None
query.earliest_event = search_et
search_lt = row.get('search_lt', None)
if search_lt is not None:
try:
search_lt = float(search_lt.decode("utf-8"))
except:
search_lt = None
query.latest_event = search_lt
range = row.get('range', None)
if range is not None:
try:
range = float(range.decode("utf-8"))
except:
range = None
query.range = range
is_realtime = row.get('is_realtime', None)
if is_realtime is not None and is_realtime == "false":
is_realtime = False
if is_realtime is not None and is_realtime == "true":
is_realtime = True
query.is_realtime = is_realtime
searchtype = row.get('searchtype', None)
if searchtype is None:
searchtype = row.get('search_type', None)
if searchtype is not None:
searchtype = unicode(searchtype.decode("utf-8"))
query.search_type = searchtype
if query.search_type == "adhoc":
query.is_interactive = True
splunk_id = row.get('search_id', None)
if splunk_id is not None:
splunk_id = unicode(splunk_id.decode("utf-8"))
query.splunk_search_id = splunk_id
savedsearch_name = row.get('savedsearch_name', None)
if savedsearch_name is not None:
savedsearch_name = unicode(savedsearch_name.decode("utf-8"))
query.saved_search_name = savedsearch_name
logger.debug("Successfully read query.")
def get_users_from_directory(directory, users, limit=LIMIT):
"""Populate the users dict with users from the .csv files.
:param directory: The path to the directory containing the .csv files
:type directory: str
:param users: The dict to contain the users read from the .csv files
:type users: dict
:param limit: The approximate number of bytes to read in (for testing)
:type limit: int
:rtype: None
"""
raw_data_files = get_csv_files(directory, limit=limit)
for f in raw_data_files:
get_users_from_file(f, users)
def get_csv_files(dir, limit=LIMIT):
"""Return the paths to all the .csv files in the given directory.
:param dir: The path to the given directory
:type dir: str
:param limit: The approximate number of bytes to read in (for testing)
:type limit: int
:rtype: list
"""
csv_files = []
bytes_added = 0.
for (dirpath, dirnames, filenames) in os.walk(dir):
for filename in filenames:
if filename[-4:] == '.csv':
full_filename = path.join(path.abspath(dir), filename)
csv_files.append(full_filename)
bytes_added += path.getsize(full_filename)
if bytes_added > limit:
return csv_files
return csv_files
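# Example (editor's sketch, hypothetical path): typical end-to-end use of this
# module is to collect the .csv logs under a directory and build the users
# dict in one call:
#
#     users = {}
#     get_users_from_directory("/data/splunk_logs", users)
#     print("loaded %d users" % len(users))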
| bsd-3-clause | -6,412,215,798,137,213,000 | 34.473988 | 85 | 0.556135 | false |
Lilykos/inspire-next | setup.py | 1 | 2682 | # -*- coding: utf-8 -*-
#
## This file is part of INSPIRE.
## Copyright (C) 2012, 2013 CERN.
##
## INSPIRE is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
"""
INSPIRE overlay
----------------
INSPIRE overlay repository for Invenio.
"""
import os
from setuptools import setup, find_packages
packages = find_packages(exclude=['docs'])
# Load __version__, should not be done using import.
# http://python-packaging-user-guide.readthedocs.org/en/latest/tutorial.html
g = {}
with open(os.path.join('inspire', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='Inspire',
version=version,
url='https://github.com/inspirehep/inspire-next',
license='GPLv2',
author='CERN',
author_email='[email protected]',
description=__doc__,
long_description=open('README.rst', 'rt').read(),
packages=packages,
namespace_packages=["inspire", "inspire.ext", ],
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
"rt",
"HarvestingKit>=0.3",
"mixer==4.9.5",
"requests==2.3",
"raven==5.0.0",
"orcid",
"retrying"
],
extras_require={
'development': [
'Flask-DebugToolbar>=0.9',
'ipython',
'ipdb',
'kwalitee'
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GPLv2 License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
entry_points={
'invenio.config': [
"inspire = inspire.config"
]
},
test_suite='inspire.testsuite',
tests_require=[
'nose',
'Flask-Testing'
]
)
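# Example (editor's note): with the "development" extras declared above, a
# checkout can be installed in editable mode with:
#
#     pip install -e .[development]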
| gpl-2.0 | 5,499,976,328,954,376,000 | 27.83871 | 78 | 0.62267 | false |
mimischi/django-clock | config/settings/local.py | 1 | 2439 | # -*- coding: utf-8 -*-
import socket
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool("DJANGO_DEBUG", default=True)
TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="CHANGEME!!!^e8je^d8+us-s9!j3ks@h2h1(*^kr$-jocui3wam6%i=+^mti9",
)
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
# Database
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql_psycopg2",
"NAME": "db_app",
"USER": "db_user" if not env("TRAVIS_CI", default=False) else "postgres",
"PASSWORD": "db_pass",
"HOST": "db" if env("PYTHONBUFFERED", default=False) else "localhost",
"PORT": 5432,
}
}
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.locmem.LocMemCache",
"LOCATION": "",
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ("debug_toolbar.middleware.DebugToolbarMiddleware",)
INSTALLED_APPS += ("debug_toolbar",)
INTERNAL_IPS = ["127.0.0.1", "192.168.99.100", "192.168.99.101"]
# Fix django-debug-toolbar when running Django in a Docker container
if env("INSIDE_DOCKER", default=False):
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
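    # Worked example (editor's note): if the container's hostname resolves to
    # 172.18.0.3, the line above whitelists 172.18.0.1 -- the bridge gateway
    # address that requests from the host machine appear to come from.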
DEBUG_TOOLBAR_CONFIG = {
"DISABLE_PANELS": ["debug_toolbar.panels.redirects.RedirectsPanel"],
"SHOW_TEMPLATE_CONTEXT": True,
"JQUERY_URL": "",
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("django_extensions", "rosetta")
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# Your local stuff: Below this line define 3rd party library settings
ALLOWED_HOSTS = ["*"]
| mit | 5,362,180,584,308,313,000 | 31.092105 | 84 | 0.507995 | false |
keithellis74/Picon-Zero | piconzero/piconzero.py | 1 | 8896 | #!/usr/bin/python3
# piconzero.py is a library to interact with the 4Tronix Picon Zero
# motor controller - https://4tronix.co.uk/blog/?p=1224
# Copyright (C) 2017 Keith Ellis
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Python library for 4tronix Picon Zero
# https://4tronix.co.uk/store/index.php?rt=product/product&product_id=552
# This code is based on the samplw code provided by 4Tronix but it has been
# built into a number of classes.
# Currently there is a base class 'Motor_Controller', this handles all the
# I2C communication
# Two other classes are 'Motor' and 'Robot', both inherrit from 'Motor_Controller'
# to control either a single motor or a pair of motors in the form of a robot
# Other functions of the board, such as inputs and outputs are still to be
# completed
import smbus
import time
class Motor_Controller(object):
def __init__(self, addr = None, debug = None):
''' base class which handles the communications with the
4Tronix Picon Zero motor controller
The motor controller is hard coded to I2C address 0x22, but
add allows this to be changed if needed.
debug should be set to True to get more debuggin info, if None
debug info will be limited
'''
if addr == None:
self.addr = 0x22
else:
self.addr = addr
self.retries = 4
self.bus = smbus.SMBus(1)
self.motorA = 0
self.motorB = 1
if debug == None:
self.debug = False
else:
self.debug = debug
self.reset = 20
self.reset_board()
def reset_board(self):
''' call to reset the board, altomatically called upon
initilisation
'''
self.send_command(self.reset, 0)
    def cleanup(self):
        self.reset_board()
def read_command(self, command):
''' Method to read info from the Picon Zero board
'''
for i in range(self.retries):
try:
                rval = self.bus.read_word_data(self.addr, command)
                return [rval // 256, rval % 256]
except:
if self.debug:
print("Error in reading command")
def send_command(self, command, value):
''' Method to send commands and values to the Picon Zero board
'''
value = int(value)
for i in range(self.retries):
try:
self.bus.write_byte_data(self.addr, command, value)
break
except:
if self.debug:
print("Error in sending command")
raise
def get_revision(self):
''' Method to return the revision information from the
Picon Zero board
'''
        return self.read_command(0)
class Motor(Motor_Controller):
''' Super class of Motor_Controller to control motors, two motors
can be driven with the Picon Zero board so it is possible to have
two instances of this class.
addr is the I2C address of the Picon Zero motor controller
debug if set to True enables debug messages
motor is either 0 or 1, depending upon which motor you want
to drive
'''
def __init__(self, motor, addr= None, debug = None):
if addr == None:
self.addr = 0x22
else:
self.addr = addr
if debug == None:
self.debug = False
else:
self.debug = debug
self.motor = motor
super(Motor, self).__init__(self.addr, self.debug)
self.speed = 0
self.stop()
def scale_speed(self, speed):
        ''' Scales the speed from the 1 to -1 range used here to the
        127 to -127 range used by the Picon Zero board.
Used internally, should not need to be called externally.
'''
return speed * 127
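    # Worked example (editor's note): forward(0.5) passes 0.5 here, giving
    # 0.5 * 127 = 63.5; send_command() then truncates it to 63 via int(),
    # i.e. roughly 50% duty on the board's 7-bit speed scale.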
def forward(self, speed):
''' Drive motor forward
Speed range is 0 for stop
through to 1 for full speed forwards.
Speed is a float, so intermediate speeds can be fractions
of 1, for example, 50% speed would be 0.5
'''
if speed > 1:
self.speed = 1
elif speed < 0:
self.speed = 0
else:
self.speed = speed
self.send_command(self.motor, self.scale_speed(self.speed))
def reverse(self, speed):
''' Drive motor backwards
Speed range is 0 for stop
through to 1 for full speed reverse.
Speed is a float, so intermediate speeds can be fractions
of 1, for example, 50% speed would be 0.5
'''
speed *= -1
if speed < -1:
self.speed = -1
elif speed > 0:
self.speed = 0
else:
self.speed = speed
self.send_command(self.motor, self.scale_speed(self.speed))
def set_motor(self, speed):
''' Speed range is 1 through to -1
Allows motors to be set anywhere from full speed
forwards to full speed reverse with a single
command, this is good if using an analogue stick to control
motor speed
'''
if speed > 1:
self.speed = 1
elif speed < -1:
self.speed = -1
else:
self.speed = speed
self.send_command(self.motor, self.scale_speed(self.speed))
def stop(self):
''' Stops motor
'''
self.speed = 0
self.send_command(self.motor, 0)
def get_speed(self):
''' Read the current speed back
'''
return self.speed
class Robot(Motor_Controller):
'''
Class representing a two wheel drive robot or a 4 wheel
drive skid/steer robot. It expands on the Motor_Controller class
and assmes motor 0 is the left motor and motor 1 is the
right motor
'''
def __init__(self, addr = None, debug = False):
if addr == None:
self.addr = 0x22
else:
self.addr = addr
self.debug = debug
super(Robot, self).__init__(self.addr, self.debug)
        self.left_motor = Motor(self.motorA, self.addr, self.debug)
        self.right_motor = Motor(self.motorB, self.addr, self.debug)
self.stop()
def forward(self, speed):
self.left_motor.forward(speed)
self.right_motor.forward(speed)
def reverse(self, speed):
self.left_motor.reverse(speed)
self.right_motor.reverse(speed)
def stop(self):
self.left_motor.stop()
self.right_motor.stop()
def spin_left(self, speed):
self.left_motor.reverse(speed)
self.right_motor.forward(speed)
def spin_right(self, speed):
self.left_motor.forward(speed)
self.right_motor.reverse(speed)
def turn_left(self, speed):
self.left_motor.stop()
self.right_motor.forward(speed)
    def turn_right(self, speed):
self.left_motor.forward(speed)
self.right_motor.stop()
def set_motors(self, left_speed, right_speed):
self.left_motor.set_motor(left_speed)
self.right_motor.set_motor(right_speed)
def get_speed(self):
return[self.left_motor.speed, self.right_motor.speed]
if __name__ == '__main__':
import sys
import time
print("Tesing picon Zero motor conotrller")
print("Test options are: ")
print("1 - test Motor A")
print("2 - test Motor B")
print("3 - test Robot, i.e. both motors")
response = input("Select option, x = Exit - ")
def test_motor(motor):
#Create mote instance
print("Testing motor {0}".format(motor))
print("Creating motor instance")
        motor = Motor(motor=motor, debug=True)
print("Motor speed is {0}".format(motor.get_speed()))
time.sleep(5)
print("1/2 speed forward")
motor.forward(0.5)
print("Motor speed is {0}".format(motor.get_speed()))
time.sleep(5)
print("Full speed forward")
motor.forward(1)
print("Motor speed is {0}".format(motor.get_speed()))
time.sleep(5)
print("Motor stop")
motor.stop()
print("Motor speed is {0}".format(motor.get_speed()))
time.sleep(5)
print("1/2 speed reverse")
motor.reverse(0.5)
print("Motor speed is {0}".format(motor.get_speed()))
time.sleep(5)
print("Full speed reverse")
motor.reverse(1)
print("Motor speed is {0}".format(motor.get_speed()))
time.sleep(5)
print("Motors stoped")
motor.stop()
def motorA():
test_motor(0)
def motorB():
test_motor(1)
def robot():
robot = Robot(debug = True)
print("Robot speed = {0}".format(robot.get_speed()))
robot.stop()
time.sleep(5)
print("1/2 speed forward")
robot.forward(0.5)
print("Robot speed is {0}".format(robot.get_speed()))
time.sleep(5)
print("Full speed forward")
robot.forward(1)
print("Robot speed is {0}".format(robot.get_speed()))
time.sleep(5)
print("Robot Stop")
robot.stop()
print("Robot speed is {0}".format(robot.get_speed()))
time.sleep(5)
print("1/2 speed reverse")
robot.reverse(0.5)
print("Robot speed is {0}".format(robot.get_speed()))
time.sleep(5)
print("Full speed reverse")
robot.reverse(1)
print("Robot speed is {0}".format(robot.get_speed()))
time.sleep(5)
print("Robot stop")
robot.stop()
print("Robot speed is {0}".format(robot.get_speed()))
while True:
if response == "1":
motorA()
break
elif response == "2":
motorB()
break
elif response == "3":
robot()
break
        elif response.lower() == "x":
            sys.exit()
        else:
            response = input("Select option, x = Exit - ")
| gpl-3.0 | 4,123,783,833,552,161,300 | 24.129944 | 82 | 0.679631 | false |
libyal/libexe | tests/pyexe_test_support.py | 1 | 3236 | #!/usr/bin/env python
#
# Python-bindings support functions test script
#
# Copyright (C) 2011-2021, Joachim Metz <[email protected]>
#
# Refer to AUTHORS for acknowledgements.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import argparse
import os
import sys
import unittest
import pyexe
class SupportFunctionsTests(unittest.TestCase):
"""Tests the support functions."""
def test_get_version(self):
"""Tests the get_version function."""
version = pyexe.get_version()
self.assertIsNotNone(version)
def test_check_file_signature(self):
"""Tests the check_file_signature function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
result = pyexe.check_file_signature(test_source)
self.assertTrue(result)
def test_check_file_signature_file_object(self):
"""Tests the check_file_signature_file_object function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
with open(test_source, "rb") as file_object:
result = pyexe.check_file_signature_file_object(file_object)
self.assertTrue(result)
def test_open(self):
"""Tests the open function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
exe_file = pyexe.open(test_source)
self.assertIsNotNone(exe_file)
exe_file.close()
with self.assertRaises(TypeError):
pyexe.open(None)
with self.assertRaises(ValueError):
pyexe.open(test_source, mode="w")
def test_open_file_object(self):
"""Tests the open_file_object function."""
test_source = unittest.source
if not test_source:
raise unittest.SkipTest("missing source")
if not os.path.isfile(test_source):
raise unittest.SkipTest("source not a regular file")
with open(test_source, "rb") as file_object:
exe_file = pyexe.open_file_object(file_object)
self.assertIsNotNone(exe_file)
exe_file.close()
with self.assertRaises(TypeError):
pyexe.open_file_object(None)
with self.assertRaises(ValueError):
pyexe.open_file_object(file_object, mode="w")
if __name__ == "__main__":
argument_parser = argparse.ArgumentParser()
argument_parser.add_argument(
"source", nargs="?", action="store", metavar="PATH",
default=None, help="path of the source file.")
options, unknown_options = argument_parser.parse_known_args()
unknown_options.insert(0, sys.argv[0])
setattr(unittest, "source", options.source)
unittest.main(argv=unknown_options, verbosity=2)
| lgpl-3.0 | -7,019,984,351,821,116,000 | 28.962963 | 77 | 0.704883 | false |
relic7/prodimages | python/drafts/walk_scraps/walkdir_exiv2.py | 1 | 2162 | #!/usr/bin/env python
import os,sys
import PIL
def recursive_dirlist(rootdir):
walkedlist = []
for dirname, dirnames, filenames in os.walk(rootdir):
# print path to all subdirectories first.
#for subdirname in dirnames:
#print os.path.join(dirname, subdirname)
# print path to all filenames.
for filename in filenames:
file_path = os.path.abspath(os.path.join(dirname, filename))
if os.path.isfile(file_path):
walkedlist.append(file_path)
# Advanced usage:
# editing the 'dirnames' list will stop os.walk() from recursing into there.
if '.git' in dirnames:
# don't go into any .git directories.
dirnames.remove('.git')
walkedset = list(set(sorted(walkedlist)))
return walkedset
def get_exif(filepath):
ret = {}
i = Image.open(filepath)
info = i._getexif()
for tag, value in info.items():
decoded = TAGS.get(tag, tag)
ret[decoded] = value
return ret
#######
from PIL import Image
import pyexiv2
#Exif.Photo.DateTimeOriginal
#for k,v in mdata.iteritems():
# print k,v
rootdir = sys.argv[1]
for line in walkedout:
file_path = line
filename = file_path.split('/')[-1]
colorstyle = filename.split('_')[0]
alt = file_path.split('_')[-1]
alt = alt.strip('.jpg')
photodate = pyexiv2.ImageMetadata(file_path)['DateTimeOriginal']
print "{0},{1},{2},{3}".format(colorstyle,photodate,file_path,alt)
def resize_image(source_path, dest_path, size):
from PIL import *
import pyexiv2
# resize image
image = Image.open(source_path)
image.thumbnail(size, Image.ANTIALIAS)
image.save(dest_path, "JPEG")
# copy EXIF data
source_image = pyexiv2.Image(source_path)
source_image.readMetadata()
dest_image = pyexiv2.Image(dest_path)
dest_image.readMetadata()
source_image.copyMetadataTo(dest_image)
# set EXIF image size info to resized size
dest_image["Exif.Photo.PixelXDimension"] = image.size[0]
dest_image["Exif.Photo.PixelYDimension"] = image.size[1]
dest_image.writeMetadata() | mit | 3,377,195,295,547,329,000 | 26.730769 | 84 | 0.637835 | false |
esvhd/pypbo | tests/test_metrics.py | 1 | 4864 | import pytest
import numpy as np
import pandas as pd
import pypbo as pbo
import pypbo.perf as perf
# TODO test scripts
# e.g.: pytest -s test_metrics.py
# -s swtich to allow printing to stdio
def test_log_returns():
'''
Test log return logic. Asserts that cumulative value is the same as
generated data.
'''
# generate data
np.random.seed(7)
tests = pd.Series([1 + np.random.rand() for _ in range(10)])
log_rtns = perf.log_returns(tests, fillna=True)
print(log_rtns)
reconstruct = tests.values[0] * np.exp(log_rtns.cumsum())
print(tests)
print(reconstruct)
assert(np.allclose(tests - reconstruct, 0.))
def test_log_returns_na():
test_data = pd.DataFrame([[1, 2, 3],
[1.2, 2.2, 3.2],
[1.1, np.nan, 2.4],
[1, 2.42, 3.4]])
print(test_data)
log_rtns = perf.log_returns(test_data, n=1, fillna=False)
expects_true = log_rtns.isnull().iloc[2, 1]
print(f'test value = {expects_true}')
assert(expects_true)
print(log_rtns)
expected_val = np.log(2.42) - np.log(2.2)
print(f'expected value = {expected_val}')
assert(np.isclose(log_rtns.iloc[3, 1],
expected_val))
def test_pct_to_log_return():
np.random.seed(7)
tests = pd.Series([1 + np.random.rand() for _ in range(100)])
pct_rtns = tests.pct_change().fillna(0)
log_rtns = perf.pct_to_log_return(pct_rtns)
recon1 = (1 + pct_rtns).cumprod()
recon2 = np.exp(log_rtns.cumsum())
assert(np.allclose(recon1, recon2))
def test_sharpe_iid():
data = np.array([0.259,
.198,
.364,
-.081,
.057,
.055,
.188,
.317,
.24,
.184,
-.01,
.526])
# numpy array
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=True)
assert(np.isclose(sharpe, .834364))
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=False)
assert(np.isclose(sharpe, .834364))
# below is for computing sharpe ratio with pct returns
# assert(np.isclose(sharpe, 0.8189144744629443))
# turn data to pandas.Series
data = pd.Series(data)
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=True)
assert(np.isclose(sharpe, .834364))
sharpe = perf.sharpe_iid(data, bench=.05, factor=252, log=True)
assert(np.isclose(sharpe, .834364 * np.sqrt(252)))
sharpe = perf.sharpe_iid(data, bench=.05, factor=1, log=False)
assert(np.isclose(sharpe, .834364))
# below is for computing sharpe ratio with pct returns
# assert(np.isclose(sharpe, 0.8189144744629443))
def test_sortino_iid():
'''
Test both `sortino_iid` and `sortino`.
'''
data = np.array([.17,
.15,
.23,
-.05,
.12,
.09,
.13,
-.04])
ratio = perf.sortino_iid(data, bench=0, factor=1, log=True)
print(ratio)
assert(np.isclose(ratio, 4.417261))
ratio = perf.sortino(data, target_rtn=0, factor=1, log=True)
assert(np.isclose(ratio, 4.417261))
data = pd.DataFrame(data)
ratio = perf.sortino_iid(data, bench=0, factor=1, log=True)
print(ratio)
assert(np.isclose(ratio, 4.417261))
ratio = perf.sortino_iid(data, bench=0, factor=252, log=True)
print(ratio)
assert(np.isclose(ratio, 4.417261 * np.sqrt(252)))
ratio = perf.sortino(data, target_rtn=0, factor=1, log=True)
assert(np.isclose(ratio, 4.417261))
def test_omega():
'''
Based on numerical example found here:
http://investexcel.net/calculate-the-omega-ratio-with-excel/
'''
data = np.array([.0089,
.0012,
-.002,
.01,
-.0002,
.02,
.03,
.01,
-.003,
.01,
.0102,
-.01])
mar = .01
omega = perf.omega(data, target_rtn=mar, log=True)
assert(np.isclose(omega, .463901689))
# DataFrame version.
df = pd.DataFrame(data)
omega = perf.omega(df, target_rtn=mar, log=True)
assert(np.isclose(omega, .463901689))
def test_annualized_log_return():
log_rtn = 0.51470826725926955
test_val = perf.annualized_log_return(log_rtn, days=827, ann_factor=365.)
assert(np.isclose(test_val, 0.22716870320390978))
def test_annualized_pct_return():
tr = 1.673150317863489
test_val = perf.annualized_pct_return(tr, days=827, ann_factor=365.)
assert(np.isclose(test_val, 0.25504157961707952))
| agpl-3.0 | -6,384,668,815,233,295,000 | 25.434783 | 77 | 0.546258 | false |
netgroup/svef | computepsnr.py | 1 | 1285 | #!/usr/bin/env python
# take a psnr file and compute the average psnr on a specified range
import sys
if len(sys.argv) < 4:
print >> sys.stderr, """
Usage:
%s <beginning frame> <ending frame> <psnr file1> [<psnr file 2>]
""" % (sys.argv[0])
sys.exit(1)
beginningframe = int(sys.argv[1])
endingframe = int(sys.argv[2])
psnrfilename = sys.argv[5]
try:
psnrfilename2 = sys.argv[6]
except IndexError:
psnrfilename2 = None
class PsnrEntry:
frameno = -1
value = 0.0
def psnrFile2List(filename):
psnrs = []
psnrfile = open(filename)
try:
for line in psnrfile:
words = line.split()
p = PsnrEntry()
p.frameno = int(words[0])
p.value = float(words[1].replace(",","."))
psnrs.append(p)
except IndexError:
pass
psnrfile.close()
return psnrs
totpsnr = 0.0
psnrs = psnrFile2List(psnrfilename)
pvalues = [p.value for p in psnrs if beginningframe <= p.frameno < endingframe]
psnr1 = sum(pvalues)/len(pvalues)
print "PSNR 1: %f" % psnr1
totpsnr += psnr1
if psnrfilename2 != None:
psnrs2 = psnrFile2List(psnrfilename2)
pvalues = [p.value for p in psnrs2 if beginningframe <= p.frameno < endingframe]
psnr2 = sum(pvalues)/len(pvalues)
print "PSNR 2: %f" % psnr2
totpsnr += psnr2
print "Total PSNR: %f" % totpsnr
| gpl-3.0 | -3,360,558,190,853,625,000 | 21.946429 | 83 | 0.661479 | false |
BhallaLab/moose-thalamocortical | pymoose/tests/randnum/kstest.py | 1 | 2706 | #!/usr/bin/env python
#
# This is a simple implementation of KS-test.
from math import *
from numpy import *
# Values taken from Knuth, TAOCP II: 3.3.1, Table 2
test_table = {1: [0.01000, 0.0500, 0.2500, 0.5000, 0.7500, 0.9500, 0.9900],
2: [0.01400, 0.06749, 0.2929, 0.5176, 0.7071, 1.0980, 1.2728],
5: [0.02152, 0.09471, 0.3249, 0.5242, 0.7674, 1.1392, 1.4024],
10: [0.02912, 0.1147, 0.3297, 0.5426, 0.7845, 1.1658, 1.444],
20: [0.03807, 0.1298, 0.3461, 0.5547, 0.7975, 1.1839, 1.4698],
30: [0.04354, 0.1351, 0.3509, 0.5605, 0.8036, 1.1916, 1.4801]}
p_list = [1.0, 5.0, 25.0, 50.0, 75.0, 95.0, 99.0] # percentage points the table entries correspond to
def ks_distribution(xx, nn):
"""Calculate P(Knn+ <= xx). See Knuth TAOCP Vol II for details."""
if nn < 30:
print "!! Larger sample size is recommended."
return (1 - exp(-2.0*xx*xx)*(1-2.0*xx/(3.0*sqrt(1.0 * nn))))
def ks_test(rand_num_list, distr_fn):
"""Execute a ks test on the given list of random numbers and tests if they have the distribution defined by distr_fn.
parameters:
rand_num_list - list containing the random sequence to be tested.
distr_fn - a function that calculates the distribution function for this sequence. TODO: allow another sample list to check if they are from same distribution.
Note that according to theory, KS test requires that the distribution be continuous"""
result = True
nn = len(rand_num_list)
inp_list = array(rand_num_list)
inp_list.sort()
distr_list = map(distr_fn, inp_list)
sample_distr = arange(nn+1) * 1.0/nn
k_plus = sqrt(nn) * max(sample_distr[1:] - distr_list)
k_minus = sqrt(nn) * max(distr_list - sample_distr[:nn])
p_k_plus = ks_distribution(k_plus, nn)
if p_k_plus < 0.05 or p_k_plus > 0.95:
print "ERROR: outside 5%-95% range. The P( K", nn, "+ <=", k_plus, ") is", p_k_plus
result = False
p_k_minus = ks_distribution(k_minus, nn)
if p_k_minus < 0.05 or p_k_minus > 0.95:
print "ERROR: outside 5%-95% range. The P( K", nn, "- <=", k_minus, ") is", p_k_minus
result = False
return result
def test_ks_distribution():
for key in test_table.keys():
values = test_table[key]
for ii in range(len(p_list)):
print "... Testing n =", key,
value = ks_distribution(values[ii], key)
print ", expected =", p_list[ii]/100.0, ", calculated =", value
if (fabs( value - p_list[ii]/100.0) <= 0.005):
print "... OK"
else:
print "FAILED"
if __name__ == "__main__":
test_ks_distribution()
| lgpl-2.1 | 144,061,998,017,744,800 | 41.28125 | 160 | 0.585366 | false |
michaelaye/pyciss | pyciss/solitons.py | 1 | 1916 | from datetime import datetime as dt
import pandas as pd
import pkg_resources as pr
from astropy import units as u
from numpy import poly1d
from . import io
from .ringcube import RingCube
def get_year_since_resonance(ringcube):
"Calculate the fraction of the year since moon swap."
t0 = dt(2006, 1, 21)
td = ringcube.imagetime - t0
return td.days / 365.25
def create_polynoms():
"""Create and return poly1d objects.
Uses the parameters from Morgan to create poly1d objects for
calculations.
"""
fname = pr.resource_filename('pyciss', 'data/soliton_prediction_parameters.csv')
res_df = pd.read_csv(fname)
polys = {}
for resorder, row in zip('65 54 43 21'.split(),
range(4)):
p = poly1d([res_df.loc[row, 'Slope (km/yr)'], res_df.loc[row, 'Intercept (km)']])
polys['janus ' + ':'.join(resorder)] = p
return polys
def check_for_soliton(img_id):
"""Workhorse function.
Creates the polynom.
Calculates radius constraints from attributes in `ringcube` object.
Parameters
----------
ringcube : pyciss.ringcube.RingCube
A containter class for a ring-projected ISS image file.
Returns
-------
dict
Dictionary with all solitons found. Reason why it is a dict is
that it could be more than one in one image.
"""
pm = io.PathManager(img_id)
try:
ringcube = RingCube(pm.cubepath)
except FileNotFoundError:
ringcube = RingCube(pm.undestriped)
polys = create_polynoms()
minrad = ringcube.minrad.to(u.km)
maxrad = ringcube.maxrad.to(u.km)
delta_years = get_year_since_resonance(ringcube)
soliton_radii = {}
for k, p in polys.items():
current_r = p(delta_years) * u.km
if minrad < current_r < maxrad:
soliton_radii[k] = current_r
return soliton_radii if soliton_radii else None
| isc | -6,622,842,903,339,667,000 | 28.030303 | 89 | 0.640919 | false |
euanlau/django-umessages | umessages/fields.py | 1 | 2111 | from django import forms
from django.forms import widgets
from django.utils.translation import ugettext_lazy as _
from umessages.utils import get_user_model
class CommaSeparatedUserInput(widgets.Input):
input_type = 'text'
def render(self, name, value, attrs=None):
if value is None:
value = ''
elif isinstance(value, (list, tuple)):
value = (', '.join([user.username for user in value]))
return super(CommaSeparatedUserInput, self).render(name, value, attrs)
class CommaSeparatedUserField(forms.Field):
"""
A :class:`CharField` that exists of comma separated usernames.
:param recipient_filter:
Optional function which receives as :class:`User` as parameter. The
function should return ``True`` if the user is allowed or ``False`` if
the user is not allowed.
:return:
A list of :class:`User`.
"""
widget = CommaSeparatedUserInput
def __init__(self, *args, **kwargs):
recipient_filter = kwargs.pop('recipient_filter', None)
self._recipient_filter = recipient_filter
super(CommaSeparatedUserField, self).__init__(*args, **kwargs)
def clean(self, value):
super(CommaSeparatedUserField, self).clean(value)
names = set(value.split(','))
names_set = set([name.strip() for name in names])
users = list(get_user_model().objects.filter(username__in=names_set))
# Check for unknown names.
unknown_names = names_set ^ set([user.username for user in users])
recipient_filter = self._recipient_filter
invalid_users = []
if recipient_filter is not None:
for r in users:
if recipient_filter(r) is False:
users.remove(r)
invalid_users.append(r.username)
if unknown_names or invalid_users:
humanized_usernames = ', '.join(list(unknown_names) + invalid_users)
raise forms.ValidationError(_("The following usernames are incorrect: %(users)s.") % {'users': humanized_usernames})
return users
| bsd-3-clause | -4,658,207,798,081,575,000 | 34.779661 | 128 | 0.632875 | false |
liavkoren/djangoDev | django/db/models/base.py | 1 | 60943 | from __future__ import unicode_literals
import copy
import sys
from functools import update_wrapper
import warnings
from django.apps import apps
from django.apps.config import MODELS_MODULE_NAME
from django.conf import settings
from django.core import checks
from django.core.exceptions import (ObjectDoesNotExist,
MultipleObjectsReturned, FieldError, ValidationError, NON_FIELD_ERRORS)
from django.db import (router, transaction, DatabaseError,
DEFAULT_DB_ALIAS)
from django.db.models.deletion import Collector
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.related import (ForeignObjectRel, ManyToOneRel,
OneToOneField, add_lazy_relation)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import DeferredAttribute, deferred_class_factory
from django.db.models import signals
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import get_text_list, capfirst
from django.utils.translation import ugettext_lazy as _
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
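# A minimal sketch of what subclass_exception() produces (the model name
# "Article" is hypothetical, for illustration only):
#
#     try:
#         Article.objects.get(pk=0)
#     except Article.DoesNotExist:
#         pass  # picklable subclass of ObjectDoesNotExist, attached by ModelBase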
class ModelBase(type):
"""
Metaclass for all models.
"""
def __new__(cls, name, bases, attrs):
super_new = super(ModelBase, cls).__new__
# six.with_metaclass() inserts an extra class called 'NewBase' in the
# inheritance tree: Model -> NewBase -> object. But the initialization
# should be executed only once for a given model class.
# attrs will never be empty for classes declared in the standard way
# (ie. with the `class` keyword). This is quite robust.
if name == 'NewBase' and attrs == {}:
return super_new(cls, name, bases, attrs)
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase) and
not (b.__name__ == 'NewBase' and b.__mro__ == (b, object))]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = super_new(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
# If the model is imported before the configuration for its
# application is created (#21719), or isn't in an installed
# application (#21680), use the legacy logic to figure out the
# app_label by looking one level up from the package or module
# named 'models'. If no such package or module exists, fall
# back to looking one level up from the module this model is
# defined in.
# For 'django.contrib.sites.models', this would be 'sites'.
# For 'geo.models.places' this would be 'geo'.
msg = (
"Model class %s.%s doesn't declare an explicit app_label "
"and either isn't in an application in INSTALLED_APPS or "
"else was imported before its application was loaded. " %
(module, name))
if abstract:
msg += "Its app_label will be set to None in Django 1.9."
else:
msg += "This will no longer be supported in Django 1.9."
warnings.warn(msg, RemovedInDjango19Warning, stacklevel=2)
model_module = sys.modules[new_class.__module__]
package_components = model_module.__name__.split('.')
package_components.reverse() # find the last occurrence of 'models'
try:
app_label_index = package_components.index(MODELS_MODULE_NAME) + 1
except ValueError:
app_label_index = 1
kwargs = {"app_label": package_components[app_label_index]}
else:
kwargs = {"app_label": app_config.label}
else:
kwargs = {}
new_class.add_to_class('_meta', Options(meta, **kwargs))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
str('DoesNotExist'),
tuple(x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
str('MultipleObjectsReturned'),
tuple(x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
if getattr(new_class, '_default_manager', None):
if not is_proxy:
# Multi-table inheritance doesn't inherit default manager from
# parents.
new_class._default_manager = None
new_class._base_manager = None
else:
# Proxy classes do inherit parent's default manager, if none is
# set explicitly.
new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = (
new_class._meta.local_fields +
new_class._meta.local_many_to_many +
new_class._meta.virtual_fields
)
field_names = set(f.name for f in new_fields)
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError("Abstract base class containing model fields not permitted for proxy model '%s'." % name)
else:
continue
if base is not None:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
else:
base = parent
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
if (new_class._meta.local_fields or
new_class._meta.local_many_to_many):
raise FieldError("Proxy model '%s' contains model fields." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
parent_links[field.rel.to] = field
# Do the appropriate setup for any model parents.
for base in parents:
original_base = base
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
# Check for clashes between locally declared fields and those
# on the base classes (we cannot handle shadowed fields at the
# moment).
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'base class %r' % (field.name, name, base.__name__)
)
if not base._meta.abstract:
# Concrete classes...
base = base._meta.concrete_model
if base in parent_links:
field = parent_links[base]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
# .. and abstract ones.
for field in parent_fields:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base._meta.parents)
# Inherit managers from the abstract base classes.
new_class.copy_managers(base._meta.abstract_managers)
# Proxy models inherit the non-abstract managers from their base,
# unless they have redefined any of them.
if is_proxy:
new_class.copy_managers(original_base._meta.concrete_managers)
# Inherit virtual fields (like GenericForeignKey) from the parent
# class
for field in base._meta.virtual_fields:
if base._meta.abstract and field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes '
'with field of similar name from '
'abstract base class %r' % (field.name, name, base.__name__)
)
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def copy_managers(cls, base_managers):
# This is in-place sorting of an Options attribute, but that's fine.
base_managers.sort()
for _, mgr_name, manager in base_managers: # NOQA (redefinition of _)
val = getattr(cls, mgr_name, None)
if not val or val is manager:
new_manager = manager._copy_to_model(cls)
cls.add_to_class(mgr_name, new_manager)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# defer creating accessors on the foreign class until we are
# certain it has been created
def make_foreign_order_accessors(field, model, cls):
setattr(
field.rel.to,
'get_%s_order' % cls.__name__.lower(),
curry(method_get_order, cls)
)
setattr(
field.rel.to,
'set_%s_order' % cls.__name__.lower(),
curry(method_set_order, cls)
)
add_lazy_relation(
cls,
opts.order_with_respect_to,
opts.order_with_respect_to.rel.to,
make_foreign_order_accessors
)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.attname for f in opts.fields))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = update_wrapper(curry(get_absolute_url, opts, cls.get_absolute_url),
cls.get_absolute_url)
ensure_default_manager(cls)
signals.class_prepared.send(sender=cls)
class ModelState(object):
"""
A class for storing instance state
"""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
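# Sketch of how _state.adding behaves in practice ("SomeModel" is hypothetical):
#
#     obj = SomeModel(pk=1)
#     obj._state.adding   # True: treated as a new, as-yet-unsaved object
#     obj.save()
#     obj._state.adding   # False: later saves are treated as edits, so
#                         # validate_unique() excludes this row from its queries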
class Model(six.with_metaclass(ModelBase)):
_deferred = False
def __init__(self, *args, **kwargs):
signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(self._meta.concrete_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(self._meta.fields)
for val, field in zip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# This slightly odd construct is so that we can access any
# data-descriptor object (DeferredAttribute) without triggering its
# __get__ method.
if (field.attname not in kwargs and
(isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
or field.column is None)):
# This field will be populated on request.
continue
if kwargs:
if isinstance(field.rel, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
setattr(self, field.name, rel_obj)
else:
setattr(self, field.attname, val)
if kwargs:
for prop in list(kwargs):
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super(Model, self).__init__()
signals.post_init.send(sender=self.__class__, instance=self)
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
def __str__(self):
if six.PY2 and hasattr(self, '__unicode__'):
return force_text(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
"""
Provides pickling support. Normally, this just dispatches to Python's
standard handling. However, for models with deferred field loading, we
need to do things manually, as they're dynamically created classes and
only module-level classes can be pickled by the default path.
"""
data = self.__dict__
if not self._deferred:
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id, [], simple_class_factory), data
defers = []
for field in self._meta.fields:
if isinstance(self.__class__.__dict__.get(field.attname),
DeferredAttribute):
defers.append(field.attname)
model = self._meta.proxy_for_model
class_id = model._meta.app_label, model._meta.object_name
return (model_unpickle, (class_id, defers, deferred_class_factory), data)
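    # Sketch: pickling round-trips through model_unpickle() at the bottom of
    # this module, so instances of dynamically created deferred classes (from
    # .only()/.defer()) survive pickling ("obj" is hypothetical):
    #
    #     import pickle
    #     clone = pickle.loads(pickle.dumps(obj))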
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field_by_name(field_name)[0]
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
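    # Sketch ("entry" with a ForeignKey "author" is hypothetical):
    # entry.serializable_value('author') returns the raw author_id rather than
    # the related object, which is what serializers and form output expect.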
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and self._deferred and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
deferred_fields = [
f.attname for f in self._meta.fields
if (f.attname not in self.__dict__ and
isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
]
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
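    # A minimal usage sketch for the parameters above ("entry" is hypothetical):
    #
    #     entry.save(update_fields=['headline'])  # UPDATE only that column
    #     entry.save(force_insert=True)           # insist on an SQL INSERT
    #     entry.save(force_update=True)           # insist on an SQL UPDATE
    #
    # An empty update_fields sequence skips the save entirely, as the early
    # return above shows.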
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
signals.post_save.send(sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None
and getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
order_value = cls._base_manager.using(using).filter(
**{field.name: getattr(self, field.attname)}).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if not isinstance(f, AutoField)]
update_pk = bool(meta.has_auto_field and not pk_set)
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
filtered._update(values)
return True
else:
return False
return filtered._update(values) > 0
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
delete.alters_data = True
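    # Sketch: deletion cascades through dependent rows via Collector, so
    # objects with a ForeignKey to "entry" (hypothetical) and the default
    # on_delete=CASCADE are removed in the same transaction:
    #
    #     entry.delete()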
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
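    # Sketch: for a date field "pub_date" this backs the generated
    # get_next_by_pub_date()/get_previous_by_pub_date() methods ("entry" and
    # the field name are hypothetical):
    #
    #     entry.get_next_by_pub_date()              # next row by pub_date
    #     entry.get_previous_by_pub_date(status=1)  # extra filters via kwargs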
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
obj = self._default_manager.filter(**{
order_field.name: getattr(self, order_field.attname)
}).filter(**{
'_order__%s' % op: self._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, unused):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return self.pk
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.parents.keys():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.parents.keys():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
if lookup_value is None:
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
            # There's a ticket to add a date lookup; we can remove this special
            # case if that makes its way in.
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'lookup_type': lookup_type,
'field': field_name,
'field_label': six.text_type(capfirst(field.verbose_name)),
'date_field': unique_for,
'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': six.text_type(capfirst(opts.verbose_name)),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = six.text_type(capfirst(field.verbose_name))
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
"""
errors = {}
if exclude is None:
exclude = []
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
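    # Typical calling pattern, as a sketch ("instance" is hypothetical):
    #
    #     try:
    #         instance.full_clean(exclude=['slug'])
    #     except ValidationError as e:
    #         e.message_dict  # maps field names to lists of messages
    #
    # ModelForm validation runs the same three steps: clean_fields(), clean(),
    # then validate_unique().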
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
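    # These errors surface through the system check framework; a sketch of the
    # usual entry points ("SomeModel" is hypothetical):
    #
    #     python manage.py check       # runs check() on every registered model
    #     errors = SomeModel.check()   # direct call; returns a list of
    #                                  # django.core.checks.Error instances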
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
hint=None,
obj=None,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
("'%s' references '%s.%s', which has not been installed, or is abstract.") % (
cls._meta.swappable, app_label, model_name
),
hint=None,
obj=None,
id='models.E002',
)
)
return errors
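    # Sketch: the canonical swappable model is the auth user model, so with
    # Meta.swappable = 'AUTH_USER_MODEL' the settings below would trigger the
    # errors above:
    #
    #     AUTH_USER_MODEL = 'myauth'          # E001: not 'app_label.ModelName'
    #     AUTH_USER_MODEL = 'myauth.Missing'  # E002: target not installed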
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
managers = cls._meta.concrete_managers + cls._meta.abstract_managers
for (_, _, manager) in managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.rel.to, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.rel.through, ModelBase))
for f in fields:
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
("The model has two many-to-many relations through "
"the intermediate model '%s.%s'.") % (
f.rel.through._meta.app_label,
f.rel.through._meta.object_name
),
hint=None,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
fields = list(f for f in cls._meta.local_fields
if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
("'id' can only be used as a field name if the field also "
"sets 'primary_key=True'."),
hint=None,
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.parents:
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
("The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'.") % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
hint=None,
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents.
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = (f.name == "id" and
clash and clash.name == "id" and clash.model == cls)
if clash and not id_conflict:
errors.append(
checks.Error(
("The field '%s' clashes with the field '%s' "
"from model '%s'.") % (
f.name, clash.name, clash.model._meta
),
hint=None,
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
hint=None,
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list))
for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
hint=None,
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
errors = []
for field_name in fields:
try:
field = cls._meta.get_field(field_name,
many_to_many=True)
except models.FieldDoesNotExist:
errors.append(
checks.Error(
"'%s' refers to the non-existent field '%s'." % (option, field_name),
hint=None,
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.rel, models.ManyToManyRel):
errors.append(
checks.Error(
("'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'.") % (
option, field_name, option
),
hint=None,
obj=cls,
id='models.E013',
)
)
return errors
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of lists and do all fields
exist? """
from django.db.models import FieldDoesNotExist
if not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
("'ordering' must be a tuple or list "
"(even if you want to order by only one field)."),
hint=None,
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
fields = (f for f in fields if
f != '_order' or not cls._meta.order_with_respect_to)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if '__' not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = (f for f in fields if f != 'pk')
for field_name in fields:
try:
cls._meta.get_field(field_name, many_to_many=False)
except FieldDoesNotExist:
errors.append(
checks.Error(
"'ordering' refers to the non-existent field '%s'." % field_name,
hint=None,
obj=cls,
id='models.E015',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.model_name), func)(self, *args, **kwargs)
########
# MISC #
########
def simple_class_factory(model, attrs):
"""
Needed for dynamic classes.
"""
return model
def model_unpickle(model_id, attrs, factory):
"""
Used to unpickle Model subclasses with deferred fields.
"""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
cls = factory(model, attrs)
return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
| bsd-3-clause | 566,368,153,932,182,660 | 40.42964 | 166 | 0.545034 | false |
kctan0805/vdpm | share/gdal/gdal-2.0.0/swig/python/samples/gdalinfo.py | 1 | 23758 | #!/usr/bin/env python
#/******************************************************************************
# * $Id: gdalinfo.py 28391 2015-01-30 19:57:31Z rouault $
# *
# * Project: GDAL Utilities
# * Purpose: Python port of Commandline application to list info about a file.
# * Author: Even Rouault, <even dot rouault at mines dash paris dot org>
# *
# * Port from gdalinfo.c whose author is Frank Warmerdam
# *
# ******************************************************************************
# * Copyright (c) 2010-2011, Even Rouault <even dot rouault at mines-paris dot org>
# * Copyright (c) 1998, Frank Warmerdam
# *
# * Permission is hereby granted, free of charge, to any person obtaining a
# * copy of this software and associated documentation files (the "Software"),
# * to deal in the Software without restriction, including without limitation
# * the rights to use, copy, modify, merge, publish, distribute, sublicense,
# * and/or sell copies of the Software, and to permit persons to whom the
# * Software is furnished to do so, subject to the following conditions:
# *
# * The above copyright notice and this permission notice shall be included
# * in all copies or substantial portions of the Software.
# *
# * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# * DEALINGS IN THE SOFTWARE.
# ****************************************************************************/
import sys
try:
from osgeo import gdal
from osgeo import osr
except:
import gdal
import osr
#/************************************************************************/
#/* Usage() */
#/************************************************************************/
def Usage():
print( "Usage: gdalinfo [--help-general] [-mm] [-stats] [-hist] [-nogcp] [-nomd]\n" + \
" [-norat] [-noct] [-nofl] [-checksum] [-mdd domain]* datasetname" )
return 1
def EQUAL(a, b):
return a.lower() == b.lower()
#/************************************************************************/
#/* main() */
#/************************************************************************/
def main( argv = None ):
bComputeMinMax = False
bShowGCPs = True
bShowMetadata = True
bShowRAT=True
bStats = False
bApproxStats = True
bShowColorTable = True
bComputeChecksum = False
bReportHistograms = False
pszFilename = None
papszExtraMDDomains = [ ]
pszProjection = None
hTransform = None
bShowFileList = True
#/* Must process GDAL_SKIP before GDALAllRegister(), but we can't call */
#/* GDALGeneralCmdLineProcessor before it needs the drivers to be registered */
#/* for the --format or --formats options */
#for( i = 1; i < argc; i++ )
#{
# if EQUAL(argv[i],"--config") and i + 2 < argc and EQUAL(argv[i + 1], "GDAL_SKIP"):
# {
# CPLSetConfigOption( argv[i+1], argv[i+2] );
#
# i += 2;
# }
#}
#
#GDALAllRegister();
if argv is None:
argv = sys.argv
argv = gdal.GeneralCmdLineProcessor( argv )
if argv is None:
return 1
nArgc = len(argv)
#/* -------------------------------------------------------------------- */
#/* Parse arguments. */
#/* -------------------------------------------------------------------- */
i = 1
while i < nArgc:
if EQUAL(argv[i], "--utility_version"):
print("%s is running against GDAL %s" %
(argv[0], gdal.VersionInfo("RELEASE_NAME")))
return 0
elif EQUAL(argv[i], "-mm"):
bComputeMinMax = True
elif EQUAL(argv[i], "-hist"):
bReportHistograms = True
elif EQUAL(argv[i], "-stats"):
bStats = True
bApproxStats = False
elif EQUAL(argv[i], "-approx_stats"):
bStats = True
bApproxStats = True
elif EQUAL(argv[i], "-checksum"):
bComputeChecksum = True
elif EQUAL(argv[i], "-nogcp"):
bShowGCPs = False
elif EQUAL(argv[i], "-nomd"):
bShowMetadata = False
elif EQUAL(argv[i], "-norat"):
bShowRAT = False
elif EQUAL(argv[i], "-noct"):
bShowColorTable = False
elif EQUAL(argv[i], "-mdd") and i < nArgc-1:
i = i + 1
papszExtraMDDomains.append( argv[i] )
elif EQUAL(argv[i], "-nofl"):
bShowFileList = False
elif argv[i][0] == '-':
return Usage()
elif pszFilename is None:
pszFilename = argv[i]
else:
return Usage()
i = i + 1
if pszFilename is None:
return Usage()
#/* -------------------------------------------------------------------- */
#/* Open dataset. */
#/* -------------------------------------------------------------------- */
hDataset = gdal.Open( pszFilename, gdal.GA_ReadOnly )
if hDataset is None:
print("gdalinfo failed - unable to open '%s'." % pszFilename )
return 1
#/* -------------------------------------------------------------------- */
#/* Report general info. */
#/* -------------------------------------------------------------------- */
hDriver = hDataset.GetDriver();
print( "Driver: %s/%s" % ( \
hDriver.ShortName, \
hDriver.LongName ))
papszFileList = hDataset.GetFileList();
if papszFileList is None or len(papszFileList) == 0:
print( "Files: none associated" )
else:
print( "Files: %s" % papszFileList[0] )
if bShowFileList:
for i in range(1, len(papszFileList)):
print( " %s" % papszFileList[i] )
print( "Size is %d, %d" % (hDataset.RasterXSize, hDataset.RasterYSize))
#/* -------------------------------------------------------------------- */
#/* Report projection. */
#/* -------------------------------------------------------------------- */
pszProjection = hDataset.GetProjectionRef()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
print( "Coordinate System is:\n%s" % pszPrettyWkt )
else:
print( "Coordinate System is `%s'" % pszProjection )
#/* -------------------------------------------------------------------- */
#/* Report Geotransform. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
if adfGeoTransform[2] == 0.0 and adfGeoTransform[4] == 0.0:
print( "Origin = (%.15f,%.15f)" % ( \
adfGeoTransform[0], adfGeoTransform[3] ))
print( "Pixel Size = (%.15f,%.15f)" % ( \
adfGeoTransform[1], adfGeoTransform[5] ))
else:
print( "GeoTransform =\n" \
" %.16g, %.16g, %.16g\n" \
" %.16g, %.16g, %.16g" % ( \
adfGeoTransform[0], \
adfGeoTransform[1], \
adfGeoTransform[2], \
adfGeoTransform[3], \
adfGeoTransform[4], \
adfGeoTransform[5] ))
#/* -------------------------------------------------------------------- */
#/* Report GCPs. */
#/* -------------------------------------------------------------------- */
if bShowGCPs and hDataset.GetGCPCount() > 0:
pszProjection = hDataset.GetGCPProjection()
if pszProjection is not None:
hSRS = osr.SpatialReference()
if hSRS.ImportFromWkt(pszProjection ) == gdal.CE_None:
pszPrettyWkt = hSRS.ExportToPrettyWkt(False)
print( "GCP Projection = \n%s" % pszPrettyWkt )
else:
print( "GCP Projection = %s" % \
pszProjection )
gcps = hDataset.GetGCPs()
i = 0
for gcp in gcps:
print( "GCP[%3d]: Id=%s, Info=%s\n" \
" (%.15g,%.15g) -> (%.15g,%.15g,%.15g)" % ( \
i, gcp.Id, gcp.Info, \
gcp.GCPPixel, gcp.GCPLine, \
gcp.GCPX, gcp.GCPY, gcp.GCPZ ))
i = i + 1
#/* -------------------------------------------------------------------- */
#/* Report metadata. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
for extra_domain in papszExtraMDDomains:
papszMetadata = hDataset.GetMetadata_List(extra_domain)
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Metadata (%s):" % extra_domain)
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report "IMAGE_STRUCTURE" metadata. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report subdatasets. */
#/* -------------------------------------------------------------------- */
papszMetadata = hDataset.GetMetadata_List("SUBDATASETS")
if papszMetadata is not None and len(papszMetadata) > 0 :
print( "Subdatasets:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report geolocation. */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("GEOLOCATION")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "Geolocation:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Report RPCs */
#/* -------------------------------------------------------------------- */
if bShowMetadata:
papszMetadata = hDataset.GetMetadata_List("RPC")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( "RPC Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
#/* -------------------------------------------------------------------- */
#/* Setup projected to lat/long transform if appropriate. */
#/* -------------------------------------------------------------------- */
if pszProjection is not None and len(pszProjection) > 0:
hProj = osr.SpatialReference( pszProjection )
if hProj is not None:
hLatLong = hProj.CloneGeogCS()
if hLatLong is not None:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
hTransform = osr.CoordinateTransformation( hProj, hLatLong )
gdal.PopErrorHandler()
if gdal.GetLastErrorMsg().find( 'Unable to load PROJ.4 library' ) != -1:
hTransform = None
#/* -------------------------------------------------------------------- */
#/* Report corners. */
#/* -------------------------------------------------------------------- */
print( "Corner Coordinates:" )
GDALInfoReportCorner( hDataset, hTransform, "Upper Left", \
0.0, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Left", \
0.0, hDataset.RasterYSize);
GDALInfoReportCorner( hDataset, hTransform, "Upper Right", \
hDataset.RasterXSize, 0.0 );
GDALInfoReportCorner( hDataset, hTransform, "Lower Right", \
hDataset.RasterXSize, \
hDataset.RasterYSize );
GDALInfoReportCorner( hDataset, hTransform, "Center", \
hDataset.RasterXSize/2.0, \
hDataset.RasterYSize/2.0 );
#/* ==================================================================== */
#/* Loop over bands. */
#/* ==================================================================== */
for iBand in range(hDataset.RasterCount):
hBand = hDataset.GetRasterBand(iBand+1 )
#if( bSample )
#{
# float afSample[10000];
# int nCount;
#
# nCount = GDALGetRandomRasterSample( hBand, 10000, afSample );
# print( "Got %d samples.\n", nCount );
#}
(nBlockXSize, nBlockYSize) = hBand.GetBlockSize()
print( "Band %d Block=%dx%d Type=%s, ColorInterp=%s" % ( iBand+1, \
nBlockXSize, nBlockYSize, \
gdal.GetDataTypeName(hBand.DataType), \
gdal.GetColorInterpretationName( \
hBand.GetRasterColorInterpretation()) ))
if hBand.GetDescription() is not None \
and len(hBand.GetDescription()) > 0 :
print( " Description = %s" % hBand.GetDescription() )
dfMin = hBand.GetMinimum()
dfMax = hBand.GetMaximum()
if dfMin is not None or dfMax is not None or bComputeMinMax:
line = " "
if dfMin is not None:
line = line + ("Min=%.3f " % dfMin)
if dfMax is not None:
line = line + ("Max=%.3f " % dfMax)
if bComputeMinMax:
gdal.ErrorReset()
adfCMinMax = hBand.ComputeRasterMinMax(False)
if gdal.GetLastErrorType() == gdal.CE_None:
line = line + ( " Computed Min/Max=%.3f,%.3f" % ( \
adfCMinMax[0], adfCMinMax[1] ))
print( line )
stats = hBand.GetStatistics( bApproxStats, bStats)
# Dirty hack to recognize if stats are valid. If invalid, the returned
# stddev is negative
if stats[3] >= 0.0:
print( " Minimum=%.3f, Maximum=%.3f, Mean=%.3f, StdDev=%.3f" % ( \
stats[0], stats[1], stats[2], stats[3] ))
if bReportHistograms:
hist = hBand.GetDefaultHistogram(force = True, callback = gdal.TermProgress)
if hist is not None:
dfMin = hist[0]
dfMax = hist[1]
nBucketCount = hist[2]
panHistogram = hist[3]
print( " %d buckets from %g to %g:" % ( \
nBucketCount, dfMin, dfMax ))
line = ' '
for bucket in panHistogram:
line = line + ("%d " % bucket)
print(line)
if bComputeChecksum:
print( " Checksum=%d" % hBand.Checksum())
dfNoData = hBand.GetNoDataValue()
if dfNoData is not None:
if dfNoData != dfNoData:
print( " NoData Value=nan" )
else:
print( " NoData Value=%.18g" % dfNoData )
if hBand.GetOverviewCount() > 0:
line = " Overviews: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0 :
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%dx%d" % (hOverview.XSize, hOverview.YSize))
pszResampling = \
hOverview.GetMetadataItem( "RESAMPLING", "" )
if pszResampling is not None \
and len(pszResampling) >= 12 \
and EQUAL(pszResampling[0:12],"AVERAGE_BIT2"):
line = line + "*"
else:
line = line + "(null)"
print(line)
if bComputeChecksum:
line = " Overviews checksum: "
for iOverview in range(hBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
print(line)
if hBand.HasArbitraryOverviews():
print( " Overviews: arbitrary" )
nMaskFlags = hBand.GetMaskFlags()
if (nMaskFlags & (gdal.GMF_NODATA|gdal.GMF_ALL_VALID)) == 0:
hMaskBand = hBand.GetMaskBand()
line = " Mask Flags: "
if (nMaskFlags & gdal.GMF_PER_DATASET) != 0:
line = line + "PER_DATASET "
if (nMaskFlags & gdal.GMF_ALPHA) != 0:
line = line + "ALPHA "
if (nMaskFlags & gdal.GMF_NODATA) != 0:
line = line + "NODATA "
if (nMaskFlags & gdal.GMF_ALL_VALID) != 0:
line = line + "ALL_VALID "
print(line)
if hMaskBand is not None and \
hMaskBand.GetOverviewCount() > 0:
line = " Overviews of mask band: "
for iOverview in range(hMaskBand.GetOverviewCount()):
if iOverview != 0:
line = line + ", "
hOverview = hMaskBand.GetOverview( iOverview );
if hOverview is not None:
line = line + ( "%d" % hOverview.Checksum())
else:
line = line + "(null)"
if len(hBand.GetUnitType()) > 0:
print( " Unit Type: %s" % hBand.GetUnitType())
papszCategories = hBand.GetRasterCategoryNames()
if papszCategories is not None:
print( " Categories:" );
i = 0
for category in papszCategories:
print( " %3d: %s" % (i, category) )
i = i + 1
if hBand.GetScale() != 1.0 or hBand.GetOffset() != 0.0:
print( " Offset: %.15g, Scale:%.15g" % \
( hBand.GetOffset(), hBand.GetScale()))
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List()
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
if bShowMetadata:
papszMetadata = hBand.GetMetadata_List("IMAGE_STRUCTURE")
else:
papszMetadata = None
if bShowMetadata and papszMetadata is not None and len(papszMetadata) > 0 :
print( " Image Structure Metadata:" )
for metadata in papszMetadata:
print( " %s" % metadata )
hTable = hBand.GetRasterColorTable()
if hBand.GetRasterColorInterpretation() == gdal.GCI_PaletteIndex \
and hTable is not None:
print( " Color Table (%s with %d entries)" % (\
gdal.GetPaletteInterpretationName( \
hTable.GetPaletteInterpretation( )), \
hTable.GetCount() ))
if bShowColorTable:
for i in range(hTable.GetCount()):
sEntry = hTable.GetColorEntry(i)
print( " %3d: %d,%d,%d,%d" % ( \
i, \
sEntry[0],\
sEntry[1],\
sEntry[2],\
sEntry[3] ))
if bShowRAT:
pass
#hRAT = hBand.GetDefaultRAT()
#GDALRATDumpReadable( hRAT, None );
return 0
#/************************************************************************/
#/* GDALInfoReportCorner() */
#/************************************************************************/
def GDALInfoReportCorner( hDataset, hTransform, corner_name, x, y ):
line = "%-11s " % corner_name
#/* -------------------------------------------------------------------- */
#/* Transform the point into georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
adfGeoTransform = hDataset.GetGeoTransform(can_return_null = True)
if adfGeoTransform is not None:
dfGeoX = adfGeoTransform[0] + adfGeoTransform[1] * x \
+ adfGeoTransform[2] * y
dfGeoY = adfGeoTransform[3] + adfGeoTransform[4] * x \
+ adfGeoTransform[5] * y
else:
line = line + ("(%7.1f,%7.1f)" % (x, y ))
print(line)
return False
#/* -------------------------------------------------------------------- */
#/* Report the georeferenced coordinates. */
#/* -------------------------------------------------------------------- */
if abs(dfGeoX) < 181 and abs(dfGeoY) < 91:
line = line + ( "(%12.7f,%12.7f) " % (dfGeoX, dfGeoY ))
else:
line = line + ( "(%12.3f,%12.3f) " % (dfGeoX, dfGeoY ))
#/* -------------------------------------------------------------------- */
#/* Transform to latlong and report. */
#/* -------------------------------------------------------------------- */
if hTransform is not None:
pnt = hTransform.TransformPoint(dfGeoX, dfGeoY, 0)
if pnt is not None:
line = line + ( "(%s," % gdal.DecToDMS( pnt[0], "Long", 2 ) )
line = line + ( "%s)" % gdal.DecToDMS( pnt[1], "Lat", 2 ) )
print(line)
return True
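#/* -------------------------------------------------------------------- */
#/* Example invocation (illustrative; input.tif is a placeholder file): */
#/* python gdalinfo.py -stats -hist input.tif */
#/* -------------------------------------------------------------------- */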
if __name__ == '__main__':
version_num = int(gdal.VersionInfo('VERSION_NUM'))
if version_num < 1800: # because of GetGeoTransform(can_return_null)
print('ERROR: Python bindings of GDAL 1.8.0 or later required')
sys.exit(1)
sys.exit(main(sys.argv))
| lgpl-2.1 | -3,859,931,217,219,883,500 | 37.883797 | 95 | 0.440946 | false |
pauix1992/joustbot | joust.py | 1 | 4783 | # Load Joust variables
from random import randint
JOUST_DICE = 20
JOUST_DEATH_ROLL_TRIGGER = 17
JOUST_UNHORSE_TRIGGER = 15
JOUST_BROKEN_LANCE_TRIGGER = 10
JOUST_STRONG_HIT_TRIGGER = 7
JOUST_HIT_TRIGGER = 4
# roll 2d20 taking both players' penalty into account
def tilt(malus1, malus2):
dice1 = randint(1,JOUST_DICE) - malus1
dice2 = randint(1,JOUST_DICE) - malus2
dice1 = 0 if dice1 < 0 else dice1 # Dice rolls can't go under 0
dice2 = 0 if dice2 < 0 else dice2
return dice1,dice2
# Formats the result into something nice
def pretty_result(winner,loser,tilt_result,roll_w,roll_l):
tilt_result = abs(tilt_result)
if tilt_result > JOUST_DEATH_ROLL_TRIGGER:
res = loser + " is brutally unhorsed by " + winner
elif tilt_result > JOUST_UNHORSE_TRIGGER:
res = loser + " is unhorsed by " + winner
elif tilt_result > JOUST_BROKEN_LANCE_TRIGGER:
res = winner + " breaks a lance against " + loser
elif tilt_result > JOUST_STRONG_HIT_TRIGGER:
res = winner + " deals a strong hit to " + loser
elif tilt_result > JOUST_HIT_TRIGGER:
res = winner + " hits " + loser
else:
res = winner + " and " + loser + " exchange glancing hits"
return res+" ["+winner+" "+str(roll_w)+", "+loser+" "+str(roll_l)+"]\n\n"
# Get the malus the loser will take
def get_malus(tilt_result):
tilt_result = abs(tilt_result)
if tilt_result > JOUST_DEATH_ROLL_TRIGGER:
return -1
if tilt_result > JOUST_UNHORSE_TRIGGER:
return -1
if tilt_result > JOUST_BROKEN_LANCE_TRIGGER:
return 3
elif tilt_result > JOUST_STRONG_HIT_TRIGGER:
return 2
elif tilt_result > JOUST_HIT_TRIGGER:
return 1
else:
return 0
def death_roll(jouster):
roll = randint(1,JOUST_DICE)
if roll < 3:
return jouster + " has died! ["+str(roll)+"]\n\n"
elif roll < 6:
return jouster + " is maimed! ["+str(roll)+"]\n\n"
elif roll < 9:
return jouster + " got hurt! ["+str(roll)+"]\n\n"
    else:
return jouster + " is fine! ["+str(roll)+"]\n\n"
# Joust to 7 tilts
def joust(rider1,rider2,bonus1,bonus2):
res = "\n\n"
malus1 = -int(bonus1)
malus2 = -int(bonus2)
broken_lances_1 = 0
broken_lances_2 = 0
for x in range(1,8):
rolls = tilt(malus1,malus2)
tilt_res = rolls[0] - rolls[1]
if tilt_res == 0:
res += rider1 + " and " + rider2 + " miss each other ["+rider1+" "+str(rolls[0])+", "+rider2 +" "+str(rolls[1])+"]\n\n"
else:
if tilt_res > 0:
winner = rider1
loser = rider2
roll_winner = rolls[0]
roll_loser = rolls[1]
malus1 += get_malus(tilt_res)
else:
winner = rider2
loser = rider1
roll_winner = rolls[1]
roll_loser = rolls[0]
malus2 += get_malus(tilt_res)
res += pretty_result(winner,loser,tilt_res,roll_winner,roll_loser)
if abs(tilt_res) > JOUST_DEATH_ROLL_TRIGGER:
res += "DEATH ROLL: "
res += death_roll(loser)
res += "**"+winner+" has won!**\n\n"
return res
if abs(tilt_res) > JOUST_UNHORSE_TRIGGER:
res += "**"+winner+" has won!**\n\n"
return res
elif abs(tilt_res) > JOUST_BROKEN_LANCE_TRIGGER:
if tilt_res > 0:
broken_lances_1 += 1
else:
broken_lances_2 += 1
    if broken_lances_1 > broken_lances_2:
        res +="**"+rider1+" won against "+rider2+" ("+str(broken_lances_1)+" broken lances against "+str(broken_lances_2)+")**\n\n"
    elif broken_lances_2 > broken_lances_1:
        res += "**"+rider2+" won against "+rider1+" ("+str(broken_lances_2)+" broken lances against "+str(broken_lances_1)+")**\n\n"
    else:
        res += "**"+rider1+" and "+rider2+" tie with "+str(broken_lances_2)+" broken lances.**\n\n"
return res
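# Illustrative usage (hypothetical rider names and jousting bonuses):
#
#     print(joust("Ser Arthur", "Ser Barristan", 1, 0))
#
# prints a tilt-by-tilt report and the winner after at most 7 tilts.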
#### Roll a tournament round
def joust_round(comment):
body = comment.body
b = body.split("\n")
result = ''
contestants = []
for contestant in b:
if contestant.find("joustbot") < 0 and len(contestant) > 4:
contestants.append(contestant[2:-2])
    if contestants:
        for i in range(0,len(contestants)):
            if i%2 == 1:
                result +="***" + contestants[i-1] + " VERSUS " + contestants[i] + "!***\n\n"
                result += joust(contestants[i-1],contestants[i],0,0) or "ERROR!"
                result += "------------------------------------------------------\n\n"
        comment.reply(result)
    else:
        contestants = []
        result = ''
| cc0-1.0 | -1,187,099,026,301,466,400 | 34.169118 | 132 | 0.545055 | false |
DolphinDream/sverchok | nodes/object_nodes/getsetprop.py | 1 | 9587 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
import ast
import traceback
import bpy
from bpy.props import StringProperty, BoolProperty, IntProperty, FloatProperty, FloatVectorProperty
from bpy.types import bpy_prop_array
import mathutils
from mathutils import Matrix, Vector, Euler, Quaternion, Color
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.utils.nodes_mixins.sv_animatable_nodes import SvAnimatableNode
from sverchok.data_structure import Matrix_generate, updateNode, node_id
def parse_to_path(p):
    '''
    Parse an AST expression into a path that can be looked up easily.
    Return an array of tuples with op type and value.
    ops are:
    name - global name to use
    attr - attribute to get using getattr(obj, attr)
    key - key for accessing via obj[key]
    '''
if isinstance(p, ast.Attribute):
return parse_to_path(p.value)+[("attr", p.attr)]
elif isinstance(p, ast.Subscript):
if isinstance(p.slice.value, ast.Num):
return parse_to_path(p.value) + [("key", p.slice.value.n)]
elif isinstance(p.slice.value, ast.Str):
return parse_to_path(p.value) + [("key", p.slice.value.s)]
elif isinstance(p, ast.Name):
return [("name", p.id)]
else:
raise NameError
def get_object(path):
    '''
    access the object specified by a path
    generated by parse_to_path
    will fail if the path is invalid
    '''
curr_object = globals()[path[0][1]]
for t, value in path[1:]:
if t == "attr":
curr_object = getattr(curr_object, value)
elif t == "key":
curr_object = curr_object[value]
return curr_object
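# Illustrative example (hypothetical object name):
#
#     parse_to_path(ast.parse("bpy.data.objects['Cube'].location").body[0].value)
#
# returns [("name", "bpy"), ("attr", "data"), ("attr", "objects"),
#          ("key", "Cube"), ("attr", "location")],
# which get_object() then walks to retrieve the actual value.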
def apply_alias(eval_str):
    '''
    apply standard aliases
    will raise an error if it isn't a bpy path
    '''
if not eval_str.startswith("bpy."):
for alias, expanded in aliases.items():
if eval_str.startswith(alias):
eval_str = eval_str.replace(alias, expanded, 1)
break
if not eval_str.startswith("bpy."):
raise NameError
return eval_str
def wrap_output_data(tvar):
    '''
    create valid sverchok socket data from an object
    (taken from the EK node)
    '''
if isinstance(tvar, (Vector, Color)):
data = [[tvar[:]]]
elif isinstance(tvar, Matrix):
data = [[r[:] for r in tvar[:]]]
elif isinstance(tvar, (Euler, Quaternion)):
tvar = tvar.to_matrix().to_4x4()
data = [[r[:] for r in tvar[:]]]
elif isinstance(tvar, list):
data = [tvar]
elif isinstance(tvar, (int, float)):
data = [[tvar]]
else:
data = tvar
return data
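# Illustrative examples (hypothetical values):
#
#     wrap_output_data(Vector((1.0, 2.0, 3.0)))  ->  [[(1.0, 2.0, 3.0)]]
#     wrap_output_data(4.5)                      ->  [[4.5]]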
def assign_data(obj, data):
'''
assigns data to the object
'''
if isinstance(obj, (int, float)):
# doesn't work
obj = data[0][0]
elif isinstance(obj, (Vector, Color)):
obj[:] = data[0][0]
elif isinstance(obj, (Matrix, Euler, Quaternion)):
mats = Matrix_generate(data)
mat = mats[0]
if isinstance(obj, Euler):
eul = mat.to_euler(obj.order)
obj[:] = eul
elif isinstance(obj, Quaternion):
quat = mat.to_quaternion()
obj[:] = quat
else: #isinstance(obj, Matrix)
obj[:] = mat
else: # super optimistic guess
obj[:] = type(obj)(data[0][0])
aliases = {
"c": "bpy.context",
"C" : "bpy.context",
"scene": "bpy.context.scene",
"data": "bpy.data",
"D": "bpy.data",
"objs": "bpy.data.objects",
"mats": "bpy.data.materials",
"M": "bpy.data.materials",
"meshes": "bpy.data.meshes",
"texts": "bpy.data.texts"
}
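# Illustrative example: apply_alias("objs['Cube'].location") expands to
# "bpy.data.objects['Cube'].location" using the table above; a string that
# still does not start with "bpy." afterwards raises NameError.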
types = {
int: "SvStringsSocket",
float: "SvStringsSocket",
str: "SvStringsSocket", # I WANT A PROPER TEXT SOCKET!!!
mathutils.Vector: "SvVerticesSocket",
mathutils.Color: "SvVerticesSocket",
mathutils.Matrix: "SvMatrixSocket",
mathutils.Euler: "SvMatrixSocket",
mathutils.Quaternion: "SvMatrixSocket"
}
def secondary_type_assesment(item):
"""
we can use this function to perform more granular attr/type identification
"""
if isinstance(item, bpy_prop_array):
if hasattr(item, "path_from_id") and item.path_from_id().endswith('color'):
return "SvColorSocket"
return None
class SvGetPropNode(bpy.types.Node, SverchCustomTreeNode, SvAnimatableNode):
''' Get property '''
bl_idname = 'SvGetPropNode'
bl_label = 'Get property'
bl_icon = 'FORCE_VORTEX'
sv_icon = 'SV_PROP_GET'
bad_prop: BoolProperty(default=False)
def verify_prop(self, context):
try:
obj = self.obj
except:
traceback.print_exc()
self.bad_prop = True
return
self.bad_prop = False
with self.sv_throttle_tree_update():
s_type = types.get(type(self.obj))
if not s_type:
s_type = secondary_type_assesment(self.obj)
outputs = self.outputs
if s_type and outputs:
outputs[0].replace_socket(s_type)
elif s_type:
outputs.new(s_type, "Data")
updateNode(self, context)
prop_name: StringProperty(name='', update=verify_prop)
@property
def obj(self):
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
return get_object(path)
def draw_buttons(self, context, layout):
layout.alert = self.bad_prop
if len(self.outputs) > 0:
self.draw_animatable_buttons(layout, icon_only=True)
layout.prop(self, "prop_name", text="")
def process(self):
# print(">> Get process is called")
self.outputs[0].sv_set(wrap_output_data(self.obj))
class SvSetPropNode(bpy.types.Node, SverchCustomTreeNode):
''' Set property '''
bl_idname = 'SvSetPropNode'
bl_label = 'Set property'
bl_icon = 'FORCE_VORTEX'
sv_icon = 'SV_PROP_SET'
ok_prop: BoolProperty(default=False)
bad_prop: BoolProperty(default=False)
@property
def obj(self):
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
return get_object(path)
def verify_prop(self, context):
# test first
try:
obj = self.obj
except:
traceback.print_exc()
self.bad_prop = True
return
# execute second
self.bad_prop = False
with self.sv_throttle_tree_update():
s_type = types.get(type(self.obj))
if not s_type:
s_type = secondary_type_assesment(self.obj)
p_name = {
float: "float_prop",
int: "int_prop",
bpy_prop_array: "color_prop"
}.get(type(self.obj),"")
inputs = self.inputs
if inputs and s_type:
socket = inputs[0].replace_socket(s_type)
socket.prop_name = p_name
elif s_type:
inputs.new(s_type, "Data").prop_name = p_name
if s_type == "SvVerticesSocket":
inputs[0].use_prop = True
updateNode(self, context)
def local_updateNode(self, context):
# no further interaction with the nodetree is required.
self.process()
prop_name: StringProperty(name='', update=verify_prop)
float_prop: FloatProperty(update=updateNode, name="x")
int_prop: IntProperty(update=updateNode, name="x")
color_prop: FloatVectorProperty(
name="Color", description="Color", size=4,
min=0.0, max=1.0, subtype='COLOR', update=local_updateNode)
def draw_buttons(self, context, layout):
layout.alert = self.bad_prop
layout.prop(self, "prop_name", text="")
def process(self):
# print("<< Set process is called")
data = self.inputs[0].sv_get()
eval_str = apply_alias(self.prop_name)
ast_path = ast.parse(eval_str)
path = parse_to_path(ast_path.body[0].value)
obj = get_object(path)
#with self.sv_throttle_tree_update():
# changes here should not reflect back into the nodetree?
if isinstance(obj, (int, float, bpy_prop_array)):
obj = get_object(path[:-1])
p_type, value = path[-1]
if p_type == "attr":
setattr(obj, value, data[0][0])
else:
obj[value] = data[0][0]
else:
assign_data(obj, data)
def register():
bpy.utils.register_class(SvSetPropNode)
bpy.utils.register_class(SvGetPropNode)
def unregister():
bpy.utils.unregister_class(SvSetPropNode)
bpy.utils.unregister_class(SvGetPropNode)
| gpl-3.0 | 4,452,298,741,122,461,000 | 29.826367 | 99 | 0.593616 | false |
remybaranx/qtaste | TestSuites/TestSuite_QTaste/EngineSuite/QTASTE_REPORT/QTASTE_REPORT_03/TestScript.py | 1 | 3177 | # encoding= utf-8
# Copyright 2007-2009 QSpin - www.qspin.be
#
# This file is part of QTaste framework.
#
# QTaste is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# QTaste is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with QTaste. If not, see <http://www.gnu.org/licenses/>.
##
# QTaste Test result management: Check test timeout reporting.
# <p>
# This test case verifies that when the test execution duration exceeds the TIMEOUT value, the test is reported as "Failed" in the test report.
# @preparation None
##
import time
from qtaste import *
engineTest = testAPI.getEngineTest()
def Step1():
"""
@step In CSV file, define TIMEOUT to 5 for 4 rows
@expected None
"""
pass
def Step2():
"""
@step Call the verb sleep(3000), then log an error message and call testAPI.stopTest(Status.NOT_AVAILABLE, "This should not be executed.")
@expected After +-5 seconds, QTaste reports test as "Failed", reason:<i>Test execution timeout.</i><p>
Script call stack is reported.<p>
    The error message doesn't appear in the log.
"""
engineTest.sleep(3000)
# this should not be executed
logger.error('The script continued to execute after timeout!');
testAPI.stopTest(Status.NOT_AVAILABLE, "This should not be executed.")
def Step3():
"""
@step Call the verb neverReturn(), then log an error message and call testAPI.stopTest(Status.NOT_AVAILABLE, "This should not be executed.")
@expected After +-5 seconds, QTaste reports test as "Failed", reason:<i>Test execution timeout.</i><p>
Script call stack is reported.<p>
    The error message doesn't appear in the log.
"""
engineTest.neverReturn()
# this should not be executed
logger.error('The script continued to execute after timeout!');
testAPI.stopTest(Status.NOT_AVAILABLE, "This should not be executed.")
def Step4():
"""
@step Do a never ending loop:<p>
<b>while True: <br>
pass</b><p>
then log an error message and call testAPI.stopTest(Status.NOT_AVAILABLE, "This should not be executed.")
@expected After +-5 seconds, QTaste reports test as "Failed", reason:<i>Test execution timeout.</i><p>
Script call stack is reported.<p>
The error message doesnt appear in the log.
"""
while True:
pass
# this should not be executed
logger.error('The script continued to execute after timeout!');
testAPI.stopTest(Status.NOT_AVAILABLE, "This should not be executed.")
doStep(Step1)
if testData.getBooleanValue('IN_VERB'):
if testData.getBooleanValue('IN_SLEEP'):
doStep(Step2)
else:
doStep(Step3)
else:
if testData.getBooleanValue('IN_SLEEP'):
doStep(Step2)
else:
doStep(Step4)
| gpl-3.0 | 8,974,629,040,918,288,000 | 33.16129 | 149 | 0.711048 | false |
alphagov/notifications-utils | tests/test_base64_uuid.py | 1 | 1242 | import os
from uuid import UUID
import pytest
from notifications_utils.base64_uuid import (
base64_to_bytes,
base64_to_uuid,
bytes_to_base64,
uuid_to_base64,
)
def test_bytes_to_base64_to_bytes():
b = os.urandom(32)
b64 = bytes_to_base64(b)
assert base64_to_bytes(b64) == b
@pytest.mark.parametrize('url_val', [
'AAAAAAAAAAAAAAAAAAAAAQ',
'AAAAAAAAAAAAAAAAAAAAAQ=', # even though this has invalid padding we put extra =s on the end so this is okay
'AAAAAAAAAAAAAAAAAAAAAQ==',
])
def test_base64_converter_to_python(url_val):
assert base64_to_uuid(url_val) == UUID(int=1)
@pytest.mark.parametrize('python_val', [
UUID(int=1),
'00000000-0000-0000-0000-000000000001'
])
def test_base64_converter_to_url(python_val):
assert uuid_to_base64(python_val) == 'AAAAAAAAAAAAAAAAAAAAAQ'
@pytest.mark.parametrize('url_val', [
'this_is_valid_base64_but_is_too_long_to_be_a_uuid',
'this_one_has_emoji_➕➕➕',
])
def test_base64_converter_to_python_raises_validation_error(url_val):
with pytest.raises(Exception):
base64_to_uuid(url_val)
def test_base64_converter_to_url_raises_validation_error():
with pytest.raises(Exception):
uuid_to_base64(object())
| mit | -8,550,987,892,238,410,000 | 24.75 | 113 | 0.690939 | false |
saurabh6790/tru_app_back | test/doctype/neutralization_value/neutralization_value.py | 1 | 7907 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import webnotes
from test.doctype import assign_notify
from test.doctype import create_test_results,create_child_testresult, get_pgcil_limit,update_test_log,verfy_bottle_number
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
from webnotes.utils import cint, cstr, flt, now, nowdate, get_first_day, get_last_day, add_to_date, getdate
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def on_update(self):
#webnotes.errprint("in on_update")
self.create_testresult('Running')
def check_bottle_no(self):
for d in getlist(self.doclist,'neutralisation_test_details'):
#webnotes.errprint(d.sample_no)
if cint(webnotes.conn.sql("""select count(*) from tabSample
where name='%s' and barcode in ('%s')"""%(d.sample_no,d.bottle_no),debug=1)[0][0]) != 1:
webnotes.msgprint("Entered bottle number not belongs to Sample No '"+d.sample_no+"' Please correct it",raise_exception=1)
else:
pass
def add_equipment(self,equipment):
if self.doc.equipment_used_list:
equipment_list = self.doc.equipment_used_list + ', ' + equipment
else:
equipment_list = equipment
return{
"equipment_used_list": equipment_list
}
def get_density_details(self,args):
dic=eval(args)
if ((dic['temperature_data']) and (dic['density'])):
cal=cstr(cint(1)+(0.00065*flt((flt(dic['temperature_data'])-flt(dic['temp'])))))
density= cstr(flt(dic['density'])*flt(cal))
return{
"density_of_oil":density
}
else:
webnotes.msgprint("Density & Temperature Field Can not be blanked")
def get_neutralisation_details(self,args):
#webnotes.errprint(args)
dic=eval(args)
cal1=cstr(56.1*(flt(dic['alkoh'])*flt(self.doc.normality_of_koh)))
neutralisation=cstr(flt(cal1)/(flt(dic['density'])*flt(dic['volume'])))
return{
"reported_value":neutralisation
}
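	# Worked example (hypothetical numbers): with alkoh = 1.2 ml,
	# normality_of_koh = 0.1, density = 0.85 g/ml and volume = 20 ml, the
	# reported value is 56.1*1.2*0.1/(0.85*20) ~= 0.396 mg KOH/g.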
def validate(self):
self.check_duplicate_sample_id()
self.check_reported_value()
def check_duplicate_sample_id(self):
sample_no=[]
for d in getlist(self.doclist, 'neutralisation_test_details'):
if d.sample_no:
if d.sample_no in sample_no:
webnotes.msgprint("Sample no could not be duplicate",raise_exception=1)
sample_no.append(d.sample_no)
def check_reported_value(self):
for d in getlist(self.doclist, 'neutralisation_test_details'):
if not d.reported_value:
webnotes.msgprint("Reported value could not be blank",raise_exception=1)
def retrieve_normality_values(self):
self.doc.normality_of_hcl=webnotes.conn.get_value('Normality',self.doc.normality,'normality')
self.doc.volume=webnotes.conn.get_value('Normality',self.doc.normality,'volume')
self.doc.koh_volume=webnotes.conn.get_value('Normality',self.doc.normality,'koh_volume')
self.doc.normality_of_koh=webnotes.conn.get_value('Normality',self.doc.normality,'koh_normality')
self.doc.method=webnotes.conn.get_value('Normality',self.doc.normality,'method')
return "Done"
def update_status(self):
webnotes.conn.sql("update `tabSample Allocation Detail` set status='"+self.doc.workflow_state+"' where test_id='"+self.doc.name+"' ")
webnotes.conn.commit()
def get_barcode(self,sample_no):
bottle_no=webnotes.conn.get_value('Sample',sample_no,'barcode')
return {'bottle_no':bottle_no}
def assign_neutralization_value_test(self):
test_details = {'test': "Neutralization Value", 'name': self.doc.name}
		# for assigning the ticket to the person with role Shift Incharge in workflow Shift Incharge - Lab Incharge
if self.doc.workflow_state=='Waiting For Approval':
test_details['incharge'] = self.doc.shift_incharge_approval
assign_notify(test_details)
		# for assigning the ticket to the person with role Lab Incharge in workflow Shift Incharge - Lab Incharge
if self.doc.workflow_state=='Waiting For Approval Of Lab Incharge':
test_details['incharge'] = self.doc.lab_incharge_approval
assign_notify(test_details)
if self.doc.workflow_state=='Rejected':
test_details={'workflow_state':self.doc.workflow_state,'sample_no':self.doc.sample_no}
assign_notify(test_details)
def on_submit(self):
self.create_testresult('Confirm')
def add_sample_nos(self):
if self.doc.sample_no and self.doc.physical_condition_density:
bottle_no= webnotes.conn.sql("select barcode from `tabSample` where name='"+self.doc.sample_no+"'",debug=1)
webnotes.errprint(bottle_no[0][0])
self.doclist=self.doc.clear_table(self.doclist,'neutralisation_test_details')
nl = addchild(self.doc, 'neutralisation_test_details', 'Neutralization Test Details', self.doclist)
nl.sample_no =self.doc.sample_no
nl.bottle_no=bottle_no[0][0]
def create_testresult(self,status):
for g in getlist(self.doclist,'neutralisation_test_details'):
if g.reported_value:
pgcil_limit = get_pgcil_limit(self.doc.method)
test_detail = {'test': "Neutralization Value", 'sample_no':g.sample_no,'name': self.doc.name, 'method':self.doc.method, 'pgcil_limit':pgcil_limit,'workflow_state':self.doc.workflow_state,'tested_by':self.doc.tested_by,'status':status}
if self.doc.workflow_state=='Rejected':
update_test_log(test_detail)
else:
parent=create_test_results(test_detail)
create_child_testresult(parent,g.reported_value,test_detail,'Neutralization Value (Total Acidity)')
def get_physical_density_details(self,sample_no):
#webnotes.errprint([filters])
physical_density=webnotes.conn.sql("""select name from `tabPhysical Condition And Density`
where sample_no='%s' and docstatus=1""" %(sample_no),debug=1)
webnotes.errprint(physical_density)
if physical_density:
pass
else:
webnotes.msgprint("There is no any physical condition and density test completed against given sample no='"+sample_no+"' and without completing physical condition and density test we can't do neutralisation test",raise_exception=1)
# def get_sample_details(doctype, txt, searchfield, start, page_len, filters):
# #webnotes.errprint([filters])
# return webnotes.conn.sql("""select s.sample_no from `tabSample Preparation Details` s,
# `tabTest Preparation` p where s.parent=p.name and s.parent='%s'
# and p.docstatus=1 and s.status='Pending'""" %filters['test_preparation'],debug=1)
@webnotes.whitelist()
def prepare_sample_for_sediment(source_name, target_doclist=None):
#webnotes.errprint(source_name)
return _prepare_sample_for_sediment(source_name, target_doclist)
def _prepare_sample_for_sediment(source_name, target_doclist=None, ignore_permissions=False):
from webnotes.model.mapper import get_mapped_doclist
#webnotes.errprint(source_name)
def postprocess(source, doclist):
doclist[0].test = 'Sediment'
#webnotes.errprint(source)
doclist = get_mapped_doclist("Neutralization Value", source_name, {
"Neutralization Value": {
"doctype": "Test Preparation",
# "field_map":{
# "test":'Sediment'
# },
"validation": {
"docstatus": ["=", 1]
}
}
},target_doclist, postprocess)
return [d.fields for d in doclist]
@webnotes.whitelist()
def create_session():
from webnotes.model.doc import Document
d = Document('Session')
d.status = 'Open'
d.test_name='Neutralization Value'
d.save()
return{
'session_id':d.name
}
@webnotes.whitelist()
def close_session(session_id):
from webnotes.model.doc import Document
d = Document('Session',session_id)
d.status = 'Close'
d.save()
return{
'session_id':''
}
@webnotes.whitelist()
def check_session():
session = webnotes.conn.sql("""select name from tabSession
where status = 'Open' and test_name='Neutralization Value' order by creation desc limit 1""",as_list=1)
if session:
return{
'session_id':session[0][0]
}
else:
return{
'session_id':''
}
| agpl-3.0 | 4,103,033,695,903,422,500 | 33.679825 | 239 | 0.717213 | false |
maxhutch/nek-analyze | interfaces/nek/slice.py | 1 | 2125 | from interfaces.abstract import AbstractSlice
import numpy as np
class DenseSlice(AbstractSlice):
""" Uninspired dense slice """
def __init__(self, shape, op=None):
self.shape = shape
self.op = op
if self.op is 'int' or self.op is None:
self.op = np.add
if self.op is np.maximum:
self.sl = np.zeros(self.shape) + np.finfo(np.float64).min
elif self.op is np.minimum:
self.sl = np.zeros(self.shape) + np.finfo(np.float64).max
else:
self.sl = np.zeros(self.shape)
def to_array(self):
return self.sl
def merge(self, sl2):
if isinstance(sl2, SparseSlice):
for pos,patch in sl2.patches.items():
self.add(pos, patch)
else:
self.sl = self.op(self.sl, sl2.to_array())
def add(self, pos, data):
block = data.shape
idx = tuple([np.s_[pos[j]:pos[j]+block[j]] for j in range(len(pos))])
self.sl[idx] = self.op(self.sl[idx], data)
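# Illustrative usage (hypothetical shapes):
#
#     d = DenseSlice((4, 4))
#     d.add((1, 1), np.ones((2, 2)))  # accumulate a 2x2 patch at offset (1, 1)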
class SparseSlice(AbstractSlice):
def __init__(self, shape, op=None):
self.shape = shape
self.op = op
if self.op is 'int' or self.op is None:
self.op = np.add
self.patches = {}
def to_array(self):
if self.op is np.maximum:
res = np.zeros(self.shape) + np.finfo(np.float64).min
elif self.op is np.minimum:
res = np.zeros(self.shape) + np.finfo(np.float64).max
else:
res = np.zeros(self.shape)
for pos,patch in self.patches.items():
shp = patch.shape
idx = tuple([np.s_[pos[j]:pos[j]+shp[j]] for j in range(len(pos))])
res[idx] = self.op(res[idx], patch)
return res
def merge(self, sl2):
for pos,patch in sl2.patches.items():
self.add(pos, patch)
def add(self, pos, data):
key = tuple(pos)
if key in self.patches:
self.patches[key] = self.op(self.patches[key], data)
else:
self.patches[key] = np.copy(data)
def __add__(self, other):
res = SparseSlice(self.shape, op=np.add)
for pos,patch in self.patches.items():
res.add(pos, patch)
for pos,patch in other.patches.items():
res.add(pos, patch)
return res
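# Illustrative usage (hypothetical shapes): patches stored under the same
# offset are combined with `op`, and merging into a DenseSlice replays the
# stored patches:
#
#     s = SparseSlice((4, 4))
#     s.add((0, 0), np.ones((2, 2)))
#     s.add((0, 0), np.ones((2, 2)))  # same offset -> values are added
#     d = DenseSlice((4, 4))
#     d.merge(s)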
| gpl-3.0 | -7,998,525,873,178,120,000 | 26.597403 | 75 | 0.600941 | false |
sminteractive/ndb-gae-admin | docs/conf.py | 1 | 8646 | # -*- coding: utf-8 -*-
#
# StarMaker ndb Admin documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 10 18:17:07 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
os.environ['APPENGINE_RUNTIME'] = 'python27'
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_theme']
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.pngmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'StarMaker ndb Admin'
copyright = u'2015, StarMaker Interactive'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'StarMakerndbAdmindoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'StarMakerndbAdmin.tex', u'StarMaker ndb Admin Documentation',
u'StarMaker Interactive', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'starmakerndbadmin', u'StarMaker ndb Admin Documentation',
[u'StarMaker Interactive'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'StarMakerndbAdmin', u'StarMaker ndb Admin Documentation',
u'StarMaker Interactive', 'StarMakerndbAdmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 | -1,776,324,986,875,572,700 | 30.67033 | 85 | 0.707842 | false |
dirmeier/dataframe | dataframe/dataframe.py | 1 | 7810 | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = '[email protected]'
import dataframe
from ._dataframe_abstract import ADataFrame
from ._dataframe_column_set import DataFrameColumnSet
from ._check import is_none, is_callable, has_elements
from ._piping_exception import PipingException
class DataFrame(ADataFrame):
"""
The base DataFrame class.
"""
def __init__(self, **kwargs):
"""
Constructor for DataFrame.
        :param kwargs: standard keyword arguments, i.e. a set of named lists
        :type kwargs: list of named lists
:return: returns a new DataFrame object
:rtype: DataFrame
"""
self.__data_columns = DataFrameColumnSet(**kwargs)
def __iter__(self):
"""
Iterator implementation for DataFrame.
Every iteration yields one row of the DataFrame.
:return: returns a row from the DataFrame
:rtype: DataFrameRow
"""
for i in range(self.nrow):
yield self.__row(i)
def __getitem__(self, item):
"""
        Getter method for DataFrame. Returns the column with name ``item``,
        or the matching row(s) when ``item`` is an int, slice, tuple or list.
        :param item: the name of a column, or a row index/slice/list of indexes
        :type item: str | int | slice | tuple | list
        :return: returns a column or row(s) from the DataFrame
        :rtype: DataFrameColumn | DataFrameRow | list(DataFrameRow)
"""
if isinstance(item, str) and item in self.colnames:
return self.__data_columns[self.colnames.index(item)]
elif isinstance(item, int):
return self.__row(item)
elif isinstance(item, slice):
return self.__rows(list(range(*item.indices(self.nrow))))
elif isinstance(item, tuple):
return self.__rows(list(item))
elif isinstance(item, list):
return self.__rows(item)
return None
def __repr__(self):
"""
String representation of DataFrame when print is called.
:return: returns the string representation
:rtype: str
"""
return self.__str__()
def __str__(self):
"""
ToString method for DataFrame.
:return: returns the string representation
:rtype: str
"""
return self.__data_columns.__str__()
def __rrshift__(self, other):
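        # a plain DataFrame cannot be the target of the >> piping operator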
        raise PipingException("cannot pipe into a plain DataFrame")
def aggregate(self, clazz, new_col, *args):
"""
Aggregate the rows of the DataFrame into a single value.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
        :param args: list of column names that the function
         should be applied to
:type args: tuple
:return: returns a new dataframe object with the aggregated value
:rtype: DataFrame
"""
if is_callable(clazz) and not is_none(new_col) and has_elements(*args):
return self.__do_aggregate(clazz, new_col, *args)
def __do_aggregate(self, clazz, new_col, *col_names):
# get columns
colvals = [self[x] for x in col_names]
if colvals is None:
return None
# instantiate class and call
res = [clazz()(*colvals)]
if len(res) != 1:
raise ValueError("The function you provided " +
"yields an array of false length!")
return DataFrame(**{new_col: res})
def subset(self, *args):
"""
Subset only some of the columns of the DataFrame.
:param args: list of column names of the object that should be subsetted
:type args: tuple
:return: returns dataframe with only the columns you selected
:rtype: DataFrame
"""
cols = {}
for k in self.colnames:
if k in args:
cols[str(k)] = \
self.__data_columns[self.colnames.index(k)].values
return DataFrame(**cols)
def group(self, *args):
"""
Group the dataframe into row-subsets.
        :param args: list of column names that should be used for grouping
:type args: tuple
:return: returns a dataframe that has grouping information
:rtype: GroupedDataFrame
"""
return dataframe.GroupedDataFrame(self, *args)
def modify(self, clazz, new_col, *args):
"""
Modify some columns (i.e. apply a function) and add the
result to the table.
:param clazz: name of a class that extends class Callable
:type clazz: class
:param new_col: name of the new column
:type new_col: str
        :param args: list of column names that the
         function should be applied to
:type args: tuple
        :return: returns a new dataframe object with the modified values,
i.e. the new column
:rtype: DataFrame
"""
if is_callable(clazz) and not is_none(new_col) and has_elements(*args):
return self.__do_modify(clazz, new_col, *args)
def __do_modify(self, clazz, new_col, *col_names):
colvals = [self[x] for x in col_names]
if colvals is None:
return None
# instantiate class and call
res = clazz()(*colvals)
res = [res] if not isinstance(res, list) else res
if len(res) != len(colvals[0].values):
raise ValueError("The function you provided " +
"yields an array of false length!")
cols = {column.colname: column.values for column in self.__data_columns}
cols[new_col] = res
return DataFrame(**cols)
@property
def nrow(self):
"""
Getter for the number of rows in the DataFrame.
:return: returns the number of rows
:rtype: int
"""
return self.__data_columns.nrow
@property
def ncol(self):
"""
Getter for the number of columns in the DataFrame.
:return: returns the number of columns
:rtype: int
"""
return self.__data_columns.ncol
@property
def colnames(self):
"""
Getter for the columns names of the DataFrame.
:return: returns a list of column names
:rtype: list(str)
"""
return self.__data_columns.colnames
def which_colnames(self, *args):
"""
Computes the indexes of the columns in the DataFrame.
:param args: list of column names
:type args: tuple
:return: returns a list of indexes
:rtype: list(int)
"""
return self.__data_columns.which_colnames(*args)
def cbind(self, **kwargs):
"""
Bind a column to the DataFrame.
:param kwargs: named list of elements you want to add
:type kwargs: keyword tuple
:return: self
:rtype: DataFrame
"""
self.__data_columns.cbind(**kwargs)
return self
def __rows(self, idxs):
return self.__data_columns.rows(idxs)
def __row(self, idx):
return self.__data_columns.row(idx)
| gpl-3.0 | -6,358,612,851,086,372,000 | 30.365462 | 80 | 0.596927 | false |
LabProdam/LabDiario | ChefeDeGabinete/Exoneracao.py | 1 | 2777 | #!/usr/bin/python
#coding: utf-8
from DiarioTools.Parser import *
from DiarioTools.Process import *
from DiarioTools.Search import *
import re
class ParseExoneracaoChefeDeGabinete(GenericParser):
def Initialize(self):
self.AddExpression("^\s*Exonerar.{0,1000}?(senhora|senhor)([^,]+).{0,400}?Chefe de Gabinete.(.+)", [2,3,0], re.I|re.M)
class SearchExoneracaoChefeDeGabinete(DlSearch):
def SetOptions(self):
self.options["sort"] = u"data desc"
self.query = "exonerar \"chefe de gabinete\""
class ProcessorExoneracaoChefeDeGabinete(ResponseProcessor):
def __init__(self, configInstance, searchObject, parseObject, fileName, sessionName):
super(ProcessorExoneracaoChefeDeGabinete, self).__init__(configInstance, searchObject, parseObject, sessionName)
self.fileName = fileName
self.records = []
with open(self.fileName, "a") as fd:
fd.write("*** Exonerações ***\r\n")
def Persist(self, data):
if len(data) > 0:
strOut = """Em """ + self.ProcessDate(data) + """, """ + self.ProcessName(data) + """ foi exonerado do cargo Chefe de Gabinete """ + self.ProcessGabinete(data) + "\n"
self.records.append(strOut.encode("utf-8"))
with open(self.fileName, "a") as fd:
fd.write(strOut.encode("utf-8"))
def ProcessEnd(self):
message = "*** Exonerações ***\r\n"
if (len(self.records) == 0):
message += """Nenhum Chefe de Gabinete exonerado neste período\r\n\r\n"""
Log.Log("Sem Alterações")
else:
message += "\r\n".join(self.records)
message += "\r\n"
return message
def ProcessName(self, data):
return data[0]
def ProcessGabinete(self, data):
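        # Extract the institution the Chefe de Gabinete belonged to; the "da"/"do" prefix follows the institution's grammatical gender in Portuguese.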
gabineteRe = re.search("(Funda..o|Controladoria|Secretaria|Subprefeitura|Superintend.ncia)\s*,?\s*(([^\.](?! constante))*)", data[1], re.I)
if gabineteRe is not None:
gabineteFromData = gabineteRe.group(0)
gabineteFromData = "da " + gabineteFromData
else:
gabineteRe = re.search("(Instituto|Servi.o)\s*,?\s*([^,]*)", data[1], re.I)
if gabineteRe is not None:
gabineteFromData = gabineteRe.group(0)
gabineteFromData = "do " + gabineteFromData
else:
gabineteRe = re.search("^([^,]*).\s*s.mbolo", data[1], re.I)
if gabineteRe is not None:
gabineteFromData = gabineteRe.group(1)
else:
gabineteFromData = data[1]
gabineteFromData = re.sub("s.mbolo \w*,", "", gabineteFromData, re.I)
gabineteFromData = re.sub(",?\s*da Chefia de Gabinete[^,]*x", "", gabineteFromData, re.I)
gabineteFromData = re.sub(",?\s*constante.*$", "", gabineteFromData, re.I)
return gabineteFromData
def ProcessDate(self, data):
date = self.GetDateFromId()
dateRe = re.search("a partir de ([^,]*)", data[2], re.I)
if dateRe is not None:
date = dateRe.group(1)
return date
| gpl-2.0 | -2,094,007,997,226,392,300 | 37.472222 | 169 | 0.662094 | false |
safchain/contrail-sandesh | library/python/pysandesh/sandesh_client.py | 1 | 3139 | #
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Sandesh Client
#
from sandesh_connection import SandeshConnection
from sandesh_logger import SandeshLogger
from transport import TTransport
from protocol import TXMLProtocol
from sandesh_uve import SandeshUVETypeMaps
class SandeshClient(object):
def __init__(self, sandesh, primary_collector, secondary_collector,
discovery_client):
self._sandesh_instance = sandesh
self._primary_collector = primary_collector
self._secondary_collector = secondary_collector
self._discovery_client = discovery_client
self._logger = sandesh._logger
self._connection = None
#end __init__
# Public functions
def initiate(self):
self._connection = SandeshConnection(self._sandesh_instance,
self,
self._primary_collector,
self._secondary_collector,
self._discovery_client)
#end initiate
def connection(self):
return self._connection
#end connection
def send_sandesh(self, sandesh):
if (self._connection.session() is not None) and \
(self._sandesh_instance._module is not None) and \
(self._sandesh_instance._module != ""):
self._connection.session().enqueue_sandesh(sandesh)
else:
if (self._connection.session() is None):
error_str = "No Connection"
else:
error_str = "No ModuleId"
if self._sandesh_instance.is_logging_dropped_allowed(sandesh):
self._logger.error(
"SANDESH: %s: %s" % (error_str, sandesh.log()))
return 0
#end send_sandesh
def send_uve_sandesh(self, uve_sandesh):
self._connection.statemachine().on_sandesh_uve_msg_send(uve_sandesh)
#end send_uve_sandesh
def handle_sandesh_msg(self, sandesh_name, sandesh_xml):
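        # Requests arrive XML-encoded; decode with the XML protocol and enqueue for handling.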
transport = TTransport.TMemoryBuffer(sandesh_xml)
protocol_factory = TXMLProtocol.TXMLProtocolFactory()
protocol = protocol_factory.getProtocol(transport)
sandesh_req = self._sandesh_instance.get_sandesh_request_object(sandesh_name)
if sandesh_req:
if sandesh_req.read(protocol) == -1:
self._logger.error('Failed to decode sandesh request "%s"' \
% (sandesh_name))
else:
self._sandesh_instance.enqueue_sandesh_request(sandesh_req)
#end handle_sandesh_msg
def handle_sandesh_ctrl_msg(self, sandesh_ctrl_msg):
uve_type_map = {}
self._logger.debug('Number of uve types in sandesh control message is %d' % (len(sandesh_ctrl_msg.type_info)))
for type_info in sandesh_ctrl_msg.type_info:
uve_type_map[type_info.type_name] = type_info.seq_num
self._sandesh_instance._uve_type_maps.sync_all_uve_types(uve_type_map, self._sandesh_instance)
#end handle_sandesh_ctrl_msg
#end class SandeshClient
| apache-2.0 | -5,720,518,425,684,812,000 | 37.280488 | 118 | 0.606881 | false |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/puls4.py | 1 | 1992 | # coding: utf-8
from __future__ import unicode_literals
from .prosiebensat1 import ProSiebenSat1BaseIE
from ..utils import (
unified_strdate,
parse_duration,
compat_str,
)
class Puls4IE(ProSiebenSat1BaseIE):
_VALID_URL = r'https?://(?:www\.)?puls4\.com/(?P<id>[^?#&]+)'
_TESTS = [{
'url': 'http://www.puls4.com/2-minuten-2-millionen/staffel-3/videos/2min2miotalk/Tobias-Homberger-von-myclubs-im-2min2miotalk-118118',
'md5': 'fd3c6b0903ac72c9d004f04bc6bb3e03',
'info_dict': {
'id': '118118',
'ext': 'flv',
'title': 'Tobias Homberger von myclubs im #2min2miotalk',
'description': 'md5:f9def7c5e8745d6026d8885487d91955',
'upload_date': '20160830',
'uploader': 'PULS_4',
},
}, {
'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident.-Norbert-Hofer',
'only_matching': True,
}, {
'url': 'http://www.puls4.com/pro-und-contra/wer-wird-prasident/Ganze-Folgen/Wer-wird-Praesident-Analyse-des-Interviews-mit-Norbert-Hofer-416598',
'only_matching': True,
}]
_TOKEN = 'puls4'
_SALT = '01!kaNgaiNgah1Ie4AeSha'
_CLIENT_NAME = ''
def _real_extract(self, url):
path = self._match_id(url)
content_path = self._download_json(
'http://www.puls4.com/api/json-fe/page/' + path, path)['content'][0]['url']
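        # the first content block of the page JSON carries the URL of the media description JSON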
media = self._download_json(
'http://www.puls4.com' + content_path,
content_path)['mediaCurrent']
player_content = media['playerContent']
info = self._extract_video_info(url, player_content['id'])
info.update({
'id': compat_str(media['objectId']),
'title': player_content['title'],
'description': media.get('description'),
'thumbnail': media.get('previewLink'),
'upload_date': unified_strdate(media.get('date')),
'duration': parse_duration(player_content.get('duration')),
'episode': player_content.get('episodePartName'),
'show': media.get('channel'),
'season_id': player_content.get('seasonId'),
'uploader': player_content.get('sourceCompany'),
})
return info
| gpl-3.0 | 8,175,801,211,725,513,000 | 33.947368 | 147 | 0.677209 | false |
aspiers/gertty | gertty/alembic/versions/50344aecd1c2_add_files_table.py | 1 | 3437 | """add files table
Revision ID: 50344aecd1c2
Revises: 1bb187bcd401
Create Date: 2015-04-13 08:08:08.682803
"""
# revision identifiers, used by Alembic.
revision = '50344aecd1c2'
down_revision = '1bb187bcd401'
import re
import sys
from alembic import op, context
import sqlalchemy as sa
import git.exc
import gertty.db
import gertty.gitrepo
def upgrade():
op.create_table('file',
sa.Column('key', sa.Integer(), nullable=False),
sa.Column('revision_key', sa.Integer(), nullable=False, index=True),
sa.Column('path', sa.Text(), nullable=False, index=True),
sa.Column('old_path', sa.Text(), index=True),
sa.Column('status', sa.String(length=1)),
sa.Column('inserted', sa.Integer()),
sa.Column('deleted', sa.Integer()),
sa.PrimaryKeyConstraint('key')
)
pathre = re.compile('((.*?)\{|^)(.*?) => (.*?)(\}(.*)|$)')
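    # pathre parses git's rename notation in diffstat paths: "old => new", optionally wrapped as "prefix{old => new}suffix"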
insert = sa.text('insert into file (key, revision_key, path, old_path, status, inserted, deleted) '
' values (NULL, :revision_key, :path, :old_path, :status, :inserted, :deleted)')
conn = op.get_bind()
countres = conn.execute('select count(*) from revision')
revisions = countres.fetchone()[0]
if revisions > 50:
print('')
print('Adding support for searching for changes by file modified. '
'This may take a while.')
qres = conn.execute('select p.name, c.number, c.status, r.key, r.number, r."commit", r.parent from project p, change c, revision r '
'where r.change_key=c.key and c.project_key=p.key order by p.name')
count = 0
for (pname, cnumber, cstatus, rkey, rnumber, commit, parent) in qres.fetchall():
count += 1
sys.stdout.write('Diffstat revision %s / %s\r' % (count, revisions))
sys.stdout.flush()
ires = conn.execute(insert, revision_key=rkey, path='/COMMIT_MSG', old_path=None,
status=None, inserted=None, deleted=None)
repo = gertty.gitrepo.get_repo(pname, context.config.gertty_app.config)
try:
stats = repo.diffstat(parent, commit)
except git.exc.GitCommandError:
# Probably a missing commit
if cstatus not in ['MERGED', 'ABANDONED']:
print("Unable to examine diff for %s %s change %s,%s" % (cstatus, pname, cnumber, rnumber))
continue
for stat in stats:
try:
(added, removed, path) = stat
            except ValueError:
                if cstatus not in ['MERGED', 'ABANDONED']:
                    print("Empty diffstat for %s %s change %s,%s" % (cstatus, pname, cnumber, rnumber))
                # nothing was unpacked for this stat entry, so skip it
                continue
m = pathre.match(path)
status = gertty.db.File.STATUS_MODIFIED
old_path = None
if m:
status = gertty.db.File.STATUS_RENAMED
pre = m.group(2) or ''
post = m.group(6) or ''
old_path = pre+m.group(3)+post
path = pre+m.group(4)+post
try:
added = int(added)
except ValueError:
added = None
try:
removed = int(removed)
except ValueError:
removed = None
conn.execute(insert, revision_key=rkey, path=path, old_path=old_path,
status=status, inserted=added, deleted=removed)
print('')
def downgrade():
pass
| apache-2.0 | -1,252,221,657,392,162,000 | 35.56383 | 136 | 0.571429 | false |
KeithSloan/PressureClickBarometer | ReadSensor.py | 1 | 1544 | import smbus
import datetime
def GetTime ():
now = datetime.datetime.now()
return (str(now.hour)+":"+str(now.minute)+"."+str(now.second))
#init bus
bus = smbus.SMBus(1)
print GetTime()
# power up LPS331AP pressure sensor & set BDU
bus.write_byte_data(0x5d, 0x20, 0b10000100)
#write value 0b1 to register 0x21 on device at address 0x5d
# one shot enable
bus.write_byte_data(0x5d,0x21, 0b1)
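# (assumes the one-shot conversion has completed before the result registers are read below)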
Temp_LSB = bus.read_byte_data(0x5d, 0x2b)
Temp_MSB = bus.read_byte_data(0x5d, 0x2c)
#combine LSB & MSB
count = (Temp_MSB << 8) | Temp_LSB
# The raw value here is negative (MSB set), so convert the 16-bit two's complement reading to decimal
comp = count - (1 << 16)
#calc temp according to data sheet
Temp = 42.5 + (comp/480.0)
print "Temperature: %.2f" % Temp
#print "Temp MSB ",format(Temp_MSB,'02x')
#print "Temp LSB ",format(Temp_LSB,'02x')
#print "Temp 2 comp ",format(count,'04x')
#print "Temp : ",format(comp,'04x')
#print "Temp MSB dec : ",Temp_MSB
#print "Temp_LSB dec : ",Temp_LSB
Pressure_LSB = bus.read_byte_data(0x5d, 0x29)
Pressure_MSB = bus.read_byte_data(0x5d, 0x2a)
Pressure_XLB = bus.read_byte_data(0x5d, 0x28)
count = (Pressure_MSB << 16) | ( Pressure_LSB << 8 ) | Pressure_XLB
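# Pressure is a 24-bit reading; the 4096 LSB/mbar scale factor is from the LPS331AP datasheet.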
#comp = count - (1 << 24)
#Pressure value is positive so just use value as decimal
Pressure = count/4096.0
print "Pressure: %.2f" % Pressure
#print "Pressure MSB ",format(Pressure_MSB,'02x')
#print "Pressure LSB ",format(Pressure_LSB,'02x')
#print "Pressure XLB ",format(Pressure_XLB,'02x')
#print "Pressure 2 comp ",format(count,'06x')
#print "Pressure : ",format(comp,'04x')
| gpl-2.0 | -7,952,943,462,252,300,000 | 25.62069 | 67 | 0.686528 | false |
eandersson/amqpstorm | examples/simple_consumer.py | 1 | 1364 | """
A simple example consuming messages from RabbitMQ.
"""
import logging
from amqpstorm import Connection
logging.basicConfig(level=logging.INFO)
def on_message(message):
"""This function is called on message received.
:param message:
:return:
"""
print("Message:", message.body)
# Acknowledge that we handled the message without any issues.
message.ack()
# Reject the message.
# message.reject()
# Reject the message, and put it back in the queue.
# message.reject(requeue=True)
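# Assumes a RabbitMQ broker on localhost with the default guest/guest credentials.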
with Connection('localhost', 'guest', 'guest') as connection:
with connection.channel() as channel:
# Declare the Queue, 'simple_queue'.
channel.queue.declare('simple_queue')
# Set QoS to 100.
# This will limit the consumer to only prefetch a 100 messages.
# This is a recommended setting, as it prevents the
# consumer from keeping all of the messages in a queue to itself.
channel.basic.qos(100)
# Start consuming the queue 'simple_queue' using the callback
# 'on_message' and last require the message to be acknowledged.
channel.basic.consume(on_message, 'simple_queue', no_ack=False)
try:
# Start consuming messages.
channel.start_consuming()
except KeyboardInterrupt:
channel.close()
| mit | 2,597,213,208,315,741,700 | 26.836735 | 73 | 0.656891 | false |
uclmr/inferbeddings | scripts/wn18/UCL_WN18_adv_hinge_v1.py | 1 | 4537 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import os
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
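    # yields one configuration dict per combination of the given hyperparameter values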
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.values()))
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/pminervi/workspace/inferbeddings/'
command = 'python3 {}/bin/kbp-cli.py' \
' --train {}/data/wn18/wordnet-mlj12-train.txt' \
' --valid {}/data/wn18/wordnet-mlj12-valid.txt' \
' --test {}/data/wn18/wordnet-mlj12-test.txt' \
' --clauses {}/data/wn18/clauses/clauses_0.9.pl' \
' --nb-epochs {}' \
' --lr {}' \
' --nb-batches {}' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {} --loss hinge' \
''.format(_path, _path, _path, _path, _path,
c['epochs'], c['lr'], c['batches'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_wn18_adv_hinge_v1.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the UCL cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space_transe = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['TransE'],
similarity=['l1', 'l2'],
margin=[1, 2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 10],
disc_epochs=[10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
hyperparameters_space_distmult_complex = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['DistMult', 'ComplEx'],
similarity=['dot'],
margin=[1, 2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 10],
disc_epochs=[10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
configurations_transe = cartesian_product(hyperparameters_space_transe)
configurations_distmult_complex = cartesian_product(hyperparameters_space_distmult_complex)
path = '/home/pminervi/workspace/inferbeddings/logs/ucl_wn18_adv_hinge_v1/'
if not os.path.exists(path):
os.makedirs(path)
configurations = list(configurations_transe) + list(configurations_distmult_complex)
for job_id, cfg in enumerate(configurations):
logfile = to_logfile(cfg, path)
completed = False
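        # a run counts as completed once its log contains the final filtered test metrics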
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
if args.debug:
print(line)
else:
file_name = 'ucl_wn18_adv_hinge_v1_{}.job'.format(job_id)
alias = ''
job_script = '#$ -S /bin/bash\n' \
'#$ -wd /tmp/\n' \
'#$ -l h_vmem=4G,tmem=4G\n' \
'#$ -l h_rt=24:00:00\n' \
'{}\n{}\n'.format(alias, line)
with open(file_name, 'w') as f:
f.write(job_script)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
| mit | -6,206,648,303,705,620,000 | 33.371212 | 112 | 0.517743 | false |
florath/init4boot | init4boot/plugins/multipath.py | 1 | 3244 | #
# multipath iSCSI plugin
#
# (c) 2008-2009 by flonatel ([email protected])
# (c) 2015 by Andreas Florath ([email protected])
#
# For licensing details see COPYING
#
import os
from init4boot.lib.FilesystemUtils import fsutils
class multipath(object):
def __init__(self, config, opts):
self.config = config
self.opts = opts
self.__root_dir = opts.root_dir
def check(self):
return fsutils.must_exist(self.__root_dir, ["sbin"], "multipath") \
and fsutils.must_exist(self.__root_dir, ["sbin"], "kpartx")
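    # Each go_* hook below returns a helper whose output() emits a shell fragment for the generated init script.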
def go_CommandLineEvaluation(self):
class CommandLineEvaluation:
def output(self, ofile):
ofile.write("""
multipath:*)
bv_deps="${bv_deps} network multipath"
;;
""")
return CommandLineEvaluation()
def go_HandleInitialModuleSetup(self):
class HandleInitialModuleSetup:
def output(self, ofile):
ofile.write("""
if check_bv "multipath"; then
logp "Handling multipath"
modprobe dm-multipath
modprobe dm-emc
modprobe dm-round-robin
fi
""")
return HandleInitialModuleSetup()
def go_SetupHighLevelTransport(self):
class SetupHighLevelTransport:
# iSCSI must be done before multipath
def deps(self):
return ["iSCSI", ]
def output(self, ofile):
ofile.write("""
multipath:*)
maybe_break multipath
logp "Handling multipath"
if [ -e /bin/multipath ]; then
# Multipath needs in some situations more than one run
for i in 1 2 3 ; do
/bin/multipath
sleep 1
/bin/multipath -ll
done
log "Accessing all disk once to get the state corrected"
# Note that the following can take about 30 seconds for EACH disk.
# So the things executed in parallel
ddpids=""
for disk in /dev/mapper/*; do
[ "${disk}" = "/dev/mapper/control" ] && continue
log "... ${disk}"
dd if=${disk} of=/dev/null bs=1024 count=1 >/dev/null 2>&1 &
ddpids="${ddpids} $!"
done
log "Waiting for possible multipath switchover to end"
wait ${ddpids}
log "Creating block devices for partitions"
for disk in /dev/mapper/*; do
[ "${disk}" = "/dev/mapper/control" ] && continue
log "... ${disk}"
/bin/kpartx -a ${disk}
done
else
log "Multipath enabled, but binary not available - ignoring multipath"
fi
logpe
;;
""")
return SetupHighLevelTransport()
# ======================================================================
# === Create hooks
def mi_Copy(self):
class Copy:
def output(self, c):
c.copy_exec("sbin/multipath")
c.copy_exec("sbin/kpartx")
c.copy_exec_w_path("devmap_name", ["sbin", ])
c.copy_exec_w_path("dmsetup", ["sbin", ])
# Not available in Debian stable
# (Part of kpartx package which is only available in unstable)
# c.copy("lib/udev/dmsetup_env", "lib/udev")
# Copy all the dependend multipath so libs
c.copytree("lib/multipath", "lib/multipath")
return Copy()
| gpl-3.0 | -1,211,690,267,482,373,000 | 25.809917 | 78 | 0.569667 | false |
MjAbuz/foundation | foundation/organisation/migrations/0016_auto__add_projectlist.py | 1 | 14482 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProjectList'
db.create_table(u'organisation_projectlist', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('theme', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.Theme'], null=True, blank=True)),
('project_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['organisation.ProjectType'], null=True, blank=True)),
))
db.send_create_signal(u'organisation', ['ProjectList'])
def backwards(self, orm):
# Deleting model 'ProjectList'
db.delete_table(u'organisation_projectlist')
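    # South's frozen ORM snapshot: 'models' mirrors the app's model definitions at the time this migration was created.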
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'organisation.board': {
'Meta': {'object_name': 'Board'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.BoardMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.boardmembership': {
'Meta': {'object_name': 'BoardMembership'},
'board': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Board']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.featuredproject': {
'Meta': {'object_name': 'FeaturedProject', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['organisation.Project']"})
},
u'organisation.networkgroup': {
'Meta': {'unique_together': "(('country', 'region'),)", 'object_name': 'NetworkGroup'},
'country': ('django_countries.fields.CountryField', [], {'max_length': '2'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'extra_information': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.NetworkGroupMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'position': ('geoposition.fields.GeopositionField', [], {'default': "'0,0'", 'max_length': '42', 'null': 'True', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.networkgroupmembership': {
'Meta': {'object_name': 'NetworkGroupMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'networkgroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.NetworkGroup']"}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.person': {
'Meta': {'ordering': "['name']", 'object_name': 'Person'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
u'organisation.project': {
'Meta': {'ordering': "('name',)", 'object_name': 'Project'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'homepage_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mailinglist_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'sourcecode_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'teaser': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Theme']", 'symmetrical': 'False', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '18', 'blank': 'True'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.ProjectType']", 'symmetrical': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.projectlist': {
'Meta': {'object_name': 'ProjectList', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'project_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.ProjectType']", 'null': 'True', 'blank': 'True'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'})
},
u'organisation.projecttype': {
'Meta': {'object_name': 'ProjectType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.theme': {
'Meta': {'object_name': 'Theme'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unit': {
'Meta': {'ordering': "['-order', 'name']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['organisation.Person']", 'through': u"orm['organisation.UnitMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.unitmembership': {
'Meta': {'object_name': 'UnitMembership'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Person']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Unit']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'organisation.workinggroup': {
'Meta': {'object_name': 'WorkingGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incubation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'theme': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['organisation.Theme']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
    complete_apps = ['organisation']
| mit | -6,140,818,890,270,693,000 | 79.016575 | 200 | 0.558348 | false |