ramineni/myironic | ironic/common/image_service.py

# Copyright 2010 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.utils import importutils
from oslo_config import cfg
glance_opts = [
cfg.StrOpt('glance_host',
default='$my_ip',
help='Default glance hostname or IP address.'),
cfg.IntOpt('glance_port',
default=9292,
help='Default glance port.'),
cfg.StrOpt('glance_protocol',
default='http',
help='Default protocol to use when connecting to glance. '
'Set to https for SSL.'),
cfg.ListOpt('glance_api_servers',
help='A list of the glance api servers available to ironic. '
'Prefix with https:// for SSL-based glance API servers. '
'Format is [hostname|IP]:port.'),
cfg.BoolOpt('glance_api_insecure',
default=False,
help='Allow to perform insecure SSL (https) requests to '
'glance.'),
cfg.IntOpt('glance_num_retries',
default=0,
help='Number of retries when downloading an image from '
'glance.'),
    cfg.StrOpt('auth_strategy',
               default='keystone',
               help='Authentication strategy to use when connecting to '
                    'glance.'),
]
CONF = cfg.CONF
CONF.register_opts(glance_opts, group='glance')
def import_versioned_module(version, submodule=None):
module = 'ironic.common.glance_service.v%s' % version
if submodule:
module = '.'.join((module, submodule))
return importutils.try_import(module)
def Service(client=None, version=1, context=None):
module = import_versioned_module(version, 'image_service')
service_class = getattr(module, 'GlanceImageService')
return service_class(client, version, context)
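# Illustrative usage (not part of the original module; 'ctx' stands in for
# whatever request context the caller already has):
#
#     svc = Service(version=1, context=ctx)
#     # svc is an ironic.common.glance_service.v1.image_service.GlanceImageService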

gic888/MIEN | optimizers/randsearch.py

#!/usr/bin/env python
# encoding: utf-8
#Created by Graham Cummins on 2007-08-23.
# Copyright (C) 2007 Graham I Cummins
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
#
from mien.optimizers.base import *
from mien.math.array import *
class RandomSearch(Optimizer):
	'''Optimizer subclass. This is not an optimizer per se, but uses the same setup and evaluation as an optimizer: rather than searching every combination allowed by the range and precision of the ParameterSet, this class repeatedly evaluates random points in the parameter space.'''
def next(self):
'''Return a random point'''
r=self.random()
return r
def saferecord(self, c, f):
self.lock.acquire()
try:
n=self.record(c, f)
self._nunits+=1
if f[0]>0:
print f
#self._abort=True
if not self._nunits % 100:
print self._nunits
self.lock.release()
except:
self.report('record attempt failed')
self.lock.release()
def evaluate(self, c):
self.general_eval(c, self.saferecord)

nuanri/hiblog | src/app/blog/console_views.py

# coding: utf-8
import json
import requests
from hanlder import RequestHandler as BaseRequestHandler
import tornado.web
from tornado.web import authenticated
from ..utils import deal_errors, get_local_time, get_local_time_string
from .forms import BlogWriterForm, BlogCatalogWriterForm
class RequestHandler(BaseRequestHandler):
def q(self, query_string, query_variables=None,headers=None, api_url=None):
if not (headers and isinstance(headers, dict)):
headers = {}
if query_variables is None:
query_variables = {}
sid = self.get_secure_cookie('SID')
if api_url:
url = "http://127.0.0.1:3000" + api_url
else:
url = "http://127.0.0.1:3000/console/graphql"
if sid:
if 'Authorization' not in headers:
headers['Authorization'] = 'OOC ' + sid.decode()
s = requests.Session()
r = s.post(url, json={"query": query_string, "variables": query_variables}, headers=headers)
return r.json()
class ApiHandler(RequestHandler):
def post(self, URL):
body = self.request.body.decode()
body = json.loads(body)
# query_string = self.get_argument('query', "")
query_string = body["query"]
variables = body.get("variables", None)
# print("variables==>",variables)
r = self.q(query_string, variables, api_url=URL)
self.write(r)
# console article section begins
class ConsoleBlogdexHandler(RequestHandler):
BLOG_LIST_QUERY = '''
query Blogs(
$first: Int
$sort_by: String
$sort_direction: String
){
blog{id,...F1}
}
fragment F1 on BlogApi{
articles(
first: $first,
sort_by: $sort_by,
sort_direction: $sort_direction
) {
edges {
node {
id
title
body
body_markup
updated
uid
is_public
catalog {
name
}
}
}
pageInfo {
hasPreviousPage
startCursor
endCursor
hasNextPage
}
}
}'''
@authenticated
def get(self):
# print("current_user=",self.current_user.username)
bloglist_query_variables = {
"first": 12,
"sort_by": "updated",
"sort_direction": "desc",
}
bloglist_query_variables = json.dumps(bloglist_query_variables)
r = self.q(self.BLOG_LIST_QUERY, bloglist_query_variables)
# print("index==>roooo",r)
blog_list = r.get("data").get("blog").get("articles").get("edges")
self.render('/blog/console/blog_articles.html', blog_list=blog_list, get_local_time=get_local_time )
class BlogWriterHandler(RequestHandler):
BLOG_WRITER_QUERY='''
mutation BlogArticleNew(
$input_0: BlogArticleNewInput!
) {
blog_article_new(input: $input_0) {
article {
id
title
abstract
body
body_markup
is_public
uid
catalog{
uid
}
}
}
}'''
BLOG_CATALOG_LIST_QUERY = '''
query BlogCatalogs{
blog{id,...F1}
}
fragment F1 on BlogApi{
catalogs {
name
summary
body_html
created
updated
uid
public_article_count
private_article_count
}
}'''
def get_catalogs(self):
r = self.q(self.BLOG_CATALOG_LIST_QUERY)
catalog_list = r.get("data").get("blog").get("catalogs")
return catalog_list
@authenticated
def get(self):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
self.render("blog/console/blog_writer.html", form=form, catalog_list=catalog_list)
@authenticated
def post(self):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
if not form.validate():
error = form.errors
tag_list = []
tag_uid = form.tags.data.split(",")[::2]
tag_name = form.tags.data.split(",")[1::2]
for u,n in zip(tag_uid, tag_name):
tag_list.append((u,n))
return self.render("blog/console/blog_writer.html", form=form,
catalog_list=catalog_list, error=error,tag_list=tag_list)
blogwriter_query_variables = {
"input_0":
{
"title": "",
"clientMutationId": "1",
"abstract": "",
"body": "",
"is_public": False,
"catalog_uid": "",
"tags": "",
}
}
blogwriter_query_variables["input_0"]["is_public"] = form.is_public.data
blogwriter_query_variables["input_0"]["title"] = form.title.data
blogwriter_query_variables["input_0"]["abstract"] = form.abstract.data
blogwriter_query_variables["input_0"]["body"] = form.body.data
blogwriter_query_variables["input_0"]["catalog_uid"] = form.catalog_uid.data
blogwriter_query_variables["input_0"]["tags"] = form.tags.data.split(",")[::2]
# print("form.tags.data==>",form.tags.data)
blogwriter_query_variables = json.dumps(blogwriter_query_variables)
r = self.q(self.BLOG_WRITER_QUERY, blogwriter_query_variables)
if r.get("errors"):
errors = r.get("errors")
error = deal_errors(errors)
tag_list = []
tag_uid = form.tags.data.split(",")[::2]
tag_name = form.tags.data.split(",")[1::2]
for u,n in zip(tag_uid, tag_name):
tag_list.append((u,n))
            return self.render("blog/console/blog_writer.html", form=form,
                catalog_list=catalog_list, error=error, tag_list=tag_list)
UID = r.get("data").get("blog_article_new").get("article")["uid"]
self.redirect("/console/blog/article/" + UID)
class BlogShowHandler(RequestHandler):
BLOG_SHOW_QUERY = '''
query Blog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
article: article_u (
uid: $uid
) {
title
body: body_html
is_public
uid
catalog {
name
}
tags {
uid
name
}
}
}
'''
@authenticated
def get(self, UID):
blogshow_query_variables = {
"uid": UID,
}
blogshow_query_variables = json.dumps(blogshow_query_variables)
r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
# print('r--->',r)
blog = r.get("data").get("blog").get("article")
self.render('blog/console/blog_show.html', blog=blog)
class BlogEditHandler(RequestHandler):
BLOG_EDIT_QUERY = '''
mutation MyMutation(
$input: BlogArticleEditInput!
) {
blog_article_edit(input: $input) {
article {
id
title
}
}
}'''
BLOG_SHOW_QUERY = '''
query Blog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
article: article_u (
uid: $uid
) {
title
body
abstract
is_public
catalog {
name
uid
}
tags {
uid
name
}
}
}'''
BLOG_CATALOG_LIST_QUERY = '''
query BlogCatalogs{
blog{id,...F1}
}
fragment F1 on BlogApi{
catalogs {
name
summary
body_html
created
updated
uid
public_article_count
private_article_count
}
}'''
def get_catalogs(self):
r = self.q(self.BLOG_CATALOG_LIST_QUERY)
catalog_list = r.get("data").get("blog").get("catalogs")
return catalog_list
@authenticated
def get(self, UID):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
blogshow_query_variables = {
"uid": UID,
}
blogshow_query_variables = json.dumps(blogshow_query_variables)
r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
blog = r.get("data").get("blog").get("article")
# print("blog==>",blog)
self.render("blog/console/blog_edit.html",
form=form, blog=blog, catalog_list=catalog_list)
@authenticated
def post(self, UID):
form = BlogWriterForm(self)
catalog_list = self.get_catalogs()
blogshow_query_variables = {
"uid": UID,
}
r = self.q(self.BLOG_SHOW_QUERY, blogshow_query_variables)
blog = r.get("data").get("blog").get("article")
if not form.validate():
error = form.errors
# tag_list = []
# tag_uid = form.tags.data.split(",")[::2]
# tag_name = form.tags.data.split(",")[1::2]
# for u,n in zip(tag_uid, tag_name):
# tag_list.append((u,n))
return self.render("blog/console/blog_edit.html",
form=form,blog=blog,
catalog_list=catalog_list,
error = error)
blogedit_query_variables = {
"input": {
"clientMutationId": "1",
"uid": "",
"title": "",
"abstract": "",
"body": "",
"is_public": "",
"catalog_uid": "",
"tags": ""
}
}
blogedit_query_variables["input"]["is_public"] = form.is_public.data
blogedit_query_variables["input"]["uid"] = UID
blogedit_query_variables["input"]["title"] = form.title.data
blogedit_query_variables["input"]["abstract"] = form.abstract.data
blogedit_query_variables["input"]["body"] = form.body.data
blogedit_query_variables["input"]["catalog_uid"] = form.catalog_uid.data
blogedit_query_variables["input"]["tags"] = form.tags.data.split(",")[::2]
blogedit_query_variables = json.dumps(blogedit_query_variables)
r = self.q(self.BLOG_EDIT_QUERY, blogedit_query_variables)
self.redirect("/console/blog/article/" + UID)
class BlogDelHandler(RequestHandler):
BLOG_DEL_QUERY = '''
mutation MyMutaion (
$input_0: BlogArticleDeleteInput!
) {
blog_article_delete(input: $input_0) {
status
message
}
}'''
@authenticated
def get(self, UID):
blogdel_query_variables = {
"input_0": {
"clientMutationId": "1",
"uid": ""
}
}
blogdel_query_variables["input_0"]["uid"] = UID
blogdel_query_variables = json.dumps(blogdel_query_variables)
r = self.q(self.BLOG_DEL_QUERY, blogdel_query_variables)
# print("status===>",r)
status = r.get("data").get("blog_article_delete").get('status')
self.redirect("/console/blog/article")
# console article section ends
# console catalog section begins
class BlogCatalogindexHandler(RequestHandler):
BLOG_CATALOG_LIST_QUERY = '''
query BlogCatalogs{
blog{id,...F1}
}
fragment F1 on BlogApi{
catalogs {
name
summary
body_html
created
updated
uid
public_article_count
private_article_count
}
}'''
@authenticated
def get(self):
r = self.q(self.BLOG_CATALOG_LIST_QUERY)
catalog_list = r.get("data").get("blog").get("catalogs")
self.render('/blog/console/catalogs.html',
catalog_list=catalog_list,
get_local_time_string=get_local_time_string )
class BlogCatalogWriterHandler(RequestHandler):
pass
#
# BLOG_CATALOG_WRITER_QUERY='''
# mutation BlogCatalogNew(
# $input_0: BlogCatalogNewInput!
# ) {
# blog_catalog_new(input: $input_0) {
# catalog {
# uid
# body_html
# name
# public_article_count
# private_article_count
# created
# updated
# }
# }
# }'''
#
# @authenticated
# def get(self):
# form = BlogCatalogWriterForm(self)
# self.render("blog/console/catalog_writer.html")
#
# @authenticated
# def post(self):
# form = BlogCatalogWriterForm(self)
# if not form.validate():
# error = form.errors
# # print("error==>",error)
# return self.render("blog/console/catalog_writer.html",
# form=form, error=error)
#
# catalogwriter_query_variables = {
# "input_0":
# {
# "clientMutationId": "1",
# "name": "",
# "summary": "",
# "body": "",
# }
# }
#
# catalogwriter_query_variables["input_0"]["name"] = form.name.data
# catalogwriter_query_variables["input_0"]["summary"] = form.summary.data
# catalogwriter_query_variables["input_0"]["body"] = form.body.data
#
# r = self.q(self.BLOG_CATALOG_WRITER_QUERY, catalogwriter_query_variables)
# # print("r===>",r)
# if r.get("errors"):
# errors = r.get("errors")
# error = deal_errors(errors)
# self.render("blog/console/catalog_writer.html",
# form=form, error=error)
#
# UID = r.get("data").get("blog_catalog_new").get("catalog")["uid"]
# self.redirect("/console/blog/catalog/" + UID)
class BlogCatalogShowHandler(RequestHandler):
CATALOG_SHOW_QUERY = '''
query BlogCatalog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
catalog: catalog_u(
uid: $uid
) {
body
name
summary
public_article_count
private_article_count
created
updated
uid
}
}'''
@authenticated
def get(self, UID):
catalogshow_query_variables = {
"uid": UID,
}
catalogshow_query_variables = json.dumps(catalogshow_query_variables)
r = self.q(self.CATALOG_SHOW_QUERY, catalogshow_query_variables)
catalog = r.get("data").get("blog").get("catalog")
self.render("blog/console/catalog_show.html", catalog=catalog, get_local_time_string=get_local_time_string )
class BlogCatalogEditHandler(RequestHandler):
CATALOG_EDIT_QUERY = '''
mutation MyMutation(
$input: BlogCatalogEditInput!
) {
blog_catalog_edit(input: $input) {
catalog {
id
uid
name
}
}
}'''
CATALOG_SHOW_QUERY = '''
query BlogCatalog(
$uid: String!
){
blog{id,...F1}
}
fragment F1 on BlogApi{
catalog: catalog_u(
uid: $uid
) {
body
name
summary
public_article_count
private_article_count
created
updated
}
}'''
@authenticated
def get(self, UID):
form = BlogCatalogWriterForm(self)
catalog_query_variables = {
"uid": UID,
}
catalog_query_variables = json.dumps(catalog_query_variables)
r = self.q(self.CATALOG_SHOW_QUERY, catalog_query_variables)
catalog = r.get("data").get("blog").get("catalog")
self.render("blog/console/catalog_edit.html", form=form, catalog=catalog)
@authenticated
def post(self, UID):
form = BlogCatalogWriterForm(self)
if not form.validate():
return self.render("blog/console/catalog_edit.html", form=form)
catalogedit_query_variables = {
"input": {
"clientMutationId": "1",
"uid": "",
"name": "",
"summary": "",
"body": ""
}
}
catalogedit_query_variables["input"]["uid"] = UID
catalogedit_query_variables["input"]["name"] = form.name.data
catalogedit_query_variables["input"]["summary"] = form.summary.data
catalogedit_query_variables["input"]["body"] = form.body.data
catalogedit_query_variables = json.dumps(catalogedit_query_variables)
r = self.q(self.CATALOG_EDIT_QUERY, catalogedit_query_variables)
self.redirect("/console/blog/catalog/" + UID)
class BlogCatalogDelHander(RequestHandler):
CATA_DEL_QUERY = '''
mutation MyMutation(
$input: BlogCatalogDeleteInput!
) {
blog_catalog_delete(input: $input) {
status
message
}
}'''
@authenticated
def get(self, UID):
catalogdel_query_variables = {
"input": {
"clientMutationId": "1",
"uid": ""
}
}
catalogdel_query_variables["input"]["uid"] = UID
catalogdel_query_variables = json.dumps(catalogdel_query_variables)
r = self.q(self.CATA_DEL_QUERY, catalogdel_query_variables)
del_info = r.get("data").get("blog_catalog_delete")
status = del_info.get("status")
error = del_info.get("message")
current_url = self.request.uri
back_url = "/console/blog/catalog"
if not status:
            return self.render("/error.html", error=error, current_url=current_url,
                back_url=back_url)
self.redirect("/console/blog/catalog")
# console catalog section ends
# console tag section begins
class BlogTagindexHandler(RequestHandler):
BLOG_TAG_LIST_QUERY = '''
query BlogTags(
$first: Int
$sort_by: String
$sort_direction: String
){
blog{id,...F1}
}
fragment F1 on BlogApi{
tags(
first: $first,
sort_by: $sort_by,
sort_direction: $sort_direction
) {
edges {
node {
id
name
summary
body
count
created
updated
uid
}
}
pageInfo {
hasPreviousPage
startCursor
endCursor
hasNextPage
}
}
}'''
@authenticated
def get(self):
r = self.q(self.BLOG_TAG_LIST_QUERY)
tag_list = r.get("data").get("blog").get("tags").get("edges")
self.render('/blog/console/tags.html',
tag_list=tag_list,
get_local_time_string=get_local_time_string)
# console tag section ends

saisankargochhayat/algo_quest | leetcode/430.FlatternADoubleLinkedList/soln.py

"""
# Definition for a Node.
class Node:
def __init__(self, val, prev, next, child):
self.val = val
self.prev = prev
self.next = next
self.child = child
"""
class Solution:
def flatten(self, head: 'Node') -> 'Node':
start = head
head = head
while head != None:
            # we don't need to do anything in this case.
if head.child is None:
head = head.next
else:
# Flatten the list here.
head = self.flattenChildList(head, head.child, head.next)
# self.checkAnswer(start)
return start
# def checkAnswer(self, head):
# while head:
# print(head.val, head.prev)
# head = head.next
    # Now we splice in the child list: the source node's next points to the child
    # list's head, and that head's prev points back to the source node. The child
    # list's last node is then connected to nextNode, whose prev becomes that last node.
    # One sneaky catch - if nextNode is None, we return the tail of the child list instead.
def flattenChildList(self, sourceNode, childNode, nextNode):
head = childNode
sourceNode.next = childNode
sourceNode.child = None
childNode.prev = sourceNode
while head != None:
# End of the child list.
if head.next == None:
head.next = nextNode
if nextNode is None:
return head
nextNode.prev = head
return nextNode
elif head.child is None:
head = head.next
else:
head = self.flattenChildList(head, head.child, head.next)
return nextNode
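# Worked example (illustrative, not part of the original solution):
#
#   1 <-> 2 <-> 3        with a child list 4 <-> 5 hanging off node 2
#
# flatten() splices the child list in right after node 2, producing
#   1 <-> 2 <-> 4 <-> 5 <-> 3
# with every child pointer cleared, which is what the problem expects.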

AdiPersonalWorks/ATOS_GOM_SystemPrototyping | pico/auth.py

import pico
from pico import PicoError
class NotAuthorizedError(PicoError):
def __init__(self, message=''):
PicoError.__init__(self, message)
self.response.status = "401 Not Authorized"
self.response.set_header("WWW-Authenticate", "Basic")
class InvalidSessionError(PicoError):
def __init__(self, message=''):
PicoError.__init__(self, message)
self.response.status = "440 Invalid Session"
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
class object(pico.object):
account_manager = None
__headers__ = {'X-SESSION-ID': ''}
def __init__(self):
super(object, self).__init__()
self.user = None
if type(self.account_manager) == dict:
self.account_manager = Bunch(**self.account_manager)
request = pico.get_request()
if 'HTTP_AUTHORIZATION' in request:
try:
auth_header = request.get('HTTP_AUTHORIZATION')
scheme, data = auth_header.split(None, 1)
assert(scheme == 'Basic')
username, password = data.decode('base64').split(':', 1)
self.user = self._get_user(username, password)
except Exception, e:
raise NotAuthorizedError(str(e))
elif 'HTTP_X_SESSION_ID' in request:
session_id = request.get('HTTP_X_SESSION_ID')
self.user = self._get_session(session_id)
elif 'DUMMY_REQUEST' in request:
pass
else:
raise NotAuthorizedError("No username or password supplied")
def _get_user(self, username, password):
if self.account_manager:
return self.account_manager._get_user(username, password)
def _get_session(self, session_id):
if self.account_manager:
return self.account_manager._get_session(session_id)
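# Note on the authentication flow above (descriptive comment, not original code):
# HTTP Basic clients send "Authorization: Basic <base64('username:password')>",
# which __init__ decodes before calling _get_user(); session-based clients
# instead send their session id in the X-SESSION-ID header, handled by
# _get_session().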

tedunderwood/horizon | chapter3/code/reproduce_fictional_prestige.py

#!/usr/bin/env python3
# reproduce_fictional_prestige.py
# Scripts to reproduce models
# used in Chapter Three,
# The Directions of Literary Change.
import csv, os, sys, pickle, math
# we add a path to be searched so that we can import
# versatiletrainer, which will do most of the work
# Versatiletrainer, and the modules it will in turn call,
# are publicly available in this github repo:
# https://github.com/tedunderwood/overlappingcategories
# mental note: when you file the book repo with Zenodo,
# a copy of the overlappingcategories repo also needs to
# be frozen
sys.path.append('/Users/tunder/Dropbox/python/logistic')
import versatiletrainer as train
import pandas as pd
# sourcefolder =
# extension =
# metadatapath =
# outputpath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/predictions.csv'
def genre_gridsearch(metadatapath, modelname, c_range, ftstart, ftend, ftstep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1700, excl_above = 2000):
# Function does a gridsearch to identify an optimal number of features and setting of
# the regularization constant; then produces that model.
# sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF/'
sourcefolder = '../sourcefiles/'
extension = '.tsv'
#metadatapath = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/prestigeficmeta.csv'
vocabpath = '/Users/tunder/Dropbox/fiction/lexicon/' + modelname + '.txt'
if os.path.exists(vocabpath):
print('Vocabulary for ' + modelname + ' already exists. Using it.')
outputpath = '../results/' + modelname + '.csv'
# We can simply exclude volumes from consideration on the basis on any
# metadata category we want, using the dictionaries defined below.
## EXCLUSIONS.
excludeif = dict()
excludeifnot = dict()
excludeabove = dict()
excludebelow = dict()
excludebelow['firstpub'] = excl_below
excludeabove['firstpub'] = excl_above
sizecap = 700
# CLASSIFY CONDITIONS
# print()
# print("You can also specify positive tags to be excluded from training, and/or a pair")
# print("of integer dates outside of which vols should be excluded from training.")
# print("If you add 'donotmatch' to the list of tags, these volumes will not be")
# print("matched with corresponding negative volumes.")
# print()
# ## testphrase = input("Comma-separated list of such tags: ")
testphrase = ''
testconditions = set([x.strip() for x in testphrase.split(',') if len(x) > 0])
datetype = "firstpub"
numfeatures = ftend
regularization = .000075
# linting the code would get rid of regularization, which is at this
# point an unused dummy parameter
paths = (sourcefolder, extension, metadatapath, outputpath, vocabpath)
exclusions = (excludeif, excludeifnot, excludebelow, excludeabove, sizecap)
classifyconditions = (positive_tags, negative_tags, datetype, numfeatures, regularization, testconditions)
modelparams = 'logistic', 12, ftstart, ftend, ftstep, c_range
matrix, rawaccuracy, allvolumes, coefficientuples = train.tune_a_model(paths, exclusions, classifyconditions, modelparams)
print('If we divide the dataset with a horizontal line at 0.5, accuracy is: ', str(rawaccuracy))
tiltaccuracy = train.diachronic_tilt(allvolumes, 'linear', [])
print("Divided with a line fit to the data trend, it's ", str(tiltaccuracy))
def applymodel(modelpath, metadatapath, outpath):
sourcefolder = '/Users/tunder/Dropbox/GenreProject/python/reception/fiction/fromEF'
extension = '.tsv'
newmetadict = train.apply_pickled_model(modelpath, sourcefolder, extension, metadatapath)
print('Got predictions for that model.')
newmetadict.to_csv(outpath)
def comparison(selfmodel, othermodel, modelname):
totalvolumes = 0
right = 0
for v in selfmodel.index:
realgenre = selfmodel.loc[v, 'realclass']
v = str(v)
otherprediction = othermodel.loc[v, modelname]
if realgenre > .5 and otherprediction > 0.5:
right += 1
elif realgenre < .5 and otherprediction < 0.5:
right += 1
totalvolumes +=1
return totalvolumes, right
def getacc(filelist):
allofem = 0
allright = 0
for afile in filelist:
df = pd.read_csv(afile)
totalcount = len(df.realclass)
tp = sum((df.realclass > 0.5) & (df.logistic > 0.5))
tn = sum((df.realclass <= 0.5) & (df.logistic <= 0.5))
fp = sum((df.realclass <= 0.5) & (df.logistic > 0.5))
fn = sum((df.realclass > 0.5) & (df.logistic <= 0.5))
assert totalcount == (tp + fp + tn + fn)
allofem += totalcount
allright += (tp + tn)
return allright / allofem
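# Illustrative call (file paths as produced by the 'apply_quarter_century_models'
# branch below): getacc pools true/false positives and negatives across every
# listed prediction file and returns (tp + tn) / total, e.g.
#
#   acc = getacc(['../results/segment1850.applied.csv',
#                 '../results/segment1875.applied.csv'])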
if __name__ == '__main__':
args = sys.argv
command = args[1]
if command == 'littlemagazines':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1500
featureend = 4000
featurestep = 100
genre_gridsearch('/Users/tunder/Dropbox/GenreProject/python/reception/fiction/littlemagazines.csv', 'littlemagazinespost1919', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
elif command == 'apply_quarter_century_models':
# We've previously trained models for each quarter-century
# of the fiction corpus: 1850-74, 75-99, and so on.
# Now we need to apply those models to the whole corpus
# in order to see how good their predictions are.
models = []
outpaths = []
for i in range (1850, 1950, 25):
modelpath = '../models/segment' + str(i) + '.pkl'
models.append(modelpath)
outpath = '../results/segment' + str(i) + '.applied.csv'
outpaths.append(outpath)
metadatapath = '../metadata/prestigeficmeta.csv'
for m, o in zip(models, outpaths):
applymodel(m, metadatapath, o)
elif command == 'gender_balance_fiction':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1200
featureend = 4500
featurestep = 100
genre_gridsearch('../metadata/genderbalancedfiction.csv', 'gender_balanced_fiction', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)
elif command == 'nation_balance_fiction':
c_range = [.00009, .0002, .0004, .0008, .0012, .002, .004, .008, .012, 0.3, 0.8, 2]
featurestart = 1200
featureend = 4000
featurestep = 100
genre_gridsearch('../metadata/nationbalancedfiction.csv', 'nation_balanced_fiction', c_range, featurestart, featureend, featurestep, positive_tags = ['elite'], negative_tags = ['vulgar'], excl_below = 1800, excl_above = 2000)

dwalton76/ev3dev-lang-python | tests/motor/motor_motion_unittest.py

#!/usr/bin/env python3
# Based on the parameterized test case technique described here:
#
# http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases
import unittest
import time
import ev3dev.ev3 as ev3
import parameterizedtestcase as ptc
from motor_info import motor_info
class TestMotorMotion(ptc.ParameterizedTestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def initialize_motor(self):
self._param['motor'].command = 'reset'
def run_to_positions(self, stop_action, command, speed_sp, positions, tolerance):
self._param['motor'].stop_action = stop_action
self._param['motor'].speed_sp = speed_sp
target = self._param['motor'].position
for i in positions:
self._param['motor'].position_sp = i
if 'run-to-rel-pos' == command:
target += i
else:
target = i
print("PRE position = {0} i = {1} target = {2}".format(self._param['motor'].position, i, target))
self._param['motor'].command = command
while 'running' in self._param['motor'].state:
pass
print("POS position = {0} i = {1} target = {2}".format(self._param['motor'].position, i, target))
self.assertGreaterEqual(tolerance, abs(self._param['motor'].position - target))
time.sleep(0.2)
self._param['motor'].command = 'stop'
def test_stop_brake_no_ramp_med_speed_relative(self):
if not self._param['has_brake']:
self.skipTest('brake not supported by this motor controller')
self.initialize_motor()
self.run_to_positions('brake', 'run-to-rel-pos', 400, [0, 90, 180, 360, 720, -720, -360, -180, -90, 0], 20)
def test_stop_hold_no_ramp_med_speed_relative(self):
self.initialize_motor()
self.run_to_positions('hold', 'run-to-rel-pos', 400, [0, 90, 180, 360, 720, -720, -360, -180, -90, 0], 5)
def test_stop_brake_no_ramp_low_speed_relative(self):
if not self._param['has_brake']:
self.skipTest('brake not supported by this motor controller')
self.initialize_motor()
self.run_to_positions('brake', 'run-to-rel-pos', 100, [0, 90, 180, 360, 720, -720, -360, -180, -90, 0], 20)
def test_stop_hold_no_ramp_low_speed_relative(self):
self.initialize_motor()
self.run_to_positions('hold', 'run-to-rel-pos', 100, [0, 90, 180, 360, 720, -720, -360, -180, -90, 0], 5)
def test_stop_brake_no_ramp_high_speed_relative(self):
if not self._param['has_brake']:
self.skipTest('brake not supported by this motor controller')
self.initialize_motor()
self.run_to_positions('brake', 'run-to-rel-pos', 900, [0, 90, 180, 360, 720, -720, -360, -180, -90, 0], 50)
def test_stop_hold_no_ramp_high_speed_relative(self):
self.initialize_motor()
self.run_to_positions('hold', 'run-to-rel-pos', 100, [0, 90, 180, 360, 720, -720, -360, -180, -90, 0], 5)
def test_stop_brake_no_ramp_med_speed_absolute(self):
if not self._param['has_brake']:
self.skipTest('brake not supported by this motor controller')
self.initialize_motor()
self.run_to_positions('brake', 'run-to-abs-pos', 400,
[0, 90, 180, 360, 180, 90, 0, -90, -180, -360, -180, -90, 0], 20)
def test_stop_hold_no_ramp_med_speed_absolute(self):
self.initialize_motor()
self.run_to_positions('hold', 'run-to-abs-pos', 400,
[0, 90, 180, 360, 180, 90, 0, -90, -180, -360, -180, -90, 0], 5)
def test_stop_brake_no_ramp_low_speed_absolute(self):
if not self._param['has_brake']:
self.skipTest('brake not supported by this motor controller')
self.initialize_motor()
self.run_to_positions('brake', 'run-to-abs-pos', 100,
[0, 90, 180, 360, 180, 90, 0, -90, -180, -360, -180, -90, 0], 20)
def test_stop_hold_no_ramp_low_speed_absolute(self):
self.initialize_motor()
self.run_to_positions('hold', 'run-to-abs-pos', 100,
[0, 90, 180, 360, 180, 90, 0, -90, -180, -360, -180, -90, 0], 5)
def test_stop_brake_no_ramp_high_speed_absolute(self):
if not self._param['has_brake']:
self.skipTest('brake not supported by this motor controller')
self.initialize_motor()
self.run_to_positions('brake', 'run-to-abs-pos', 900,
[0, 90, 180, 360, 180, 90, 0, -90, -180, -360, -180, -90, 0], 50)
def test_stop_hold_no_ramp_high_speed_absolute(self):
self.initialize_motor()
self.run_to_positions('hold', 'run-to-abs-pos', 100,
[0, 90, 180, 360, 180, 90, 0, -90, -180, -360, -180, -90, 0], 5)
# Add all the tests to the suite - some tests apply only to certain drivers!
def AddTachoMotorMotionTestsToSuite(suite, params):
suite.addTest(ptc.ParameterizedTestCase.parameterize(TestMotorMotion, param=params))
if __name__ == '__main__':
ev3_params = {
'motor': ev3.Motor('outA'),
'port': 'outA',
'driver_name': 'lego-ev3-l-motor',
'has_brake': True,
}
brickpi_params = {
'motor': ev3.Motor('ttyAMA0:MA'),
'port': 'ttyAMA0:MA',
'driver_name': 'lego-nxt-motor',
'has_brake': False,
}
pistorms_params = {
'motor': ev3.Motor('pistorms:BAM1'),
'port': 'pistorms:BAM1',
'driver_name': 'lego-nxt-motor',
'has_brake': True,
}
suite = unittest.TestSuite()
AddTachoMotorMotionTestsToSuite(suite, ev3_params)
unittest.TextTestRunner(verbosity=1, buffer=True).run(suite)

defstryker/Hex-Omega | users/Xav/views.py

from users.views import *
from .add_leader_form import *
from django.db.utils import IntegrityError
def create_leader_user(request, username):
form = LeaderForm()
if request.method == 'POST':
form = LeaderForm(request.POST)
if form.is_valid():
username = request.POST.get('username')
first_name = request.POST.get('first_name')
last_name = request.POST.get('last_name')
email = request.POST.get('email')
password = get_default_password()
try:
user = LeaderUser.objects.create_user(username=username, first_name=first_name, last_name=last_name,
email=email, password=password)
except IntegrityError as e:
return render(request, 'users/leaderuser_form.html',
{'form': form, 'mail_error': 'The email is not unique!'})
user.set_password(password)
mail_kickoff(user, password)
user.save()
update_session_auth_hash(request, request.user)
return redirect('display_admin', request.user.username)
return render(request, 'users/leaderuser_form.html', {'form': form})
@login_required
def display_leader_detail(request, username):
user = LeaderUser.objects.get(username__iexact=username)
return render(request, 'users/leaderdetail.html', {'user': user})
@login_required
def update_leader_detail(request, username):
user = LeaderUser.objects.get(username__iexact=username)
form_data = {'username': user.username, 'first_name': user.first_name, 'last_name': user.last_name,
'email': user.email,
'password': user.password, 'bio': user.bio}
form = UpdateLeaderForm(request.POST, initial=form_data)
if request.method == 'POST':
print(form.errors)
if form.is_valid():
user.first_name = request.POST.get('first_name')
user.last_name = request.POST.get('last_name')
user.email = request.POST.get('email')
pw = request.POST['password']
            if pw and len(pw.strip()) >= 8:
                user.set_password(pw)
user.bio = request.POST.get('bio')
user.save()
update_session_auth_hash(request, request.user)
return redirect('display_leader', username)
return render(request, 'users/update_leader_form.html', {'user': user, 'form': form, 'errors': form.errors})

jal-stats/django | jal_stats/jal_stats/urls.py

"""jal_stats URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
# from rest_framework import routers
from rest_framework_nested import routers
from stats import views as stats_views
router = routers.SimpleRouter()
# router.register(r'users', stats_views.UserViewSet)
router.register(r'activities', stats_views.ActivityViewSet)
activities_router = routers.NestedSimpleRouter(router,
r'activities',
lookup='activity')
activities_router.register(r'stats',
stats_views.StatViewSet,
base_name='activities-stats')
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^docs/', include('rest_framework_swagger.urls')),
url(r'^api/', include(router.urls)),
url(r'^api/', include(activities_router.urls)),
url(r'^api-auth/',
include('rest_framework.urls', namespace='rest_framework')),
]
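# With the registrations above, the API should expose routes such as
#   /api/activities/                       (ActivityViewSet list)
#   /api/activities/{pk}/                  (ActivityViewSet detail)
#   /api/activities/{activity_pk}/stats/   (StatViewSet via the nested router)
# (route shapes assume drf-nested-routers defaults; they are illustrative,
# not copied from the project).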

3L3N4/Egress-Assess | common/orchestra.py

'''
This is the conductor which controls everything
'''
import glob
import imp
from protocols.servers import *
from protocols.clients import *
from datatypes import *
class Conductor:
def __init__(self):
# Create dictionaries of supported modules
# empty until stuff loaded into them
self.client_protocols = {}
self.server_protocols = {}
self.datatypes = {}
def load_client_protocols(self, command_line_object):
for name in glob.glob('protocols/clients/*.py'):
if name.endswith(".py") and ("__init__" not in name):
loaded_client_proto = imp.load_source(name.replace("/", ".").rstrip('.py'), name)
self.client_protocols[name] = loaded_client_proto.Client(command_line_object)
return
def load_server_protocols(self, command_line_object):
for name in glob.glob('protocols/servers/*.py'):
if name.endswith(".py") and ("__init__" not in name):
loaded_server_proto = imp.load_source(name.replace("/", ".").rstrip('.py'), name)
self.server_protocols[name] = loaded_server_proto.Server(command_line_object)
return
def load_datatypes(self, command_line_object):
for name in glob.glob('datatypes/*.py'):
if name.endswith(".py") and ("__init__" not in name):
loaded_datatypes = imp.load_source(name.replace("/", ".").rstrip('.py'), name)
self.datatypes[name] = loaded_datatypes.Datatype(command_line_object)
return
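# Illustrative usage (the command-line object is hypothetical):
#
#   conductor = Conductor()
#   conductor.load_client_protocols(cli_args)
#   conductor.load_server_protocols(cli_args)
#   conductor.load_datatypes(cli_args)
#
# after which each dictionary maps a module path (e.g. 'protocols/clients/x.py')
# to an instantiated Client, Server, or Datatype object.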

deanet/gheimdall | gheimdall/responsecreator/__init__.py

#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GHeimdall - A small web application for Google Apps SSO service.
# Copyright (C) 2007 SIOS Technology, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
#
# $Id$
__author__ = '[email protected] (Takashi MATSUO)'
import saml2
import saml2.utils
import xmldsig as ds
from saml2 import saml, samlp
import time
EMPTY_SAML_RESPONSE="""<?xml version="1.0" encoding="UTF-8"?>
<samlp:Response Version="2.0"
xmlns="urn:oasis:names:tc:SAML:2.0:assertion"
xmlns:samlp="urn:oasis:names:tc:SAML:2.0:protocol">
<samlp:Status>
<samlp:StatusCode Value="urn:oasis:names:tc:SAML:2.0:status:Success"/>
</samlp:Status>
<Assertion Version="2.0" xmlns="urn:oasis:names:tc:SAML:2.0:assertion">
<Issuer></Issuer>
<Subject>
<SubjectConfirmation Method="urn:oasis:names:tc:SAML:2.0:cm:bearer">
<SubjectConfirmationData />
</SubjectConfirmation>
</Subject>
<Conditions></Conditions>
<AuthnStatement>
<AuthnContext>
<AuthnContextClassRef>
urn:oasis:names:tc:SAML:2.0:ac:classes:Password
</AuthnContextClassRef>
</AuthnContext>
</AuthnStatement>
</Assertion>
</samlp:Response>
"""
class ResponseCreator(object):
user_name = None
response = None
request = None
authn_request = None
def createLogoutRequest(self, session_index, name_id):
now = saml2.utils.getDateAndTime(time.time())
req = samlp.LogoutRequest(id=saml2.utils.createID(),
version=saml2.V2,
issue_instant=now)
req.issuer=saml.Issuer(text=self.config.get('issuer_name'))
req.name_id = name_id
req.session_index = samlp.SessionIndex(text=session_index)
req.signature = self._get_signature()
return req
def createLogoutResponse(self, logout_request_id, status_code):
now = saml2.utils.getDateAndTime(time.time())
self.response = samlp.LogoutResponse(id=saml2.utils.createID(),
version=saml2.V2,
issue_instant=now,
in_response_to=logout_request_id)
self.response.issuer = saml.Issuer(text=self.config.get('issuer_name'))
self.response.status = samlp.Status()
self.response.status.status_code = samlp.StatusCode(status_code)
self.response.signature = self._get_signature()
return self.response
def createAuthnResponse(self, user_name, authn_request, valid_time,
auth_time, acsURL):
self.user_name = user_name
self.authn_request = authn_request
response = samlp.ResponseFromString(EMPTY_SAML_RESPONSE)
response.id = saml2.utils.createID()
now = saml2.utils.getDateAndTime(time.time() - 10)
until = saml2.utils.getDateAndTime(valid_time)
auth_timestamp = saml2.utils.getDateAndTime(auth_time)
response.issue_instant = now
response.assertion[0].id = saml2.utils.createID()
response.assertion[0].issue_instant = now
response.assertion[0].issuer.text = self.config.get('issuer_name')
response.assertion[0].conditions.not_before = now
response.assertion[0].conditions.not_on_or_after = until
response.assertion[0].authn_statement[0].authn_instant = auth_timestamp
response.assertion[0].authn_statement[0].session_not_on_or_after = until
response.assertion[0].subject.name_id = self._getNameID()
response.assertion[0].subject.subject_confirmation[0].subject_confirmation_data.recipient = acsURL
self.response = response
self.response.signature = self._get_signature()
self._adjustment()
return self.response
def _get_signature(self):
key_type = self.config.get("apps_privkey_type")
if key_type == "rsa":
alg = ds.SIG_RSA_SHA1
elif key_type == "dsa":
alg = ds.SIG_DSA_SHA1
else:
alg = ds.SIG_RSA_SHA1
return ds.GetEmptySignature(signature_method_algorithm=alg)
def __init__(self, config):
self._prepare(config)
def _getNameID(self):
raise NotImplementedError('Child class must implement me.')
def _prepare(self, config):
raise NotImplementedError('Child class must implement me.')
def _adjustment(self):
return None
def create(mapper, config):
exec('from gheimdall.responsecreator import %s' % mapper)
ret = eval('%s.cls(config)' % mapper)
return ret
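# Illustrative call ('ldap' here is a hypothetical submodule name):
#
#   creator = create('ldap', config)
#
# which imports gheimdall.responsecreator.ldap and returns ldap.cls(config),
# i.e. an instance of whatever ResponseCreator subclass that submodule exposes
# through its module-level 'cls' attribute.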

smallyear/linuxLearn | salt/salt/utils/process.py

# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import os
import sys
import time
import types
import signal
import subprocess
import logging
import multiprocessing
import threading
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug(('Created pidfile: {0}').format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except IndexError:
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
Since there isn't much use for this class as of right now this implementation
Only supports daemonized threads and will *not* return results
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
try:
log.debug('ThreadPool executing func: {0} with args:{1}'
' kwargs{2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
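# Illustrative ThreadPool usage (not part of the original module): results are
# never returned, so this is only suitable for fire-and-forget work.
#
#   pool = ThreadPool(num_threads=4, queue_size=128)
#   accepted = pool.fire_async(some_callable, args=[job])
#   # fire_async() returns False when the bounded queue is already full.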
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
def add_process(self, tgt, args=None, kwargs=None):
'''
Create a processes and args + kwargs
This will deterimine if it is a Process class, otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs)
process.start()
# create a nicer name for the debug log
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}.{1}.{2}'.format(
tgt.__module__,
tgt.__class__,
tgt.__name__,
)
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def run(self):
'''
Load and start all available api modules
'''
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
signal.signal(signal.SIGTERM, self.kill_children)
while True:
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows():
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug(('Process of pid {0} died, not a known'
' process, will not restart').format(pid))
continue
self.restart_process(pid)
else:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
        # iterate over a copy, since restart_process() mutates the map
        for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for p_map in six.itervalues(self._process_map):
p_map['Process'].terminate()
end_time = time.time() + self.wait_for_kill # when to die
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
p_map['Process'].join(0)
# This is a race condition if a signal was passed to all children
try:
del self._process_map[pid]
except KeyError:
pass
        # SIGKILL anything that is still alive after the grace period
        for pid in self._process_map:
            try:
                os.kill(pid, signal.SIGKILL)
            # in case the process has since decided to die, os.kill raises OSError
except OSError:
pass
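# Illustrative ProcessManager usage (hypothetical worker function):
#
#   manager = ProcessManager(name='MyManager')
#   manager.add_process(worker_main, args=[config])
#   manager.run()   # blocks, restarting any child that exits, until SIGTERM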

grueni75/GeoDiscoverer | Source/Platform/Target/Android/core/src/main/jni/gdal-3.2.1/swig/python/scripts/gdal2tiles.py

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id: gdal2tiles.py d712a530aa1b0dabf9717dd935996dd7b9fd8ced 2020-11-15 15:12:13 +0100 Even Rouault $
#
# Project: Google Summer of Code 2007, 2008 (http://code.google.com/soc/)
# Support: BRGM (http://www.brgm.fr)
# Purpose: Convert a raster into TMS (Tile Map Service) tiles in a directory.
# - generate Google Earth metadata (KML SuperOverlay)
# - generate simple HTML viewer based on Google Maps and OpenLayers
# - support of global tiles (Spherical Mercator) for compatibility
# with interactive web maps a la Google Maps
# Author: Klokan Petr Pridal, klokan at klokan dot cz
# Web: http://www.klokan.cz/projects/gdal2tiles/
# GUI: http://www.maptiler.org/
#
###############################################################################
# Copyright (c) 2008, Klokan Petr Pridal
# Copyright (c) 2010-2013, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
from __future__ import print_function, division
import math
from multiprocessing import Pool
from functools import partial
import glob
import json
import os
import tempfile
import threading
import shutil
import sys
from uuid import uuid4
from xml.etree import ElementTree
from osgeo import gdal
from osgeo import osr
try:
from PIL import Image
import numpy
import osgeo.gdal_array as gdalarray
numpy_available = True
except ImportError:
# 'antialias' resampling is not available
numpy_available = False
__version__ = "$Id: gdal2tiles.py d712a530aa1b0dabf9717dd935996dd7b9fd8ced 2020-11-15 15:12:13 +0100 Even Rouault $"
resampling_list = (
'average', 'near', 'bilinear', 'cubic', 'cubicspline', 'lanczos',
'antialias', 'mode', 'max', 'min', 'med', 'q1', 'q3')
webviewer_list = ('all', 'google', 'openlayers', 'leaflet', 'mapml', 'none')
class UnsupportedTileMatrixSet(Exception):
pass
class TileMatrixSet(object):
def __init__(self):
self.identifier = None
self.srs = None
self.topleft_x = None
self.topleft_y = None
self.matrix_width = None # at zoom 0
self.matrix_height = None # at zoom 0
self.tile_size = None
self.resolution = None # at zoom 0
self.level_count = None
def GeorefCoordToTileCoord(self, x, y, z, overriden_tile_size):
res = self.resolution * self.tile_size / overriden_tile_size / (2**z)
tx = int((x - self.topleft_x) / (res * overriden_tile_size))
# In default mode, we use a bottom-y origin
ty = int((y - (self.topleft_y - self.matrix_height * self.tile_size * self.resolution)) / (res * overriden_tile_size))
return tx, ty
def ZoomForPixelSize(self, pixelSize, overriden_tile_size):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(self.level_count):
res = self.resolution * self.tile_size / overriden_tile_size / (2**i)
if pixelSize > res:
return max(0, i - 1) # We don't want to scale up
return self.level_count - 1
def PixelsToMeters(self, px, py, zoom, overriden_tile_size):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.resolution * self.tile_size / overriden_tile_size / (2**zoom)
mx = px * res + self.topleft_x
my = py * res + (self.topleft_y - self.matrix_height * self.tile_size * self.resolution)
return mx, my
def TileBounds(self, tx, ty, zoom, overriden_tile_size):
"Returns bounds of the given tile in georef coordinates"
minx, miny = self.PixelsToMeters(tx * overriden_tile_size, ty * overriden_tile_size, zoom, overriden_tile_size)
maxx, maxy = self.PixelsToMeters((tx + 1) * overriden_tile_size, (ty + 1) * overriden_tile_size, zoom, overriden_tile_size)
return (minx, miny, maxx, maxy)
@staticmethod
def parse(j):
assert 'identifier' in j
assert 'supportedCRS' in j
assert 'tileMatrix' in j
assert isinstance(j['tileMatrix'], list)
srs = osr.SpatialReference()
assert srs.SetFromUserInput(str(j['supportedCRS'])) == 0
swapaxis = srs.EPSGTreatsAsLatLong() or srs.EPSGTreatsAsNorthingEasting()
metersPerUnit = 1.0
if srs.IsProjected():
metersPerUnit = srs.GetLinearUnits()
elif srs.IsGeographic():
            metersPerUnit = srs.GetSemiMajor() * math.pi / 180
tms = TileMatrixSet()
tms.srs = srs
tms.identifier = str(j['identifier'])
for i, tileMatrix in enumerate(j['tileMatrix']):
assert 'topLeftCorner' in tileMatrix
assert isinstance(tileMatrix['topLeftCorner'], list)
topLeftCorner = tileMatrix['topLeftCorner']
assert len(topLeftCorner) == 2
assert 'scaleDenominator' in tileMatrix
assert 'tileWidth' in tileMatrix
assert 'tileHeight' in tileMatrix
topleft_x = topLeftCorner[0]
topleft_y = topLeftCorner[1]
tileWidth = tileMatrix['tileWidth']
tileHeight = tileMatrix['tileHeight']
if tileWidth != tileHeight:
raise UnsupportedTileMatrixSet('Only square tiles supported')
# Convention in OGC TileMatrixSet definition. See gcore/tilematrixset.cpp
resolution = tileMatrix['scaleDenominator'] * 0.28e-3 / metersPerUnit
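            # Worked example (illustrative, assumed values): for a metric CRS
            # (metersPerUnit == 1) and a level-0 scaleDenominator of
            # 559082264.0287178 (the WebMercatorQuad value), this yields
            # 559082264.0287178 * 0.28e-3 ~= 156543.034 m/px.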
if swapaxis:
topleft_x, topleft_y = topleft_y, topleft_x
if i == 0:
tms.topleft_x = topleft_x
tms.topleft_y = topleft_y
tms.resolution = resolution
tms.tile_size = tileWidth
assert 'matrixWidth' in tileMatrix
assert 'matrixHeight' in tileMatrix
tms.matrix_width = tileMatrix['matrixWidth']
tms.matrix_height = tileMatrix['matrixHeight']
else:
if topleft_x != tms.topleft_x or topleft_y != tms.topleft_y:
raise UnsupportedTileMatrixSet('All levels should have same origin')
if abs(tms.resolution / (1 << i) - resolution) > 1e-8 * resolution:
raise UnsupportedTileMatrixSet('Only resolutions varying as power-of-two supported')
if tileWidth != tms.tile_size:
raise UnsupportedTileMatrixSet('All levels should have same tile size')
tms.level_count = len(j['tileMatrix'])
return tms
tmsMap = {}
profile_list = ['mercator', 'geodetic', 'raster']
# Read additional tile matrix sets from GDAL data directory
filename = gdal.FindFile('gdal', 'tms_MapML_APSTILE.json')
if filename:
dirname = os.path.dirname(filename)
for tmsfilename in glob.glob(os.path.join(dirname, "tms_*.json")):
        with open(tmsfilename, 'rb') as f:
            data = f.read()
try:
j = json.loads(data.decode('utf-8'))
except:
j = None
if j is None:
print('Cannot parse ' + tmsfilename)
continue
try:
tms = TileMatrixSet.parse(j)
except UnsupportedTileMatrixSet:
continue
except:
print('Cannot parse ' + tmsfilename)
continue
tmsMap[tms.identifier] = tms
profile_list.append(tms.identifier)
threadLocal = threading.local()
# =============================================================================
# =============================================================================
# =============================================================================
__doc__globalmaptiles = """
globalmaptiles.py
Global Map Tiles as defined in Tile Map Service (TMS) Profiles
==============================================================
Functions necessary for generation of global tiles used on the web.
It contains classes implementing coordinate conversions for:
- GlobalMercator (based on EPSG:3857)
for Google Maps, Yahoo Maps, Bing Maps compatible tiles
- GlobalGeodetic (based on EPSG:4326)
for OpenLayers Base Map and Google Earth compatible tiles
More info at:
http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification
http://wiki.osgeo.org/wiki/WMS_Tiling_Client_Recommendation
http://msdn.microsoft.com/en-us/library/bb259689.aspx
http://code.google.com/apis/maps/documentation/overlays.html#Google_Maps_Coordinates
Created by Klokan Petr Pridal on 2008-07-03.
Google Summer of Code 2008, project GDAL2Tiles for OSGEO.
In case you use this class in your product, translate it to another language
or find it useful for your project please let me know.
My email: klokan at klokan dot cz.
I would like to know where it was used.
Class is available under the open-source GDAL license (www.gdal.org).
"""
MAXZOOMLEVEL = 32
class GlobalMercator(object):
r"""
TMS Global Mercator Profile
---------------------------
Functions necessary for generation of tiles in Spherical Mercator projection,
EPSG:3857.
Such tiles are compatible with Google Maps, Bing Maps, Yahoo Maps,
UK Ordnance Survey OpenSpace API, ...
and you can overlay them on top of base maps of those web mapping applications.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Mercator tiles::
LatLon <-> Meters <-> Pixels <-> Tile
WGS84 coordinates Spherical Mercator Pixels in pyramid Tiles in pyramid
lat/lon XY in meters XY pixels Z zoom XYZ from TMS
        EPSG:4326           EPSG:3857
.----. --------- -- TMS
/ \ <-> | | <-> /----/ <-> Google
\ / | | /--------/ QuadTree
----- --------- /------------/
KML, public WebMapService Web Clients TileMapService
What is the coordinate extent of Earth in EPSG:3857?
[-20037508.342789244, -20037508.342789244, 20037508.342789244, 20037508.342789244]
Constant 20037508.342789244 comes from the circumference of the Earth in meters,
      which is roughly 40 thousand kilometers; the coordinate origin is in the middle of the extent.
In fact you can calculate the constant as: 2 * math.pi * 6378137 / 2.0
$ echo 180 85 | gdaltransform -s_srs EPSG:4326 -t_srs EPSG:3857
      Polar areas with abs(latitude) bigger than 85.05112878 are clipped off.
What are zoom level constants (pixels/meter) for pyramid with EPSG:3857?
whole region is on top of pyramid (zoom=0) covered by 256x256 pixels tile,
every lower zoom level resolution is always divided by two
initialResolution = 20037508.342789244 * 2 / 256 = 156543.03392804062
What is the difference between TMS and Google Maps/QuadTree tile name convention?
The tile raster itself is the same (equal extent, projection, pixel size),
there is just different identification of the same raster tile.
Tiles in TMS are counted from [0,0] in the bottom-left corner, id is XYZ.
      Google placed the origin [0,0] in the top-left corner, reference is XYZ.
Microsoft is referencing tiles by a QuadTree name, defined on the website:
http://msdn2.microsoft.com/en-us/library/bb259689.aspx
The lat/lon coordinates are using WGS84 datum, yes?
Yes, all lat/lon we are mentioning should use WGS84 Geodetic Datum.
Well, the web clients like Google Maps are projecting those coordinates by
Spherical Mercator, so in fact lat/lon coordinates on sphere are treated as if
      they were on the WGS84 ellipsoid.
From MSDN documentation:
To simplify the calculations, we use the spherical form of projection, not
the ellipsoidal form. Since the projection is used only for map display,
and not for displaying numeric coordinates, we don't need the extra precision
of an ellipsoidal projection. The spherical projection causes approximately
0.33 percent scale distortion in the Y direction, which is not visually
noticeable.
How do I create a raster in EPSG:3857 and convert coordinates with PROJ.4?
You can use standard GIS tools like gdalwarp, cs2cs or gdaltransform.
      All of these tools support -t_srs 'epsg:3857'.
For other GIS programs check the exact definition of the projection:
More info at http://spatialreference.org/ref/user/google-projection/
The same projection is designated as EPSG:3857. WKT definition is in the
official EPSG database.
Proj4 Text:
+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0
+k=1.0 +units=m +nadgrids=@null +no_defs
Human readable WKT format of EPSG:3857:
PROJCS["Google Maps Global Mercator",
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433],
AUTHORITY["EPSG","4326"]],
PROJECTION["Mercator_1SP"],
PARAMETER["central_meridian",0],
PARAMETER["scale_factor",1],
PARAMETER["false_easting",0],
PARAMETER["false_northing",0],
UNIT["metre",1,
AUTHORITY["EPSG","9001"]]]
"""
def __init__(self, tile_size=256):
"Initialize the TMS Global Mercator pyramid"
self.tile_size = tile_size
self.initialResolution = 2 * math.pi * 6378137 / self.tile_size
# 156543.03392804062 for tile_size 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:3857"
mx = lon * self.originShift / 180.0
my = math.log(math.tan((90 + lat) * math.pi / 360.0)) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my):
"Converts XY point from Spherical Mercator EPSG:3857 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan(math.exp(lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:3857"
res = self.Resolution(zoom)
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:3857 to pyramid pixel coordinates in given zoom level"
res = self.Resolution(zoom)
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tile_size << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels(mx, my, zoom)
return self.PixelsToTile(px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:3857 coordinates"
minx, miny = self.PixelsToMeters(tx * self.tile_size, ty * self.tile_size, zoom)
maxx, maxy = self.PixelsToMeters((tx + 1) * self.tile_size, (ty + 1) * self.tile_size, zoom)
return (minx, miny, maxx, maxy)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in latitude/longitude using WGS84 datum"
bounds = self.TileBounds(tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return (minLat, minLon, maxLat, maxLon)
def Resolution(self, zoom):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tile_size * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i - 1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
class GlobalGeodetic(object):
r"""
TMS Global Geodetic Profile
---------------------------
Functions necessary for generation of global tiles in Plate Carre projection,
EPSG:4326, "unprojected profile".
Such tiles are compatible with Google Earth (as any other EPSG:4326 rasters)
and you can overlay the tiles on top of OpenLayers base map.
Pixel and tile coordinates are in TMS notation (origin [0,0] in bottom-left).
What coordinate conversions do we need for TMS Global Geodetic tiles?
Global Geodetic tiles are using geodetic coordinates (latitude,longitude)
directly as planar coordinates XY (it is also called Unprojected or Plate
Carre). We need only scaling to pixel pyramid and cutting to tiles.
      The pyramid has two tiles at its top level, so it is not square but rectangular.
Area [-180,-90,180,90] is scaled to 512x256 pixels.
TMS has coordinate origin (for pixels and tiles) in bottom-left corner.
Rasters are in EPSG:4326 and therefore are compatible with Google Earth.
LatLon <-> Pixels <-> Tiles
WGS84 coordinates Pixels in pyramid Tiles in pyramid
lat/lon XY pixels Z zoom XYZ from TMS
EPSG:4326
.----. ----
/ \ <-> /--------/ <-> TMS
\ / /--------------/
----- /--------------------/
WMS, KML Web Clients, Google Earth TileMapService
"""
def __init__(self, tmscompatible, tile_size=256):
self.tile_size = tile_size
if tmscompatible is not None:
# Defaults the resolution factor to 0.703125 (2 tiles @ level 0)
            # Adheres to OSGeo TMS spec
# http://wiki.osgeo.org/wiki/Tile_Map_Service_Specification#global-geodetic
self.resFact = 180.0 / self.tile_size
else:
# Defaults the resolution factor to 1.40625 (1 tile @ level 0)
            # Adheres to OpenLayers, MapProxy, etc. default resolution for WMTS
self.resFact = 360.0 / self.tile_size
def LonLatToPixels(self, lon, lat, zoom):
"Converts lon/lat to pixel coordinates in given zoom of the EPSG:4326 pyramid"
res = self.resFact / 2**zoom
px = (180 + lon) / res
py = (90 + lat) / res
return px, py
def PixelsToTile(self, px, py):
"Returns coordinates of the tile covering region in pixel coordinates"
tx = int(math.ceil(px / float(self.tile_size)) - 1)
ty = int(math.ceil(py / float(self.tile_size)) - 1)
return tx, ty
def LonLatToTile(self, lon, lat, zoom):
"Returns the tile for zoom which covers given lon/lat coordinates"
px, py = self.LonLatToPixels(lon, lat, zoom)
return self.PixelsToTile(px, py)
def Resolution(self, zoom):
"Resolution (arc/pixel) for given zoom level (measured at Equator)"
return self.resFact / 2**zoom
def ZoomForPixelSize(self, pixelSize):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(MAXZOOMLEVEL):
if pixelSize > self.Resolution(i):
return max(0, i - 1) # We don't want to scale up
return MAXZOOMLEVEL - 1
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile"
res = self.resFact / 2**zoom
return (
tx * self.tile_size * res - 180,
ty * self.tile_size * res - 90,
(tx + 1) * self.tile_size * res - 180,
(ty + 1) * self.tile_size * res - 90
)
def TileLatLonBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in the SWNE form"
b = self.TileBounds(tx, ty, zoom)
return (b[1], b[0], b[3], b[2])
class Zoomify(object):
"""
Tiles compatible with the Zoomify viewer
----------------------------------------
"""
def __init__(self, width, height, tile_size=256, tileformat='jpg'):
"""Initialization of the Zoomify tile tree"""
self.tile_size = tile_size
self.tileformat = tileformat
imagesize = (width, height)
tiles = (math.ceil(width / tile_size), math.ceil(height / tile_size))
# Size (in tiles) for each tier of pyramid.
self.tierSizeInTiles = []
self.tierSizeInTiles.append(tiles)
        # Image size in pixels for each pyramid tier
self.tierImageSize = []
self.tierImageSize.append(imagesize)
while (imagesize[0] > tile_size or imagesize[1] > tile_size):
imagesize = (math.floor(imagesize[0] / 2), math.floor(imagesize[1] / 2))
tiles = (math.ceil(imagesize[0] / tile_size), math.ceil(imagesize[1] / tile_size))
self.tierSizeInTiles.append(tiles)
self.tierImageSize.append(imagesize)
self.tierSizeInTiles.reverse()
self.tierImageSize.reverse()
# Depth of the Zoomify pyramid, number of tiers (zoom levels)
self.numberOfTiers = len(self.tierSizeInTiles)
# Number of tiles up to the given tier of pyramid.
        self.tileCountUpToTier = [0]
for i in range(1, self.numberOfTiers + 1):
self.tileCountUpToTier.append(
self.tierSizeInTiles[i - 1][0] * self.tierSizeInTiles[i - 1][1] +
self.tileCountUpToTier[i - 1]
)
def tilefilename(self, x, y, z):
"""Returns filename for tile with given coordinates"""
tileIndex = x + y * self.tierSizeInTiles[z][0] + self.tileCountUpToTier[z]
return os.path.join("TileGroup%.0f" % math.floor(tileIndex / 256),
"%s-%s-%s.%s" % (z, x, y, self.tileformat))
class GDALError(Exception):
pass
def exit_with_error(message, details=""):
# Message printing and exit code kept from the way it worked using the OptionParser (in case
# someone parses the error output)
sys.stderr.write("Usage: gdal2tiles.py [options] input_file [output]\n\n")
sys.stderr.write("gdal2tiles.py: error: %s\n" % message)
if details:
sys.stderr.write("\n\n%s\n" % details)
sys.exit(2)
def set_cache_max(cache_in_bytes):
# We set the maximum using `SetCacheMax` and `GDAL_CACHEMAX` to support both fork and spawn as multiprocessing start methods.
# https://github.com/OSGeo/gdal/pull/2112
os.environ['GDAL_CACHEMAX'] = '%d' % int(cache_in_bytes / 1024 / 1024)
gdal.SetCacheMax(cache_in_bytes)
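# Illustrative usage (hypothetical numbers): set_cache_max(512 * 1024 * 1024)
# caps the GDAL block cache at 512 MB and exports the same limit through
# GDAL_CACHEMAX so that spawned worker processes pick it up as well.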
def generate_kml(tx, ty, tz, tileext, tile_size, tileswne, options, children=None, **args):
"""
Template for the KML. Returns filled string.
"""
if not children:
children = []
args['tx'], args['ty'], args['tz'] = tx, ty, tz
args['tileformat'] = tileext
if 'tile_size' not in args:
args['tile_size'] = tile_size
if 'minlodpixels' not in args:
args['minlodpixels'] = int(args['tile_size'] / 2)
if 'maxlodpixels' not in args:
args['maxlodpixels'] = int(args['tile_size'] * 8)
if children == []:
args['maxlodpixels'] = -1
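    # Example (illustrative): with the default 256 px tiles this yields
    # minLodPixels=128 and maxLodPixels=2048; childless tiles keep
    # maxLodPixels=-1 so they remain visible at any closer zoom.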
if tx is None:
tilekml = False
args['title'] = options.title
else:
tilekml = True
args['realtiley'] = GDAL2Tiles.getYTile(ty, tz, options)
args['title'] = "%d/%d/%d.kml" % (tz, tx, args['realtiley'])
args['south'], args['west'], args['north'], args['east'] = tileswne(tx, ty, tz)
if tx == 0:
args['drawOrder'] = 2 * tz + 1
elif tx is not None:
args['drawOrder'] = 2 * tz
else:
args['drawOrder'] = 0
url = options.url
if not url:
if tilekml:
url = "../../"
else:
url = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
<name>%(title)s</name>
<description></description>
<Style>
<ListStyle id="hideChildren">
<listItemType>checkHideChildren</listItemType>
</ListStyle>
</Style>""" % args
if tilekml:
s += """
<Region>
<LatLonAltBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%(minlodpixels)d</minLodPixels>
<maxLodPixels>%(maxlodpixels)d</maxLodPixels>
</Lod>
</Region>
<GroundOverlay>
<drawOrder>%(drawOrder)d</drawOrder>
<Icon>
<href>%(realtiley)d.%(tileformat)s</href>
</Icon>
<LatLonBox>
<north>%(north).14f</north>
<south>%(south).14f</south>
<east>%(east).14f</east>
<west>%(west).14f</west>
</LatLonBox>
</GroundOverlay>
""" % args
for cx, cy, cz in children:
csouth, cwest, cnorth, ceast = tileswne(cx, cy, cz)
ytile = GDAL2Tiles.getYTile(cy, cz, options)
s += """
<NetworkLink>
<name>%d/%d/%d.%s</name>
<Region>
<LatLonAltBox>
<north>%.14f</north>
<south>%.14f</south>
<east>%.14f</east>
<west>%.14f</west>
</LatLonAltBox>
<Lod>
<minLodPixels>%d</minLodPixels>
<maxLodPixels>-1</maxLodPixels>
</Lod>
</Region>
<Link>
<href>%s%d/%d/%d.kml</href>
<viewRefreshMode>onRegion</viewRefreshMode>
<viewFormat/>
</Link>
</NetworkLink>
""" % (cz, cx, ytile, args['tileformat'], cnorth, csouth, ceast, cwest,
args['minlodpixels'], url, cz, cx, ytile)
s += """ </Document>
</kml>
"""
return s
def scale_query_to_tile(dsquery, dstile, tiledriver, options, tilefilename=''):
"""Scales down query dataset to the tile dataset"""
querysize = dsquery.RasterXSize
tile_size = dstile.RasterXSize
tilebands = dstile.RasterCount
if options.resampling == 'average':
# Function: gdal.RegenerateOverview()
for i in range(1, tilebands + 1):
# Black border around NODATA
res = gdal.RegenerateOverview(dsquery.GetRasterBand(i), dstile.GetRasterBand(i),
'average')
if res != 0:
exit_with_error("RegenerateOverview() failed on %s, error %d" % (
tilefilename, res))
elif options.resampling == 'antialias' and numpy_available:
# Scaling by PIL (Python Imaging Library) - improved Lanczos
array = numpy.zeros((querysize, querysize, tilebands), numpy.uint8)
for i in range(tilebands):
array[:, :, i] = gdalarray.BandReadAsArray(dsquery.GetRasterBand(i + 1),
0, 0, querysize, querysize)
im = Image.fromarray(array, 'RGBA') # Always four bands
im1 = im.resize((tile_size, tile_size), Image.ANTIALIAS)
if os.path.exists(tilefilename):
im0 = Image.open(tilefilename)
im1 = Image.composite(im1, im0, im1)
im1.save(tilefilename, tiledriver)
else:
if options.resampling == 'near':
gdal_resampling = gdal.GRA_NearestNeighbour
elif options.resampling == 'bilinear':
gdal_resampling = gdal.GRA_Bilinear
elif options.resampling == 'cubic':
gdal_resampling = gdal.GRA_Cubic
elif options.resampling == 'cubicspline':
gdal_resampling = gdal.GRA_CubicSpline
elif options.resampling == 'lanczos':
gdal_resampling = gdal.GRA_Lanczos
elif options.resampling == 'mode':
gdal_resampling = gdal.GRA_Mode
elif options.resampling == 'max':
gdal_resampling = gdal.GRA_Max
elif options.resampling == 'min':
gdal_resampling = gdal.GRA_Min
elif options.resampling == 'med':
gdal_resampling = gdal.GRA_Med
elif options.resampling == 'q1':
gdal_resampling = gdal.GRA_Q1
elif options.resampling == 'q3':
gdal_resampling = gdal.GRA_Q3
# Other algorithms are implemented by gdal.ReprojectImage().
dsquery.SetGeoTransform((0.0, tile_size / float(querysize), 0.0, 0.0, 0.0,
tile_size / float(querysize)))
dstile.SetGeoTransform((0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
res = gdal.ReprojectImage(dsquery, dstile, None, None, gdal_resampling)
if res != 0:
exit_with_error("ReprojectImage() failed on %s, error %d" % (tilefilename, res))
def setup_no_data_values(input_dataset, options):
"""
Extract the NODATA values from the dataset or use the passed arguments as override if any
"""
in_nodata = []
if options.srcnodata:
nds = list(map(float, options.srcnodata.split(',')))
if len(nds) < input_dataset.RasterCount:
in_nodata = (nds * input_dataset.RasterCount)[:input_dataset.RasterCount]
else:
in_nodata = nds
else:
for i in range(1, input_dataset.RasterCount + 1):
band = input_dataset.GetRasterBand(i)
raster_no_data = band.GetNoDataValue()
if raster_no_data is not None:
# Ignore nodata values that are not in the range of the band data type (see https://github.com/OSGeo/gdal/pull/2299)
if band.DataType == gdal.GDT_Byte and (raster_no_data != int(raster_no_data) or raster_no_data < 0 or raster_no_data > 255):
# We should possibly do similar check for other data types
in_nodata = []
break
in_nodata.append(raster_no_data)
if options.verbose:
print("NODATA: %s" % in_nodata)
return in_nodata
def setup_input_srs(input_dataset, options):
"""
Determines and returns the Input Spatial Reference System (SRS) as an osr object and as a
WKT representation
    Prefers the one passed in the command line arguments. If None, tries to extract it
    from the input dataset
"""
input_srs = None
input_srs_wkt = None
if options.s_srs:
input_srs = osr.SpatialReference()
input_srs.SetFromUserInput(options.s_srs)
input_srs_wkt = input_srs.ExportToWkt()
else:
input_srs_wkt = input_dataset.GetProjection()
if not input_srs_wkt and input_dataset.GetGCPCount() != 0:
input_srs_wkt = input_dataset.GetGCPProjection()
if input_srs_wkt:
input_srs = osr.SpatialReference()
input_srs.ImportFromWkt(input_srs_wkt)
if input_srs is not None:
input_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return input_srs, input_srs_wkt
def setup_output_srs(input_srs, options):
"""
Setup the desired SRS (based on options)
"""
output_srs = osr.SpatialReference()
if options.profile == 'mercator':
output_srs.ImportFromEPSG(3857)
elif options.profile == 'geodetic':
output_srs.ImportFromEPSG(4326)
elif options.profile == 'raster':
output_srs = input_srs
else:
output_srs = tmsMap[options.profile].srs.Clone()
if output_srs:
output_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
return output_srs
def has_georeference(dataset):
return (dataset.GetGeoTransform() != (0.0, 1.0, 0.0, 0.0, 0.0, 1.0) or
dataset.GetGCPCount() != 0)
def reproject_dataset(from_dataset, from_srs, to_srs, options=None):
"""
Returns the input dataset in the expected "destination" SRS.
If the dataset is already in the correct SRS, returns it unmodified
"""
if not from_srs or not to_srs:
raise GDALError("from and to SRS must be defined to reproject the dataset")
if (from_srs.ExportToProj4() != to_srs.ExportToProj4()) or (from_dataset.GetGCPCount() != 0):
if from_srs.IsGeographic() and to_srs.GetAuthorityName(None) == 'EPSG' and to_srs.GetAuthorityCode(None) == '3857':
from_gt = from_dataset.GetGeoTransform(can_return_null=True)
if from_gt and from_gt[2] == 0 and from_gt[4] == 0 and from_gt[5] < 0:
maxlat = from_gt[3]
minlat = from_gt[3] + from_dataset.RasterYSize * from_gt[5]
MAX_LAT = 85.0511287798066
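                # Web Mercator is only defined up to ~85.05113 deg of latitude,
                # so the source bounds are clamped before computing the warp extent.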
adjustBounds = False
if maxlat > MAX_LAT:
maxlat = MAX_LAT
adjustBounds = True
if minlat < -MAX_LAT:
minlat = -MAX_LAT
adjustBounds = True
if adjustBounds:
ct = osr.CoordinateTransformation(from_srs, to_srs)
west, south = ct.TransformPoint(from_gt[0], minlat)[:2]
east, north = ct.TransformPoint(from_gt[0] + from_dataset.RasterXSize * from_gt[1], maxlat)[:2]
return gdal.Warp("", from_dataset, format='VRT', outputBounds = [west, south, east, north], dstSRS = 'EPSG:3857')
to_dataset = gdal.AutoCreateWarpedVRT(from_dataset,
from_srs.ExportToWkt(), to_srs.ExportToWkt())
if options and options.verbose:
print("Warping of the raster by AutoCreateWarpedVRT (result saved into 'tiles.vrt')")
to_dataset.GetDriver().CreateCopy("tiles.vrt", to_dataset)
return to_dataset
else:
return from_dataset
def add_gdal_warp_options_to_string(vrt_string, warp_options):
if not warp_options:
return vrt_string
vrt_root = ElementTree.fromstring(vrt_string)
options = vrt_root.find("GDALWarpOptions")
if options is None:
return vrt_string
for key, value in warp_options.items():
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": key})
tb.data(value)
tb.end("Option")
elem = tb.close()
options.insert(0, elem)
return ElementTree.tostring(vrt_root).decode()
def update_no_data_values(warped_vrt_dataset, nodata_values, options=None):
"""
Takes an array of NODATA values and forces them on the WarpedVRT file dataset passed
"""
# TODO: gbataille - Seems that I forgot tests there
assert nodata_values != []
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_gdal_warp_options_to_string(
vrt_string, {"INIT_DEST": "NO_DATA", "UNIFIED_SRC_NODATA": "YES"})
# TODO: gbataille - check the need for this replacement. Seems to work without
# # replace BandMapping tag for NODATA bands....
# for i in range(len(nodata_values)):
# s = s.replace(
# '<BandMapping src="%i" dst="%i"/>' % ((i+1), (i+1)),
# """
# <BandMapping src="%i" dst="%i">
# <SrcNoDataReal>%i</SrcNoDataReal>
# <SrcNoDataImag>0</SrcNoDataImag>
# <DstNoDataReal>%i</DstNoDataReal>
# <DstNoDataImag>0</DstNoDataImag>
# </BandMapping>
# """ % ((i+1), (i+1), nodata_values[i], nodata_values[i]))
corrected_dataset = gdal.Open(vrt_string)
# set NODATA_VALUE metadata
corrected_dataset.SetMetadataItem(
'NODATA_VALUES', ' '.join([str(i) for i in nodata_values]))
if options and options.verbose:
print("Modified warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(corrected_dataset.GetMetadata("xml:VRT")[0])
return corrected_dataset
def add_alpha_band_to_string_vrt(vrt_string):
    # TODO: gbataille - Old code speaks of this being equivalent to gdalwarp -dstalpha
# To be checked
vrt_root = ElementTree.fromstring(vrt_string)
index = 0
nb_bands = 0
for subelem in list(vrt_root):
if subelem.tag == "VRTRasterBand":
nb_bands += 1
color_node = subelem.find("./ColorInterp")
if color_node is not None and color_node.text == "Alpha":
raise Exception("Alpha band already present")
else:
if nb_bands:
# This means that we are one element after the Band definitions
break
index += 1
tb = ElementTree.TreeBuilder()
tb.start("VRTRasterBand",
{'dataType': "Byte", "band": str(nb_bands + 1), "subClass": "VRTWarpedRasterBand"})
tb.start("ColorInterp", {})
tb.data("Alpha")
tb.end("ColorInterp")
tb.end("VRTRasterBand")
elem = tb.close()
vrt_root.insert(index, elem)
warp_options = vrt_root.find(".//GDALWarpOptions")
tb = ElementTree.TreeBuilder()
tb.start("DstAlphaBand", {})
tb.data(str(nb_bands + 1))
tb.end("DstAlphaBand")
elem = tb.close()
warp_options.append(elem)
# TODO: gbataille - this is a GDALWarpOptions. Why put it in a specific place?
tb = ElementTree.TreeBuilder()
tb.start("Option", {"name": "INIT_DEST"})
tb.data("0")
tb.end("Option")
elem = tb.close()
warp_options.append(elem)
return ElementTree.tostring(vrt_root).decode()
def update_alpha_value_for_non_alpha_inputs(warped_vrt_dataset, options=None):
"""
    Handles datasets with 1 or 3 bands, i.e. without an alpha channel, in the case the nodata
    value has not been forced by options
"""
if warped_vrt_dataset.RasterCount in [1, 3]:
vrt_string = warped_vrt_dataset.GetMetadata("xml:VRT")[0]
vrt_string = add_alpha_band_to_string_vrt(vrt_string)
warped_vrt_dataset = gdal.Open(vrt_string)
if options and options.verbose:
print("Modified -dstalpha warping result saved into 'tiles1.vrt'")
with open("tiles1.vrt", "w") as f:
f.write(warped_vrt_dataset.GetMetadata("xml:VRT")[0])
return warped_vrt_dataset
def nb_data_bands(dataset):
"""
Return the number of data (non-alpha) bands of a gdal dataset
"""
alphaband = dataset.GetRasterBand(1).GetMaskBand()
if ((alphaband.GetMaskFlags() & gdal.GMF_ALPHA) or
dataset.RasterCount == 4 or
dataset.RasterCount == 2):
return dataset.RasterCount - 1
return dataset.RasterCount
def create_base_tile(tile_job_info, tile_detail):
dataBandsCount = tile_job_info.nb_data_bands
output = tile_job_info.output_file_path
tileext = tile_job_info.tile_extension
tile_size = tile_job_info.tile_size
options = tile_job_info.options
tilebands = dataBandsCount + 1
cached_ds = getattr(threadLocal, 'cached_ds', None)
if cached_ds and cached_ds.GetDescription() == tile_job_info.src_file:
ds = cached_ds
else:
ds = gdal.Open(tile_job_info.src_file, gdal.GA_ReadOnly)
threadLocal.cached_ds = ds
mem_drv = gdal.GetDriverByName('MEM')
out_drv = gdal.GetDriverByName(tile_job_info.tile_driver)
alphaband = ds.GetRasterBand(1).GetMaskBand()
tx = tile_detail.tx
ty = tile_detail.ty
tz = tile_detail.tz
rx = tile_detail.rx
ry = tile_detail.ry
rxsize = tile_detail.rxsize
rysize = tile_detail.rysize
wx = tile_detail.wx
wy = tile_detail.wy
wxsize = tile_detail.wxsize
wysize = tile_detail.wysize
querysize = tile_detail.querysize
# Tile dataset in memory
tilefilename = os.path.join(
output, str(tz), str(tx), "%s.%s" % (ty, tileext))
dstile = mem_drv.Create('', tile_size, tile_size, tilebands)
data = alpha = None
if options.verbose:
print("\tReadRaster Extent: ",
(rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize))
    # Query is in 'nearest neighbour' but can be bigger than the tile_size
    # We scale down the query to the tile_size by the supplied algorithm.
if rxsize != 0 and rysize != 0 and wxsize != 0 and wysize != 0:
alpha = alphaband.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize)
# Detect totally transparent tile and skip its creation
if tile_job_info.exclude_transparent and len(alpha) == alpha.count('\x00'.encode('ascii')):
return
data = ds.ReadRaster(rx, ry, rxsize, rysize, wxsize, wysize,
band_list=list(range(1, dataBandsCount + 1)))
# The tile in memory is a transparent file by default. Write pixel values into it if
# any
if data:
if tile_size == querysize:
# Use the ReadRaster result directly in tiles ('nearest neighbour' query)
dstile.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dstile.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
# Note: For source drivers based on WaveLet compression (JPEG2000, ECW,
# MrSID) the ReadRaster function returns high-quality raster (not ugly
# nearest neighbour)
# TODO: Use directly 'near' for WaveLet files
else:
# Big ReadRaster query in memory scaled to the tile_size - all but 'near'
# algo
dsquery = mem_drv.Create('', querysize, querysize, tilebands)
# TODO: fill the null value in case a tile without alpha is produced (now
# only png tiles are supported)
dsquery.WriteRaster(wx, wy, wxsize, wysize, data,
band_list=list(range(1, dataBandsCount + 1)))
dsquery.WriteRaster(wx, wy, wxsize, wysize, alpha, band_list=[tilebands])
scale_query_to_tile(dsquery, dstile, tile_job_info.tile_driver, options,
tilefilename=tilefilename)
del dsquery
del data
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_drv.CreateCopy(tilefilename, dstile, strict=0)
del dstile
# Create a KML file for this tile.
if tile_job_info.kml:
swne = get_tile_swne(tile_job_info, options)
if swne is not None:
kmlfilename = os.path.join(output, str(tz), str(tx), '%d.kml' % GDAL2Tiles.getYTile(ty, tz, options))
if not options.resume or not os.path.exists(kmlfilename):
with open(kmlfilename, 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
swne, tile_job_info.options
).encode('utf-8'))
def create_overview_tiles(tile_job_info, output_folder, options):
"""Generation of the overview tiles (higher in the pyramid) based on existing tiles"""
mem_driver = gdal.GetDriverByName('MEM')
tile_driver = tile_job_info.tile_driver
out_driver = gdal.GetDriverByName(tile_driver)
tilebands = tile_job_info.nb_data_bands + 1
# Usage of existing tiles: from 4 underlying tiles generate one as overview.
tcount = 0
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
tcount += (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
if tcount == 0:
return
if not options.quiet:
print("Generating Overview Tiles:")
progress_bar = ProgressBar(tcount)
progress_bar.start()
for tz in range(tile_job_info.tmaxz - 1, tile_job_info.tminz - 1, -1):
tminx, tminy, tmaxx, tmaxy = tile_job_info.tminmax[tz]
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
ytile = GDAL2Tiles.getYTile(ty, tz, options)
tilefilename = os.path.join(output_folder,
str(tz),
str(tx),
"%s.%s" % (ytile, tile_job_info.tile_extension))
if options.verbose:
print(ti, '/', tcount, tilefilename)
if options.resume and os.path.exists(tilefilename):
if options.verbose:
print("Tile generation skipped because of --resume")
else:
progress_bar.log_progress()
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
dsquery = mem_driver.Create('', 2 * tile_job_info.tile_size,
2 * tile_job_info.tile_size, tilebands)
# TODO: fill the null value
dstile = mem_driver.Create('', tile_job_info.tile_size, tile_job_info.tile_size,
tilebands)
# TODO: Implement more clever walking on the tiles with cache functionality
# probably walk should start with reading of four tiles from top left corner
# Hilbert curve
children = []
# Read the tiles and write them to query window
for y in range(2 * ty, 2 * ty + 2):
for x in range(2 * tx, 2 * tx + 2):
minx, miny, maxx, maxy = tile_job_info.tminmax[tz + 1]
if x >= minx and x <= maxx and y >= miny and y <= maxy:
ytile2 = GDAL2Tiles.getYTile(y, tz+1, options)
base_tile_path = os.path.join(output_folder, str(tz + 1), str(x),
"%s.%s" % (ytile2, tile_job_info.tile_extension))
if not os.path.isfile(base_tile_path):
continue
dsquerytile = gdal.Open(
base_tile_path,
gdal.GA_ReadOnly)
if x == 2*tx:
tileposx = 0
else:
tileposx = tile_job_info.tile_size
if options.xyz and options.profile == 'raster':
if y == 2*ty:
tileposy = 0
else:
tileposy = tile_job_info.tile_size
else:
if y == 2*ty:
tileposy = tile_job_info.tile_size
else:
tileposy = 0
dsquery.WriteRaster(
tileposx, tileposy, tile_job_info.tile_size,
tile_job_info.tile_size,
dsquerytile.ReadRaster(0, 0,
tile_job_info.tile_size,
tile_job_info.tile_size),
band_list=list(range(1, tilebands + 1)))
children.append([x, y, tz + 1])
if children:
scale_query_to_tile(dsquery, dstile, tile_driver, options,
tilefilename=tilefilename)
# Write a copy of tile to png/jpg
if options.resampling != 'antialias':
# Write a copy of tile to png/jpg
out_driver.CreateCopy(tilefilename, dstile, strict=0)
if options.verbose:
print("\tbuild from zoom", tz + 1,
" tiles:", (2 * tx, 2 * ty), (2 * tx + 1, 2 * ty),
(2 * tx, 2 * ty + 1), (2 * tx + 1, 2 * ty + 1))
# Create a KML file for this tile.
if tile_job_info.kml:
swne = get_tile_swne(tile_job_info, options)
if swne is not None:
with open(os.path.join(
output_folder,
'%d/%d/%d.kml' % (tz, tx, ytile)
), 'wb') as f:
f.write(generate_kml(
tx, ty, tz, tile_job_info.tile_extension, tile_job_info.tile_size,
swne, options, children
).encode('utf-8'))
if not options.verbose and not options.quiet:
progress_bar.log_progress()
def optparse_init():
"""Prepare the option parser for input (argv)"""
from optparse import OptionParser, OptionGroup
usage = "Usage: %prog [options] input_file [output]"
p = OptionParser(usage, version="%prog " + __version__)
p.add_option("-p", "--profile", dest='profile',
type='choice', choices=profile_list,
help=("Tile cutting profile (%s) - default 'mercator' "
"(Google Maps compatible)" % ",".join(profile_list)))
p.add_option("-r", "--resampling", dest="resampling",
type='choice', choices=resampling_list,
help="Resampling method (%s) - default 'average'" % ",".join(resampling_list))
p.add_option('-s', '--s_srs', dest="s_srs", metavar="SRS",
help="The spatial reference system used for the source input data")
p.add_option('-z', '--zoom', dest="zoom",
help="Zoom levels to render (format:'2-5' or '10').")
p.add_option('-e', '--resume', dest="resume", action="store_true",
help="Resume mode. Generate only missing files.")
p.add_option('-a', '--srcnodata', dest="srcnodata", metavar="NODATA",
help="Value in the input dataset considered as transparent")
p.add_option('-d', '--tmscompatible', dest="tmscompatible", action="store_true",
help=("When using the geodetic profile, specifies the base resolution "
"as 0.703125 or 2 tiles at zoom level 0."))
p.add_option('--xyz',
action='store_true', dest='xyz',
help="Use XYZ tile numbering (OSM Slippy Map tiles) instead of TMS")
p.add_option("-v", "--verbose",
action="store_true", dest="verbose",
help="Print status messages to stdout")
p.add_option("-x", "--exclude",
action="store_true", dest="exclude_transparent",
help="Exclude transparent tiles from result tileset")
p.add_option("-q", "--quiet",
action="store_true", dest="quiet",
help="Disable messages and status to stdout")
p.add_option("--processes",
dest="nb_processes",
type='int',
help="Number of processes to use for tiling")
p.add_option("--tilesize", dest="tilesize", metavar="PIXELS", default=256,
type='int',
help="Width and height in pixel of a tile")
# KML options
g = OptionGroup(p, "KML (Google Earth) options",
"Options for generated Google Earth SuperOverlay metadata")
g.add_option("-k", "--force-kml", dest='kml', action="store_true",
help=("Generate KML for Google Earth - default for 'geodetic' profile and "
"'raster' in EPSG:4326. For a dataset with different projection use "
"with caution!"))
g.add_option("-n", "--no-kml", dest='kml', action="store_false",
help="Avoid automatic generation of KML files for EPSG:4326")
g.add_option("-u", "--url", dest='url',
help="URL address where the generated tiles are going to be published")
p.add_option_group(g)
# HTML options
g = OptionGroup(p, "Web viewer options",
"Options for generated HTML viewers a la Google Maps")
g.add_option("-w", "--webviewer", dest='webviewer', type='choice', choices=webviewer_list,
help="Web viewer to generate (%s) - default 'all'" % ",".join(webviewer_list))
g.add_option("-t", "--title", dest='title',
help="Title of the map")
g.add_option("-c", "--copyright", dest='copyright',
help="Copyright for the map")
g.add_option("-g", "--googlekey", dest='googlekey',
help="Google Maps API key from http://code.google.com/apis/maps/signup.html")
g.add_option("-b", "--bingkey", dest='bingkey',
help="Bing Maps API key from https://www.bingmapsportal.com/")
p.add_option_group(g)
# MapML options
g = OptionGroup(p, "MapML options",
"Options for generated MapML file")
g.add_option("--mapml-template", dest='mapml_template', action="store_true",
help=("Filename of a template mapml file where variables will "
"be substituted. If not specified, the generic "
"template_tiles.mapml file from GDAL data resources "
"will be used"))
p.add_option_group(g)
p.set_defaults(verbose=False, profile="mercator", kml=False, url='',
webviewer='all', copyright='', resampling='average', resume=False,
googlekey='INSERT_YOUR_KEY_HERE', bingkey='INSERT_YOUR_KEY_HERE',
processes=1)
return p
def process_args(argv):
parser = optparse_init()
options, args = parser.parse_args(args=argv)
# Args should be either an input file OR an input file and an output folder
if not args:
exit_with_error("You need to specify at least an input file as argument to the script")
if len(args) > 2:
exit_with_error("Processing of several input files is not supported.",
"Please first use a tool like gdal_vrtmerge.py or gdal_merge.py on the "
"files: gdal_vrtmerge.py -o merged.vrt %s" % " ".join(args))
input_file = args[0]
if not os.path.isfile(input_file):
exit_with_error("The provided input file %s does not exist or is not a file" % input_file)
if len(args) == 2:
output_folder = args[1]
else:
# Directory with input filename without extension in actual directory
output_folder = os.path.splitext(os.path.basename(input_file))[0]
if options.webviewer == 'mapml':
options.xyz = True
if options.profile == 'geodetic':
options.tmscompatible = True
options = options_post_processing(options, input_file, output_folder)
return input_file, output_folder, options
def options_post_processing(options, input_file, output_folder):
if not options.title:
options.title = os.path.basename(input_file)
if options.url and not options.url.endswith('/'):
options.url += '/'
if options.url:
out_path = output_folder
if out_path.endswith("/"):
out_path = out_path[:-1]
options.url += os.path.basename(out_path) + '/'
# Supported options
if options.resampling == 'antialias' and not numpy_available:
exit_with_error("'antialias' resampling algorithm is not available.",
"Install PIL (Python Imaging Library) and numpy.")
try:
os.path.basename(input_file).encode('ascii')
except UnicodeEncodeError:
full_ascii = False
else:
full_ascii = True
# LC_CTYPE check
if not full_ascii and 'UTF-8' not in os.environ.get("LC_CTYPE", ""):
if not options.quiet:
print("\nWARNING: "
"You are running gdal2tiles.py with a LC_CTYPE environment variable that is "
"not UTF-8 compatible, and your input file contains non-ascii characters. "
"The generated sample googlemaps, openlayers or "
"leaflet files might contain some invalid characters as a result\n")
# Output the results
if options.verbose:
print("Options:", options)
print("Input:", input_file)
print("Output:", output_folder)
print("Cache: %s MB" % (gdal.GetCacheMax() / 1024 / 1024))
print('')
return options
class TileDetail(object):
tx = 0
ty = 0
tz = 0
rx = 0
ry = 0
rxsize = 0
rysize = 0
wx = 0
wy = 0
wxsize = 0
wysize = 0
querysize = 0
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __str__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
def __repr__(self):
return "TileDetail %s\n%s\n%s\n" % (self.tx, self.ty, self.tz)
class TileJobInfo(object):
"""
Plain object to hold tile job configuration for a dataset
"""
src_file = ""
nb_data_bands = 0
output_file_path = ""
tile_extension = ""
tile_size = 0
tile_driver = None
kml = False
tminmax = []
tminz = 0
tmaxz = 0
in_srs_wkt = 0
out_geo_trans = []
ominy = 0
is_epsg_4326 = False
options = None
exclude_transparent = False
def __init__(self, **kwargs):
for key in kwargs:
if hasattr(self, key):
setattr(self, key, kwargs[key])
def __unicode__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __str__(self):
return "TileJobInfo %s\n" % (self.src_file)
def __repr__(self):
return "TileJobInfo %s\n" % (self.src_file)
class Gdal2TilesError(Exception):
pass
class GDAL2Tiles(object):
def __init__(self, input_file, output_folder, options):
"""Constructor function - initialization"""
self.out_drv = None
self.mem_drv = None
self.warped_input_dataset = None
self.out_srs = None
self.nativezoom = None
self.tminmax = None
self.tsize = None
self.mercator = None
self.geodetic = None
self.alphaband = None
self.dataBandsCount = None
self.out_gt = None
self.tileswne = None
self.swne = None
self.ominx = None
self.omaxx = None
self.omaxy = None
self.ominy = None
self.input_file = None
self.output_folder = None
self.isepsg4326 = None
self.in_srs = None
self.in_srs_wkt = None
# Tile format
self.tile_size = 256
if options.tilesize:
self.tile_size = options.tilesize
self.tiledriver = 'PNG'
self.tileext = 'png'
self.tmp_dir = tempfile.mkdtemp()
self.tmp_vrt_filename = os.path.join(self.tmp_dir, str(uuid4()) + '.vrt')
# Should we read bigger window of the input raster and scale it down?
# Note: Modified later by open_input()
# Not for 'near' resampling
# Not for Wavelet based drivers (JPEG2000, ECW, MrSID)
# Not for 'raster' profile
self.scaledquery = True
        # How big should the query window be for scaling down
        # Later on reset according to the chosen resampling algorithm
self.querysize = 4 * self.tile_size
# Should we use Read on the input file for generating overview tiles?
# Note: Modified later by open_input()
# Otherwise the overview tiles are generated from existing underlying tiles
self.overviewquery = False
self.input_file = input_file
self.output_folder = output_folder
self.options = options
if self.options.resampling == 'near':
self.querysize = self.tile_size
elif self.options.resampling == 'bilinear':
self.querysize = self.tile_size * 2
# User specified zoom levels
self.tminz = None
self.tmaxz = None
if self.options.zoom:
minmax = self.options.zoom.split('-', 1)
minmax.extend([''])
zoom_min, zoom_max = minmax[:2]
self.tminz = int(zoom_min)
if zoom_max:
if int(zoom_max) < self.tminz:
raise Exception('max zoom (%d) less than min zoom (%d)' %
(int(zoom_max), self.tminz))
self.tmaxz = int(zoom_max)
else:
self.tmaxz = int(zoom_min)
# KML generation
self.kml = self.options.kml
# -------------------------------------------------------------------------
def open_input(self):
"""Initialization of the input raster, reprojection if necessary"""
gdal.AllRegister()
self.out_drv = gdal.GetDriverByName(self.tiledriver)
self.mem_drv = gdal.GetDriverByName('MEM')
if not self.out_drv:
raise Exception("The '%s' driver was not found, is it available in this GDAL build?" %
self.tiledriver)
if not self.mem_drv:
raise Exception("The 'MEM' driver was not found, is it available in this GDAL build?")
# Open the input file
if self.input_file:
input_dataset = gdal.Open(self.input_file, gdal.GA_ReadOnly)
else:
raise Exception("No input file was specified")
if self.options.verbose:
print("Input file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
if not input_dataset:
# Note: GDAL prints the ERROR message too
exit_with_error("It is not possible to open the input file '%s'." % self.input_file)
# Read metadata from the input file
if input_dataset.RasterCount == 0:
exit_with_error("Input file '%s' has no raster band" % self.input_file)
if input_dataset.GetRasterBand(1).GetRasterColorTable():
exit_with_error(
"Please convert this file to RGB/RGBA and run gdal2tiles on the result.",
"From paletted file you can create RGBA file (temp.vrt) by:\n"
"gdal_translate -of vrt -expand rgba %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
if input_dataset.GetRasterBand(1).DataType != gdal.GDT_Byte:
exit_with_error(
"Please convert this file to 8-bit and run gdal2tiles on the result.",
"To scale pixel values you can use:\n"
"gdal_translate -of VRT -ot Byte -scale %s temp.vrt\n"
"then run:\n"
"gdal2tiles temp.vrt" % self.input_file
)
in_nodata = setup_no_data_values(input_dataset, self.options)
if self.options.verbose:
print("Preprocessed file:",
"( %sP x %sL - %s bands)" % (input_dataset.RasterXSize,
input_dataset.RasterYSize,
input_dataset.RasterCount))
self.in_srs, self.in_srs_wkt = setup_input_srs(input_dataset, self.options)
self.out_srs = setup_output_srs(self.in_srs, self.options)
# If input and output reference systems are different, we reproject the input dataset into
# the output reference system for easier manipulation
self.warped_input_dataset = None
if self.options.profile != 'raster':
if not self.in_srs:
exit_with_error(
"Input file has unknown SRS.",
"Use --s_srs EPSG:xyz (or similar) to provide source reference system.")
if not has_georeference(input_dataset):
exit_with_error(
"There is no georeference - neither affine transformation (worldfile) "
"nor GCPs. You can generate only 'raster' profile tiles.",
"Either gdal2tiles with parameter -p 'raster' or use another GIS "
"software for georeference e.g. gdal_transform -gcp / -a_ullr / -a_srs"
)
if ((self.in_srs.ExportToProj4() != self.out_srs.ExportToProj4()) or
(input_dataset.GetGCPCount() != 0)):
self.warped_input_dataset = reproject_dataset(
input_dataset, self.in_srs, self.out_srs)
if in_nodata:
self.warped_input_dataset = update_no_data_values(
self.warped_input_dataset, in_nodata, options=self.options)
else:
self.warped_input_dataset = update_alpha_value_for_non_alpha_inputs(
self.warped_input_dataset, options=self.options)
if self.warped_input_dataset and self.options.verbose:
print("Projected file:", "tiles.vrt", "( %sP x %sL - %s bands)" % (
self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize,
self.warped_input_dataset.RasterCount))
if not self.warped_input_dataset:
self.warped_input_dataset = input_dataset
gdal.GetDriverByName('VRT').CreateCopy(self.tmp_vrt_filename,
self.warped_input_dataset)
# Get alpha band (either directly or from NODATA value)
self.alphaband = self.warped_input_dataset.GetRasterBand(1).GetMaskBand()
self.dataBandsCount = nb_data_bands(self.warped_input_dataset)
# KML test
self.isepsg4326 = False
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if self.out_srs and srs4326.ExportToProj4() == self.out_srs.ExportToProj4():
self.kml = True
self.isepsg4326 = True
if self.options.verbose:
print("KML autotest OK!")
# Read the georeference
self.out_gt = self.warped_input_dataset.GetGeoTransform()
# Test the size of the pixel
# Report error in case rotation/skew is in geotransform (possible only in 'raster' profile)
if (self.out_gt[2], self.out_gt[4]) != (0, 0):
exit_with_error("Georeference of the raster contains rotation or skew. "
"Such raster is not supported. Please use gdalwarp first.")
# Here we expect: pixel is square, no rotation on the raster
# Output Bounds - coordinates in the output SRS
self.ominx = self.out_gt[0]
self.omaxx = self.out_gt[0] + self.warped_input_dataset.RasterXSize * self.out_gt[1]
self.omaxy = self.out_gt[3]
self.ominy = self.out_gt[3] - self.warped_input_dataset.RasterYSize * self.out_gt[1]
# Note: maybe round(x, 14) to avoid the gdal_translate behavior, when 0 becomes -1e-15
if self.options.verbose:
print("Bounds (output srs):", round(self.ominx, 13), self.ominy, self.omaxx, self.omaxy)
# Calculating ranges for tiles in different zoom levels
if self.options.profile == 'mercator':
self.mercator = GlobalMercator(tile_size=self.tile_size)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.mercator.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.mercator.MetersToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.mercator.MetersToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**tz - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.mercator.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
            # (closest possible zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.mercator.ZoomForPixelSize(self.out_gt[1])
self.tminz = min(self.tminz, self.tmaxz)
if self.options.verbose:
print("Bounds (latlong):",
self.mercator.MetersToLatLon(self.ominx, self.ominy),
self.mercator.MetersToLatLon(self.omaxx, self.omaxy))
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:",
self.tmaxz,
"(",
self.mercator.Resolution(self.tmaxz),
")")
elif self.options.profile == 'geodetic':
self.geodetic = GlobalGeodetic(self.options.tmscompatible, tile_size=self.tile_size)
# Function which generates SWNE in LatLong for given tile
self.tileswne = self.geodetic.TileLatLonBounds
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, 32))
for tz in range(0, 32):
tminx, tminy = self.geodetic.LonLatToTile(self.ominx, self.ominy, tz)
tmaxx, tmaxy = self.geodetic.LonLatToTile(self.omaxx, self.omaxy, tz)
# crop tiles extending world limits (+-180,+-90)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(2**(tz + 1) - 1, tmaxx), min(2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# TODO: Maps crossing 180E (Alaska?)
            # Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = self.geodetic.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size))
# Get the maximal zoom level
            # (closest possible zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.geodetic.ZoomForPixelSize(self.out_gt[1])
self.tminz = min(self.tminz, self.tmaxz)
if self.options.verbose:
print("Bounds (latlong):", self.ominx, self.ominy, self.omaxx, self.omaxy)
elif self.options.profile == 'raster':
def log2(x):
return math.log10(x) / math.log10(2)
self.nativezoom = max(0, int(
max(math.ceil(log2(self.warped_input_dataset.RasterXSize / float(self.tile_size))),
math.ceil(log2(self.warped_input_dataset.RasterYSize / float(self.tile_size))))))
if self.options.verbose:
print("Native zoom of the raster:", self.nativezoom)
# Get the minimal zoom level (whole raster in one tile)
if self.tminz is None:
self.tminz = 0
# Get the maximal zoom level (native resolution of the raster)
if self.tmaxz is None:
self.tmaxz = self.nativezoom
elif self.tmaxz > self.nativezoom:
print('Clamping max zoom level to %d' % self.nativezoom)
self.tmaxz = self.nativezoom
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, self.tmaxz + 1))
self.tsize = list(range(0, self.tmaxz + 1))
for tz in range(0, self.tmaxz + 1):
tsize = 2.0**(self.nativezoom - tz) * self.tile_size
tminx, tminy = 0, 0
tmaxx = int(math.ceil(self.warped_input_dataset.RasterXSize / tsize)) - 1
tmaxy = int(math.ceil(self.warped_input_dataset.RasterYSize / tsize)) - 1
self.tsize[tz] = math.ceil(tsize)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Function which generates SWNE in LatLong for given tile
if self.kml and self.in_srs_wkt:
ct = osr.CoordinateTransformation(self.in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2**(self.tmaxz - z) * self.out_gt[1]) # X-pixel size in level
west = self.out_gt[0] + x * self.tile_size * pixelsizex
east = west + self.tile_size * pixelsizex
if self.options.xyz:
north = self.omaxy - y * self.tile_size * pixelsizex
south = north - self.tile_size * pixelsizex
else:
south = self.ominy + y * self.tile_size * pixelsizex
north = south + self.tile_size * pixelsizex
if not self.isepsg4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
self.tileswne = rastertileswne
else:
self.tileswne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tms = tmsMap[self.options.profile]
# Function which generates SWNE in LatLong for given tile
self.tileswne = None # not implemented
# Generate table with min max tile coordinates for all zoomlevels
self.tminmax = list(range(0, tms.level_count+1))
for tz in range(0, tms.level_count+1):
tminx, tminy = tms.GeorefCoordToTileCoord(self.ominx, self.ominy, tz, self.tile_size)
tmaxx, tmaxy = tms.GeorefCoordToTileCoord(self.omaxx, self.omaxy, tz, self.tile_size)
tminx, tminy = max(0, tminx), max(0, tminy)
tmaxx, tmaxy = min(tms.matrix_width * 2**tz - 1, tmaxx), min(tms.matrix_height * 2**tz - 1, tmaxy)
self.tminmax[tz] = (tminx, tminy, tmaxx, tmaxy)
# Get the minimal zoom level (map covers area equivalent to one tile)
if self.tminz is None:
self.tminz = tms.ZoomForPixelSize(
self.out_gt[1] *
max(self.warped_input_dataset.RasterXSize,
self.warped_input_dataset.RasterYSize) /
float(self.tile_size), self.tile_size)
# Get the maximal zoom level
            # (closest possible zoom level matching the resolution of the raster)
if self.tmaxz is None:
self.tmaxz = tms.ZoomForPixelSize(self.out_gt[1], self.tile_size)
self.tminz = min(self.tminz, self.tmaxz)
if self.options.verbose:
print("Bounds (georef):", self.ominx, self.ominy, self.omaxx, self.omaxy)
print('MinZoomLevel:', self.tminz)
print("MaxZoomLevel:", self.tmaxz)
def generate_metadata(self):
"""
Generation of main metadata files and HTML viewers (metadata related to particular
tiles are generated during the tile processing).
"""
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
if self.options.profile == 'mercator':
south, west = self.mercator.MetersToLatLon(self.ominx, self.ominy)
north, east = self.mercator.MetersToLatLon(self.omaxx, self.omaxy)
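            # Clamp the extent to the latitude/longitude range covered by the spherical Mercator projection.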
south, west = max(-85.05112878, south), max(-180.0, west)
north, east = min(85.05112878, north), min(180.0, east)
self.swne = (south, west, north, east)
# Generate googlemaps.html
if self.options.webviewer in ('all', 'google') and self.options.profile == 'mercator':
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'googlemaps.html'))):
with open(os.path.join(self.output_folder, 'googlemaps.html'), 'wb') as f:
f.write(self.generate_googlemaps().encode('utf-8'))
# Generate leaflet.html
if self.options.webviewer in ('all', 'leaflet'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'leaflet.html'))):
with open(os.path.join(self.output_folder, 'leaflet.html'), 'wb') as f:
f.write(self.generate_leaflet().encode('utf-8'))
elif self.options.profile == 'geodetic':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
south, west = max(-90.0, south), max(-180.0, west)
north, east = min(90.0, north), min(180.0, east)
self.swne = (south, west, north, east)
elif self.options.profile == 'raster':
west, south = self.ominx, self.ominy
east, north = self.omaxx, self.omaxy
self.swne = (south, west, north, east)
else:
self.swne = None
# Generate openlayers.html
if self.options.webviewer in ('all', 'openlayers'):
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'openlayers.html'))):
with open(os.path.join(self.output_folder, 'openlayers.html'), 'wb') as f:
f.write(self.generate_openlayers().encode('utf-8'))
# Generate tilemapresource.xml.
if not self.options.xyz and self.swne is not None and (not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'tilemapresource.xml'))):
with open(os.path.join(self.output_folder, 'tilemapresource.xml'), 'wb') as f:
f.write(self.generate_tilemapresource().encode('utf-8'))
# Generate mapml file
if self.options.webviewer in ('all', 'mapml') and \
self.options.xyz and \
self.options.profile != 'raster' and \
(self.options.profile != 'geodetic' or self.options.tmscompatible) and \
(not self.options.resume or not os.path.exists(os.path.join(self.output_folder, 'mapml.mapml'))):
with open(os.path.join(self.output_folder, 'mapml.mapml'), 'wb') as f:
f.write(self.generate_mapml().encode('utf-8'))
if self.kml and self.tileswne is not None:
            # TODO: This may be a problem when tminz was not generated automatically
# The root KML should contain links to all tiles in the tminz level
children = []
xmin, ymin, xmax, ymax = self.tminmax[self.tminz]
for x in range(xmin, xmax + 1):
for y in range(ymin, ymax + 1):
children.append([x, y, self.tminz])
# Generate Root KML
if self.kml:
if (not self.options.resume or not
os.path.exists(os.path.join(self.output_folder, 'doc.kml'))):
with open(os.path.join(self.output_folder, 'doc.kml'), 'wb') as f:
f.write(generate_kml(
None, None, None, self.tileext, self.tile_size, self.tileswne,
self.options, children
).encode('utf-8'))
def generate_base_tiles(self):
"""
Generation of the base tiles (the lowest in the pyramid) directly from the input raster
"""
if not self.options.quiet:
print("Generating Base Tiles:")
if self.options.verbose:
print('')
print("Tiles generated from the max zoom level:")
print("----------------------------------------")
print('')
# Set the bounds
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
ds = self.warped_input_dataset
tilebands = self.dataBandsCount + 1
querysize = self.querysize
if self.options.verbose:
print("dataBandsCount: ", self.dataBandsCount)
print("tilebands: ", tilebands)
tcount = (1 + abs(tmaxx - tminx)) * (1 + abs(tmaxy - tminy))
ti = 0
tile_details = []
tz = self.tmaxz
for ty in range(tmaxy, tminy - 1, -1):
for tx in range(tminx, tmaxx + 1):
ti += 1
ytile = GDAL2Tiles.getYTile(ty, tz, self.options)
tilefilename = os.path.join(
self.output_folder, str(tz), str(tx), "%s.%s" % (ytile, self.tileext))
if self.options.verbose:
print(ti, '/', tcount, tilefilename)
if self.options.resume and os.path.exists(tilefilename):
if self.options.verbose:
print("Tile generation skipped because of --resume")
continue
# Create directories for the tile
if not os.path.exists(os.path.dirname(tilefilename)):
os.makedirs(os.path.dirname(tilefilename))
if self.options.profile == 'mercator':
# Tile bounds in EPSG:3857
b = self.mercator.TileBounds(tx, ty, tz)
elif self.options.profile == 'geodetic':
b = self.geodetic.TileBounds(tx, ty, tz)
elif self.options.profile != 'raster':
b = tmsMap[self.options.profile].TileBounds(tx, ty, tz, self.tile_size)
# Don't scale up by nearest neighbour, better change the querysize
# to the native resolution (and return smaller query tile) for scaling
if self.options.profile != 'raster':
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1])
# Pixel size in the raster covering query geo extent
nativesize = wb[0] + wb[2]
if self.options.verbose:
print("\tNative Extent (querysize", nativesize, "): ", rb, wb)
# Tile bounds in raster coordinates for ReadRaster query
rb, wb = self.geo_query(ds, b[0], b[3], b[2], b[1], querysize=querysize)
rx, ry, rxsize, rysize = rb
wx, wy, wxsize, wysize = wb
else: # 'raster' profile:
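                    # In the 'raster' profile, tiles map directly onto pixel blocks of the source raster;
                    # edge tiles read only the remaining pixels, scaled into a full tile-size query window.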
tsize = int(self.tsize[tz]) # tile_size in raster coordinates for actual zoom
xsize = self.warped_input_dataset.RasterXSize # size of the raster in pixels
ysize = self.warped_input_dataset.RasterYSize
querysize = self.tile_size
rx = tx * tsize
rxsize = 0
if tx == tmaxx:
rxsize = xsize % tsize
if rxsize == 0:
rxsize = tsize
ry = ty * tsize
rysize = 0
if ty == tmaxy:
rysize = ysize % tsize
if rysize == 0:
rysize = tsize
wx, wy = 0, 0
wxsize = int(rxsize / float(tsize) * self.tile_size)
wysize = int(rysize / float(tsize) * self.tile_size)
if not self.options.xyz:
ry = ysize - (ty * tsize) - rysize
if wysize != self.tile_size:
wy = self.tile_size - wysize
# Read the source raster if anything is going inside the tile as per the computed
# geo_query
tile_details.append(
TileDetail(
tx=tx, ty=ytile, tz=tz, rx=rx, ry=ry, rxsize=rxsize, rysize=rysize, wx=wx,
wy=wy, wxsize=wxsize, wysize=wysize, querysize=querysize,
)
)
conf = TileJobInfo(
src_file=self.tmp_vrt_filename,
nb_data_bands=self.dataBandsCount,
output_file_path=self.output_folder,
tile_extension=self.tileext,
tile_driver=self.tiledriver,
tile_size=self.tile_size,
kml=self.kml,
tminmax=self.tminmax,
tminz=self.tminz,
tmaxz=self.tmaxz,
in_srs_wkt=self.in_srs_wkt,
out_geo_trans=self.out_gt,
ominy=self.ominy,
is_epsg_4326=self.isepsg4326,
options=self.options,
exclude_transparent=self.options.exclude_transparent,
)
return conf, tile_details
def geo_query(self, ds, ulx, uly, lrx, lry, querysize=0):
"""
        For a given dataset and a query in cartographic coordinates, returns parameters for ReadRaster()
        in raster coordinates together with the x/y shifts to apply (for border tiles). If querysize is not
        given, the extent is returned in the native resolution of dataset ds.
        Raises Gdal2TilesError if the dataset does not contain anything inside this geo_query
"""
geotran = ds.GetGeoTransform()
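        # Convert the georeferenced query window into pixel/line coordinates of the dataset
        # using its geotransform; the small epsilon guards against floating-point rounding.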
rx = int((ulx - geotran[0]) / geotran[1] + 0.001)
ry = int((uly - geotran[3]) / geotran[5] + 0.001)
rxsize = max(1, int((lrx - ulx) / geotran[1] + 0.5))
rysize = max(1, int((lry - uly) / geotran[5] + 0.5))
if not querysize:
wxsize, wysize = rxsize, rysize
else:
wxsize, wysize = querysize, querysize
# Coordinates should not go out of the bounds of the raster
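        # If the query window extends past the raster edge, clamp the read window and shift
        # the offset (wx/wy) at which the data is placed inside the tile buffer.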
wx = 0
if rx < 0:
rxshift = abs(rx)
wx = int(wxsize * (float(rxshift) / rxsize))
wxsize = wxsize - wx
rxsize = rxsize - int(rxsize * (float(rxshift) / rxsize))
rx = 0
if rx + rxsize > ds.RasterXSize:
wxsize = int(wxsize * (float(ds.RasterXSize - rx) / rxsize))
rxsize = ds.RasterXSize - rx
wy = 0
if ry < 0:
ryshift = abs(ry)
wy = int(wysize * (float(ryshift) / rysize))
wysize = wysize - wy
rysize = rysize - int(rysize * (float(ryshift) / rysize))
ry = 0
if ry + rysize > ds.RasterYSize:
wysize = int(wysize * (float(ds.RasterYSize - ry) / rysize))
rysize = ds.RasterYSize - ry
return (rx, ry, rxsize, rysize), (wx, wy, wxsize, wysize)
def generate_tilemapresource(self):
"""
Template for tilemapresource.xml. Returns filled string. Expected variables:
title, north, south, east, west, isepsg4326, projection, publishurl,
zoompixels, tile_size, tileformat, profile
"""
args = {}
args['title'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['profile'] = self.options.profile
if self.options.profile == 'mercator':
args['srs'] = "EPSG:3857"
elif self.options.profile == 'geodetic':
args['srs'] = "EPSG:4326"
elif self.options.s_srs:
args['srs'] = self.options.s_srs
elif self.out_srs:
args['srs'] = self.out_srs.ExportToWkt()
else:
args['srs'] = ""
s = """<?xml version="1.0" encoding="utf-8"?>
<TileMap version="1.0.0" tilemapservice="http://tms.osgeo.org/1.0.0">
<Title>%(title)s</Title>
<Abstract></Abstract>
<SRS>%(srs)s</SRS>
<BoundingBox minx="%(west).14f" miny="%(south).14f" maxx="%(east).14f" maxy="%(north).14f"/>
<Origin x="%(west).14f" y="%(south).14f"/>
<TileFormat width="%(tile_size)d" height="%(tile_size)d" mime-type="image/%(tileformat)s" extension="%(tileformat)s"/>
<TileSets profile="%(profile)s">
""" % args # noqa
for z in range(self.tminz, self.tmaxz + 1):
if self.options.profile == 'raster':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, (2**(self.nativezoom - z) * self.out_gt[1]), z)
elif self.options.profile == 'mercator':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 156543.0339 / 2**z, z)
elif self.options.profile == 'geodetic':
s += """ <TileSet href="%s%d" units-per-pixel="%.14f" order="%d"/>\n""" % (
args['publishurl'], z, 0.703125 / 2**z, z)
s += """ </TileSets>
</TileMap>
"""
return s
def generate_googlemaps(self):
"""
Template for googlemaps.html implementing Overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, googlemapskey, north, south, east, west, minzoom, maxzoom, tile_size, tileformat,
publishurl
"""
args = {}
args['title'] = self.options.title
args['googlemapskey'] = self.options.googlekey
args['south'], args['west'], args['north'], args['east'] = self.swne
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 95%%; border: 1px solid #888; }
</style>
<script src='http://maps.google.com/maps?file=api&v=2&key=%(googlemapskey)s'></script>
<script>
//<![CDATA[
/*
* Constants for given map
* TODO: read it from tilemapresource.xml
*/
var mapBounds = new GLatLngBounds(new GLatLng(%(south)s, %(west)s), new GLatLng(%(north)s, %(east)s));
var mapMinZoom = %(minzoom)s;
var mapMaxZoom = %(maxzoom)s;
var opacity = 0.75;
var map;
var hybridOverlay;
/*
* Create a Custom Opacity GControl
* http://www.maptiler.org/google-maps-overlay-opacity-control/
*/
var CTransparencyLENGTH = 58;
// maximum width that the knob can move (slide width minus knob width)
function CTransparencyControl( overlay ) {
this.overlay = overlay;
this.opacity = overlay.getTileLayer().getOpacity();
}
CTransparencyControl.prototype = new GControl();
// This function positions the slider to match the specified opacity
CTransparencyControl.prototype.setSlider = function(pos) {
var left = Math.round((CTransparencyLENGTH*pos));
this.slide.left = left;
this.knob.style.left = left+"px";
this.knob.style.top = "0px";
}
// This function reads the slider and sets the overlay opacity level
CTransparencyControl.prototype.setOpacity = function() {
// set the global variable
opacity = this.slide.left/CTransparencyLENGTH;
this.map.clearOverlays();
this.map.addOverlay(this.overlay, { zPriority: 0 });
if (this.map.getCurrentMapType() == G_HYBRID_MAP) {
this.map.addOverlay(hybridOverlay);
}
}
// This gets called by the API when addControl(new CTransparencyControl())
CTransparencyControl.prototype.initialize = function(map) {
var that=this;
this.map = map;
// Is this MSIE, if so we need to use AlphaImageLoader
var agent = navigator.userAgent.toLowerCase();
if ((agent.indexOf("msie") > -1) && (agent.indexOf("opera") < 1)){this.ie = true} else {this.ie = false}
// create the background graphic as a <div> containing an image
var container = document.createElement("div");
container.style.width="70px";
container.style.height="21px";
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
container.innerHTML = '<div style="height:21px; width:70px; ' +loader+ '" ></div>';
} else {
container.innerHTML = '<div style="height:21px; width:70px; background-image: url(http://www.maptiler.org/img/opacity-slider.png)" ></div>';
}
// create the knob as a GDraggableObject
// Handle transparent PNG files in MSIE
if (this.ie) {
var loader = "progid:DXImageTransform.Microsoft.AlphaImageLoader(src='http://www.maptiler.org/img/opacity-slider.png', sizingMethod='crop');";
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.overflow="hidden";
this.knob_img = document.createElement("div");
this.knob_img.style.height="21px";
this.knob_img.style.width="83px";
this.knob_img.style.filter=loader;
this.knob_img.style.position="relative";
this.knob_img.style.left="-70px";
this.knob.appendChild(this.knob_img);
} else {
this.knob = document.createElement("div");
this.knob.style.height="21px";
this.knob.style.width="13px";
this.knob.style.backgroundImage="url(http://www.maptiler.org/img/opacity-slider.png)";
this.knob.style.backgroundPosition="-70px 0px";
}
container.appendChild(this.knob);
this.slide=new GDraggableObject(this.knob, {container:container});
this.slide.setDraggableCursor('pointer');
this.slide.setDraggingCursor('pointer');
this.container = container;
// attach the control to the map
map.getContainer().appendChild(container);
// init slider
this.setSlider(this.opacity);
// Listen for the slider being moved and set the opacity
GEvent.addListener(this.slide, "dragend", function() {that.setOpacity()});
//GEvent.addListener(this.container, "click", function( x, y ) { alert(x, y) });
return container;
}
// Set the default position for the control
CTransparencyControl.prototype.getDefaultPosition = function() {
return new GControlPosition(G_ANCHOR_TOP_RIGHT, new GSize(7, 47));
}
/*
* Full-screen Window Resize
*/
function getWindowHeight() {
if (self.innerHeight) return self.innerHeight;
if (document.documentElement && document.documentElement.clientHeight)
return document.documentElement.clientHeight;
if (document.body) return document.body.clientHeight;
return 0;
}
function getWindowWidth() {
if (self.innerWidth) return self.innerWidth;
if (document.documentElement && document.documentElement.clientWidth)
return document.documentElement.clientWidth;
if (document.body) return document.body.clientWidth;
return 0;
}
function resize() {
var map = document.getElementById("map");
var header = document.getElementById("header");
var subheader = document.getElementById("subheader");
map.style.height = (getWindowHeight()-80) + "px";
map.style.width = (getWindowWidth()-20) + "px";
header.style.width = (getWindowWidth()-20) + "px";
subheader.style.width = (getWindowWidth()-20) + "px";
// map.checkResize();
}
/*
* Main load function:
*/
function load() {
if (GBrowserIsCompatible()) {
// Bug in the Google Maps: Copyright for Overlay is not correctly displayed
var gcr = GMapType.prototype.getCopyrights;
GMapType.prototype.getCopyrights = function(bounds,zoom) {
return ["%(copyright)s"].concat(gcr.call(this,bounds,zoom));
}
map = new GMap2( document.getElementById("map"), { backgroundColor: '#fff' } );
map.addMapType(G_PHYSICAL_MAP);
map.setMapType(G_PHYSICAL_MAP);
map.setCenter( mapBounds.getCenter(), map.getBoundsZoomLevel( mapBounds ));
hybridOverlay = new GTileLayerOverlay( G_HYBRID_MAP.getTileLayers()[1] );
GEvent.addListener(map, "maptypechanged", function() {
if (map.getCurrentMapType() == G_HYBRID_MAP) {
map.addOverlay(hybridOverlay);
} else {
map.removeOverlay(hybridOverlay);
}
} );
var tilelayer = new GTileLayer(GCopyrightCollection(''), mapMinZoom, mapMaxZoom);
var mercator = new GMercatorProjection(mapMaxZoom+1);
tilelayer.getTileUrl = function(tile,zoom) {
if ((zoom < mapMinZoom) || (zoom > mapMaxZoom)) {
return "http://www.maptiler.org/img/none.png";
}
var ymax = 1 << zoom;
var y = ymax - tile.y -1;
var tileBounds = new GLatLngBounds(
mercator.fromPixelToLatLng( new GPoint( (tile.x)*256, (tile.y+1)*256 ) , zoom ),
mercator.fromPixelToLatLng( new GPoint( (tile.x+1)*256, (tile.y)*256 ) , zoom )
);
if (mapBounds.intersects(tileBounds)) {
return zoom+"/"+tile.x+"/"+y+".png";
} else {
return "http://www.maptiler.org/img/none.png";
}
}
// IE 7-: support for PNG alpha channel
// Unfortunately, the opacity for whole overlay is then not changeable, either or...
tilelayer.isPng = function() { return true;};
tilelayer.getOpacity = function() { return opacity; }
overlay = new GTileLayerOverlay( tilelayer );
map.addOverlay(overlay);
map.addControl(new GLargeMapControl());
map.addControl(new GHierarchicalMapTypeControl());
map.addControl(new CTransparencyControl( overlay ));
""" % args # noqa
if self.kml:
s += """
map.addMapType(G_SATELLITE_3D_MAP);
map.getEarthInstance(getEarthInstanceCB);
"""
s += """
map.enableContinuousZoom();
map.enableScrollWheelZoom();
map.setMapType(G_HYBRID_MAP);
}
resize();
}
"""
if self.kml:
s += """
function getEarthInstanceCB(object) {
var ge = object;
if (ge) {
var url = document.location.toString();
url = url.substr(0,url.lastIndexOf('/'))+'/doc.kml';
var link = ge.createLink("");
if ("%(publishurl)s") { link.setHref("%(publishurl)s/doc.kml") }
else { link.setHref(url) };
var networkLink = ge.createNetworkLink("");
networkLink.setName("TMS Map Overlay");
networkLink.setFlyToView(true);
networkLink.setLink(link);
ge.getFeatures().appendChild(networkLink);
} else {
// alert("You should open a KML in Google Earth");
// add div with the link to generated KML... - maybe JavaScript redirect to the URL of KML?
}
}
""" % args # noqa
s += """
onresize=function(){ resize(); };
//]]>
</script>
</head>
<body onload="load()">
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>
<!-- PLEASE, LET THIS NOTE ABOUT AUTHOR AND PROJECT SOMEWHERE ON YOUR WEBSITE, OR AT LEAST IN THE COMMENT IN HTML. THANK YOU -->
</div>
<div id="map"></div>
</body>
</html>
""" % args # noqa
return s
def generate_leaflet(self):
"""
Template for leaflet.html implementing overlay of tiles for 'mercator' profile.
It returns filled string. Expected variables:
title, north, south, east, west, minzoom, maxzoom, tile_size, tileformat, publishurl
"""
args = {}
args['title'] = self.options.title.replace('"', '\\"')
args['htmltitle'] = self.options.title
args['south'], args['west'], args['north'], args['east'] = self.swne
args['centerlon'] = (args['north'] + args['south']) / 2.
args['centerlat'] = (args['west'] + args['east']) / 2.
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['beginzoom'] = self.tmaxz
args['tile_size'] = self.tile_size # not used
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url # not used
args['copyright'] = self.options.copyright.replace('"', '\\"')
s = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name='viewport' content='width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no' />
<title>%(htmltitle)s</title>
<!-- Leaflet -->
<link rel="stylesheet" href="https://unpkg.com/[email protected]/dist/leaflet.css" />
<script src="https://unpkg.com/[email protected]/dist/leaflet.js"></script>
<style>
body { margin:0; padding:0; }
body, table, tr, td, th, div, h1, h2, input { font-family: "Calibri", "Trebuchet MS", "Ubuntu", Serif; font-size: 11pt; }
#map { position:absolute; top:0; bottom:0; width:100%%; } /* full size */
.ctl {
padding: 2px 10px 2px 10px;
background: white;
background: rgba(255,255,255,0.9);
box-shadow: 0 0 15px rgba(0,0,0,0.2);
border-radius: 5px;
text-align: right;
}
.title {
font-size: 18pt;
font-weight: bold;
}
.src {
font-size: 10pt;
}
</style>
</head>
<body>
<div id="map"></div>
<script>
/* **** Leaflet **** */
// Base layers
// .. OpenStreetMap
var osm = L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. CartoDB Positron
var cartodb = L.tileLayer('http://{s}.basemaps.cartocdn.com/light_all/{z}/{x}/{y}.png', {attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors, © <a href="http://cartodb.com/attributions">CartoDB</a>', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. OSM Toner
var toner = L.tileLayer('http://{s}.tile.stamen.com/toner/{z}/{x}/{y}.png', {attribution: 'Map tiles by <a href="http://stamen.com">Stamen Design</a>, under <a href="http://creativecommons.org/licenses/by/3.0">CC BY 3.0</a>. Data by <a href="http://openstreetmap.org">OpenStreetMap</a>, under <a href="http://www.openstreetmap.org/copyright">ODbL</a>.', minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// .. White background
var white = L.tileLayer("data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAQAAAAEAAQMAAABmvDolAAAAA1BMVEX///+nxBvIAAAAH0lEQVQYGe3BAQ0AAADCIPunfg43YAAAAAAAAAAA5wIhAAAB9aK9BAAAAABJRU5ErkJggg==", {minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Overlay layers (TMS)
var lyr = L.tileLayer('./{z}/{x}/{y}.%(tileformat)s', {tms: true, opacity: 0.7, attribution: "%(copyright)s", minZoom: %(minzoom)s, maxZoom: %(maxzoom)s});
// Map
var map = L.map('map', {
center: [%(centerlon)s, %(centerlat)s],
zoom: %(beginzoom)s,
minZoom: %(minzoom)s,
maxZoom: %(maxzoom)s,
layers: [osm]
});
var basemaps = {"OpenStreetMap": osm, "CartoDB Positron": cartodb, "Stamen Toner": toner, "Without background": white}
var overlaymaps = {"Layer": lyr}
// Title
var title = L.control();
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl title');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = "%(title)s";
};
title.addTo(map);
// Note
var src = 'Generated by <a href="http://www.klokan.cz/projects/gdal2tiles/">GDAL2Tiles</a>, Copyright © 2008 <a href="http://www.klokan.cz/">Klokan Petr Pridal</a>, <a href="http://www.gdal.org/">GDAL</a> & <a href="http://www.osgeo.org/">OSGeo</a> <a href="http://code.google.com/soc/">GSoC</a>';
var title = L.control({position: 'bottomleft'});
title.onAdd = function(map) {
this._div = L.DomUtil.create('div', 'ctl src');
this.update();
return this._div;
};
title.update = function(props) {
this._div.innerHTML = src;
};
title.addTo(map);
// Add base layers
L.control.layers(basemaps, overlaymaps, {collapsed: false}).addTo(map);
// Fit to overlay bounds (SW and NE points with (lat, lon))
map.fitBounds([[%(south)s, %(east)s], [%(north)s, %(west)s]]);
</script>
</body>
</html>
""" % args # noqa
return s
def generate_openlayers(self):
"""
Template for openlayers.html, with the tiles as overlays, and base layers.
It returns filled string.
"""
args = {}
args['title'] = self.options.title
args['bingkey'] = self.options.bingkey
args['minzoom'] = self.tminz
args['maxzoom'] = self.tmaxz
args['tile_size'] = self.tile_size
args['tileformat'] = self.tileext
args['publishurl'] = self.options.url
args['copyright'] = self.options.copyright
if self.options.xyz:
args['sign_y'] = ''
else:
args['sign_y'] = '-'
args['ominx'] = self.ominx
args['ominy'] = self.ominy
args['omaxx'] = self.omaxx
args['omaxy'] = self.omaxy
args['center_x'] = (self.ominx + self.omaxx) / 2
args['center_y'] = (self.ominy + self.omaxy) / 2
s = r"""<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta http-equiv='imagetoolbar' content='no'/>
<style type="text/css"> v\:* {behavior:url(#default#VML);}
html, body { overflow: hidden; padding: 0; height: 100%%; width: 100%%; font-family: 'Lucida Grande',Geneva,Arial,Verdana,sans-serif; }
body { margin: 10px; background: #fff; }
h1 { margin: 0; padding: 6px; border:0; font-size: 20pt; }
#header { height: 43px; padding: 0; background-color: #eee; border: 1px solid #888; }
#subheader { height: 12px; text-align: right; font-size: 10px; color: #555;}
#map { height: 90%%; border: 1px solid #888; }
</style>
<link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/openlayers/openlayers.github.io@master/en/v6.3.1/css/ol.css" type="text/css">
<script src="https://cdn.jsdelivr.net/gh/openlayers/openlayers.github.io@master/en/v6.3.1/build/ol.js"></script>
<script src="https://unpkg.com/[email protected]"></script>
<link rel="stylesheet" href="https://unpkg.com/[email protected]/src/ol-layerswitcher.css" />
</head>
<body>
<div id="header"><h1>%(title)s</h1></div>
<div id="subheader">Generated by <a href="https://gdal.org/programs/gdal2tiles.html">GDAL2Tiles</a> </div>
<div id="map" class="map"></div>
<div id="mouse-position"></div>
<script type="text/javascript">
var mousePositionControl = new ol.control.MousePosition({
className: 'custom-mouse-position',
target: document.getElementById('mouse-position'),
undefinedHTML: ' '
});
var map = new ol.Map({
controls: ol.control.defaults().extend([mousePositionControl]),
target: 'map',
""" % args
if self.options.profile == 'mercator' or self.options.profile == 'geodetic':
s += """
layers: [
new ol.layer.Group({
title: 'Base maps',
layers: [
new ol.layer.Tile({
title: 'OpenStreetMap',
type: 'base',
visible: true,
source: new ol.source.OSM()
}),
new ol.layer.Tile({
title: 'Bing Roads',
type: 'base',
visible: false,
source: new ol.source.BingMaps({
key: "%(bingkey)s",
imagerySet: 'Road'
})
}),
new ol.layer.Tile({
title: 'Bing Aerial',
type: 'base',
visible: false,
source: new ol.source.BingMaps({
key: "%(bingkey)s",
imagerySet: 'Aerial'
})
}),
new ol.layer.Tile({
title: 'Bing Hybrid',
type: 'base',
visible: false,
source: new ol.source.BingMaps({
key: "%(bingkey)s",
imagerySet: 'AerialWithLabels'
})
}),
]
}),""" % args # noqa
if self.options.profile == 'mercator':
s += """
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
extent: [%(ominx)f, %(ominy)f,%(omaxx)f, %(omaxy)f],
source: new ol.source.XYZ({
attributions: '%(copyright)s',
minZoom: %(minzoom)d,
maxZoom: %(maxzoom)d,
url: './{z}/{x}/{%(sign_y)sy}.%(tileformat)s',
tileSize: [%(tile_size)d, %(tile_size)d]
})
}),
]
}),""" % args # noqa
elif self.options.profile == 'geodetic':
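            # With --tmscompatible the geodetic profile uses two tiles at zoom 0 (0.703125 deg/pixel
            # for 256px tiles); otherwise a single tile covers the whole 360 degrees.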
if self.options.tmscompatible:
base_res = 180. / self.tile_size
else:
base_res = 360. / self.tile_size
resolutions = [ base_res / 2**i for i in range(self.tmaxz+1) ]
args['resolutions'] = '[' + ','.join('%.18g' % res for res in resolutions) + ']'
if self.options.xyz:
args['origin'] = '[-180,90]'
args['y_formula'] = 'tileCoord[2]'
else:
args['origin'] = '[-180,-90]'
args['y_formula'] = '- 1 - tileCoord[2]'
s += """
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
extent: [%(ominx)f, %(ominy)f,%(omaxx)f, %(omaxy)f],
source: new ol.source.TileImage({
attributions: '%(copyright)s',
projection: 'EPSG:4326',
minZoom: %(minzoom)d,
maxZoom: %(maxzoom)d,
tileGrid: new ol.tilegrid.TileGrid({
extent: [-180,-90,180,90],
origin: %(origin)s,
resolutions: %(resolutions)s,
tileSize: [%(tile_size)d, %(tile_size)d]
}),
tileUrlFunction: function(tileCoord) {
return ('./{z}/{x}/{y}.%(tileformat)s'
.replace('{z}', String(tileCoord[0]))
.replace('{x}', String(tileCoord[1]))
.replace('{y}', String(%(y_formula)s)));
},
})
}),
]
}),""" % args # noqa
elif self.options.profile == 'raster':
base_res = 2**(self.nativezoom) * self.out_gt[1]
resolutions = [ base_res / 2**i for i in range(self.tmaxz+1) ]
args['maxres'] = resolutions[self.tminz]
args['resolutions'] = '[' + ','.join('%.18g' % res for res in resolutions) + ']'
args['tilegrid_extent'] = '[%.18g,%.18g,%.18g,%.18g]' % (self.ominx, self.ominy, self.omaxx, self.omaxy)
if self.options.xyz:
args['origin'] = '[%.18g,%.18g]' % (self.ominx, self.omaxy)
args['y_formula'] = 'tileCoord[2]'
else:
args['origin'] = '[%.18g,%.18g]' % (self.ominx, self.ominy)
args['y_formula'] = '- 1 - tileCoord[2]'
s += """
layers: [
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
source: new ol.source.TileImage({
attributions: '%(copyright)s',
tileGrid: new ol.tilegrid.TileGrid({
extent: %(tilegrid_extent)s,
origin: %(origin)s,
resolutions: %(resolutions)s,
tileSize: [%(tile_size)d, %(tile_size)d]
}),
tileUrlFunction: function(tileCoord) {
return ('./{z}/{x}/{y}.%(tileformat)s'
.replace('{z}', String(tileCoord[0]))
.replace('{x}', String(tileCoord[1]))
.replace('{y}', String(%(y_formula)s)));
},
})
}),
]
}),""" % args # noqa
else:
tms = tmsMap[self.options.profile]
base_res = tms.resolution
resolutions = [ base_res / 2**i for i in range(self.tmaxz+1) ]
args['maxres'] = resolutions[self.tminz]
args['resolutions'] = '[' + ','.join('%.18g' % res for res in resolutions) + ']'
args['matrixsizes'] = '[' + ','.join('[%d,%d]' % (tms.matrix_width << i, tms.matrix_height << i) for i in range(len(resolutions))) + ']'
if self.options.xyz:
args['origin'] = '[%.18g,%.18g]' % (tms.topleft_x, tms.topleft_y)
args['y_formula'] = 'tileCoord[2]'
else:
args['origin'] = '[%.18g,%.18g]' % (tms.topleft_x, tms.topleft_y - tms.resolution * tms.tile_size)
args['y_formula'] = '- 1 - tileCoord[2]'
args['tilegrid_extent'] = '[%.18g,%.18g,%.18g,%.18g]' % ( \
tms.topleft_x,
tms.topleft_y - tms.matrix_height * tms.resolution * tms.tile_size,
tms.topleft_x + tms.matrix_width * tms.resolution * tms.tile_size,
tms.topleft_y)
s += """
layers: [
new ol.layer.Group({
title: 'Overlay',
layers: [
new ol.layer.Tile({
title: 'Overlay',
// opacity: 0.7,
extent: [%(ominx)f, %(ominy)f,%(omaxx)f, %(omaxy)f],
source: new ol.source.TileImage({
attributions: '%(copyright)s',
minZoom: %(minzoom)d,
maxZoom: %(maxzoom)d,
tileGrid: new ol.tilegrid.TileGrid({
extent: %(tilegrid_extent)s,
origin: %(origin)s,
resolutions: %(resolutions)s,
sizes: %(matrixsizes)s,
tileSize: [%(tile_size)d, %(tile_size)d]
}),
tileUrlFunction: function(tileCoord) {
return ('./{z}/{x}/{y}.%(tileformat)s'
.replace('{z}', String(tileCoord[0]))
.replace('{x}', String(tileCoord[1]))
.replace('{y}', String(%(y_formula)s)));
},
})
}),
]
}),""" % args # noqa
s += """
],
view: new ol.View({
center: [%(center_x)f, %(center_y)f],""" % args # noqa
if self.options.profile in ('mercator', 'geodetic'):
args['view_zoom'] = args['minzoom']
if self.options.profile == 'geodetic' and self.options.tmscompatible:
args['view_zoom'] += 1
s += """
zoom: %(view_zoom)d,""" % args # noqa
else:
s += """
resolution: %(maxres)f,""" % args # noqa
if self.options.profile == 'geodetic':
s += """
projection: 'EPSG:4326',"""
elif self.options.profile != 'mercator':
if self.in_srs and self.in_srs.IsProjected() and self.in_srs.GetAuthorityName(None) == 'EPSG':
s += """
projection: new ol.proj.Projection({code: 'EPSG:%s', units:'m'}),""" % self.in_srs.GetAuthorityCode(None)
s += """
})
});"""
if self.options.profile in ('mercator', 'geodetic'):
s += """
map.addControl(new ol.control.LayerSwitcher());"""
s += """
</script>
</body>
</html>"""
return s
def generate_mapml(self):
if self.options.mapml_template:
template = self.options.mapml_template
else:
template = gdal.FindFile('gdal', 'template_tiles.mapml')
s = open(template, 'rb').read().decode('utf-8')
if self.options.profile == 'mercator':
tiling_scheme = 'OSMTILE'
elif self.options.profile == 'geodetic':
tiling_scheme = 'WGS84'
else:
tiling_scheme = self.options.profile
s = s.replace('${TILING_SCHEME}', tiling_scheme)
s = s.replace('${URL}', self.options.url if self.options.url else "./")
tminx, tminy, tmaxx, tmaxy = self.tminmax[self.tmaxz]
s = s.replace('${MINTILEX}', str(tminx))
s = s.replace('${MINTILEY}', str(GDAL2Tiles.getYTile(tmaxy, self.tmaxz, self.options)))
s = s.replace('${MAXTILEX}', str(tmaxx))
s = s.replace('${MAXTILEY}', str(GDAL2Tiles.getYTile(tminy, self.tmaxz, self.options)))
s = s.replace('${CURZOOM}', str(self.tmaxz))
s = s.replace('${MINZOOM}', str(self.tminz))
s = s.replace('${MAXZOOM}', str(self.tmaxz))
s = s.replace('${TILEEXT}', str(self.tileext))
return s
@staticmethod
def getYTile(ty, tz, options):
"""
Calculates the y-tile number based on whether XYZ or TMS (default) system is used
:param ty: The y-tile number
        :param tz: The z-tile number
        :param options: Tile generation options (profile and xyz flag)
        :return: The transformed y-tile number
"""
if options.xyz and options.profile != 'raster':
if options.profile in ('mercator', 'geodetic'):
return (2**tz - 1) - ty # Convert from TMS to XYZ numbering system
tms = tmsMap[options.profile]
return (tms.matrix_height * 2**tz - 1) - ty # Convert from TMS to XYZ numbering system
return ty
def worker_tile_details(input_file, output_folder, options):
gdal2tiles = GDAL2Tiles(input_file, output_folder, options)
gdal2tiles.open_input()
gdal2tiles.generate_metadata()
tile_job_info, tile_details = gdal2tiles.generate_base_tiles()
return tile_job_info, tile_details
class ProgressBar(object):
def __init__(self, total_items):
self.total_items = total_items
self.nb_items_done = 0
self.current_progress = 0
self.STEP = 2.5
def start(self):
sys.stdout.write("0")
def log_progress(self, nb_items=1):
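        # Emit one marker per STEP (2.5%) of completed items: the percentage at every 10%, a dot otherwise.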
self.nb_items_done += nb_items
progress = float(self.nb_items_done) / self.total_items * 100
if progress >= self.current_progress + self.STEP:
done = False
while not done:
if self.current_progress + self.STEP <= progress:
self.current_progress += self.STEP
if self.current_progress % 10 == 0:
sys.stdout.write(str(int(self.current_progress)))
if self.current_progress == 100:
sys.stdout.write("\n")
else:
sys.stdout.write(".")
else:
done = True
sys.stdout.flush()
def get_tile_swne(tile_job_info, options):
if options.profile == 'mercator':
mercator = GlobalMercator()
tile_swne = mercator.TileLatLonBounds
elif options.profile == 'geodetic':
geodetic = GlobalGeodetic(options.tmscompatible)
tile_swne = geodetic.TileLatLonBounds
elif options.profile == 'raster':
srs4326 = osr.SpatialReference()
srs4326.ImportFromEPSG(4326)
srs4326.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
if tile_job_info.kml and tile_job_info.in_srs_wkt:
in_srs = osr.SpatialReference()
in_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
in_srs.ImportFromWkt(tile_job_info.in_srs_wkt)
ct = osr.CoordinateTransformation(in_srs, srs4326)
def rastertileswne(x, y, z):
pixelsizex = (2 ** (tile_job_info.tmaxz - z) * tile_job_info.out_geo_trans[1])
west = tile_job_info.out_geo_trans[0] + x * tile_job_info.tile_size * pixelsizex
east = west + tile_job_info.tile_size * pixelsizex
if options.xyz:
north = tile_job_info.out_geo_trans[3] - y * tile_job_info.tile_size * pixelsizex
south = north - tile_job_info.tile_size * pixelsizex
else:
south = tile_job_info.ominy + y * tile_job_info.tile_size * pixelsizex
north = south + tile_job_info.tile_size * pixelsizex
if not tile_job_info.is_epsg_4326:
# Transformation to EPSG:4326 (WGS84 datum)
west, south = ct.TransformPoint(west, south)[:2]
east, north = ct.TransformPoint(east, north)[:2]
return south, west, north, east
tile_swne = rastertileswne
else:
tile_swne = lambda x, y, z: (0, 0, 0, 0) # noqa
else:
tile_swne = None
return tile_swne
def single_threaded_tiling(input_file, output_folder, options):
"""
Keep a single threaded version that stays clear of multiprocessing, for platforms that would not
support it
"""
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
for tile_detail in tile_details:
create_base_tile(conf, tile_detail)
if not options.verbose and not options.quiet:
progress_bar.log_progress()
if getattr(threadLocal, 'cached_ds', None):
del threadLocal.cached_ds
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def multi_threaded_tiling(input_file, output_folder, options):
nb_processes = options.nb_processes or 1
# Make sure that all processes do not consume more than `gdal.GetCacheMax()`
gdal_cache_max = gdal.GetCacheMax()
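    # Give every worker process an equal share of the GDAL block cache, but never less than 1 MiB.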
gdal_cache_max_per_process = max(1024 * 1024, math.floor(gdal_cache_max / nb_processes))
set_cache_max(gdal_cache_max_per_process)
pool = Pool(processes=nb_processes)
if options.verbose:
print("Begin tiles details calc")
conf, tile_details = worker_tile_details(input_file, output_folder, options)
if options.verbose:
print("Tiles details calc complete.")
if not options.verbose and not options.quiet:
progress_bar = ProgressBar(len(tile_details))
progress_bar.start()
# TODO: gbataille - check the confs for which each element is an array... one useless level?
# TODO: gbataille - assign an ID to each job for print in verbose mode "ReadRaster Extent ..."
for _ in pool.imap_unordered(partial(create_base_tile, conf), tile_details, chunksize=128):
if not options.verbose and not options.quiet:
progress_bar.log_progress()
pool.close()
pool.join() # Jobs finished
# Set the maximum cache back to the original value
set_cache_max(gdal_cache_max)
create_overview_tiles(conf, output_folder, options)
shutil.rmtree(os.path.dirname(conf.src_file))
def main(argv):
# TODO: gbataille - use mkdtemp to work in a temp directory
# TODO: gbataille - debug intermediate tiles.vrt not produced anymore?
# TODO: gbataille - Refactor generate overview tiles to not depend on self variables
# For multiprocessing, we need to propagate the configuration options to
# the environment, so that forked processes can inherit them.
for i in range(len(argv)):
if argv[i] == '--config' and i + 2 < len(argv):
os.environ[argv[i+1]] = argv[i+2]
argv = gdal.GeneralCmdLineProcessor(argv)
if argv is None:
return
input_file, output_folder, options = process_args(argv[1:])
nb_processes = options.nb_processes or 1
if nb_processes == 1:
single_threaded_tiling(input_file, output_folder, options)
else:
multi_threaded_tiling(input_file, output_folder, options)
# vim: set tabstop=4 shiftwidth=4 expandtab:
if __name__ == '__main__':
sys.exit(main(sys.argv))
| gpl-3.0 | 3,414,157,871,314,064,000 | 40.622147 | 407 | 0.542697 | false |
mespinozas/si | t3/t3loader.py | 1 | 1118 | import threading
import logging
import time
import os
logging.basicConfig( level=logging.DEBUG, format='[%(levelname)s] - %(threadName)-10s : %(message)s')
def worker(x):
    logging.debug('Started')
importer = 'bin/mallet import-svmlight --input archivoEntrenamiento%s.txt --output training%s.mallet' % (x,x)
#print importer
os.system(importer)
classifiers = ['NaiveBayes', 'DecisionTree','MaxEntL1','MaxEnt', 'BalancedWinnow', 'Winnow']
for j in range(len(classifiers)):
trainer= 'bin/mallet train-classifier --input training%s.mallet --output-classifier output%s_%s.classifier --trainer %s' % (x,x,classifiers[j],classifiers[j])
#print trainer
os.system(trainer)
classify = 'bin/mallet classify-file --input archivo%s.txt --output output%s_%s.txt --classifier output%s_%s.classifier' % (x,x,classifiers[j],x,classifiers[j])
#print classify
os.system(classify)
    logging.debug('Stopping')
return
threads = list()
for i in range(1,11):
t = threading.Thread(target=worker, args=(i,))
threads.append(t)
t.start()
| gpl-2.0 | -8,920,960,298,645,710,000 | 30.942857 | 168 | 0.669946 | false |
paddycarey/stretch | tests/stretch/test_application.py | 1 | 2711 | # third-party imports
import pytest
# local imports
from stretch import application
from stretch.triggers import results
from tests.utils import fixture_loader
@pytest.fixture
def configured_app():
app_json = fixture_loader.json_fixture("web-service-configured")
return application.Application(app_json)
@pytest.fixture
def configured_app_missing_tasks():
app_json = fixture_loader.json_fixture("web-service-missing-tasks")
return application.Application(app_json)
@pytest.fixture
def badly_configured_app():
app_json = fixture_loader.json_fixture("web-service-badly-configured")
return application.Application(app_json)
@pytest.mark.parametrize("property_name,expected", [
("app_id", "/web-service"),
("instances", 2),
("min_instances", 2),
("max_instances", 8),
("scaling_factor", 1.5),
])
def test_application_parsing(configured_app, property_name, expected):
assert getattr(configured_app, property_name) == expected
def test_application_parsing_autoscaling_enabled(configured_app):
assert configured_app.autoscaling_enabled()
def test_application_parsing_new_instances(configured_app):
assert isinstance(configured_app.new_instances, application.InstanceCalculator)
def test_application_parsing_validate(configured_app):
assert configured_app.validate()
def test_application_parsing_validate_missing_tasks(configured_app_missing_tasks):
assert not configured_app_missing_tasks.validate()
def test_application_parsing_validate_badly_configured(badly_configured_app):
assert not badly_configured_app.validate()
@pytest.mark.parametrize("instances,min_instances,max_instances,scaling_factor,scaled_up,scaled_down", [
(0, 0, 8, 1.5, 1, 0),
(1, 2, 8, 1.5, 2, 2),
(2, 2, 8, 1.5, 3, 2),
(3, 2, 8, 1.5, 5, 2),
(5, 2, 8, 1.5, 8, 3),
(8, 2, 8, 1.5, 8, 5),
(10, 2, 8, 1.5, 8, 6),
(100, 2, 8, 1.5, 8, 8),
(4, 1, 10, 1.5, 6, 2),
])
def test_instance_calculator_scale_up(instances, min_instances, max_instances,
scaling_factor, scaled_up, scaled_down):
calc = application.InstanceCalculator(instances,
min_instances,
max_instances,
scaling_factor)
assert calc.calculate(results.CheckResults.SCALE_UP) == scaled_up
assert calc.calculate(results.CheckResults.SCALE_DOWN) == scaled_down
def test_instance_calculator_invalid_result_type():
calc = application.InstanceCalculator(3, 2, 10, 1.5)
assert calc.calculate(results.CheckResults.DONT_SCALE) == 3
assert calc.calculate(results.CheckResults.FAILED) == 3
| mit | -9,053,456,474,729,907,000 | 31.662651 | 104 | 0.672814 | false |
alvestrand/old-compare-codecs | lib/mpeg_settings.py | 1 | 3650 | #!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import optimizer
# Files and rates for the MPEG 2nd CFP for RF codecs, early 2013.
def OldMpegFiles():
the_set = optimizer.FileAndRateSet()
my_directory = 'video/mpeg_video'
# Class A
the_set.AddFilesAndRates(["Traffic_2560x1600_30_crop.yuv",
"PeopleOnStreet_2560x1600_30_crop.yuv"],
[2500, 3500, 5000, 8000, 14000],
my_directory)
# Class B1
the_set.AddFilesAndRates(["Kimono1_1920x1080_24.yuv",
"ParkScene_1920x1080_24.yuv"],
[1000, 1600, 2500, 4000, 6000],
my_directory)
# Class B2
the_set.AddFilesAndRates(["Cactus_1920x1080_50.yuv",
"BasketballDrive_1920x1080_50.yuv"],
[2000, 3000, 4500, 7000, 10000],
my_directory)
# Class C
the_set.AddFilesAndRates(["BasketballDrill_832x480_50.yuv",
"BQMall_832x480_60.yuv",
"PartyScene_832x480_50.yuv",
"RaceHorses_832x480_30.yuv"],
[384, 512, 768, 1200, 2000],
my_directory)
# Class D
the_set.AddFilesAndRates(["BasketballPass_416x240_50.yuv",
"BlowingBubbles_416x240_50.yuv",
"RaceHorses_416x240_30.yuv"],
[256, 384, 512, 850, 1500],
my_directory)
# Class E
the_set.AddFilesAndRates(["FourPeople_1280x720_60.yuv",
"Johnny_1280x720_60.yuv",
"KristenAndSara_1280x720_60.yuv"],
[256, 384, 512, 850, 1500],
my_directory)
return the_set
# Files and rates for the MPEG codec comparision test, December 2013.
def MpegFiles():
the_set = optimizer.FileAndRateSet()
my_directory = 'video/mpeg_video'
# Class A
the_set.AddFilesAndRates(["Kimono1_1920x1080_24.yuv",
"ParkScene_1920x1080_24.yuv"],
[1600, 2500, 4000, 6000],
my_directory)
# Class B
the_set.AddFilesAndRates(["Cactus_1920x1080_50.yuv",
"BasketballDrive_1920x1080_50.yuv"],
[3000, 4500, 7000, 10000],
my_directory)
# Class C
the_set.AddFilesAndRates(["BasketballDrill_832x480_50.yuv",
"BQMall_832x480_60.yuv",
"PartyScene_832x480_50.yuv",
"RaceHorses_832x480_30.yuv"],
[512, 768, 1200, 2000],
my_directory)
# Class D
the_set.AddFilesAndRates(["FourPeople_1280x720_60.yuv",
"Johnny_1280x720_60.yuv",
"KristenAndSara_1280x720_60.yuv"],
[384, 512, 850, 1500],
my_directory)
return the_set
| apache-2.0 | 737,835,373,393,000,600 | 40.954023 | 74 | 0.526301 | false |
infobloxopen/infoblox-netmri | infoblox_netmri/easy/netmri_easy.py | 1 | 10624 | ###########################################################################
# Export of Script Module: netmri_easy
# Language: Python
# Category: Internal
# Description: Object oriented library for Python scripting support
###########################################################################
import datetime
import sys
import time
from urllib.parse import urlparse
from infoblox_netmri.client import InfobloxNetMRI
class NetMRIEasy(object):
def __init__(self, debug=False, **kwargs):
self.debug = debug
self.api_version = kwargs.get('api_version') or "auto"
self.host = urlparse(kwargs.get('api_url')).hostname
self.username = kwargs.get('http_username')
self.password = kwargs.get('http_password')
self.job_id = kwargs.get('job_id')
self.device_id = kwargs.get('device_id')
self.batch_id = kwargs.get('batch_id')
self.script_login = kwargs.get('script_login')
if (not self.job_id) or (not self.device_id) or (not self.batch_id):
raise RuntimeError('job_id or device_id or batch_id not initialized')
self.client = InfobloxNetMRI(
self.host,
self.username,
self.password,
api_version=self.api_version
)
self.dis_session = self._open_dis_session()
if not self.script_login == 'false':
self.cli_connection = self._open_cli_connection()
self._setup_logging()
def __enter__(self):
return self
def __exit__(self, *args):
self.close_session()
def broker(self, name):
return self.client.get_broker(name)
def _setup_logging(self):
broker = self.broker('Job')
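        # Prefer the Job broker's log_custom_message() when the NetMRI API exposes it;
        # otherwise fall back to logging through the CLI connection broker.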
if hasattr(broker, 'log_custom_message'):
def remote_log_message(self, severity, message):
date = datetime.datetime.now()
brkr = self.broker('Job')
msg_formatted = "{} [{:<7}] {}".format(
date.strftime("%Y-%m-%d %H:%M:%S "),
str(severity).upper(),
message
)
brkr.log_custom_message(message=msg_formatted, id=self.batch_id,
jobdetailid=self.job_id, severity=severity)
setattr(self.__class__, "remote_log_message", remote_log_message)
else:
def remote_log_message(self, severity, message):
if not hasattr(self, 'cli_connection'):
return
brkr = self._get_cli_broker()
return brkr.log_message(
id=self.dis_session.SessionID,
device_id=self.device_id,
severity=severity,
message=message
)
setattr(self.__class__, "remote_log_message", remote_log_message)
def _open_cli_connection(self):
if getattr(self, 'cli_connection', None) and self.cli_connection:
return self.cli_connection
return self._get_cli_broker().open(
id=self.dis_session.SessionID,
DeviceID=self.device_id
)
def _close_dis_session(self):
return self.broker('DisSession').close(id=self.dis_session.SessionID)
def _open_dis_session(self):
return self.broker('DisSession').open(job_id=self.job_id)
def send_command(self, command, regex=None):
if not hasattr(self, 'cli_connection'):
return
regex = regex or ""
result = self._get_cli_broker().send_command(
id=self.dis_session.SessionID,
device_id=self.device_id,
command=command,
regex=regex
)
if result:
return result.get('command_response')
return result
def send_async_command(self, command, timeout, regex, wait_until_finished=True):
if not hasattr(self, 'cli_connection'):
return
regex = regex or ""
self._print("Sending asynchronous command ({})".format(command))
try:
async_command_result = self._get_cli_broker().send_async_command(
id=self.dis_session.SessionID,
device_id=self.device_id,
command=command,
timeout=timeout,
debug=self.debug,
regex=regex,
)
self._ok()
async_command_id = async_command_result.get('command_response')
if async_command_id:
async_command_id = async_command_id.replace("async_command_id:", "")
else:
self._error("Invalid send_async_command response ({})".format(str(async_command_id)))
if not async_command_id.isdigit():
self._error("Invalid send_async_command response ({})".format(str(async_command_id)))
self._print("Received async_command_id {}".format(str(async_command_id)))
self._ok()
if not wait_until_finished:
return async_command_id
delay = 30
max_delay = 14400 + 900 # DIS max session time plus a little padding (4.25 hours)
cur_delay = 0
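            # Poll the status of the asynchronous command until it reports OK or Error,
            # giving up once the maximum DIS session lifetime is exceeded.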
ok = "async_command_id_status:OK\n"
error = "async_command_id_status:Error\n"
while True:
self._print("Getting the status of async_command_id {}".format(async_command_id))
res = self._get_cli_broker().get_async_command_status(
id=self.dis_session.SessionID,
device_id=self.device_id,
async_command_id=async_command_id
)
if not res:
self._error("Invalid get_async_command_status response ({})".format(res))
result = res.get('command_response')
if ok in result:
return result.replace(ok, '')
if error in result:
self._error("Asynchronous command failed {}".format(
result.replace(error, '')
))
if cur_delay >= max_delay:
self._error("Timeout waiting for asynchronous command to complete")
time.sleep(delay)
cur_delay += delay
except AttributeError:
raise RuntimeError("Api version {} not support async commands. Minimal version: 2.10".format(
self.client.api_version
))
def get_config(self, sync=True):
if not hasattr(self, 'cli_connection'):
return
self._print("Requesting on demand configuration collection ({})".format(sync))
traking_id = None
try:
res = self.broker('ConfigRevision').get_configs(
DeviceID=self.device_id
)
traking_id = res.get('TrackingID')
except RuntimeError:
raise RuntimeError("Api version {} not support this command. Minimal version: 2.10".format(
self.client.api_version
))
self._print("Received TrackingID {}".format(traking_id))
self._ok()
if not sync:
return traking_id
delay = 30
max_delay = 600
cur_delay = 0
while True:
self._print("Getting the status of TrackingID {}".format(traking_id))
status_resp = self.broker('ConfigRevision').get_configs_status(
TrackingID=traking_id
)
if status_resp.get('Status') == "OK":
return status_resp
if status_resp.get('Status') == "Error":
self._error("On demand configuration collection failed")
if cur_delay >= max_delay:
self._error("Timeout waiting for configuration collection to complete")
time.sleep(delay)
cur_delay += delay
self._print("Sending 'Keep Alive CR/LF'")
self.send_command("NOP:")
self._ok()
def set_variable(self, name, value):
if not hasattr(self, 'cli_connection'):
return
command = '${name} ="{value}"'.format(name=name, value=value)
result = self._get_cli_broker().set_variable(
id=self.dis_session.SessionID,
device_id=self.device_id,
command=command
)
if result:
return result.get('command_response')
return result
def get_template(self, template_name, stage):
if not hasattr(self, 'cli_connection'):
return
result = self._get_cli_broker().get_template(
id=self.dis_session.SessionID,
device_id=self.device_id,
template=template_name,
stage=(stage or 0)
)
if result:
return result.get('command_response')
return result
def get_list_value(self, list_name, key_column, key_value, value_column, default):
if not hasattr(self, 'cli_connection'):
return
result = self._get_cli_broker().get_list_value(
id=self.dis_session.SessionID,
device_id=self.device_id,
list_name=list_name,
key_column=key_column,
key_value=key_value,
value_column=value_column,
default_value=default
)
if result:
return result.get('command_response')
return result
def generate_issue(self, issue_type_id, severity, **kwargs):
result = self.broker('IssueAdhoc').generate_issue(
DeviceID=self.device_id,
BatchID=self.batch_id,
Severity=severity,
IssueTypeID=issue_type_id,
**kwargs
)
if result:
return result.get('IssueID')
return result
def close_session(self):
brkr = self.broker('DisSession')
return brkr.close(
SessionID=self.dis_session.SessionID
)
def get_device(self):
return self.broker('Device').show(
DeviceID=self.device_id
)
def log_message(self, severity, message):
self.remote_log_message(severity, message)
def _get_cli_broker(self):
return self.client.get_broker('CliConnection')
def _print(self, msg):
if self.debug:
print(msg)
def _print_status(self, status):
if self.debug:
print(status)
def _ok(self):
self._print("OK")
def _error(self, message):
print("\n*** ERROR: {} ***\n".format(message))
sys.exit(-1)
| apache-2.0 | 1,485,694,990,432,806,000 | 33.493506 | 105 | 0.538592 | false |
apollux/whose_turn_is_it_anyway | whose_turn_is_it_anyway/app.py | 1 | 2193 | # -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from whose_turn_is_it_anyway.settings import ProdConfig
from whose_turn_is_it_anyway.assets import assets
from whose_turn_is_it_anyway.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
admin,
)
from whose_turn_is_it_anyway import public, user, activity
from flask_admin.contrib.sqla import ModelView
from whose_turn_is_it_anyway.user.models import User
from whose_turn_is_it_anyway.activity.models import Activity
from whose_turn_is_it_anyway.activity.filters import date_time_format, date_time_to_local
def create_app(config_object=ProdConfig):
"""An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
register_filters(app)
return app
def register_extensions(app):
assets.init_app(app)
bcrypt.init_app(app)
#cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
#admin.init_app(app)
#admin.add_view(ModelView(User, db.session))
#admin.add_view(ModelView(Activity, db.session))
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
app.register_blueprint(user.views.blueprint)
app.register_blueprint(activity.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_filters(app):
app.jinja_env.filters['date_time_format'] = date_time_format
app.jinja_env.filters['date_time_to_local'] = date_time_to_local
| bsd-3-clause | -2,446,497,246,849,966,000 | 29.887324 | 89 | 0.708618 | false |
doriancoins/doriancoin | test/functional/test_runner.py | 1 | 23307 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:DoriancoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import signal
import sys
import subprocess
import tempfile
import re
import logging
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
BASE_SCRIPTS= [
# Scripts that are run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'wallet_hd.py',
'wallet_backup.py',
# vv Tests less than 5m vv
'feature_block.py',
'rpc_fundrawtransaction.py',
'p2p_compactblocks.py',
'feature_segwit.py',
# vv Tests less than 2m vv
'wallet_basic.py',
'wallet_accounts.py',
'p2p_segwit.py',
'wallet_dump.py',
'rpc_listtransactions.py',
# vv Tests less than 60s vv
'p2p_sendheaders.py',
'wallet_zapwallettxes.py',
'wallet_importmulti.py',
'mempool_limit.py',
'rpc_txoutproof.py',
'wallet_listreceivedby.py',
'wallet_abandonconflict.py',
'feature_csv_activation.py',
'rpc_rawtransaction.py',
'wallet_address_types.py',
'feature_reindex.py',
# vv Tests less than 30s vv
'wallet_keypool_topup.py',
'interface_zmq.py',
'interface_doriancoin_cli.py',
'mempool_resurrect.py',
'wallet_txn_doublespend.py --mineblock',
'wallet_txn_clone.py',
'wallet_txn_clone.py --segwit',
'rpc_getchaintips.py',
'interface_rest.py',
'mempool_spend_coinbase.py',
'mempool_reorg.py',
'mempool_persist.py',
'wallet_multiwallet.py',
'wallet_multiwallet.py --usecli',
'interface_http.py',
'rpc_users.py',
'feature_proxy.py',
'rpc_signrawtransaction.py',
'p2p_disconnect_ban.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
'wallet_disable.py',
'rpc_net.py',
'wallet_keypool.py',
'p2p_mempool.py',
'mining_prioritisetransaction.py',
'p2p_invalid_block.py',
'p2p_invalid_tx.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py',
'rpc_signmessage.py',
'feature_nulldummy.py',
'wallet_import_rescan.py',
'mining_basic.py',
'wallet_bumpfee.py',
'rpc_named_arguments.py',
'wallet_listsinceblock.py',
'p2p_leak.py',
'wallet_encryption.py',
'wallet_scriptaddress2.py',
'feature_dersig.py',
'feature_cltv.py',
'rpc_uptime.py',
'wallet_resendwallettransactions.py',
'feature_minchainwork.py',
'p2p_fingerprint.py',
'feature_uacomment.py',
'p2p_unrequested_blocks.py',
'feature_logging.py',
'p2p_node_network_limited.py',
'feature_config_args.py',
# Don't append tests at the end to avoid merge conflicts
# Put them in a random line within the section that fits their approximate run-time
]
EXTENDED_SCRIPTS = [
# These tests are not run by the travis build process.
# Longest test should go first, to favor running tests in parallel
'feature_pruning.py',
# vv Tests less than 20m vv
'feature_fee_estimation.py',
# vv Tests less than 5m vv
'feature_maxuploadtarget.py',
'mempool_packages.py',
'feature_dbcrash.py',
# vv Tests less than 2m vv
'feature_bip68_sequence.py',
'mining_getblocktemplate_longpoll.py',
'p2p_timeouts.py',
# vv Tests less than 60s vv
'feature_bip9_softforks.py',
'p2p_feefilter.py',
'rpc_bind.py',
# vv Tests less than 30s vv
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py',
'wallet_txn_clone.py --mineblock',
'feature_notifications.py',
'rpc_invalidateblock.py',
'feature_rbf.py',
]
# Place EXTENDED_SCRIPTS first since it has the 3 longest running tests
ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
def main():
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.')
parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini"
config.read_file(open(configfile))
passon_args.append("--configfile=%s" % configfile)
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
# Create base test directory
tmpdir = "%s/doriancoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S"))
os.makedirs(tmpdir)
logging.debug("Temporary test directory at %s" % tmpdir)
enable_wallet = config["components"].getboolean("ENABLE_WALLET")
enable_utils = config["components"].getboolean("ENABLE_UTILS")
enable_doriancoind = config["components"].getboolean("ENABLE_DORIANCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/doriancoin/doriancoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/doriancoin/doriancoin/pull/5677#issuecomment-136646964
print("Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not (enable_wallet and enable_utils and enable_doriancoind):
print("No functional tests to run. Wallet, utils, and doriancoind must all be enabled")
print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make")
sys.exit(0)
# Build list of tests
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the ALL_SCRIPTS list. Accept the name with or without .py extension.
tests = [re.sub("\.py$", "", t) + ".py" for t in tests]
test_list = []
for t in tests:
if t in ALL_SCRIPTS:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t))
else:
# No individual tests have been specified.
# Run all base tests, and optionally run extended tests.
test_list = BASE_SCRIPTS
if args.extended:
# place the EXTENDED_SCRIPTS first since the three longest ones
# are there and the list is shorter
test_list = EXTENDED_SCRIPTS + test_list
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test))
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script (with args removed) and exit.
parser.print_help()
subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h'])
sys.exit(0)
check_script_list(config["environment"]["SRCDIR"])
check_script_prefixes()
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0):
# Warn if doriancoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "doriancoind"]) is not None:
print("%sWARNING!%s There is already a doriancoind process running on this system. Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = "%s/test/cache" % build_dir
if os.path.isdir(cache_dir):
print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir))
#Set env vars
if "DORIANCOIND" not in os.environ:
os.environ["DORIANCOIND"] = build_dir + '/src/doriancoind' + exeext
os.environ["DORIANCOINCLI"] = build_dir + '/src/doriancoin-cli' + exeext
tests_dir = src_dir + '/test/functional/'
flags = ["--srcdir={}/src".format(build_dir)] + args
flags.append("--cachedir=%s" % cache_dir)
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug("Initializing coverage directory at %s" % coverage.dir)
else:
coverage = None
if len(test_list) > 1 and jobs > 1:
# Populate cache
try:
subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
#Run Tests
job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags)
time0 = time.time()
test_results = []
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
else:
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def print_results(test_results, max_len_name, runtime):
results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=lambda result: result.name.lower())
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
status = TICK + "Passed" if all_passed else CROSS + "Failed"
results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
results += "Runtime: %s s\n" % (runtime)
print(results)
class TestHandler:
"""
Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_list = test_list
self.flags = flags
self.num_running = 0
# In case there is a graveyard of zombie doriancoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
self.portseed_offset = int(time.time() * 1000) % 625
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
portseed = len(self.test_list) + self.portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
testdir,
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output.
proc.send_signal(signal.SIGINT)
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
if proc.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif proc.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
self.num_running -= 1
self.jobs.remove(j)
return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
class TestResult():
def __init__(self, name, status, time):
self.name = name
self.status = status
self.time = time
self.padding = 0
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def check_script_prefixes():
"""Check that at most a handful of the
test scripts don't start with one of the allowed name prefixes."""
# LEEWAY is provided as a transition measure, so that pull-requests
# that introduce new tests that don't conform with the naming
# convention don't immediately cause the tests to fail.
LEEWAY = 10
good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_")
bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None]
if len(bad_script_names) > 0:
print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names)))
print(" %s" % ("\n ".join(sorted(bad_script_names))))
assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY)
def check_script_list(src_dir):
"""Check scripts directory.
Check that there are no scripts in the functional tests directory which are
not being run by pull-tester.py."""
script_dir = src_dir + '/test/functional/'
python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"])
missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS)))
if len(missed_tests) != 0:
print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests)))
if os.getenv('TRAVIS') == 'true':
# On travis this warning is an error to prevent merging incomplete commits into master
sys.exit(1)
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `doriancoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
# This is shared from `test/functional/test-framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
main()
| mit | 5,020,215,571,676,961,000 | 39.523478 | 195 | 0.615639 | false |
AndrewGoldstein/grasshopper | grasshopper/public/views.py | 1 | 2732 | # -*- coding: utf-8 -*-
"""Public section, including homepage and signup."""
from flask import Blueprint, flash, redirect, render_template, request, url_for, g
from flask_login import login_required, login_user, logout_user
from grasshopper.extensions import login_manager
from grasshopper.public.forms import LoginForm
from grasshopper.user.forms import RegisterForm
from grasshopper.user.models import User
from grasshopper.utils import flash_errors
blueprint = Blueprint('public', __name__, static_folder='../static')
@login_manager.user_loader
def load_user(user_id):
"""Load user by ID."""
return User.get_by_id(int(user_id))
@blueprint.route('/', methods=['GET', 'POST'])
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
with open("foo.py", "w") as f:
f.write("X=" + str(form.user.id))
#flash('You are logged in.', 'success')
#redirect_url = request.args.get('next') or url_for('user.jumbo')
return redirect(url_for('user.jumbo'))
else:
flash_errors(form)
return render_template('users/testing2.html', form=form)
@blueprint.route('/logout/')
@login_required
def logout():
"""Logout."""
logout_user()
#flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route('/register/', methods=['GET', 'POST'])
def register():
"""Register new user."""
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
User.create(username=form.username.data, email=form.email.data, password=form.password.data, active=True)
print form.username.data
print form.email.data
print form.password.data
flash('Thank you for registering. You can now log in.', 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route('/about/')
def about():
"""About page."""
form = LoginForm(request.form)
return render_template('public/about.html', form=form)
"""
@blueprint.route('/db')
def dbtest():
try:
#User.create(username="aaaa", email="[email protected]", password="aaaa", active=True)
print "hey"
User.create(username='John1', email='[email protected]', password="aaaa1", active=True)
#db.session.add(user)
#db.session.commit()
print "success"
except Exception as e:
f = open('/tmp/error.log', 'w')
f.write(e.message)
f.close()
return 'done'
return 'done2'
""" | bsd-3-clause | -4,215,876,279,390,760,000 | 30.77907 | 113 | 0.635798 | false |
nikhila05/MicroSite | micro_blog/migrations/0001_initial.py | 1 | 4172 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BlogComments',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=255)),
('message', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(default=b'off', max_length=3, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20)),
('slug', models.CharField(unique=True, max_length=20)),
('description', models.CharField(max_length=500)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Image_File',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('upload', models.FileField(upload_to=b'static/uploads/%Y/%m/%d/')),
('date_created', models.DateTimeField(default=datetime.datetime.now)),
('is_image', models.BooleanField(default=True)),
('thumbnail', models.FileField(null=True, upload_to=b'static/uploads/%Y/%m/%d/', blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
('slug', models.SlugField(max_length=100)),
('created_on', models.DateTimeField(auto_now_add=True)),
('updated_on', models.DateField(auto_now=True)),
('content', models.TextField()),
('featured_image', models.CharField(max_length=400, null=True, blank=True)),
('featured_post', models.CharField(default=b'off', max_length=4, blank=True)),
('status', models.CharField(blank=True, max_length=2, choices=[(b'D', b'Draft'), (b'P', b'Published'), (b'T', b'Rejected')])),
('category', models.ForeignKey(to='micro_blog.Category')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Tags',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=20)),
('slug', models.CharField(unique=True, max_length=20)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='micro_blog.Tags', null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='post',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='blogcomments',
name='post',
field=models.ForeignKey(blank=True, to='micro_blog.Post', null=True),
preserve_default=True,
),
]
| gpl-2.0 | -10,623,089,563,571,604 | 39.901961 | 142 | 0.529003 | false |
alphagov/notifications-api | migrations/versions/0083_add_perm_types_and_svc_perm.py | 1 | 2344 | """empty message
Revision ID: 0083_add_perm_types_and_svc_perm
Revises: 0082_add_go_live_template
Create Date: 2017-05-12 11:29:32.664811
"""
# revision identifiers, used by Alembic.
revision = '0083_add_perm_types_and_svc_perm'
down_revision = '0082_add_go_live_template'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
service_permission_types=op.create_table('service_permission_types',
sa.Column('name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('name'))
op.bulk_insert(service_permission_types,
[
{'name': x} for x in {
'letter',
'email',
'sms',
'international_sms',
'incoming_sms'
}
])
op.create_table('service_permissions',
sa.Column('service_id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('permission', sa.String(length=255), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['permission'], ['service_permission_types.name'], ),
sa.ForeignKeyConstraint(['service_id'], ['services.id'], ),
sa.PrimaryKeyConstraint('service_id', 'permission'))
op.create_index(op.f('ix_service_permissions_permission'), 'service_permissions', ['permission'], unique=False)
op.create_index(op.f('ix_service_permissions_service_id'), 'service_permissions', ['service_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_service_permissions_service_id'), table_name='service_permissions')
op.drop_index(op.f('ix_service_permissions_permission'), table_name='service_permissions')
op.drop_table('service_permissions')
op.drop_table('service_permission_types')
# ### end Alembic commands ###
| mit | -4,852,084,235,956,907,000 | 43.226415 | 115 | 0.579352 | false |
jonasfj/dxr | tests/test_functions.py | 1 | 2593 | """Tests for searches about functions"""
from dxr.testing import SingleFileTestCase, MINIMAL_MAIN
class ReferenceTests(SingleFileTestCase):
"""Tests for finding out where functions are referenced or declared"""
source = r"""
#include <stdio.h>
const char* getHello() {
return "Hello World";
}
int main(int argc, char* argv[]) {
printf("%s\n", getHello());
return 0;
}
"""
def test_functions(self):
"""Try searching for function declarations."""
self.found_line_eq(
'function:main', 'int <b>main</b>(int argc, char* argv[]) {')
self.found_line_eq(
'function:getHello', 'const char* <b>getHello</b>() {')
def test_callers(self):
"""Test that we can find calling functions of another function."""
self.found_line_eq(
'callers:getHello', 'int <b>main</b>(int argc, char* argv[]) {')
def test_called_by(self):
"""Test that we can find the functions a function calls."""
self.found_line_eq(
'called-by:main', 'const char* <b>getHello</b>() {')
class ConstTests(SingleFileTestCase):
source = """
class ConstOverload
{
public:
void foo();
void foo() const;
};
void ConstOverload::foo() {
}
void ConstOverload::foo() const {
}
""" + MINIMAL_MAIN
def test_const_functions(self):
"""Make sure const functions are indexed separately from non-const but
otherwise identical signatures."""
self.found_line_eq('+function:ConstOverload::foo()',
'void ConstOverload::<b>foo</b>() {')
self.found_line_eq('+function:"ConstOverload::foo() const"',
'void ConstOverload::<b>foo</b>() const {')
class PrototypeParamTests(SingleFileTestCase):
source = """
int prototype_parameter_function(int prototype_parameter);
int prototype_parameter_function(int prototype_parameter) {
return prototype_parameter;
}
""" + MINIMAL_MAIN
def test_prototype_params(self):
# I have no idea what this tests.
self.found_line_eq(
'+var:prototype_parameter_function(int)::prototype_parameter',
'int prototype_parameter_function(int <b>prototype_parameter</b>) {')
self.found_line_eq(
'+var-ref:prototype_parameter_function(int)::prototype_parameter',
'return <b>prototype_parameter</b>;')
| mit | 3,067,298,335,636,265,000 | 31.012346 | 81 | 0.570382 | false |
lidaobing/itcc | itcc/ccs2/solvent_caflisch.py | 1 | 3718 | import pkg_resources
import math
from itcc import tools
ff_cache = {}
caflisch_dat = None
r_probe = 1.4
pij_bonded = 0.8875
pij_nonbonded = 0.3516
class Caflisch(object):
def __init__(self, data):
self.data = data
assert len(self.data) == 5
def _r_min(self):
return self.data[1]
def _r(self):
return self.data[2]
def _p(self):
return self.data[3]
def _sigma(self):
return self.data[4]
r_min = property(_r_min)
r = property(_r)
p = property(_p)
sigma = property(_sigma)
def init_caflisch():
global caflisch_dat
if caflisch_dat is not None: return
caflisch_dat = read_caflisch(
pkg_resources.resource_stream(__name__, 'caflisch.dat'))
def init_ff(forcefield):
if forcefield in ff_cache:
return
init_caflisch()
ff_cache[forcefield] = {}
res = ff_cache[forcefield]
ifname = forcefield + "-caflisch.dat"
ifile = pkg_resources.resource_stream(__name__, ifname)
for line in tools.conffile(ifile):
ff_type, cal_type = (int(x) for x in line.split())
if ff_type in res:
raise RuntimeError("duplicate type")
if cal_type != 0:
res[ff_type] = caflisch_dat[cal_type]
else:
res[ff_type] = None
def solvent_caflisch(mol, forcefield, debug=0):
if mol.connect is None:
raise RuntimeError("can't deal with mol without connective information")
init_ff(forcefield)
ff = ff_cache[forcefield]
data = []
for i in range(len(mol)):
if mol.atoms[i].type not in ff:
raise RuntimeError(
"no corresponding caflisch type for type %i of %s"
% (mol.atoms[i].type, forcefield))
if ff[mol.atoms[i].type] is not None:
data.append((ff[mol.atoms[i].type], mol.coords[i], i))
areas = []
for i in range(len(data)):
ri = data[i][0].r
area = 1
S = 4 * math.pi * (ri + r_probe) * (ri + r_probe)
for j in range(len(data)):
if j == i: continue
rj = data[j][0].r
rijsq = tools.dissq(data[i][1], data[j][1])
max_r = data[i][0].r + data[j][0].r + r_probe * 2
if rijsq >= max_r * max_r:
continue
rij = math.sqrt(rijsq)
bij = math.pi * (ri + r_probe) * (max_r - rij) \
* (1 + (rj - ri) / rij)
bonded = mol.is_connect(data[i][2], data[j][2])
if bonded:
pij = pij_bonded
else:
pij = pij_nonbonded
area *= 1 - data[i][0].p * pij * bij / S
areas.append(area * S)
if debug >= 1:
for i in range(len(data)):
print data[i][2]+1, areas[i]
return sum(areas[i] * data[i][0].sigma for i in range(len(data)))
def read_caflisch(ifile):
formats = (int, str, float, float, float, float)
result = {}
for line in ifile:
line = line.strip()
if not line: continue
if line[0] == '#': continue
words = line.split()
assert len(words) == 6
words = [format(x) for format,x in zip(formats, words)]
assert words[0] not in result, "duplicates type in input file"
result[words[0]] = Caflisch(tuple(words[1:]))
return result
def main():
import sys
if len(sys.argv) != 3:
import os.path
sys.stderr.write('Usage: %s molname forcefield\n'
% os.path.basename(sys.argv[0]))
sys.exit(1)
from itcc.molecule import read
mol = read.readxyz(file(sys.argv[1]))
print solvent_caflisch(mol, sys.argv[2], 1)
if __name__ == '__main__':
main()
| gpl-3.0 | -5,666,795,548,658,434,000 | 26.746269 | 80 | 0.537924 | false |
anaoaktree/vcgen | vcgen/test.py | 1 | 1786 | # Copyright (c) 2011, Jay Conrod.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Jay Conrod nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL JAY CONROD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
if __name__ == '__main__':
test_names = ['test_lexer', 'test_combinators', 'test_imp_parser', 'test_eval']
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
result = unittest.TextTestRunner().run(suite)
| mit | -2,567,668,676,885,615,600 | 54.8125 | 83 | 0.758679 | false |
Yubico/yubiadmin-dpkg | yubiadmin/config.py | 1 | 2301 | # Copyright (c) 2013 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import imp
import errno
from yubiadmin import default_settings
__all__ = [
'settings'
]
SETTINGS_FILE = os.getenv('YUBIADMIN_SETTINGS',
'/etc/yubico/admin/yubiadmin.conf')
VALUES = {
#Web interface
'USERNAME': 'user',
'PASSWORD': 'pass',
'INTERFACE': 'iface',
'PORT': 'port'
}
def parse(conf, settings={}):
for confkey, settingskey in VALUES.items():
if hasattr(conf, confkey):
settings[settingskey] = getattr(conf, confkey)
return settings
settings = parse(default_settings)
dont_write_bytecode = sys.dont_write_bytecode
try:
sys.dont_write_bytecode = True
user_settings = imp.load_source('user_settings', SETTINGS_FILE)
settings = parse(user_settings, settings)
except IOError, e:
if not e.errno in [errno.ENOENT, errno.EACCES]:
raise e
finally:
sys.dont_write_bytecode = dont_write_bytecode
| bsd-2-clause | -259,948,468,104,190,900 | 32.838235 | 71 | 0.720991 | false |
mompiou/misorientation | misorientation.py | 1 | 48149 | #!/usr/bin/python
from __future__ import division
import numpy as np
from Tkinter import *
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
from PIL import Image
from PIL import PngImagePlugin
import ttk
import sys
from fractions import Fraction
from tkFileDialog import *
import os
import matplotlib as mpl
mpl.rcParams['font.size'] = 12
###################################################################
##### Stereographic projection
####################################################################
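# proj() maps a unit vector (x, y, z) of the upper hemisphere onto the
# equatorial plane of the stereographic projection: X = x/(1+z), Y = y/(1+z).
# Vectors pointing into the lower hemisphere (z < 0) are returned as the
# sentinel point (250, 250) so that callers can discard them. Callers scale
# the result by 600/2 to get canvas pixels, e.g.
#   proj(0, 0, 1) -> [0, 0]   (centre of the net)
#   proj(1, 0, 0) -> [1, 0]   (on the primitive circle)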
def proj(x,y,z):
if z==1:
X=0
Y=0
elif z<-0.000001:
X=250
Y=250
else:
X=x/(1+z)
Y=y/(1+z)
return np.array([X,Y],float)
###################################################################
##### Rotation matrix from Euler angles
####################################################################
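# rotation() builds the orientation matrix from the Euler angles phi1, Phi,
# phi2 (degrees, Bunge convention). As coded it is the transpose of the usual
# Bunge matrix g = Rz(phi2).Rx(Phi).Rz(phi1), i.e. it carries crystal-frame
# Cartesian vectors into the sample frame used for plotting:
#   v_sample = np.dot(rotation(phi1, Phi, phi2), v_crystal)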
def rotation(phi1,phi,phi2):
phi1=phi1*np.pi/180;
phi=phi*np.pi/180;
phi2=phi2*np.pi/180;
R=np.array([[np.cos(phi1)*np.cos(phi2)-np.cos(phi)*np.sin(phi1)*np.sin(phi2),
-np.cos(phi)*np.cos(phi2)*np.sin(phi1)-np.cos(phi1)*
np.sin(phi2),np.sin(phi)*np.sin(phi1)],[np.cos(phi2)*np.sin(phi1)
+np.cos(phi)*np.cos(phi1)*np.sin(phi2),np.cos(phi)*np.cos(phi1)
*np.cos(phi2)-np.sin(phi1)*np.sin(phi2), -np.cos(phi1)*np.sin(phi)],
[np.sin(phi)*np.sin(phi2), np.cos(phi2)*np.sin(phi), np.cos(phi)]],float)
return R
####################################################################
##### Rotation about an arbitrary axis
####################################################################
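# Rot() is Rodrigues' rotation formula: a rotation of th degrees about the
# axis (a, b, c), normalised internally, built as
#   R = cos(th)*I + (1 - cos(th))*n.n^T + sin(th)*[n]x
# where n is the unit axis and [n]x its skew-symmetric cross-product matrix.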
def Rot(th,a,b,c):
th=th*np.pi/180;
aa=a/np.linalg.norm([a,b,c]);
bb=b/np.linalg.norm([a,b,c]);
cc=c/np.linalg.norm([a,b,c]);
c1=np.array([[1,0,0],[0,1,0],[0,0,1]],float)
c2=np.array([[aa**2,aa*bb,aa*cc],[bb*aa,bb**2,bb*cc],[cc*aa,
cc*bb,cc**2]],float)
c3=np.array([[0,-cc,bb],[cc,0,-aa],[-bb,aa,0]],float)
R=np.cos(th)*c1+(1-np.cos(th))*c2+np.sin(th)*c3
return R
####################################################################
##### Crystal lattice set-up
####################################################################
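# crist() builds, from the cell parameters typed in the GUI, the direct
# structure matrix D (lattice [uvw] -> Cartesian), its reciprocal counterpart
# Dstar (used for (hkl) plane normals), the metric tensor G and the cell
# volume V. It then keeps every index triple up to +/- e whose interplanar
# spacing d = 1/sqrt(h.G^-1.h) exceeds the current threshold; dm()/dp() below
# just lower/raise that threshold (label d_label_var) and redraw.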
def crist():
global axesA,axeshA,axesB,axeshB,D,Dstar,V
a=eval(a_entry.get())
b=eval(b_entry.get())
c=eval(c_entry.get())
alp=eval(alp_entry.get())
bet=eval(bet_entry.get())
gam=eval(gam_entry.get())
e=eval(e_entry.get())
d2=eval(d_label_var.get())
alp=alp*np.pi/180;
bet=bet*np.pi/180;
gam=gam*np.pi/180;
    V=a*b*c*np.sqrt(1-(np.cos(alp)**2)-(np.cos(bet))**2-(np.cos(gam))**2+2*np.cos(alp)*np.cos(bet)*np.cos(gam))
D=np.array([[a,b*np.cos(gam),c*np.cos(bet)],[0,b*np.sin(gam), c*(np.cos(alp)-np.cos(bet)*np.cos(gam))/np.sin(gam)],[0,0,V/(a*b*np.sin(gam))]])
Dstar=np.transpose(np.linalg.inv(D))
G=np.array([[a**2,a*b*np.cos(gam),a*c*np.cos(bet)],[a*b*np.cos(gam),b**2,b*c*np.cos(alp)],[a*c*np.cos(bet),b*c*np.cos(alp),c**2]])
axes=np.zeros(((2*e+1)**3-1,3))
axesh=np.zeros(((2*e+1)**3-1,3))
id=0
for i in range(-e,e+1):
for j in range(-e,e+1):
for k in range(-e,e+1):
if (i,j,k)!=(0,0,0):
d=1/(np.sqrt(np.dot(np.array([i,j,k]),np.dot(np.linalg.inv(G),np.array([i,j,k])))))
if d>d2*0.1*np.amax([a,b,c]):
if var_uvw.get()==0:
axesh[id,:]=np.dot(Dstar,np.array([i,j,k],float))
axes[id,:]=np.array([i,j,k],float)
else:
axesh[id,:]=np.dot(D,np.array([i,j,k],float))
axes[id,:]=np.array([i,j,k],float)
id=id+1
axesA=axes
axesB=axes
axeshA=axesh
axeshB=axesh
return axesA,axeshA,axesB,axeshB,D,Dstar,V
def dm():
global dmip
a=f.add_subplot(111)
a.figure.clear()
a=f.add_subplot(111)
dmip=dmip-eval(d_entry.get())
d_label_var.set(dmip)
crist()
trace()
return dmip
def dp():
global dmip
a=f.add_subplot(111)
a.figure.clear()
a=f.add_subplot(111)
dmip=dmip+eval(d_entry.get())
d_label_var.set(dmip)
crist()
trace()
return dmip
####################################################################
##### Add a pole
####################################################################
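# poleA()/poleB() add one pole to crystal A or B: the (hkl) triple (or [uvw]
# if the direction checkbox var_uvw is set) is turned into a unit vector with
# Dstar (or D), rotated into the sample frame with MA (or MB), flipped to the
# upper hemisphere if needed, then projected, plotted and labelled. The *_sym
# variants add the permuted/negated variants of the three indices as well.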
def poleA(pole1,pole2,pole3):
global MA,axesA,axeshA,Ta,V,D,Dstar
fp=f.add_subplot(111)
Gs=np.array([pole1,pole2,pole3],float)
Pp=np.zeros((1,2),float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MA,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
Pp=proj(S[0],S[1],S[2])*600/2
l=str(int(pole1))+str(int(pole2))+str(int(pole3))
fp.plot(Pp[0]+600/2,Pp[1]+600/2,'ro')
fp.annotate(l,(Pp[0]+600/2,Pp[1]+600/2))
fp.axis([0,600,0,600])
fp.axis('off')
fp.figure.canvas.draw()
axesA=np.vstack((axesA,np.array([pole1,pole2,pole3])))
axesA=np.vstack((axesA,np.array([-pole1,-pole2,-pole3])))
Ta=np.vstack((Ta,np.array([S[0],S[1],S[2]])))
Ta=np.vstack((Ta,np.array([-S[0],-S[1],-S[2]])))
axeshA=np.vstack((axeshA,np.array([Gsh[0],Gsh[1],Gsh[2]])))
axeshA=np.vstack((axeshA,np.array([-Gsh[0],-Gsh[1],-Gsh[2]])))
return axesA,axeshA,Ta
def poleB(pole1,pole2,pole3):
global MB,axesB,axeshB,Tb,V,D,Dstar
fp=f.add_subplot(111)
Gs=np.array([pole1,pole2,pole3],float)
Pp=np.zeros((1,2),float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MB,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1=-pole1
pole2=-pole2
pole3=-pole3
Pp=proj(S[0],S[1],S[2])*600/2
l=str(int(pole1))+str(int(pole2))+str(int(pole3))
fp.plot(Pp[0]+600/2,Pp[1]+600/2,'ro')
fp.annotate(l,(Pp[0]+600/2,Pp[1]+600/2))
fp.axis([0,600,0,600])
fp.axis('off')
fp.figure.canvas.draw()
axesB=np.vstack((axesB,np.array([pole1,pole2,pole3])))
axesB=np.vstack((axesB,np.array([-pole1,-pole2,-pole3])))
Tb=np.vstack((Tb,np.array([S[0],S[1],S[2]])))
Tb=np.vstack((Tb,np.array([-S[0],-S[1],-S[2]])))
axeshB=np.vstack((axeshB,np.array([Gsh[0],Gsh[1],Gsh[2]])))
axeshB=np.vstack((axeshB,np.array([-Gsh[0],-Gsh[1],-Gsh[2]])))
return axesB,axeshB,Tb
def addpoleA_sym():
pole1A=eval(pole1A_entry.get())
pole2A=eval(pole2A_entry.get())
pole3A=eval(pole3A_entry.get())
poleA(pole1A,pole2A,pole3A)
poleA(pole1A,pole2A,-pole3A)
poleA(pole1A,-pole2A,pole3A)
poleA(-pole1A,pole2A,pole3A)
poleA(pole2A,pole1A,pole3A)
poleA(pole2A,pole1A,-pole3A)
poleA(pole2A,-pole1A,pole3A)
poleA(-pole2A,pole1A,pole3A)
poleA(pole2A,pole3A,pole1A)
poleA(pole2A,pole3A,-pole1A)
poleA(pole2A,-pole3A,pole1A)
poleA(-pole2A,pole3A,pole1A)
poleA(pole1A,pole3A,pole2A)
poleA(pole1A,pole3A,-pole2A)
poleA(pole1A,-pole3A,pole2A)
poleA(-pole1A,pole3A,pole2A)
poleA(pole3A,pole1A,pole2A)
poleA(pole3A,pole1A,-pole2A)
poleA(pole3A,-pole1A,pole2A)
poleA(-pole3A,pole1A,pole2A)
poleA(pole3A,pole2A,pole1A)
poleA(pole3A,pole2A,-pole1A)
poleA(pole3A,-pole2A,pole1A)
poleA(-pole3A,pole2A,pole1A)
trace()
def addpoleB_sym():
pole1B=eval(pole1B_entry.get())
pole2B=eval(pole2B_entry.get())
pole3B=eval(pole3B_entry.get())
poleB(pole1B,pole2B,pole3B)
poleB(pole1B,pole2B,-pole3B)
poleB(pole1B,-pole2B,pole3B)
poleB(-pole1B,pole2B,pole3B)
poleB(pole2B,pole1B,pole3B)
poleB(pole2B,pole1B,-pole3B)
poleB(pole2B,-pole1B,pole3B)
poleB(-pole2B,pole1B,pole3B)
poleB(pole2B,pole3B,pole1B)
poleB(pole2B,pole3B,-pole1B)
poleB(pole2B,-pole3B,pole1B)
poleB(-pole2B,pole3B,pole1B)
poleB(pole1B,pole3B,pole2B)
poleB(pole1B,pole3B,-pole2B)
poleB(pole1B,-pole3B,pole2B)
poleB(-pole1B,pole3B,pole2B)
poleB(pole3B,pole1B,pole2B)
poleB(pole3B,pole1B,-pole2B)
poleB(pole3B,-pole1B,pole2B)
poleB(-pole3B,pole1B,pole2B)
poleB(pole3B,pole2B,pole1B)
poleB(pole3B,pole2B,-pole1B)
poleB(pole3B,-pole2B,pole1B)
poleB(-pole3B,pole2B,pole1B)
trace()
def addpoleA():
pole1A=eval(pole1A_entry.get())
pole2A=eval(pole2A_entry.get())
pole3A=eval(pole3A_entry.get())
poleA(pole1A,pole2A,pole3A)
trace()
def addpoleB():
pole1B=eval(pole1B_entry.get())
pole2B=eval(pole2B_entry.get())
pole3B=eval(pole3B_entry.get())
poleB(pole1B,pole2B,pole3B)
trace()
####################################################################
##### Plot the trace of a plane
####################################################################
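# trace_planA()/trace_planB() draw the trace (great circle) of the plane whose
# normal is typed in the A/B pole entries: the normal is rotated into the
# sample frame, the circle perpendicular to it is sampled point by point,
# projected onto the net, and lower-hemisphere sentinel points are skipped.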
def trace_planA():
global MA,axes,axesh,Ta,V,D,Dstar
f2=f.add_subplot(111)
pole1A=eval(pole1A_entry.get())
pole2A=eval(pole2A_entry.get())
pole3A=eval(pole3A_entry.get())
Gs=np.array([pole1A,pole2A,pole3A],float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MA,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1A=-pole1A
pole2A=-pole2A
pole3A=-pole3A
r=np.sqrt(S[0]**2+S[1]**2+S[2]**2)
A=np.zeros((2,100))
Q=np.zeros((1,2))
if S[2]==0:
t=90
w=0
else:
t=np.arctan2(S[1],S[0])*180/np.pi
w=0
ph=np.arccos(S[2]/r)*180/np.pi
for g in np.linspace(-np.pi,np.pi-0.00001,100):
Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0])))
A[:,w]=proj(Aa[0],Aa[1],Aa[2])*600/2
if A[0,w]<>75000:
Q=np.vstack((Q,A[:,w]))
w=w+1
Q=np.delete(Q,0,0)
f2.plot(Q[:,0]+600/2,Q[:,1]+600/2,'r')
f2.axis([0,600,0,600])
f2.axis('off')
f2.figure.canvas.draw()
trace()
def trace_planB():
global MB,axes,axesh,Tb,V,D,Dstar
f2=f.add_subplot(111)
pole1B=eval(pole1B_entry.get())
pole2B=eval(pole2B_entry.get())
pole3B=eval(pole3B_entry.get())
Gs=np.array([pole1B,pole2B,pole3B],float)
if var_uvw.get()==0:
Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs))
else:
Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs))
S=np.dot(MB,Gsh)
if S[2]<0:
S=-S
Gsh=-Gsh
pole1B=-pole1B
pole2B=-pole2B
pole3B=-pole3B
r=np.sqrt(S[0]**2+S[1]**2+S[2]**2)
A=np.zeros((2,100))
Q=np.zeros((1,2))
if S[2]==0:
t=90
w=0
else:
t=np.arctan2(S[1],S[0])*180/np.pi
w=0
ph=np.arccos(S[2]/r)*180/np.pi
for g in np.linspace(-np.pi,np.pi-0.00001,100):
Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0])))
A[:,w]=proj(Aa[0],Aa[1],Aa[2])*600/2
if A[0,w]<>75000:
Q=np.vstack((Q,A[:,w]))
w=w+1
Q=np.delete(Q,0,0)
f2.plot(Q[:,0]+600/2,Q[:,1]+600/2,'r')
f2.axis([0,600,0,600])
f2.axis('off')
f2.figure.canvas.draw()
trace()
####################################################################
##### Click a pole
####################################################################
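# Right-clicking the canvas calls click_a_pole(): the click position is first
# converted back from the stereographic projection to a unit vector
# (X = 2x/(1+x^2+y^2), ...), rotated into crystal B's frame with inv(MB), and
# compared with every integer triple up to +/- 8; the first triple whose unit
# vector matches within 0.05 on each component is added as a pole of B.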
def click_a_pole(event):
global MB,Dstar,D
x=event.x
y=event.y
x=(x-411)*2/620
y=-(y-400)*2/620
X=2*x/(1+x**2+y**2)
Y=2*y/(1+x**2+y**2)
Z=(-1+x**2+y**2)/(1+x**2+y**2)
if Z<0:
X=-X
Y=-Y
A=np.dot(np.linalg.inv(MB),np.array([X,Y,Z]))
n=0
L=np.zeros((3,16**3))
for i in range(-8,9,1):
for j in range(-8,9,1):
for k in range(-8,9,1):
if np.linalg.norm([i,j,k])<>0:
if var_uvw.get()==0:
Q=np.dot(Dstar,np.array([i,j,k],float))/np.linalg.norm(np.dot(Dstar,np.array([i,j,k],float)))
if np.abs(Q[0]-A[0])<0.05 and np.abs(Q[1]-A[1])<0.05 and np.abs(Q[2]-A[2])<0.05:
L[:,n]=np.array([i,j,k],float)
n=n+1
else:
Q=np.dot(D,np.array([i,j,k],float))/np.linalg.norm(np.dot(D,np.array([i,j,k],float)))
if np.abs(Q[0]-A[0])<0.05 and np.abs(Q[1]-A[1])<0.05 and np.abs(Q[2]-A[2])<0.05:
L[:,n]=np.array([i,j,k],float)
n=n+1
if np.linalg.norm(L[:,0])<>0:
poleB(L[0,0],L[1,0],L[2,0])
trace()
####################################################################
##### Misorientation between crystals A and B
####################################################################
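# desorientation() computes the misorientation between the two orientations:
# for every symmetry operator of crystal A it composes the rotation carrying
# the symmetry-equivalent orientation of A onto the orientation of B, reads
# the misorientation angle off its trace (cos(theta) = (tr(R) - 1)/2) and
# takes the rotation axis from the null space of (R - I). Each axis is stored
# in the sample frame (D0, plotted in red) and as rationalised crystal-B
# indices (D1, obtained through inv(gB) and Fraction).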
def Rota(t,u,v,w,g):
Ae=np.dot(g,np.array([u,v,w]))
Re=Rot(t,Ae[0],Ae[1],Ae[2])
return Re
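# cryststruct() infers a crystal-system code from the cell parameters
# (1 cubic, 2 hexagonal, 3 tetragonal, 4 trigonal/rhombohedral,
# 5 orthorhombic, 6 monoclinic, 7 triclinic); Sy() then stacks the
# proper-rotation symmetry operators of that system, each built by Rota()
# as a rotation about a crystal axis expressed in the sample frame through g.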
def cryststruct():
global cs
a=eval(a_entry.get())
b=eval(b_entry.get())
c=eval(c_entry.get())
alp=eval(alp_entry.get())
bet=eval(bet_entry.get())
gam=eval(gam_entry.get())
if gam==90 and alp==90 and bet==90 and a==b and b==c:
cs=1
if gam==120 and alp==90 and bet==90:
cs=2
if gam==90 and alp==90 and bet==90 and a==b and b<>c:
cs=3
if alp<>90 and a==b and b==c:
cs=4
if gam==90 and alp==90 and bet==90 and a<>b and b<>c:
cs=5
if gam<>90 and alp==90 and bet==90 and a<>b and b<>c:
cs=6
if gam<>90 and alp<>90 and bet<>90 and a<>b and b<>c:
cs=7
return cs
def Sy(g):
global cs
if cs==1:
S1=Rota(90,1,0,0,g);
S2=Rota(180,1,0,0,g);
S3=Rota(270,1,0,0,g);
S4=Rota(90,0,1,0,g);
S5=Rota(180,0,1,0,g);
S6=Rota(270,0,1,0,g);
S7=Rota(90,0,0,1,g);
S8=Rota(180,0,0,1,g);
S9=Rota(270,0,0,1,g);
S10=Rota(180,1,1,0,g);
S11=Rota(180,1,0,1,g);
S12=Rota(180,0,1,1,g);
S13=Rota(180,-1,1,0,g);
S14=Rota(180,-1,0,1,g);
S15=Rota(180,0,-1,1,g);
S16=Rota(120,1,1,1,g);
S17=Rota(240,1,1,1,g);
S18=Rota(120,-1,1,1,g);
S19=Rota(240,-1,1,1,g);
S20=Rota(120,1,-1,1,g);
S21=Rota(240,1,-1,1,g);
S22=Rota(120,1,1,-1,g);
S23=Rota(240,1,1,-1,g);
S24=np.eye(3,3);
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12,S13,S14,S15,S16,S17,S18,S19,S20,S21,S22,S23,S24))
if cs==2:
S1=Rota(60,0,0,1,g);
S2=Rota(120,0,0,1,g);
S3=Rota(180,0,0,1,g);
S4=Rota(240,0,0,1,g);
S5=Rota(300,0,0,1,g);
S6=np.eye(3,3);
S7=Rota(180,0,0,1,g);
S8=Rota(180,0,1,0,g);
S9=Rota(180,1/2,np.sqrt(3)/2,0,g);
S10=Rota(180,-1/2,np.sqrt(3)/2,0,g);
S11=Rota(180,np.sqrt(3)/2,1/2,0,g);
S12=Rota(180,-np.sqrt(3)/2,1/2,0,g);
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12))
if cs==3:
S1=Rota(90,0,0,1,g);
S2=Rota(180,0,0,1,g);
S3=Rota(270,0,0,1,g);
S4=Rota(180,0,1,0,g);
S5=Rota(180,1,0,0,g);
S6=Rota(180,1,1,0,g);
S7=Rota(180,1,-1,0,g);
S8=np.eye(3,3)
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8))
if cs==4:
S1=Rota(60,0,0,1,g);
S2=Rota(120,0,0,1,g);
S3=Rota(180,0,0,1,g);
S4=Rota(240,0,0,1,g);
S5=Rota(300,0,0,1,g);
S6=np.eye(3,3);
S7=Rota(180,0,0,1,g);
S8=Rota(180,0,1,0,g);
S9=Rota(180,1/2,np.sqrt(3)/2,0,g);
S10=Rota(180,-1/2,np.sqrt(3)/2,0,g);
S11=Rota(180,np.sqrt(3)/2,1/2,0,g);
S12=Rota(180,-np.sqrt(3)/2,1/2,0,g);
S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12))
if cs==5:
S1=Rota(180,0,0,1,g);
S2=Rota(180,1,0,0,g);
S3=Rota(180,0,1,0,g);
S4=np.eye(3,3);
S=np.vstack((S1,S2,S3,S4))
if cs==6:
S1=Rota(180,0,1,0,g);
S2=np.eye(3,3);
S=np.vstack((S1,S2))
if cs==7:
S=np.eye(3,3);
return S
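# null() returns an orthonormal basis of the null space of A from its SVD
# (singular values below amax(s)*rcond are treated as zero); it is used to
# recover the invariant axis of a rotation matrix R by solving (R - I).v = 0.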
def null(A, rcond=None):
u, s, vh = np.linalg.svd(A, full_matrices=True)
M, N = u.shape[0], vh.shape[1]
if rcond is None:
rcond = np.finfo(s.dtype).eps * max(M, N)
tol = np.amax(s) * rcond
num = np.sum(s > tol, dtype=int)
Q = vh[num:,:].T.conj()
return Q
def desorientation():
global D0,S,D1,cs,V,Qp
a = f.add_subplot(111)
a.figure.clear()
a = f.add_subplot(111)
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
cryststruct()
phi1a=eval(phi1A_entry.get())
phia=eval(phiA_entry.get())
phi2a=eval(phi2A_entry.get())
phi1b=eval(phi1B_entry.get())
phib=eval(phiB_entry.get())
phi2b=eval(phi2B_entry.get())
gA=rotation(phi1a,phia,phi2a)
gB=rotation(phi1b,phib,phi2b)
k=0
S=Sy(gA)
D0=np.zeros((int(np.shape(S)[0]/3),5))
D1=np.zeros((int(np.shape(S)[0]/3),3))
Qp=np.zeros((int(np.shape(S)[0]/3),2))
for i in range(0,np.shape(S)[0],3):
In=np.dot(np.array([[S[i,0],S[i+1,0],S[i+2,0]],[S[i,1],S[i+1,1],S[i+2,1]],[S[i,2],S[i+1,2],S[i+2,2]]]),gA)
Ing=np.dot(In,np.array([0,0,1]))
In2=np.dot(Rot(-phi2b,Ing[0],Ing[1],Ing[2]),In)
Ing2=np.dot(In2,np.array([1,0,0]))
In3=np.dot(Rot(-phib,Ing2[0],Ing2[1],Ing2[2]),In2)
Ing3=np.dot(In3,np.array([0,0,1]))
A=np.dot(Rot(-phi1b,Ing3[0],Ing3[1],Ing3[2]),In3)-np.eye(3)
V=null(A,0.001).T
if 0.5*(np.trace(A+np.eye(3))-1)>1:
D0[k,3]=0
elif 0.5*(np.trace(A+np.eye(3))-1)<-1:
D0[k,3]=180
else:
D0[k,3]=np.arccos(0.5*(np.trace(A+np.eye(3))-1))*180/np.pi
if np.abs(D0[k,3])<1e-5:
D0[k,0]=0
D0[k,1]=0
D0[k,2]=0
else:
D0[k,0]=V[0,0]/np.linalg.norm(V)
D0[k,1]=V[0,1]/np.linalg.norm(V)
D0[k,2]=V[0,2]/np.linalg.norm(V)
Ds1=np.dot(np.linalg.inv(gB),np.array([D0[k,0],D0[k,1],D0[k,2]]))
F0=Fraction(Ds1[0]).limit_denominator(10)
F1=Fraction(Ds1[1]).limit_denominator(10)
F2=Fraction(Ds1[2]).limit_denominator(10)
D1[k,0]=F0.numerator*F1.denominator*F2.denominator
D1[k,1]=F1.numerator*F0.denominator*F2.denominator
D1[k,2]=F2.numerator*F0.denominator*F1.denominator
if D0[k,2]<0:
D0[k,0]=-D0[k,0]
D0[k,1]=-D0[k,1]
D0[k,2]=-D0[k,2]
D1[k,0]=-D1[k,0]
D1[k,1]=-D1[k,1]
D1[k,2]=-D1[k,2]
D0[k,4]=k
Qp[k,:]=proj(D0[k,0],D0[k,1],D0[k,2])*600/2
k=k+1
a.plot(Qp[:,0]+600/2,Qp[:,1]+600/2,'ro')
a.axis([0,600,0,600])
a.imshow(img,interpolation="bicubic")
a.axis('off')
a.figure.canvas.draw()
trace()
return Qp,S,D1
####################################################################
##### Main drawing functions
####################################################################
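# trace() redraws the current state on top of the stereographic net: the
# stored poles of crystal A (blue) and crystal B (green) and the
# misorientation axes (red), with, depending on the checkboxes, the pole
# indices, the misorientation angle, the axis indices or the operator number
# next to each red point.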
def trace():
global Ta,Tb,axesA,axeshA,MA,axesB,axeshB,MB,Qp,S,D1,show_ind,D0
a = f.add_subplot(111)
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
Pa=np.zeros((np.shape(axesA)[0],2))
Pb=np.zeros((np.shape(axesB)[0],2))
for i in range(0,np.shape(axesA)[0]):
axeshA[i,:]=axeshA[i,:]/np.linalg.norm(axeshA[i,:])
Ta[i,:]=np.dot(MA,axeshA[i,:])
Pa[i,:]=proj(Ta[i,0],Ta[i,1],Ta[i,2])*600/2
if show_ind.get()==1:
m=np.amax([np.abs(axesA[i,0]),np.abs(axesA[i,1]),np.abs(axesA[i,2])])
if (np.around(axesA[i,0]/m)==axesA[i,0]/m) & (np.around(axesA[i,1]/m)==axesA[i,1]/m) & (np.around(axesA[i,2]/m)==axesA[i,2]/m):
sA=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m))
else:
sA=str(int(axesA[i,0]))+str(int(axesA[i,1]))+str(int(axesA[i,2]))
a.annotate(sA,(Pa[i,0]+600/2,Pa[i,1]+600/2))
for i in range(0,np.shape(axesB)[0]):
axeshB[i,:]=axeshB[i,:]/np.linalg.norm(axeshB[i,:])
Tb[i,:]=np.dot(MB,axeshB[i,:])
Pb[i,:]=proj(Tb[i,0],Tb[i,1],Tb[i,2])*600/2
if show_ind.get()==1:
m=np.amax([np.abs(axesB[i,0]),np.abs(axesB[i,1]),np.abs(axesB[i,2])])
if (np.around(axesB[i,0]/m)==axesB[i,0]/m) & (np.around(axesB[i,1]/m)==axesB[i,1]/m) & (np.around(axesB[i,2]/m)==axesB[i,2]/m):
sB=str(int(axesB[i,0]/m))+str(int(axesB[i,1]/m))+str(int(axesB[i,2]/m))
else:
sB=str(int(axesB[i,0]))+str(int(axesB[i,1]))+str(int(axesB[i,2]))
a.annotate(sB,(Pb[i,0]+600/2,Pb[i,1]+600/2))
for l in range(0,int(np.shape(S)[0]/3)):
if show_angle.get()==1:
sangle=str(np.round(D0[l,3],decimals=1))
a.annotate(sangle,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=8)
if show_axe.get()==1:
saxe=str(int(D1[l,0]))+','+str(int(D1[l,1]))+','+str(int(D1[l,2]))
a.annotate(saxe,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=8)
if show_num.get()==1:
snum=str(int(D0[l,4]))
a.annotate(snum,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=10)
a.plot(Pa[:,0]+600/2,Pa[:,1]+600/2,'bo')
a.plot(Pb[:,0]+600/2,Pb[:,1]+600/2,'go')
a.plot(Qp[:,0]+600/2,Qp[:,1]+600/2,'ro')
a.axis([0,600,0,600])
a.imshow(img,interpolation="bicubic")
a.axis('off')
a.figure.canvas.draw()
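# princ() clears the figure and replots both crystals from the Euler angles
# typed in the GUI; it also stores the orientation matrices MA and MB reused
# by every later pole/plane addition and by the click handler.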
def princ():
global Ta,Tb,MA,MB
a = f.add_subplot(111)
a.figure.clear()
a = f.add_subplot(111)
phi1a=eval(phi1A_entry.get())
phia=eval(phiA_entry.get())
phi2a=eval(phi2A_entry.get())
phi1b=eval(phi1B_entry.get())
phib=eval(phiB_entry.get())
phi2b=eval(phi2B_entry.get())
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
crist()
Pa=np.zeros((np.shape(axesA)[0],2))
Ta=np.zeros((np.shape(axesA)))
Pb=np.zeros((np.shape(axesB)[0],2))
Tb=np.zeros((np.shape(axesB)))
for i in range(0,np.shape(axesA)[0]):
axeshA[i,:]=axeshA[i,:]/np.linalg.norm(axeshA[i,:])
Ta[i,:]=np.dot(rotation(phi1a,phia,phi2a),axeshA[i,:])
Pa[i,:]=proj(Ta[i,0],Ta[i,1],Ta[i,2])*600/2
m=np.amax([np.abs(axesA[i,0]),np.abs(axesA[i,1]),np.abs(axesA[i,2])])
if (np.around(axesA[i,0]/m)==axesA[i,0]/m) & (np.around(axesA[i,1]/m)==axesA[i,1]/m) & (np.around(axesA[i,2]/m)==axesA[i,2]/m):
sA=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m))
else:
sA=str(int(axesA[i,0]))+str(int(axesA[i,1]))+str(int(axesA[i,2]))
a.annotate(sA,(Pa[i,0]+600/2,Pa[i,1]+600/2))
for i in range(0,np.shape(axesB)[0]):
axeshB[i,:]=axeshB[i,:]/np.linalg.norm(axeshB[i,:])
Tb[i,:]=np.dot(rotation(phi1b,phib,phi2b),axeshB[i,:])
Pb[i,:]=proj(Tb[i,0],Tb[i,1],Tb[i,2])*600/2
m=np.amax([np.abs(axesB[i,0]),np.abs(axesB[i,1]),np.abs(axesB[i,2])])
if (np.around(axesB[i,0]/m)==axesB[i,0]/m) & (np.around(axesB[i,1]/m)==axesB[i,1]/m) & (np.around(axesB[i,2]/m)==axesB[i,2]/m):
            sB=str(int(axesB[i,0]/m))+str(int(axesB[i,1]/m))+str(int(axesB[i,2]/m))
else:
sB=str(int(axesB[i,0]))+str(int(axesB[i,1]))+str(int(axesB[i,2]))
a.annotate(sB,(Pb[i,0]+600/2,Pb[i,1]+600/2))
a.plot(Pa[:,0]+600/2,Pa[:,1]+600/2,'bo')
a.plot(Pb[:,0]+600/2,Pb[:,1]+600/2,'go')
a.axis([0,600,0,600])
a.imshow(img,interpolation="bicubic")
a.axis('off')
a.figure.canvas.draw()
MA=rotation(phi1a,phia,phi2a)
MB=rotation(phi1b,phib,phi2b)
return Ta,MA,MB,Tb
######################################################################
# GUI
######################################################################
def file_save():
global D1,D0,D
fout = asksaveasfile(mode='w', defaultextension=".txt")
for i in range(np.shape(D1)[0]):
text2save = str(int(D0[i,4]))+'\t'+'['+str(int(D1[i,0]))+','+str(int(D1[i,1]))+','+str(int(D1[i,2]))+']'+'\t '+str(np.around(D0[i,3],decimals=2))
fout.write("%s\n" % text2save)
fout.close()
def image_save():
s = asksaveasfile(mode='w', defaultextension=".jpg")
if s:
f.savefig(s.name)
#s.close()
####################################################
# initialization function
##################################################
def init():
global var_uvw,D1,S,Qp,show_ind,show_angle,show_axe,show_num,dmip,d_label_var
fn = os.path.join(os.path.dirname(__file__), 'stereo.png')
img=np.array(Image.open(fn))
a = f.add_subplot(111)
a.axis('off')
a.imshow(img,interpolation="bicubic")
a.figure.canvas.draw()
S=np.zeros((1,5))
Qp=np.zeros((1,2))
D1=np.zeros((1,5))
var_uvw=IntVar()
show_ind=IntVar()
show_angle=IntVar()
show_axe=IntVar()
show_num=IntVar()
d_label_var=StringVar()
d_label_var.set(0)
dmip=0
return var_uvw,show_ind,show_angle,show_axe,show_num
##############################################################
# quit function
#######################################################
def _quit():
root.quit() # stops mainloop
root.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
#############################################################
root = Tk()
root.wm_title("Misorientation")
root.geometry('1220x798+10+40')
root.configure(bg = '#BDBDBD')
#root.resizable(0,0)
#s=ttk.Style()
#s.theme_use('clam')
style = ttk.Style()
theme = style.theme_use()
default = style.lookup(theme, 'background')
################################################
# Create an area for plotting the graphics
################################################
f = Figure(facecolor='white',figsize=[2,2],dpi=100)
canvas = FigureCanvasTkAgg(f, master=root)
canvas.get_tk_widget().place(x=0,y=0,height=800,width=800)
canvas._tkcanvas.bind('<Button-3>', click_a_pole)
canvas.show()
toolbar = NavigationToolbar2TkAgg( canvas, root )
toolbar.zoom('off')
toolbar.update()
###################################################
init()
#import _imaging
#print _imaging.__file__
##############################################
# Buttons
##############################################
phi1A_entry = Entry (master=root)
phi1A_entry.place(relx=0.72,rely=0.5,relheight=0.03,relwidth=0.07)
phi1A_entry.configure(background="white")
phi1A_entry.configure(foreground="black")
phi1A_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phi1A_entry.configure(highlightcolor="#000000")
phi1A_entry.configure(insertbackground="#000000")
phi1A_entry.configure(selectbackground="#c4c4c4")
phi1A_entry.configure(selectforeground="black")
phiA_entry = Entry (master=root)
phiA_entry.place(relx=0.72,rely=0.55,relheight=0.03,relwidth=0.07)
phiA_entry.configure(background="white")
phiA_entry.configure(foreground="black")
phiA_entry.configure(highlightcolor="black")
phiA_entry.configure(insertbackground="black")
phiA_entry.configure(selectbackground="#c4c4c4")
phiA_entry.configure(selectforeground="black")
label_euler = Label (master=root)
label_euler.place(relx=0.77,rely=0.42,height=46,width=163)
label_euler.configure(activebackground="#cccccc")
label_euler.configure(activeforeground="black")
label_euler.configure(cursor="fleur")
label_euler.configure(foreground="black")
label_euler.configure(highlightcolor="black")
label_euler.configure(text='''Euler angles \n A blue , B green''')
phi2A_entry = Entry (master=root)
phi2A_entry.place(relx=0.72,rely=0.6,relheight=0.03,relwidth=0.07)
phi2A_entry.configure(background="white")
phi2A_entry.configure(foreground="black")
phi2A_entry.configure(highlightcolor="black")
phi2A_entry.configure(insertbackground="black")
phi2A_entry.configure(selectbackground="#c4c4c4")
phi2A_entry.configure(selectforeground="black")
button_trace = Button (master=root)
button_trace.place(relx=0.7,rely=0.66,height=21,width=49)
button_trace.configure(activebackground="#f9f9f9")
button_trace.configure(activeforeground="black")
button_trace.configure(background="#ff0000")
button_trace.configure(command=princ)
button_trace.configure(foreground="black")
button_trace.configure(highlightcolor="black")
button_trace.configure(pady="0")
button_trace.configure(text='''PLOT''')
Phi1A_label = Label (master=root)
Phi1A_label.place(relx=0.67,rely=0.5,height=19,width=50)
Phi1A_label.configure(activebackground="#cccccc")
Phi1A_label.configure(activeforeground="black")
Phi1A_label.configure(foreground="black")
Phi1A_label.configure(highlightcolor="black")
Phi1A_label.configure(text='''Phi1A''')
PhiA_label = Label (master=root)
PhiA_label.place(relx=0.67,rely=0.55,height=19,width=50)
PhiA_label.configure(activebackground="#cccccc")
PhiA_label.configure(activeforeground="black")
PhiA_label.configure(foreground="black")
PhiA_label.configure(highlightcolor="black")
PhiA_label.configure(text='''PhiA''')
Phi2A_label = Label (master=root)
Phi2A_label.place(relx=0.67,rely=0.6,height=19,width=50)
Phi2A_label.configure(activebackground="#cccccc")
Phi2A_label.configure(activeforeground="black")
Phi2A_label.configure(foreground="black")
Phi2A_label.configure(highlightcolor="black")
Phi2A_label.configure(text='''Phi2A''')
phi1B_entry = Entry (master=root)
phi1B_entry.place(relx=0.86,rely=0.5,relheight=0.03,relwidth=0.07)
phi1B_entry.configure(background="white")
phi1B_entry.configure(foreground="black")
phi1B_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phi1B_entry.configure(highlightcolor="#000000")
phi1B_entry.configure(insertbackground="#000000")
phi1B_entry.configure(selectbackground="#c4c4c4")
phi1B_entry.configure(selectforeground="black")
Phi1B = Label (master=root)
Phi1B.place(relx=0.81,rely=0.5,height=19,width=50)
Phi1B.configure(activebackground="#cccccc")
Phi1B.configure(activeforeground="black")
Phi1B.configure(foreground="black")
Phi1B.configure(highlightcolor="black")
Phi1B.configure(text='''Phi1B''')
PhiB_label1 = Label (master=root)
PhiB_label1.place(relx=0.81,rely=0.55,height=19,width=50)
PhiB_label1.configure(activebackground="#cccccc")
PhiB_label1.configure(activeforeground="black")
PhiB_label1.configure(foreground="black")
PhiB_label1.configure(highlightcolor="black")
PhiB_label1.configure(text='''PhiB''')
Phi2B_label2 = Label (master=root)
Phi2B_label2.place(relx=0.81,rely=0.6,height=19,width=50)
Phi2B_label2.configure(activebackground="#cccccc")
Phi2B_label2.configure(activeforeground="black")
Phi2B_label2.configure(foreground="black")
Phi2B_label2.configure(highlightcolor="black")
Phi2B_label2.configure(text='''Phi2B''')
phiB_entry = Entry (master=root)
phiB_entry.place(relx=0.86,rely=0.55,relheight=0.03,relwidth=0.07)
phiB_entry.configure(background="white")
phiB_entry.configure(foreground="black")
phiB_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phiB_entry.configure(highlightcolor="#000000")
phiB_entry.configure(insertbackground="#000000")
phiB_entry.configure(selectbackground="#c4c4c4")
phiB_entry.configure(selectforeground="black")
phi2B_entry = Entry (master=root)
phi2B_entry.place(relx=0.86,rely=0.6,relheight=0.03,relwidth=0.07)
phi2B_entry.configure(background="white")
phi2B_entry.configure(foreground="black")
phi2B_entry.configure(highlightbackground="#e0e0dfdfe3e3")
phi2B_entry.configure(highlightcolor="#000000")
phi2B_entry.configure(insertbackground="#000000")
phi2B_entry.configure(selectbackground="#c4c4c4")
phi2B_entry.configure(selectforeground="black")
button_desorientation = Button (master=root)
button_desorientation.place(relx=0.81,rely=0.66,height=21,width=124)
button_desorientation.configure(activebackground="#f9f9f9")
button_desorientation.configure(activeforeground="black")
button_desorientation.configure(background="#00ff00")
button_desorientation.configure(command=desorientation)
button_desorientation.configure(foreground="black")
button_desorientation.configure(highlightcolor="black")
button_desorientation.configure(pady="0")
button_desorientation.configure(text='''MISORIENTATION''')
Cristal_label = Label (master=root)
Cristal_label.place(relx=0.66,rely=0.03,height=19,width=142)
Cristal_label.configure(text='''Crystal Parameters''')
a_cristal_label = Label (master=root)
a_cristal_label.place(relx=0.68,rely=0.06,height=19,width=12)
a_cristal_label.configure(text='''a''')
b_cristal_label = Label (master=root)
b_cristal_label.place(relx=0.68,rely=0.1,height=19,width=12)
b_cristal_label.configure(activebackground="#f9f9f9")
b_cristal_label.configure(activeforeground="black")
b_cristal_label.configure(foreground="black")
b_cristal_label.configure(highlightcolor="black")
b_cristal_label.configure(text='''b''')
c_cristal_label = Label (master=root)
c_cristal_label.place(relx=0.68,rely=0.14,height=19,width=11)
c_cristal_label.configure(activebackground="#f9f9f9")
c_cristal_label.configure(activeforeground="black")
c_cristal_label.configure(foreground="black")
c_cristal_label.configure(highlightcolor="black")
c_cristal_label.configure(text='''c''')
alp_cristal_label = Label (master=root)
alp_cristal_label.place(relx=0.67,rely=0.18,height=19,width=42)
alp_cristal_label.configure(activebackground="#f9f9f9")
alp_cristal_label.configure(activeforeground="black")
alp_cristal_label.configure(foreground="black")
alp_cristal_label.configure(highlightcolor="black")
alp_cristal_label.configure(text='''alpha''')
bet_cristal_label = Label (master=root)
bet_cristal_label.place(relx=0.67,rely=0.22,height=19,width=42)
bet_cristal_label.configure(activebackground="#f9f9f9")
bet_cristal_label.configure(activeforeground="black")
bet_cristal_label.configure(foreground="black")
bet_cristal_label.configure(highlightcolor="black")
bet_cristal_label.configure(text='''beta''')
gam_cristal_label = Label (master=root)
gam_cristal_label.place(relx=0.66,rely=0.26,height=19,width=52)
gam_cristal_label.configure(activebackground="#f9f9f9")
gam_cristal_label.configure(activeforeground="black")
gam_cristal_label.configure(foreground="black")
gam_cristal_label.configure(highlightcolor="black")
gam_cristal_label.configure(text='''gamma''')
a_entry = Entry (master=root)
a_entry.place(relx=0.7,rely=0.06,relheight=0.03,relwidth=0.06)
a_entry.configure(background="white")
a_entry.configure(insertbackground="black")
b_entry = Entry (master=root)
b_entry.place(relx=0.7,rely=0.1,relheight=0.03,relwidth=0.06)
b_entry.configure(background="white")
b_entry.configure(foreground="black")
b_entry.configure(highlightcolor="black")
b_entry.configure(insertbackground="black")
b_entry.configure(selectbackground="#c4c4c4")
b_entry.configure(selectforeground="black")
c_entry = Entry (master=root)
c_entry.place(relx=0.7,rely=0.14,relheight=0.03,relwidth=0.06)
c_entry.configure(background="white")
c_entry.configure(foreground="black")
c_entry.configure(highlightcolor="black")
c_entry.configure(insertbackground="black")
c_entry.configure(selectbackground="#c4c4c4")
c_entry.configure(selectforeground="black")
alp_entry = Entry (master=root)
alp_entry.place(relx=0.71,rely=0.18,relheight=0.03,relwidth=0.06)
alp_entry.configure(background="white")
alp_entry.configure(foreground="black")
alp_entry.configure(highlightcolor="black")
alp_entry.configure(insertbackground="black")
alp_entry.configure(selectbackground="#c4c4c4")
alp_entry.configure(selectforeground="black")
bet_entry = Entry (master=root)
bet_entry.place(relx=0.71,rely=0.22,relheight=0.03,relwidth=0.06)
bet_entry.configure(background="white")
bet_entry.configure(foreground="black")
bet_entry.configure(highlightcolor="black")
bet_entry.configure(insertbackground="black")
bet_entry.configure(selectbackground="#c4c4c4")
bet_entry.configure(selectforeground="black")
gam_entry = Entry (master=root)
gam_entry.place(relx=0.71,rely=0.26,relheight=0.03,relwidth=0.06)
gam_entry.configure(background="white")
gam_entry.configure(foreground="black")
gam_entry.configure(highlightcolor="black")
gam_entry.configure(insertbackground="black")
gam_entry.configure(selectbackground="#c4c4c4")
gam_entry.configure(selectforeground="black")
uvw_button = Checkbutton (master=root)
uvw_button.place(relx=0.75,rely=0.66,relheight=0.03,relwidth=0.04)
uvw_button.configure(text='''uvw''')
uvw_button.configure(variable=var_uvw)
e_label = Label (master=root)
e_label.place(relx=0.66,rely=0.31,height=19,width=86)
e_label.configure(text='''Max indices''')
e_entry = Entry (master=root)
e_entry.place(relx=0.74,rely=0.31,relheight=0.03,relwidth=0.05)
e_entry.configure(background="white")
e_entry.configure(insertbackground="black")
e2_label = Label (master=root)
e2_label.place(relx=0.68,rely=0.36,height=19,width=12)
e2_label.configure(text='''d''')
dm_button = Button (master=root)
dm_button.place(relx=0.7,rely=0.36,height=21,width=13)
dm_button.configure(activebackground="#f9f9f9")
dm_button.configure(activeforeground="black")
dm_button.configure(command=dm)
dm_button.configure(foreground="black")
dm_button.configure(highlightcolor="black")
dm_button.configure(pady="0")
dm_button.configure(text='''-''')
d_entry = Entry (master=root)
d_entry.place(relx=0.72,rely=0.36,relheight=0.02,relwidth=0.04)
d_entry.configure(background="white")
d_entry.configure(foreground="black")
d_entry.configure(highlightcolor="black")
d_entry.configure(insertbackground="black")
d_entry.configure(selectbackground="#c4c4c4")
d_entry.configure(selectforeground="black")
dp_button = Button (master=root)
dp_button.place(relx=0.76,rely=0.36,height=21,width=17)
dp_button.configure(activebackground="#f9f9f9")
dp_button.configure(activeforeground="black")
dp_button.configure(command=dp)
dp_button.configure(foreground="black")
dp_button.configure(highlightcolor="black")
dp_button.configure(pady="0")
dp_button.configure(text='''+''')
d_label = Label (master=root)
d_label.place(relx=0.73,rely=0.39,height=19,width=16)
d_label.configure(textvariable=d_label_var)
label_addpoleA = Label (master=root)
label_addpoleA.place(relx=0.81,rely=0.03,height=19,width=90)
label_addpoleA.configure(activebackground="#cccccc")
label_addpoleA.configure(activeforeground="black")
label_addpoleA.configure(foreground="black")
label_addpoleA.configure(highlightcolor="black")
label_addpoleA.configure(text='''Add pole A''')
pole1A_entry = Entry (master=root)
pole1A_entry.place(relx=0.81,rely=0.06,relheight=0.02
,relwidth=0.04)
pole1A_entry.configure(background="white")
pole1A_entry.configure(foreground="black")
pole1A_entry.configure(highlightcolor="black")
pole1A_entry.configure(insertbackground="black")
pole1A_entry.configure(selectbackground="#c4c4c4")
pole1A_entry.configure(selectforeground="black")
pole2A_entry = Entry (master=root)
pole2A_entry.place(relx=0.87,rely=0.06,relheight=0.02
,relwidth=0.04)
pole2A_entry.configure(background="white")
pole2A_entry.configure(foreground="black")
pole2A_entry.configure(highlightcolor="black")
pole2A_entry.configure(insertbackground="black")
pole2A_entry.configure(selectbackground="#c4c4c4")
pole2A_entry.configure(selectforeground="black")
pole3A_entry = Entry (master=root)
pole3A_entry.place(relx=0.93,rely=0.06,relheight=0.02
,relwidth=0.04)
pole3A_entry.configure(background="white")
pole3A_entry.configure(foreground="black")
pole3A_entry.configure(highlightcolor="black")
pole3A_entry.configure(insertbackground="black")
pole3A_entry.configure(selectbackground="#c4c4c4")
pole3A_entry.configure(selectforeground="black")
addpoleA_button = Button (master=root)
addpoleA_button.place(relx=0.81,rely=0.11,height=31,width=57)
addpoleA_button.configure(activebackground="#f9f9f9")
addpoleA_button.configure(activeforeground="black")
addpoleA_button.configure(command=addpoleA)
addpoleA_button.configure(foreground="black")
addpoleA_button.configure(highlightcolor="black")
addpoleA_button.configure(pady="0")
addpoleA_button.configure(text='''Add''')
symA_button = Button (master=root)
symA_button.place(relx=0.87,rely=0.11,height=31,width=71)
symA_button.configure(command=addpoleA_sym)
symA_button.configure(pady="0")
symA_button.configure(text='''Symmetry''')
trace_planA_button = Button (master=root)
trace_planA_button.place(relx=0.93,rely=0.11,height=31,width=81)
trace_planA_button.configure(command=trace_planA)
trace_planA_button.configure(pady="0")
trace_planA_button.configure(text='''Draw plane''')
label_addpoleB = Label (master=root)
label_addpoleB.place(relx=0.81,rely=0.2,height=19,width=90)
label_addpoleB.configure(activebackground="#cccccc")
label_addpoleB.configure(activeforeground="black")
label_addpoleB.configure(foreground="black")
label_addpoleB.configure(highlightcolor="black")
label_addpoleB.configure(text='''Add pole B''')
pole1B_entry = Entry (master=root)
pole1B_entry.place(relx=0.81,rely=0.24,relheight=0.02
,relwidth=0.04)
pole1B_entry.configure(background="white")
pole1B_entry.configure(foreground="black")
pole1B_entry.configure(highlightcolor="black")
pole1B_entry.configure(insertbackground="black")
pole1B_entry.configure(selectbackground="#c4c4c4")
pole1B_entry.configure(selectforeground="black")
pole2B_entry = Entry (master=root)
pole2B_entry.place(relx=0.87,rely=0.24,relheight=0.02
,relwidth=0.04)
pole2B_entry.configure(background="white")
pole2B_entry.configure(foreground="black")
pole2B_entry.configure(highlightcolor="black")
pole2B_entry.configure(insertbackground="black")
pole2B_entry.configure(selectbackground="#c4c4c4")
pole2B_entry.configure(selectforeground="black")
pole3B_entry = Entry (master=root)
pole3B_entry.place(relx=0.93,rely=0.24,relheight=0.02
,relwidth=0.04)
pole3B_entry.configure(background="white")
pole3B_entry.configure(foreground="black")
pole3B_entry.configure(highlightcolor="black")
pole3B_entry.configure(insertbackground="black")
pole3B_entry.configure(selectbackground="#c4c4c4")
pole3B_entry.configure(selectforeground="black")
addpoleB_button = Button (master=root)
addpoleB_button.place(relx=0.81,rely=0.28,height=31,width=55)
addpoleB_button.configure(activebackground="#f9f9f9")
addpoleB_button.configure(activeforeground="black")
addpoleB_button.configure(command=addpoleB)
addpoleB_button.configure(foreground="black")
addpoleB_button.configure(highlightcolor="black")
addpoleB_button.configure(pady="0")
addpoleB_button.configure(text='''Add''')
symB_button = Button (master=root)
symB_button.place(relx=0.87,rely=0.28,height=31,width=71)
symB_button.configure(command=addpoleB_sym)
symB_button.configure(pady="0")
symB_button.configure(text='''Symmetry''')
trace_planB_button = Button (master=root)
trace_planB_button.place(relx=0.93,rely=0.28,height=31,width=81)
trace_planB_button.configure(command=trace_planB)
trace_planB_button.configure(pady="0")
trace_planB_button.configure(text='''Draw plane''')
show_ind_button = Checkbutton (master=root)
show_ind_button.place(relx=0.81,rely=0.7,relheight=0.03
,relwidth=0.11)
show_ind_button.configure(text='''Show indices''')
show_ind_button.configure(variable=show_ind)
show_angle_button = Checkbutton (master=root)
show_angle_button.place(relx=0.81,rely=0.74,relheight=0.03
,relwidth=0.11)
show_angle_button.configure(text='''Show angle''')
show_angle_button.configure(variable=show_angle)
show_axe_button = Checkbutton (master=root)
show_axe_button.place(relx=0.81,rely=0.78,relheight=0.03
,relwidth=0.11)
show_axe_button.configure(text='''Show axes''')
show_axe_button.configure(variable=show_axe)
show_num_button = Checkbutton (master=root)
show_num_button.place(relx=0.81,rely=0.82,relheight=0.03
,relwidth=0.11)
show_num_button.configure(text='''Show numbers''')
show_num_button.configure(variable=show_num)
menu = Menu(master=root)
filemenu = Menu(menu, tearoff=0)
menu.add_cascade(label="Save", menu=filemenu)
root.config(menu=menu)
filemenu.add_command(label="Save data", command=file_save)
filemenu.add_command(label="Save figure", command=image_save)
######################################################################################################
######## import crystal structures from a file: Name,a,b,c,alpha,beta,gamma,space group
######################################################################################################
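# Example line (hypothetical, format inferred from the parsing below):
# Copper 3.615 3.615 3.615 90 90 90 Fm-3m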
def structure(i0):
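    # Fill the lattice-parameter entry fields (a, b, c, alpha, beta, gamma) from
    # row i0 of the structures file loaded below.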
global x0
a_entry.delete(0,END)
a_entry.insert(1,eval(x0[i0][1]))
b_entry.delete(0,END)
b_entry.insert(1,eval(x0[i0][2]))
c_entry.delete(0,END)
c_entry.insert(1,eval(x0[i0][3]))
alp_entry.delete(0,END)
alp_entry.insert(1,eval(x0[i0][4]))
bet_entry.delete(0,END)
bet_entry.insert(1,eval(x0[i0][5]))
gam_entry.delete(0,END)
gam_entry.insert(1,eval(x0[i0][6]))
def createstructure(i):
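    # Return a callback bound to row i; using a plain lambda inside the loop
    # below would make every menu entry share the last value of i.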
return lambda:structure(i)
cristalmenu=Menu(menu,tearoff=0)
menu.add_cascade(label="Structures", menu=cristalmenu)
file_struct=open(os.path.join(os.path.dirname(__file__), 'structure.txt') ,"r")
x0=[]
i=0
for line in file_struct:
    x0.append(list(map(str, line.split())))
cristalmenu.add_command(label=x0[i][0], command=createstructure(i))
i=i+1
file_struct.close()
#######################################################################################################
phi1A_entry.insert(0,0)
phiA_entry.insert(0,0)
phi2A_entry.insert(0,0)
phi1B_entry.insert(0,0)
phiB_entry.insert(0,0)
phi2B_entry.insert(0,0)
e_entry.insert(1,1)
d_entry.insert(1,1)
mainloop()
| gpl-2.0 | 980,697,101,634,222,300 | 33.094023 | 147 | 0.579285 | false |
jdelgad/IRefuse | irefuse/irefuse.py | 1 | 6388 | # -*- encoding: UTF-8 -*-
"""
'I Refuse' web application.
Copyright (C) 2017 Jacob Delgado
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json
import logging
import random
from typing import Callable, List
from irefuse.player import Players, Player
logger = logging.getLogger()
class IRefuse(object):
"""The game logic behind I Refuse."""
USER_PASSES = 1
USER_TAKES_CARD = 2
MIN_PLAYERS = 3
MAX_PLAYERS = 5
NUMBER_OF_ROUNDS = 24
MIN_CARD = 3
MAX_CARD = 36
def __init__(self):
"""Construct 'I Refuse' game object."""
self.cards = []
self.players = None
def setup(self, input_func: Callable[[], str]) -> None:
"""
Set up the card game.
:param input_func: The function to use to prompt the user with.
:return: None
"""
logger.debug("Setting up I Refuse")
self.cards = self.setup_cards()
self.players = self.setup_players(input_func)
logger.info("Game created with {} players".format(len(self.players)))
logger.debug("Cards to be used in game: {}".format(self.cards))
@staticmethod
def setup_players(input_func: Callable[[], str]) -> Players:
"""
Set up the number of players. Must be between 3-5.
:param input_func: Used for mocking input()
:return: A list of game.player.Player objects
"""
print("Enter the number of players [3-5]: ")
number_of_people_playing = int(input_func())
if number_of_people_playing < IRefuse.MIN_PLAYERS or \
number_of_people_playing > IRefuse.MAX_PLAYERS:
logger.error("Invalid number of players specified: {}"
.format(number_of_people_playing))
raise AssertionError("invalid number of players")
return Players(number_of_people_playing)
@staticmethod
def setup_cards() -> List[int]:
""":return: A list of randomized 24 cards ranging from 3-35."""
return random.sample(range(IRefuse.MIN_CARD, IRefuse.MAX_CARD),
IRefuse.NUMBER_OF_ROUNDS)
def determine_winner(self) -> List[Player]:
"""
Calculate who won. Ties can occur.
Creates a dictionary of point values to list of players with that
value. Returns the players with the lowest point value.
:return: The list of winners.
"""
player_totals = {}
for player in self.players:
if player.calculate_points() in player_totals:
player_totals[player.calculate_points()].append(player)
else:
player_totals[player.calculate_points()] = [player]
logger.info("Final results: {}".format(self.players))
sorted_totals = sorted(player_totals.keys())
return player_totals[sorted_totals[0]]
def play(self, input_func: Callable[[], str]):
"""
Coordinate how the game is played.
:param input_func: Input function to prompt the user.
:return: The list of winners after a game has been completed.
"""
max_flips = len(self.cards)
player = self.players.next_player()
for _ in range(max_flips):
card = self.flip_card()
tokens = 0
action = self.prompt_for_action(card, tokens, input_func, player)
logger.debug("Available card: {}".format(card))
while action == IRefuse.USER_PASSES:
logger.debug("{} passed on {} with {} tokens remaining"
.format(player, card, player.tokens))
tokens += 1
player.passes()
player = self.players.next_player(player)
action = self.prompt_for_action(card, tokens, input_func,
player)
player.take_card(card, tokens)
logger.debug("{} took {} and now has {} tokens".
format(player, card, player.tokens))
logger.debug("No more actions")
# TODO: command or query, but not both
return self.determine_winner()
@staticmethod
def prompt_for_action(card: int,
tokens: int,
input_func: Callable[[], str],
current_player: Player):
"""
Prompt the user for action. Return enum for user selection.
:param card: The card currently face up.
:param tokens: The amount of tokens on the face up card.
:param input_func: Prompt for user input.
:param current_player: The player whose action it is.
:return: The user selection (enum integer).
"""
# TODO: command or query, but not both
if not current_player.can_pass():
return IRefuse.USER_TAKES_CARD
action = 0
while not (action == IRefuse.USER_PASSES or
action == IRefuse.USER_TAKES_CARD):
print("\n{} it is your turn".format(current_player))
print("Available card: {}, Number of tokens: {}"
.format(card, tokens))
print("What action do you wish to perform: ")
print("{}. Pass".format(IRefuse.USER_PASSES))
print("{}. Take card".format(IRefuse.USER_TAKES_CARD))
print("------------")
print("Selection: ")
action = int(input_func())
return action
def flip_card(self) -> int:
"""
Flip the top card on the deck.
:return: The newest card to be face up.
"""
return self.cards.pop()
def serialize(self) -> str:
"""Serialize class to json string."""
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True)
| agpl-3.0 | -3,449,111,402,710,158,300 | 35.090395 | 77 | 0.588134 | false |
flyingSprite/spinelle | task_inventory/order_1_to_30/order_16_use_faker.py | 1 | 2553 |
""" Order 16: Use faker in python.
Generate lots of kinds of data with Faker
* User information
"""
from faker import Faker, Factory
class FakerGenerator(object):
"""Generate different data by this class."""
fake = None
def __init__(self, language=None):
if language:
self.fake = Factory.create(language)
else:
self.fake = Faker()
def gen_user_info(self):
user = User()
user.name = self.fake.name()
user.address = self.fake.address()
return user
def get_full_values(self):
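        # Populate a FullValues instance with one fake value per supported field.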
full_values = FullValues()
full_values.address = self.fake.address()
# full_values.barcode = self.fake.barcode()
full_values.color = self.fake.safe_hex_color()
full_values.company = self.fake.company()
full_values.credit_card = self.fake.credit_card_number()
full_values.currency = self.fake.currency_code()
full_values.date_time = self.fake.date_time()
full_values.file = self.fake.file_name()
full_values.internet = self.fake.company_email()
full_values.job = self.fake.job()
full_values.lorem = self.fake.text(max_nb_chars=200)
full_values.misc = self.fake.password()
full_values.person = self.fake.name_female()
full_values.phone_number = self.fake.phone_number()
full_values.profile = self.fake.profile()
# full_values.python = self.fake.python()
full_values.ssn = self.fake.ssn()
full_values.user_agent = self.fake.user_agent()
return full_values
class FullValues(object):
address = None
barcode = None
color = None
company = None
credit_card = None
currency = None
date_time = None
file = None
internet = None
job = None
lorem = None
misc = None
person = None
phone_number = None
profile = None
python = None
ssn = None
user_agent = None
def __str__(self):
"""Get this object instance string values."""
return 'FullValues = [%s]' % ', '.join(['%s: %s' % item for item in self.__dict__.items()])
class User(object):
name = ''
address = ''
def __str__(self):
"""Get this object instance string values."""
return 'User = [%s]' % ', '.join(['%s: %s' % item for item in self.__dict__.items()])
# import logging
# gen = FakerGenerator(language='zh_CN')
# print(gen.gen_user_info().__str__())
# logging.info(gen.gen_user_info().__str__())
#
# full = gen.get_full_values()
# print(full.__str__())
| mit | 3,729,536,303,053,445,000 | 27.685393 | 99 | 0.596553 | false |
Victory/realpython-tdd | contacts/user_contacts/views.py | 1 | 1864 | from django.shortcuts import (
render,
render_to_response)
from django.template import RequestContext
from django.http import HttpResponseRedirect, HttpResponse
from django.views.generic import DetailView
from django.core.exceptions import ValidationError
from django.views.decorators.http import require_http_methods
import json
from user_contacts.models import (
Phone,
Person)
from user_contacts.new_contact_form import ContactForm
def home(request):
return render_to_response('home.html')
class DetailContactView(DetailView):
model = Person
template_name = 'contact.html'
@require_http_methods(["POST"])
def validate(request):
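    # Validate a single ContactForm field from POST data and return the result
    # ("valid" or the validation error messages) as JSON.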
post = request.POST
field_name = post['field_name']
field_value = post['field_value']
data_for_form = {}
data_for_form[field_name] = field_value
form = ContactForm(data_for_form)
field = form.fields[field_name]
data = field.widget.value_from_datadict(
form.data, form.files, form.add_prefix(field_name))
try:
cleaned_data = field.clean(data)
result = "valid"
    except ValidationError as e:
        result = '\n'.join(e.messages)
    data = json.dumps({'result': result})
    return HttpResponse(data, content_type='application/json')
def all_contacts(request):
contacts = Phone.objects.all()
return render_to_response('all.html', {'contacts': contacts})
def add_contact(request):
person_form = ContactForm()
return render(
request,
'add.html',
{'person_form': person_form},
context_instance=RequestContext(request))
def create(request):
form = ContactForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('all/')
return render(
request,
'add.html',
{'person_form': form},
context_instance=RequestContext(request))
| mit | 4,318,323,745,429,808,600 | 24.534247 | 65 | 0.670601 | false |
bjarnagin/manyquery | manyquery/cli.py | 1 | 1598 | import click
import csv
from manyquery.connection import Connection, MultiConnection
@click.command()
@click.option('--host', '-h', default=['ict.croptrak.com'], multiple=True,
help='Hostname. Repeatable.')
@click.option('--user', '-u', help='MySQL username')
@click.option('--password', '-p', prompt=True, hide_input=True,
help='MySQL password')
@click.option('--database', '-d', 'databases', multiple=True,
              help='Databases to execute query on. Default: all. Repeatable.')
@click.option('--all-hosts', help='Executes a query on all hostnames. '
              'Not compatible with the --database option.', is_flag=True)
@click.argument('infile', type=click.File('rb'))
@click.argument('outfile')
def cli(host, user, password, databases, all_hosts, infile, outfile):
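    # Run the SQL query read from INFILE against one or more MySQL hosts and
    # write the result rows to OUTFILE as CSV.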
if databases and len(host) > 1:
        click.echo('--database option only available when used with a single host')
return
if all_hosts:
conn = MultiConnection(user, password)
elif len(host) > 1:
conn = MultiConnection(user, password, host=host)
else:
conn = Connection(host[0], user, password)
if databases:
conn.select_dbs(databases)
query = ''
while True:
chunk = infile.read(1024).decode('utf-8')
if not chunk:
break
query = query + chunk
    for char in ['\n', '\t']:
        query = query.replace(char, ' ')
with open(outfile, 'w') as f:
writer = csv.writer(f)
for row in conn.execute(query, include_fields=True):
writer.writerow(row) | mit | -4,797,792,615,372,584,000 | 34.533333 | 82 | 0.610138 | false |
conchyliculture/wikipoff-tools | tests/units.py | 1 | 18483 | #!/usr/bin/python
# encoding: utf-8
from __future__ import unicode_literals
import os.path
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), u'..'))
sys.path.append(
os.path.join(
os.path.dirname(__file__),
u'..', u'lib', u'python{0:d}.{1:d}'.format(
sys.version_info.major, sys.version_info.minor),
u'site-packages'))
from binascii import unhexlify
import codecs
import locale
import unittest
from lib.writer.compress import LzmaCompress
from lib.writer.sqlite import OutputSqlite
from lib.wikimedia.converter import WikiConverter
from lib.wikimedia.XMLworker import XMLworker
class TestCompression(unittest.TestCase):
def test_lzma(self):
data = u'['+u'oléléolala'*12+u']'
compressed = LzmaCompress(data)
expected_compressed = unhexlify(
u'5d000080009200000000000000002d9bc98c53caed25d8aa1da643a8fa430000')
self.assertEqual(expected_compressed, compressed)
class TestSQLWriter(unittest.TestCase):
def testCheckRequiredInfos(self):
o = OutputSqlite(None)
with self.assertRaises(Exception):
o._CheckRequiredInfos({})
good_tags = {
u'lang-code': u'lol',
u'lang-local': u'lil',
u'lang-english': u'lolu',
u'type': u'lola',
u'source': u'loly',
u'author': u'lolll'
}
o.SetMetadata(good_tags)
res = o.GetMetadata()
date = res.pop(u'date')
self.assertIsNotNone(date, None)
version = res.pop(u'version')
self.assertIsNotNone(version, None)
self.assertEqual(good_tags, res)
def test_AddRedirect(self):
o = OutputSqlite(None)
test_redirect = (u'From', u'To')
o.AddRedirect(*test_redirect)
o._AllCommit()
o.cursor.execute(u'SELECT * FROM redirects')
self.assertEqual(test_redirect, o.cursor.fetchone())
def test_AddArticle(self):
o = OutputSqlite(None)
test_article = (
u'This title & à_è>Ýü',
(u"{{Japonais|'''Lolicon'''|ロリータ・コンプレックス|"
u"''rorīta konpurekkusu''}}, ou {{japonais|'''Rorikon'''|ロリコン}}")
)
o.AddArticle(*test_article)
o._AllCommit()
o.cursor.execute(u'SELECT * FROM articles')
self.assertEqual((1, test_article[0], test_article[1]), o.cursor.fetchone())
def test_Close(self):
o = OutputSqlite(None)
test_article = (
u'This title & à_è>Ýü',
(u"{{Japonais|'''Lolicon'''|ロリータ・コンプレックス|"
u"''rorīta konpurekkusu''}}, ou {{japonais|'''Rorikon'''|ロリコン}}"))
o.AddArticle(*test_article)
test_redirect = (u'From', u'To')
o.AddRedirect(*test_redirect)
o._AllCommit()
o.Close()
class TestWikiFr(unittest.TestCase):
def setUp(self):
try:
locale.setlocale(locale.LC_ALL, u'fr_FR.utf-8')
from lib.wikimedia.languages import wikifr
self.sfrt = wikifr.WikiFRTranslator()
except locale.Error as e:
self.skipTest(u'Skipping WikiFr tests due to locale not being installed')
def testLang(self):
tests = [
[u"lolilol ''{{lang|la|domus Dei}}''", u"lolilol ''domus Dei''"],
[u"''{{lang-en|Irish Republican Army}}, IRA'' ; ''{{lang-ga|Óglaigh na hÉireann}}'') est le nom porté",
u"''Irish Republican Army, IRA'' ; ''Óglaigh na hÉireann'') est le nom porté"],
[u"{{lang|ko|입니다.}}", u"입니다."],
[u"Ainsi, le {{lang|en|''[[Quicksort]]''}} (ou tri rapide)",
u"Ainsi, le ''[[Quicksort]]'' (ou tri rapide)"],
[u" ''{{lang|hy|Hayastan}}'', {{lang|hy|Հայաստան}} et ''{{lang|hy|Hayastani Hanrapetut’yun}}'', {{lang|hy|Հայաստանի Հանրապետություն}}",
u" ''Hayastan'', Հայաստան et ''Hayastani Hanrapetut’yun'', Հայաստանի Հանրապետություն"],
[u"{{langue|ja|酸度}} || 1.4({{langue|ja|芳醇}}", u"酸度 || 1.4(芳醇"],
[u"{{langue|thaï|กรุงเทพฯ}}", u"กรุงเทพฯ"],
[u"{{Lang|ar|texte=''Jabal ad Dukhan''}}", u"''Jabal ad Dukhan''"],
[u"{{lang|arc-Hebr|dir=rtl|texte=ארמית}} {{lang|arc-Latn|texte=''Arāmît''}},}}",
u"ארמית ''Arāmît'',}}"],
[u"ce qui augmente le risque de {{lang|en|''[[Mémoire virtuelle#Swapping|swapping]]''}})",
u"ce qui augmente le risque de ''[[Mémoire virtuelle#Swapping|swapping]]'')"]
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testDateShort(self):
tests = [
[u'{{1er janvier}}', u'1<sup>er</sup> janvier'],
[u'{{1er février}}', u'1<sup>er</sup> février'],
[u'Le {{1er mars}}, le débarquement, prévu ', u'Le 1<sup>er</sup> mars, le débarquement, prévu '],
[u'{{1er avril}}', u'1<sup>er</sup> avril'],
[u'{{1er mai}}', u'1<sup>er</sup> mai'],
[u'{{1er juin}}', u'1<sup>er</sup> juin'],
[u'{{1er juillet}}', u'1<sup>er</sup> juillet'],
[u'{{1er août}}', u'1<sup>er</sup> août'],
[u'{{1er septembre}}', u'1<sup>er</sup> septembre'],
[u'{{1er octobre}}', u'1<sup>er</sup> octobre'],
[u'{{1er novembre}}', u'1<sup>er</sup> novembre'],
[u'{{1er décembre}}', u'1<sup>er</sup> décembre'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testDate(self):
tests = [
[u'{{date|10|août|1425}}', u'10 août 1425'],
[u'{{Date|10|août|1989}} - {{Date|28|février|1990}}', u'10 août 1989 - 28 février 1990'],
[u'{{date|6|février|1896|en France}}', u'6 février 1896'],
[u'{{Date|1er|janvier|537}}', u'1er janvier 537'],
[u'{{Date||Octobre|1845|en sport}}', u'Octobre 1845'],
[u'{{Date|1|octobre|2005|dans les chemins de fer}}', u'1er octobre 2005'],
[u'les {{Date|25|mars}} et {{Date|8|avril|1990}}', u'les 25 mars et 8 avril 1990'],
[u'Jean-François Bergier, né à [[Lausanne]], le {{date de naissance|5|décembre|1931}} et mort le {{date de décès|29|octobre|2009}}<ref name="swissinfo"/>, est un [[historien]] [[suisse]].', u'Jean-François Bergier, né à [[Lausanne]], le 5 décembre 1931 et mort le 29 octobre 2009<ref name="swissinfo"/>, est un [[historien]] [[suisse]].'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testSimpleSiecle(self):
tests = [
[u'{{Ier siècle}}, {{IIe siècle}}, ... {{XXe siècle}}, ...', u'Ier siècle, IIe siècle, ... XXe siècle, ...'],
[u'{{Ier siècle av. J.-C.}}, {{IIe siècle av. J.-C.}}, ...', u'Ier siècle av. J.-C., IIe siècle av. J.-C., ...'],
[u'{{Ier millénaire}}, {{IIe millénaire}}, ...', u'Ier millénaire, IIe millénaire, ...'],
[u'{{Ier millénaire av. J.-C.}}, {{IIe millénaire av. J.-C.}}, ...', u'Ier millénaire av. J.-C., IIe millénaire av. J.-C., ...'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testGSiecles(self):
tests = [
[u'{{sp|VII|e|ou|VIII|e|}}', u'VIIe ou VIIIe siècle'],
[u'{{sp-|VII|e|ou|VIII|e|}}', u'VIIe ou VIIIe siècle'],
[u'{{-sp|IX|e|-|VII|e|s}}', u'IXe - VIIe siècles av. J.-C.'],
[u'{{-sp-|IX|e|-|VII|e|s}}', u'IXe - VIIe siècles av. J.-C.'],
[u'au {{sp-|XII|e|et au|XVI|e}}', u'au XIIe et au XVIe siècle'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testTemperature(self):
tests = [
[u'température supérieure à {{tmp|10|°C}}.', u'température supérieure à 10°C.'],
[u'Il se décompose de façon explosive aux alentours de {{tmp|95|°C}}.', u'Il se décompose de façon explosive aux alentours de 95°C.'],
[u'Entre 40 et {{tmp|70|°C}}', u'Entre 40 et 70°C'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testSiecle(self):
tests = [
["{{s|III|e}}", u"IIIe siècle"],
["{{-s|III|e}}", u"IIIe siècle av. J.-C. "],
["{{s-|III|e}}", u"IIIe siècle"],
["{{-s-|III|e}}", u"IIIe siècle av. J.-C. "],
["{{s2|III|e|IX|e}}", u"IIIe et IXe siècles"],
["{{-s2|III|e|IX|e}}", u"IIIe et IXe siècles av. J.-C. "],
["{{s2-|III|e|IX|e}}", u"IIIe et IXe siècles"],
["{{-s2-|III|e|IX|e}}", u"IIIe et IXe siècles av. J.-C. "],
[u"{{s-|XIX|e|}}", u"XIXe siècle"],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testUnit(self):
tests = [
[u'{{Unité|1234567}}', u'1 234 567'],
[u'{{Unité|1234567.89}}', u'1 234 567.89'],
[u'{{Unité|1234567,89}}', u'1 234 567.89'],
[u'{{Unité|1.23456789|e=15}}', u'1.23456789×10<sup>15</sup>'],
[u'{{Unité|10000|km}}', u'10 000 km'],
[u'{{nombre|8|[[bit]]s}}', u'8 [[bit]]s'],
[u'{{nombre|1000|[[yen]]s}}', u'1 000 [[yen]]s'],
[u'{{nombre|9192631770|périodes}}', u'9 192 631 770 périodes'],
[u'{{nombre|3620|hab. par km|2}}', u'3 620 hab. par km<sup>2</sup>'],
[u'{{Unité|10000|km/h}}', u'10 000 km/h'],
[u'{{Unité|10000|km|2}}', u'10 000 km<sup>2</sup>'],
[u'{{Unité|10000|m|3}}', u'10 000 m<sup>3</sup>'],
[u'{{Unité|10000|km||h|-1}}', u'10 000 km⋅h<sup>-1</sup>'],
[u'{{Unité|10000|J|2|K|3|s|-1}}', u'10 000 J<sup>2</sup>⋅K<sup>3</sup>⋅s<sup>-1</sup>'],
[u'{{Unité|10000|J||kg||m|-2}}', u'10 000 J⋅kg⋅m<sup>-2</sup>'],
[u'{{Unité|-40.234|°C}}', u'-40.234 °C'],
# [u'{{Unité|1.23456|e=9|J|2|K|3|s|-1}}', u'1.23456×10<sup>9</sup> J<sup>2</sup>⋅K<sup>3</sup>⋅s<sup>-1</sup>'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testFormatNum(self):
tests = [
[u'Elle comporte plus de {{formatnum:1000}} [[espèce]]s dans {{formatnum:90}}',
u'Elle comporte plus de 1 000 [[espèce]]s dans 90'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testJaponais(self):
tests = [
[u"{{Japonais|'''Happa-tai'''|はっぱ隊||Brigade des feuilles}}",
u"'''Happa-tai''' (はっぱ隊, , Brigade des feuilles)"],
[u"{{Japonais|'''Lolicon'''|ロリータ・コンプレックス|''rorīta konpurekkusu''}}, ou {{japonais|'''Rorikon'''|ロリコン}}",
u"'''Lolicon''' (ロリータ・コンプレックス, ''rorīta konpurekkusu''), ou '''Rorikon''' (ロリコン)"],
[u"Le {{japonais|'''Tōdai-ji'''|東大寺||littéralement « Grand temple de l’est »}}, de son nom complet {{japonais|Kegon-shū daihonzan Tōdai-ji|華厳宗大本山東大寺}}, est un",
u"Le '''Tōdai-ji''' (東大寺, , littéralement « Grand temple de l’est »), de son nom complet Kegon-shū daihonzan Tōdai-ji (華厳宗大本山東大寺), est un"]
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testNobr(self):
tests = [
[u'{{nobr|[[préfixe binaire|préfixes binaires]]}}',
u'<span class="nowrap">[[préfixe binaire|préfixes binaires]]</span>'],
[u'{{nobr|93,13x2{{exp|30}} octets}}',
u'<span class="nowrap">93,13x2<sup>30</sup> octets</span>']
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def testHeures(self):
tests = [
[u'{{heure|8}}', u'8 h'],
[u'{{heure|22}}', u'22 h'],
[u'{{heure|1|55}}', u'1 h 55'],
[u'{{heure|10|5}}', u'10 h 5'],
[u'{{heure|22|55|00}}', u'22 h 55 min 00 s'],
]
for t in tests:
self.assertEqual(self.sfrt.Translate(t[0]), t[1])
def test_allowed_title(self):
self.assertEqual(False, self.sfrt.IsAllowedTitle(u'Modèle'))
self.assertEqual(True, self.sfrt.IsAllowedTitle(u'Lolilol'))
class TestXMLworkerClass(XMLworker):
def __init__(self, input_file, output_array):
super(TestXMLworkerClass, self).__init__(input_file)
self.GENERATED_STUFF = output_array
def GenerateMessage(self, title, body, msgtype):
self.GENERATED_STUFF.append({u'type': msgtype, u'title': title, u'body': body})
class TestXMLworker(unittest.TestCase):
def setUp(self):
try:
locale.setlocale(locale.LC_ALL, u'fr_FR.utf-8')
except locale.Error as e:
self.skipTest(u'Skipping TestXMLworker tests due to locale not being installed')
self.GENERATED_STUFF = []
self.xmlw = TestXMLworkerClass(
os.path.join(
os.path.dirname(__file__), u'test_data',
u'frwiki-latest-pages-articles.xml.short'),
self.GENERATED_STUFF)
self.xmlw._ProcessData()
def test_GetInfos(self):
self.maxDiff = None
expected_infos = {
u'lang': u'fr',
u'generator': u'MediaWiki 1.29.0-wmf.18',
u'author': u'renzokuken @ Wikipoff-tools',
u'sitename': u'Wikipédia',
u'lang-english': u'French',
u'lang-local': u'Français',
u'source': u'https://fr.wikipedia.org/wiki/Wikip%C3%A9dia:Accueil_principal',
u'base': u'https://fr.wikipedia.org/wiki/Wikip%C3%A9dia:Accueil_principal',
u'lang-code': u'fr',
u'type': u'Wikipédia',
u'dbname': u'frwiki'}
self.assertEqual(expected_infos, self.xmlw.db_metadata)
def test_wikitype(self):
self.assertEqual(u'wikipedia', self.xmlw.wikitype)
def test_ProcessData(self):
self.xmlw._ProcessData()
generated_redirect = self.GENERATED_STUFF[10]
self.assertEqual(
u'Sigles en médecine', generated_redirect[u'title'])
self.assertEqual(
u'Liste d\'abréviations en médecine', generated_redirect[u'body'])
self.assertEqual(1, generated_redirect[u'type'])
generated_article = self.GENERATED_STUFF[-1]
self.assertEqual(
u'Aude (département)', generated_article[u'title'])
self.assertEqual(
u'{{Voir homonymes|Aude}}\n{{Infobox Département de France',
generated_article[u'body'][0:55])
self.assertEqual(17357, len(generated_article[u'body']))
self.assertEqual(2, generated_article[u'type'])
generated_article_colon_allowed = self.GENERATED_STUFF[-2]
self.assertEqual(
u'Race:Chie', generated_article_colon_allowed[u'title'])
self.assertEqual(
u'osef ', generated_article_colon_allowed[u'body'])
self.assertEqual(2, generated_article_colon_allowed[u'type'])
generated_article_colon_notallowed = self.GENERATED_STUFF[-3]
self.assertEqual(u'Aube (département)', generated_article_colon_notallowed[u'title'])
class TestConverterNoLang(unittest.TestCase):
def test_thumbstuff(self):
self.maxDiff = None
wikicode = u'[[Figure:Sahara satellite hires.jpg|thumb|right|300px|Foto dal satelit]] Il \'\'\'Sahara\'\'\' ([[Lenghe arabe|arap]] صحراء {{audio|ar-Sahara.ogg|pronuncie}}, \'\'desert\'\') al è un [[desert]] di gjenar tropicâl inte [[Afriche]] dal nord. Al è il secont desert plui grant dal mont (daspò la [[Antartide]]), cuntune superficie di 9.000.000 km².'
expected = u' Il <b>Sahara</b> (<a href="Lenghe arabe">arap</a> صحراء , <i>desert</i> ) al è un <a href="desert">desert</a> di gjenar tropicâl inte <a href="Afriche">Afriche</a> dal nord. Al è il secont desert plui grant dal mont (daspò la <a href="Antartide">Antartide</a>), cuntune superficie di 9.000.000 km².'
c = WikiConverter()
body = c.Convert(u'title', wikicode)[1]
self.assertEqual(expected, body)
class TestConverterFR(unittest.TestCase):
def setUp(self):
try:
locale.setlocale(locale.LC_ALL, u'fr_FR.utf-8')
from lib.wikimedia.languages import wikifr
self.sfrt = wikifr.WikiFRTranslator()
except locale.Error as e:
self.skipTest(u'Skipping WikiFr tests due to locale not being installed')
self.GENERATED_STUFF = []
self.xmlw = TestXMLworkerClass(
os.path.join(
os.path.dirname(__file__), u'test_data',
u'frwiki-latest-pages-articles.xml.short'),
self.GENERATED_STUFF)
self.xmlw._ProcessData()
def test_ShortConvert(self):
self.maxDiff = None
wikicode = (
u'le [[lis martagon|lis des Pyrénées]], \'\'[[Calotriton asper]]\'\''
u'ou la [[Equisetum sylvaticum|prêle des bois]]')
expected = (
u'le <a href="lis martagon">lis des Pyrénées</a>, <i><a href="Calotriton asper">'
u'Calotriton asper</a></i> ou la <a href="Equisetum sylvaticum">prêle des bois</a>')
c = WikiConverter(u'wikipedia', u'fr')
body = c.Convert(u'title', wikicode)[1]
self.assertEqual(expected, body)
def test_ConvertArticle(self):
self.maxDiff = None
c = WikiConverter(u'wikipedia', u'fr')
a = self.GENERATED_STUFF[-1]
(_, body) = c.Convert(a[u'title'], a[u'body'])
body = body.strip()
# with open(u'/tmp/lolilol', u'wb') as w:
#w.write(body.encode(u'utf-8'))
expected_html_path = os.path.join(os.path.dirname(__file__), u'test_data', u'aude.html')
with codecs.open(expected_html_path, u'r', encoding=u'utf-8') as html:
test_data = html.read().strip()
self.assertEqual(len(test_data), len(body))
self.assertEqual(test_data, body)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -8,170,664,019,109,375,000 | 43.095823 | 383 | 0.554243 | false |
tensorflow/probability | tensorflow_probability/python/distributions/blockwise.py | 1 | 15570 | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Blockwise distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import joint_distribution_sequential
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import parameter_properties
from tensorflow_probability.python.internal import prefer_static as ps
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import tensorshape_util
def _is_iterable(x):
try:
_ = iter(x)
except TypeError:
return False
return True
class _Cast(distribution_lib.Distribution):
"""Utility distribution to cast inputs/outputs of another distribution."""
def __init__(self, distribution, dtype):
parameters = dict(locals())
name = 'CastTo{}'.format(dtype_util.name(dtype))
with tf.name_scope(name) as name:
self._distribution = distribution
self._dtype = dtype
super(_Cast, self).__init__(
dtype=dtype,
validate_args=distribution.validate_args,
allow_nan_stats=distribution.allow_nan_stats,
reparameterization_type=distribution.reparameterization_type,
parameters=parameters,
name=name)
def _batch_shape(self):
return self._distribution.batch_shape
def _batch_shape_tensor(self):
return self._distribution.batch_shape_tensor()
def _event_shape(self):
return self._distribution.event_shape
def _event_shape_tensor(self):
return self._distribution.event_shape_tensor()
def _sample_n(self, n, seed=None):
return tf.nest.map_structure(lambda x: tf.cast(x, self._dtype),
self._distribution.sample(n, seed))
def _log_prob(self, x):
x = tf.nest.map_structure(tf.cast, x, self._distribution.dtype)
return tf.cast(self._distribution.log_prob(x), self._dtype)
def _entropy(self):
return self._distribution.entropy()
def _mean(self):
return tf.nest.map_structure(lambda x: tf.cast(x, self._dtype),
self._distribution.mean())
@kullback_leibler.RegisterKL(_Cast, _Cast)
def _kl_blockwise_cast(d0, d1, name=None):
return d0._distribution.kl_divergence(d1._distribution, name=name) # pylint: disable=protected-access
class Blockwise(distribution_lib.Distribution):
"""Blockwise distribution.
This distribution converts a distribution or list of distributions into a
vector-variate distribution by doing a sequence of reshapes and concatenating
the results. This is particularly useful for converting `JointDistribution`
instances to vector-variate for downstream uses which can only handle
single-`Tensor` distributions.
#### Examples
Flattening a sequence of distrbutions:
```python
tfd = tfp.distributions
d = tfd.Blockwise(
[
tfd.Independent(
tfd.Normal(
loc=tf.zeros(4, dtype=tf.float64),
scale=1),
reinterpreted_batch_ndims=1),
tfd.MultivariateNormalTriL(
scale_tril=tf.eye(2, dtype=tf.float32)),
],
dtype_override=tf.float32,
)
x = d.sample([2, 1])
y = d.log_prob(x)
x.shape # ==> (2, 1, 4 + 2)
x.dtype # ==> tf.float32
y.shape # ==> (2, 1)
y.dtype # ==> tf.float32
d.mean() # ==> np.zeros((4 + 2,))
```
Flattening a joint distribution:
```python
tfd = tfp.distributions
Root = tfd.JointDistributionCoroutine.Root # Convenient alias.
def model():
e = yield Root(tfd.Independent(tfd.Exponential(rate=[100, 120]), 1))
g = yield tfd.Gamma(concentration=e[..., 0], rate=e[..., 1])
n = yield Root(tfd.Normal(loc=0, scale=2.))
yield tfd.Normal(loc=n, scale=g)
joint = tfd.JointDistributionCoroutine(model)
d = tfd.Blockwise(joint)
x = d.sample([2, 1])
y = d.log_prob(x)
x.shape # ==> (2, 1, 2 + 1 + 1 + 1)
x.dtype # ==> tf.float32
y.shape # ==> (2, 1)
y.dtype # ==> tf.float32
```
"""
def __init__(self,
distributions,
dtype_override=None,
validate_args=False,
allow_nan_stats=False,
name='Blockwise'):
"""Construct the `Blockwise` distribution.
Args:
distributions: Python `list` of `tfp.distributions.Distribution`
instances. All distribution instances must have the same `batch_shape`
and all must have `event_ndims==1`, i.e., be vector-variate
distributions.
dtype_override: samples of `distributions` will be cast to this `dtype`.
If unspecified, all `distributions` must have the same `dtype`.
Default value: `None` (i.e., do not cast).
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`, statistics
(e.g., mean, mode, variance) use the value "`NaN`" to indicate the
result is undefined. When `False`, an exception is raised if one or more
of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
self._distributions = distributions
if dtype_override is not None:
distributions = tf.nest.map_structure(
lambda d: _Cast(d, dtype_override), distributions)
if _is_iterable(distributions):
self._distribution = (
joint_distribution_sequential.JointDistributionSequential(
list(distributions)))
else:
self._distribution = distributions
# Need to cache these for JointDistributions as the batch shape of that
# distribution can change after `_sample` calls.
self._cached_batch_shape_tensor = self._distribution.batch_shape_tensor()
self._cached_batch_shape = self._distribution.batch_shape
if dtype_override is not None:
dtype = dtype_override
else:
dtype = set(
dtype_util.base_dtype(dtype)
for dtype in tf.nest.flatten(self._distribution.dtype)
if dtype is not None)
if len(dtype) == 0: # pylint: disable=g-explicit-length-test
dtype = tf.float32
elif len(dtype) == 1:
dtype = dtype.pop()
else:
raise TypeError(
'Distributions must have same dtype; found: {}.'.format(
self._distribution.dtype))
reparameterization_type = set(
tf.nest.flatten(self._distribution.reparameterization_type))
reparameterization_type = (
reparameterization_type.pop() if len(reparameterization_type) == 1
else reparameterization.NOT_REPARAMETERIZED)
super(Blockwise, self).__init__(
dtype=dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization_type,
parameters=parameters,
name=name)
@property
def distributions(self):
return self._distributions
@property
def experimental_is_sharded(self):
any_is_sharded = any(
d.experimental_is_sharded for d in self.distributions)
all_are_sharded = all(
d.experimental_is_sharded for d in self.distributions)
if any_is_sharded and not all_are_sharded:
raise ValueError('`Blockwise.distributions` sharding must match.')
return all_are_sharded
@classmethod
def _parameter_properties(cls, dtype, num_classes=None):
return dict(
distributions=parameter_properties.BatchedComponentProperties(
event_ndims=(
lambda self: [0 for _ in self.distributions])))
def _batch_shape(self):
return functools.reduce(tensorshape_util.merge_with,
tf.nest.flatten(self._cached_batch_shape),
tf.TensorShape(None))
def _batch_shape_tensor(self):
# We could get partial static-ness by swapping in values from
# `self.batch_shape`, however this would require multiple graph ops.
return tf.nest.flatten(self._cached_batch_shape_tensor)[0]
def _event_shape(self):
event_sizes = tf.nest.map_structure(tensorshape_util.num_elements,
self._distribution.event_shape)
if any(r is None for r in tf.nest.flatten(event_sizes)):
return tf.TensorShape([None])
return tf.TensorShape([sum(tf.nest.flatten(event_sizes))])
def _event_shape_tensor(self):
event_sizes = tf.nest.map_structure(tensorshape_util.num_elements,
self._distribution.event_shape)
if any(s is None for s in tf.nest.flatten(event_sizes)):
event_sizes = tf.nest.map_structure(
lambda static_size, shape_tensor: # pylint: disable=g-long-lambda
(tf.reduce_prod(shape_tensor)
if static_size is None else static_size),
event_sizes,
self._distribution.event_shape_tensor())
return tf.reduce_sum(tf.nest.flatten(event_sizes))[tf.newaxis]
def _flatten_and_concat_event(self, x):
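    # Flatten each component's event to a vector, then concatenate the pieces
    # along the last axis to form a single vector-valued event.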
def _reshape_part(part, event_shape):
part = tf.cast(part, self.dtype)
static_rank = tf.get_static_value(ps.rank_from_shape(event_shape))
if static_rank == 1:
return part
new_shape = ps.concat([
ps.shape(part)[:ps.size(ps.shape(part)) - ps.size(event_shape)], [-1]
],
axis=-1)
return tf.reshape(part, ps.cast(new_shape, tf.int32))
if all(
tensorshape_util.is_fully_defined(s)
for s in tf.nest.flatten(self._distribution.event_shape)):
x = tf.nest.map_structure(_reshape_part, x,
self._distribution.event_shape)
else:
x = tf.nest.map_structure(_reshape_part, x,
self._distribution.event_shape_tensor())
return tf.concat(tf.nest.flatten(x), axis=-1)
def _split_and_reshape_event(self, x):
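    # Split a flat vector event back into per-component pieces and restore each
    # component's dtype and event shape.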
event_tensors = self._distribution.event_shape_tensor()
splits = [
ps.maximum(1, ps.reduce_prod(s))
for s in tf.nest.flatten(event_tensors)
]
x = tf.nest.pack_sequence_as(event_tensors, tf.split(x, splits, axis=-1))
def _reshape_part(part, dtype, event_shape):
part = tf.cast(part, dtype)
static_rank = tf.get_static_value(ps.rank_from_shape(event_shape))
if static_rank == 1:
return part
new_shape = ps.concat([ps.shape(part)[:-1], event_shape], axis=-1)
return tf.reshape(part, ps.cast(new_shape, tf.int32))
if all(
tensorshape_util.is_fully_defined(s)
for s in tf.nest.flatten(self._distribution.event_shape)):
x = tf.nest.map_structure(_reshape_part, x, self._distribution.dtype,
self._distribution.event_shape)
else:
x = tf.nest.map_structure(_reshape_part, x, self._distribution.dtype,
self._distribution.event_shape_tensor())
return x
def _sample_n(self, n, seed=None):
return self._flatten_and_concat_event(
self._distribution.sample(n, seed=seed))
def _sample_and_log_prob(self, sample_shape, seed):
x, lp = self._distribution.experimental_sample_and_log_prob(
sample_shape, seed=seed)
return self._flatten_and_concat_event(x), lp
def _log_prob(self, x):
return self._distribution.log_prob(self._split_and_reshape_event(x))
def _entropy(self):
return self._distribution.entropy()
def _prob(self, x):
return self._distribution.prob(self._split_and_reshape_event(x))
def _mean(self):
return self._flatten_and_concat_event(self._distribution.mean())
def _default_event_space_bijector(self):
return self._distribution.experimental_default_event_space_bijector()
def _parameter_control_dependencies(self, is_init):
assertions = []
message = 'Distributions must have the same `batch_shape`'
if is_init:
batch_shapes = tf.nest.flatten(self._cached_batch_shape)
if all(tensorshape_util.is_fully_defined(b) for b in batch_shapes):
if batch_shapes[1:] != batch_shapes[:-1]:
raise ValueError('{}; found: {}.'.format(message, batch_shapes))
if not self.validate_args:
assert not assertions # Should never happen.
return []
if self.validate_args:
batch_shapes = self._cached_batch_shape
if not all(
tensorshape_util.is_fully_defined(s)
for s in tf.nest.flatten(batch_shapes)):
batch_shapes = tf.nest.map_structure(
lambda static_shape, shape_tensor: # pylint: disable=g-long-lambda
(static_shape if tensorshape_util.is_fully_defined(static_shape)
else shape_tensor), batch_shapes, self._cached_batch_shape_tensor)
batch_shapes = tf.nest.flatten(batch_shapes)
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
b1,
b2,
message='{}.'.format(message))
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
assertions.extend(
assert_util.assert_equal( # pylint: disable=g-complex-comprehension
tf.size(b1),
tf.size(b2),
message='{}.'.format(message))
for b1, b2 in zip(batch_shapes[1:], batch_shapes[:-1]))
return assertions
def _sample_control_dependencies(self, x):
assertions = []
message = 'Input must have at least one dimension.'
if tensorshape_util.rank(x.shape) is not None:
if tensorshape_util.rank(x.shape) == 0:
raise ValueError(message)
elif self.validate_args:
assertions.append(assert_util.assert_rank_at_least(x, 1, message=message))
return assertions
@kullback_leibler.RegisterKL(Blockwise, Blockwise)
def _kl_blockwise_blockwise(b0, b1, name=None):
"""Calculate the batched KL divergence KL(b0 || b1) with b0 and b1 Blockwise distributions.
Args:
b0: instance of a Blockwise distribution object.
b1: instance of a Blockwise distribution object.
name: (optional) Name to use for created operations. Default is
"kl_blockwise_blockwise".
Returns:
kl_blockwise_blockwise: `Tensor`. The batchwise KL(b0 || b1).
"""
return b0._distribution.kl_divergence(b1._distribution, name=name) # pylint: disable=protected-access
| apache-2.0 | 4,754,112,428,471,258,000 | 36.071429 | 104 | 0.648105 | false |
AlanD88/website | seadssite/urls.py | 1 | 1364 | from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth.views import login, logout, password_reset, password_reset_done, password_reset_confirm,\
password_reset_complete
from django.conf import settings
from django.conf.urls.static import static
from seadssite import views as v
admin.autodiscover()
urlpatterns = [
url(r'^login/$', login),
url(r'^logout/$', logout, {'next_page': '/'}),
url(r'^admin/', include(admin.site.urls)),
url(r'^accounts/password/reset/$', password_reset,
{'post_reset_redirect': '/accounts/password/reset/done/'}),
url(r'^accounts/password/reset/done/$', password_reset_done),
url(r'^accounts/password/reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
password_reset_confirm, {'post_reset_redirect': '/accounts/password/done/'}),
url(r'^accounts/password/done/$', password_reset_complete),
url(r'^$', v.IndexView.as_view()),
url(r'^dashboard/$', v.DashboardView),
url(r'^dashboard/[0-9]+/$', v.graph),
url(r'^dashboard/[0-9]+/timer/$', v.TimerView),
url(r'^dashboard/[0-9]+/appliances/$', v.DevicesView),
url(r'^register', v.RegisterView.as_view()),
]
| mit | -1,528,178,105,431,894,800 | 49.518519 | 114 | 0.58651 | false |
Oppium/BTCeGUI | BTCeGUI.py | 1 | 21599 | #! python3
import tkinter
import tkinter.ttk as ttk
import threading
import operator
import time
import copy
import os.path
import datetime
import queue
import BTCe
api = BTCe.API('BTCe.ini')
console = None
def format_float(value):
return ('{:0.8f}'.format(float(value)).rstrip('0').rstrip('.'))
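# e.g. format_float(1.23000000) -> '1.23' and format_float(2.0) -> '2'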
def validate_float(value):
if not value:
return True
try:
v = float(value)
return True
except ValueError:
return False
class CurrencyBox(ttk.Combobox):
"""Currency pair selection combo box."""
def __init__(self, parent):
ttk.Combobox.__init__(self, parent, state='readonly', justify='left', width=12)
self.set('Currency Pair')
def update(self, pairs):
"""Update available pairs."""
if not pairs:
return
values = [pair.upper().replace('_', '/') for pair in pairs]
values.sort()
self.config(values=values)
class TradeFrame(ttk.Frame):
"""Buy/sell box."""
def __init__(self, parent, type):
"""type: Buy | Sell"""
ttk.Frame.__init__(self, parent, borderwidth=10, relief='groove')
self.type = type
self.funds = {}
self.fee = 0
self.allchecked = tkinter.IntVar()
self.focus = 0
self.currvars = [tkinter.StringVar(value='0') for i in range(2)]
self.ratevar = tkinter.StringVar(value='0')
self.feevar = tkinter.StringVar(value='0')
self.ignoretrace = False
# init widgets
validatecommand = (self.register(validate_float), '%P')
self.currentries = [ttk.Entry(self, justify='right', validate='key', validatecommand=validatecommand, textvariable=self.currvars[i]) for i in range(2)]
self.currlabels = [ttk.Label(self, text='') for i in range(2)]
self.rateentry = ttk.Entry(self, justify='right', validate='key', validatecommand=validatecommand, textvariable=self.ratevar)
self.feeentry = ttk.Entry(self, justify='right', state='readonly', validate='key', validatecommand=validatecommand, textvariable=self.feevar)
self.feelabel = ttk.Label(self, text='')
self.orderbutton = ttk.Button(self, text='Place Order', state='disabled', command=self.placeorder)
# frame layout
ttk.Label(self, text=type).grid(column=0, row=0, sticky='w')
ttk.Label(self, text='Amount:').grid(column=0, row=1, sticky='w')
self.currentries[0].grid(column=1, row=1, sticky='nsew')
self.currlabels[0].grid(column=2, row=1, sticky='w')
ttk.Label(self, text='Value:').grid(column=0, row=2, sticky='w')
self.currentries[1].grid(column=1, row=2, sticky='nsew')
self.currlabels[1].grid(column=2, row=2, sticky='w')
ttk.Label(self, text='Rate:').grid(column=0, row=3, sticky='w')
self.rateentry.grid(column=1, row=3, sticky='nsew')
ttk.Label(self, text='Fee:').grid(column=0, row=4, sticky='w')
self.feelabel.grid(column=2, row=4, sticky='w')
self.feeentry.grid(column=1, row=4, sticky='nsew')
ttk.Checkbutton(self, text='All', variable=self.allchecked, command=self.update_amounts).grid(column=1, row=5, sticky='nw')
self.orderbutton.grid(column=1, row=5, sticky='ne')
self.grid_columnconfigure(0, weight=0, minsize=50)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(2, weight=0, minsize=50)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1, pad=5)
self.grid_rowconfigure(2, weight=1, pad=5)
self.grid_rowconfigure(3, weight=1, pad=5)
self.grid_rowconfigure(4, weight=1, pad=5)
self.grid_rowconfigure(5, weight=1)
# events
self.ratevar.trace('w', self.update_amounts)
for i in range(2):
self.currvars[i].trace('w', lambda name, index, op, focus=i: self.setfocus(focus))
def setfocus(self, focus, *args):
"""Change focus due to currency entry edit (using trace)."""
if not self.ignoretrace:
self.focus = focus
self.update_amounts(args)
def setrate(self, rate):
self.ratevar.set(format_float(rate))
def placeorder(self):
self.orderbutton.config(state='disabled', text='Placing Order...')
# get all trade data from current entries and labels
pair = '_'.join(self.currlabels[i].cget('text') for i in range(2)).lower()
type = self.type.lower()
rate = float(self.rateentry.get())
amount = float(self.currentries[0].get())
threading.Thread(target=self.master.placeorder, args=[pair, type, rate, amount]).start()
def update(self, pair, funds, fee, cantrade, ordering):
"""Update currency labels and amounts."""
if len(pair) == 2:
for i in range(2):
self.currlabels[i].config(text=pair[i])
self.feelabel.config(text=(pair[0] if self.type == 'Buy' else pair[1]))
# enable/disable order button
amount = self.currvars[0].get()
amount = float(0.0 if amount == '' else amount)
rate = self.ratevar.get()
rate = float(0.0 if rate == '' else rate)
if cantrade and len(pair) == 2 and amount > 0.0 and rate > 0.0 and not ordering:
self.orderbutton.config(state='normal', text='Place Order')
elif ordering:
self.orderbutton.config(state='disabled', text='Placing Order...')
else:
self.orderbutton.config(state='disabled', text='Place Order')
self.funds = funds
self.fee = float(fee) / 100.0
self.update_amounts()
def update_amounts(self, *args):
"""Update currency amounts."""
self.ignoretrace = True
# auto-fill focus in case of a checked All button
pair = [self.currlabels[i].cget('text') for i in range(2)]
if self.funds and self.allchecked.get() and pair[0] and pair[1]:
self.focus = 1 if self.type == 'Buy' else 0
balance = self.funds[pair[self.focus].lower()]
self.currvars[self.focus].set(format_float(balance))
# calculate non-focused entry
rate = self.ratevar.get()
rate = float(0.0 if rate == '' else rate)
op = operator.mul if self.focus == 0 else operator.truediv
nonfocus = 1 - self.focus
focus = self.currvars[self.focus].get()
focus = float(focus) if focus else 0.0
self.currvars[nonfocus].set(format_float(op(focus, rate) if rate != 0.0 else 0.0))
# calculate fee
feedval = self.currvars[0].get() if self.type == 'Buy' else self.currvars[1].get()
feedval = float(feedval) if feedval else 0.0
self.feevar.set(format_float(self.fee * feedval))
# (re)set readonly/normal entry states
state = 'readonly' if self.allchecked.get() else 'normal'
for currentry in self.currentries:
currentry.config(state=state)
self.ignoretrace = False
class ConsoleFrame(ttk.Frame):
"""Console."""
def __init__(self, parent):
ttk.Frame.__init__(self, parent, borderwidth=10, relief='groove')
self.queue = queue.Queue()
# init widgets
self.text = tkinter.Text(self, height=4, state='disabled')
vsb = ttk.Scrollbar(self, orient='vertical', command=self.text.yview)
self.text.config(yscrollcommand=vsb.set)
# frame layout
ttk.Label(self, text='Console').grid(column=0, row=0, sticky='w', columnspan=2)
self.text.grid(column=0, row=1, sticky='nsew')
vsb.grid(column=1, row=1, sticky='nse')
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=0)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1, pad=5)
def print(self, text):
self.queue.put(text)
def update(self):
atend = self.text.yview()[1] == 1.0
self.text.config(state='normal')
while not self.queue.empty():
self.text.insert('end', '{}: {}\n'.format(datetime.datetime.now().strftime('%H:%M:%S'), self.queue.get()))
self.text.config(state='disabled')
if atend:
self.text.see('end')
class Console:
def print(self, text):
print(text)
class OrderFrame(ttk.Frame):
"""Frame for showing open orders."""
status = ['Active', 'Filled', 'Partially Filled', 'Cancelled']
def __init__(self, parent):
ttk.Frame.__init__(self, parent, borderwidth=10, relief='groove')
# init widgets
self.table = ttk.Treeview(self, columns=['id', 'time', 'pair', 'type', 'rate', 'amount', 'value', 'status'], show='headings', height=3)
vsb = ttk.Scrollbar(self, orient='vertical', command=self.table.yview)
self.table.config(yscrollcommand=vsb.set)
self.orderbutton = ttk.Button(self, text='Cancel Order(s)', state='disabled', command=self.cancelorders)
# frame layout
ttk.Label(self, text='Open Orders').grid(column=0, row=0, sticky='w')
self.table.grid(column=0, row=1, sticky='nsew')
vsb.grid(column=1, row=1, sticky='ns')
self.orderbutton.grid(column=0, row=2, sticky='nse')
self.grid_columnconfigure(0, weight=1, pad=5)
self.grid_columnconfigure(1, weight=0, pad=5)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1, pad=5)
# table layout
self.table.heading('id', text='ID', anchor='w')
self.table.heading('time', text='Placed on', anchor='w')
self.table.heading('pair', text='Pair', anchor='w')
self.table.heading('type', text='Type', anchor='w')
self.table.heading('rate', text='Rate', anchor='w')
self.table.heading('amount', text='Amount', anchor='w')
self.table.heading('value', text='Value', anchor='w')
self.table.heading('status', text='Status', anchor='w')
self.table.column('id', width=15)
self.table.column('time', width=60)
self.table.column('pair', width=10)
self.table.column('type', width=20)
self.table.column('rate', width=30)
self.table.column('amount', width=60)
self.table.column('value', width=60)
self.table.column('status', width=40)
def cancelorders(self):
"""Cancel all selected orders."""
self.orderbutton.config(state='disabled', text='Cancelling...')
selects = self.table.selection()
selectids = []
for select in selects:
selectids.append(int(self.table.item(select)['values'][0]))
threading.Thread(target=self.master.cancelorders, args=[selectids]).start()
def update(self, orders, cantrade, cancelling):
"""Build order list and update table."""
# enable/disable order button
if cantrade and orders and not cancelling:
self.orderbutton.config(state='normal', text='Cancel Order(s)')
elif cancelling:
self.orderbutton.config(state='disabled', text='Cancelling...')
else:
self.orderbutton.config(state='disabled', text='Cancel Order(s)')
# store old selection keys
selects = self.table.selection()
selectids = []
for select in selects:
selectids.append(int(self.table.item(select)['values'][0]))
# delete old entries
self.table.delete(*self.table.get_children())
if not orders:
return
# insert new entries and select old keys
for id in orders:
order = orders[id]
time = datetime.datetime.utcfromtimestamp(order['timestamp_created'])
pair = order['pair'].upper().split('_')
rate = float(order['rate'])
amount = float(order['amount'])
value = format_float(rate * amount) + ' ' + pair[1]
amount = format_float(amount) + ' ' + pair[0]
status = OrderFrame.status[order['status']]
values = [id, time, '/'.join(pair), order['type'].capitalize(), rate, amount, value, status]
item = self.table.insert('', 'end', values=values)
if int(id) in selectids:
self.table.selection_add(item)
class DepthFrame(ttk.Frame):
"""Treeview and components for a list of offers."""
def __init__(self, parent, type):
"""type: Ask | Bid"""
ttk.Frame.__init__(self, parent, borderwidth=10, relief='groove')
self.type = type
# init widgets
self.table = ttk.Treeview(self, columns=['rate', 'curr0', 'curr1'], show='headings')
vsb = ttk.Scrollbar(self, orient='vertical', command=self.table.yview)
self.table.configure(yscrollcommand = vsb.set)
# frame layout
ttk.Label(self, text=type).grid(column=0, row=0, sticky='w')
self.table.grid(column=0, row=1, sticky='nsew')
vsb.grid(column=1, row=1, sticky='ns')
self.grid_columnconfigure(0, weight=1, pad=5)
self.grid_columnconfigure(1, weight=0, pad=5)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1, pad=5)
# table layout
self.table.column('rate', width=60)
self.table.column('curr0', width=80)
self.table.column('curr1', width=80)
def update(self, depth, pair):
"""Clear and rebuild the depth table."""
if not depth or len(pair) != 2:
return
# update headings
self.table.heading('rate', text='Rate', anchor='w')
self.table.heading('curr0', text=pair[0], anchor='w')
self.table.heading('curr1', text=pair[1], anchor='w')
# store old selection keys
selects = self.table.selection()
selectrates = []
for select in selects:
selectrates.append(float(self.table.item(select)['values'][0]))
# delete old entries
self.table.delete(*self.table.get_children())
# insert new entries and select old keys
orders = depth[self.type.lower() + 's']
for order in orders:
values = [float(order[0]), float(order[1]), format_float(float(order[0]) * float(order[1]))]
item = self.table.insert('', 'end', values=values)
if values[0] in selectrates:
self.table.selection_add(item)
class BalanceFrame(ttk.Frame):
"""Tree view for personal balances."""
def __init__(self, parent):
ttk.Frame.__init__(self, parent, borderwidth=10, relief='groove')
# init widgets
self.table = ttk.Treeview(self, columns = ['curr', 'funds'], show='headings')
vsb = ttk.Scrollbar(self, orient='vertical', command=self.table.yview)
self.table.configure(yscrollcommand = vsb.set)
# frame layout
ttk.Label(self, text='Funds').grid(column=0, row=0, columnspan=2, sticky='w')
self.table.grid(column=0, row=1, sticky='nsew')
vsb.grid(column=1, row=1, sticky='ns')
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=1)
# table layout
self.table.column('curr', width=60)
self.table.column('funds', width=100)
self.table.heading('curr', text='Currency', anchor='w')
self.table.heading('funds', text='Balance', anchor='w')
def update(self, funds):
"""Clear and rebuild the balance table."""
if not funds:
return
# store old selection keys
selects = self.table.selection()
selectcurrs = []
for select in selects:
selectcurrs.append(self.table.item(select)['values'][0])
# delete old entries
for entry in self.table.get_children():
self.table.delete(entry)
# insert new sorted entries and select old keys
funds = list(funds.items())
funds.sort()
for fund in funds:
curr = fund[0].upper()
item = self.table.insert('', 'end', values=[curr, format_float(fund[1])])
if curr in selectcurrs:
self.table.selection_add(item)
class Main(tkinter.Tk):
"""Main frame."""
def __init__(self):
tkinter.Tk.__init__(self)
self.title('BTCeGUI')
self.lockdata = threading.Lock()
self.locknonce = threading.Lock()
self.info = {}
self.depth = {}
self.userinfo = {}
self.orders={}
self.pair = {}
self.run = True
self.buying = False
self.selling = False
self.cancelling = False
# layout
self.geometry('800x800+100+100')
self.currencybox = CurrencyBox(self)
self.currencybox.grid(column=0, row=0, stick='nw')
self.buybox = TradeFrame(self, 'Buy')
self.buybox.grid(column=0, row=1, sticky='nsew', padx=20, pady=5)
self.sellbox = TradeFrame(self, 'Sell')
self.sellbox.grid(column=1, row=1, sticky='nsew', padx=20, pady=5)
self.askframe = DepthFrame(self, 'Ask')
self.askframe.grid(column=0, row=2, sticky='nsew', padx=5, pady=5)
self.bidframe = DepthFrame(self, 'Bid')
self.bidframe.grid(column=1, row=2, sticky='nsew', padx=5, pady=5)
self.balanceframe = BalanceFrame(self)
self.balanceframe.grid(column=2, row=2, sticky='nsew', padx=5, pady=5)
self.orderframe = OrderFrame(self)
self.orderframe.grid(column=0, row=3, sticky='nsew', padx=5, pady=5, columnspan=3)
self.console = ConsoleFrame(self)
self.console.grid(column=0, row=4, sticky='nsew', padx=5, pady=5, columnspan=3)
global console
console = self.console
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(1, weight=1)
self.grid_columnconfigure(2, weight=0)
self.grid_rowconfigure(0, weight=0)
self.grid_rowconfigure(1, weight=0)
self.grid_rowconfigure(2, weight=1)
self.grid_rowconfigure(3, weight=0)
self.grid_rowconfigure(4, weight=0)
# events
self.askframe.table.bind('<Double-1>', lambda event: self.ondouble_depth(self.askframe.table, self.buybox, event))
self.bidframe.table.bind('<Double-1>', lambda event: self.ondouble_depth(self.bidframe.table, self.sellbox, event))
# api threads
if api.secret == b'copy API secret here' or api.key == b'copy API key here':
console.print('No API secret/key found. Only public data available.')
else:
threading.Thread(target=self.update_userinfo_loop).start()
threading.Thread(target=self.update_orders_loop).start()
threading.Thread(target=self.update_depth_loop).start()
threading.Thread(target=self.update_info_loop).start()
self.sync()
def exit(self):
"""Stop running threads."""
self.run = False
# redirect console prints to the normal console
global console
console = Console()
def ondouble_depth(self, table, box, event):
"""Send double-clicked rate to trade box."""
item = table.identify('item', event.x, event.y)
if (item):
box.setrate(table.item(item, 'values')[0])
def sync(self):
"""Sync GUI to states."""
self.lockdata.acquire()
userinfo = copy.copy(self.userinfo)
orders = copy.copy(self.orders)
info = copy.copy(self.info)
depth = copy.copy(self.depth)
self.pair = copy.copy(self.currencybox.get().split('/'))
self.lockdata.release()
pairs = None
if info:
pairs = info.get('pairs')
self.currencybox.update(pairs)
funds = None
if userinfo:
funds = userinfo.get('funds')
# update depth tables
fee = 0
pair = []
if (depth):
pair = next(iter(depth))
if pairs:
fee = pairs[pair]['fee']
depth = depth[pair]
pair = pair.upper().split('_')
cantrade = True if userinfo and userinfo['rights']['trade'] == 1 else False
self.askframe.update(depth, pair)
self.bidframe.update(depth, pair)
self.balanceframe.update(funds)
self.buybox.update(pair, funds, fee, cantrade, self.buying)
self.sellbox.update(pair, funds, fee, cantrade, self.selling)
self.orderframe.update(orders, cantrade, self.cancelling)
self.console.update()
self.after(100, self.sync)
def update_depth_loop(self):
while self.run:
self.update_depth()
time.sleep(1.0)
def update_depth(self):
# if currency pair is valid get depth table
self.lockdata.acquire()
pair = copy.copy(self.pair)
self.lockdata.release()
depth = {}
if len(pair) == 2:
depth = BTCe.API.depth('_'.join(pair).lower())
if depth and 'success' in depth.keys():
if depth['success'] == 1:
depth = depth['return']
else:
console.print('[WARNING] Error requesting depth: {}'.format(depth['error']))
depth = None
self.lockdata.acquire()
self.depth = depth
self.lockdata.release()
def update_userinfo_loop(self):
acc = 0.0
while self.run:
self.update_userinfo()
while acc < 5.0 and self.run:
time.sleep(0.5)
acc += 0.5
acc = 0.0
def update_userinfo(self):
self.locknonce.acquire()
userinfo = api.getinfo()
self.locknonce.release()
if userinfo and 'success' in userinfo.keys():
if userinfo['success'] == 1:
userinfo = userinfo['return']
else:
console.print('[WARNING] Error requesting user info: {}'.format(userinfo['error']))
userinfo = None
self.lockdata.acquire()
self.userinfo = userinfo
self.lockdata.release()
def update_orders_loop(self):
acc = 0.0
while self.run:
self.update_orders()
while acc < 10.0 and self.run:
time.sleep(0.5)
acc += 0.5
acc = 0.0
def update_orders(self):
self.locknonce.acquire()
orders = api.activeorders()
self.locknonce.release()
if orders and 'success' in orders.keys():
if orders['success'] == 1:
orders = orders['return']
else:
if orders['error'] != 'no orders':
console.print('[WARNING] Error requesting open orders: {}'.format(orders['error']))
orders = None
self.lockdata.acquire()
self.orders = orders
self.lockdata.release()
def update_info_loop(self):
acc = 0.0
while self.run:
self.update_info()
while acc < 7.0 and self.run:
time.sleep(0.5)
acc += 0.5
acc = 0.0
def update_info(self):
acc = 0.0
while self.run:
info = BTCe.API.info()
if info and 'success' in info.keys():
if info['success'] == 1:
info = info['return']
else:
console.print('[WARNING] Error requesting public info: {}'.format(info['error']))
info = None
self.lockdata.acquire()
self.info = info
self.lockdata.release()
while acc < 30.0 and self.run:
time.sleep(0.5)
acc += 0.5
acc = 0.0
def placeorder(self, pair, type, rate, amount):
console.print('Placing order {}.'.format([pair, type, rate, amount]))
if type == 'buy':
self.buying = True
elif type == 'sell':
self.selling = True
else:
return
self.locknonce.acquire()
response = api.trade(pair, type, rate, amount)
self.locknonce.release()
if response and 'success' in response.keys():
if response['success'] == 1:
console.print('Order placed successfully.')
else:
console.print('[WARNING] Error placing order: {}'.format(response['error']))
self.update_orders()
self.update_userinfo()
if type == 'buy':
self.buying = False
elif type == 'sell':
self.selling = False
def cancelorders(self, ids):
self.cancelling = True
for id in ids:
console.print('Cancel order {}.'.format(id))
self.locknonce.acquire()
response = api.cancelorder(id)
self.locknonce.release()
if response and 'success' in response.keys():
if response['success'] == 1:
console.print('Order cancelled successfully.')
else:
console.print('[WARNING] Error cancelling order: {}'.format(response['error']))
self.update_orders()
self.update_userinfo()
self.cancelling = False
root = Main()
root.mainloop()
root.exit() | mit | -531,146,083,927,897,600 | 31.53012 | 153 | 0.678504 | false |
swharden/SWHLab | doc/uses/EPSCs-and-IPSCs/smooth histogram method/05.py | 1 | 1812 | """
MOST OF THIS CODE IS NOT USED
ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE
"""
import os
import sys
# in case our module isn't installed (running from this folder)
if not os.path.abspath('../../../') in sys.path:
sys.path.append('../../../') # helps spyder get docs
import swhlab
import swhlab.common as cm
import matplotlib.pyplot as plt
import numpy as np
import warnings # suppress VisibleDeprecationWarning warning
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
def analyzeSweep(abf,plotToo=True,color=None,label=None):
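    # Histograms the baseline-subtracted sweep (skipping the first 0.5 s) and plots
    # both the raw density and a gaussian-smoothed version for visual comparison.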
Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:]
AV,SD=np.average(Y),np.std(Y)
dev=5 # number of stdevs from the avg to set the range
    R1,R2=[AV-dev*SD,AV+dev*SD]
nBins=1000
hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True)
    histSmooth=abf.convolve(hist,cm.kernel_gaussian(int(nBins/5)))
if plotToo:
plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10)
plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label)
return
if __name__=="__main__":
#abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf"
abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf"
abf=swhlab.ABF(abfFile)
# prepare figure
plt.figure(figsize=(10,10))
plt.grid()
plt.title("smart baseline value distribution")
plt.xlabel(abf.units2)
plt.ylabel("normalized density")
# do the analysis
abf.kernel=abf.kernel_gaussian(sizeMS=500)
abf.setsweep(175)
analyzeSweep(abf,color='b',label="baseline")
abf.setsweep(200)
analyzeSweep(abf,color='g',label="TGOT")
abf.setsweep(375)
analyzeSweep(abf,color='y',label="washout")
# show figure
plt.legend()
plt.margins(0,.1)
plt.show()
print("DONE")
| mit | -1,441,784,876,486,513,400 | 28.225806 | 79 | 0.683223 | false |
enalisnick/stick-breaking_dgms | models/variational_coders/decoders.py | 1 | 3064 | import numpy as np
import theano
import theano.tensor as T
### Regular Decoder
class Decoder(object):
def __init__(self, rng, input, latent_size, out_size, activation, W_z = None, b = None):
self.input = input
self.activation = activation
# setup the params
if W_z is None:
W_values = np.asarray(0.01 * rng.standard_normal(size=(latent_size, out_size)), dtype=theano.config.floatX)
W_z = theano.shared(value=W_values, name='W_hid_z')
if b is None:
b_values = np.zeros((out_size,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b')
self.W_z = W_z
self.b = b
self.pre_act_out = T.dot(self.input, self.W_z) + self.b
self.output = self.activation(self.pre_act_out)
# gather parameters
self.params = [self.W_z, self.b]
### Supervised Decoder
class Supervised_Decoder(Decoder):
def __init__(self, rng, input, labels, latent_size, label_size, out_size, activation, W_z = None, W_y = None, b = None):
self.labels = labels
# init parent class
super(Supervised_Decoder, self).__init__(rng=rng, input=input, latent_size=latent_size, out_size=out_size, activation=activation, W_z=W_z, b=b)
# setup the params
if W_y is None:
W_values = np.asarray(0.01 * rng.standard_normal(size=(label_size, out_size)), dtype=theano.config.floatX)
W_y = theano.shared(value=W_values, name='W_y')
self.W_y = W_y
self.output = self.activation( self.pre_act_out + T.dot(self.labels, self.W_y) )
# gather parameters
self.params += [self.W_y]
### Marginalized Decoder (for semi-supervised model)
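# Decodes the latent code once for every possible label: an identity matrix of
# one-hot labels is tiled across the batch, so the label can later be summed
# (marginalized) out for unlabeled examples.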
class Marginalized_Decoder(Decoder):
def __init__(self, rng, input, batch_size, latent_size, label_size, out_size, activation, W_z, W_y, b):
# init parent class
super(Marginalized_Decoder, self).__init__(rng=rng, input=input, latent_size=latent_size, out_size=out_size, activation=activation, W_z=W_z, b=b)
# setup the params
self.W_y = W_y
# compute marginalized outputs
labels_tensor = T.extra_ops.repeat( T.shape_padaxis(T.eye(n=label_size, m=label_size), axis=0), repeats=batch_size, axis=0)
self.output = self.activation(T.extra_ops.repeat(T.shape_padaxis(T.dot(self.input, self.W_z), axis=1), repeats=label_size, axis=1) + T.dot(labels_tensor, self.W_y) + self.b)
# no params here since we'll grab them from the supervised decoder
| mit | 8,828,024,413,954,331,000 | 49.229508 | 181 | 0.520235 | false |
IBMPredictiveAnalytics/SPSSINC_RECODEEX | src/SPSSINC_RECODEEX.py | 1 | 20980 |
#/***********************************************************************
# * Licensed Materials - Property of IBM
# *
# * IBM SPSS Products: Statistics Common
# *
# * (C) Copyright IBM Corp. 1989, 2020
# *
# * US Government Users Restricted Rights - Use, duplication or disclosure
# * restricted by GSA ADP Schedule Contract with IBM Corp.
# ************************************************************************/
import spss, spssaux
from spssaux import _smartquote
from spssaux import u
import spss, spssaux
from extension import Template, Syntax, processcmd
import locale, os, re, copy, codecs, string
__author__ = 'spss, JKP'
__version__= '1.1.0'
# history
# 04-jun-2010 original version
# 11-nov-2014 Allow input value labels in generated value labels
helptext = """SPSSINC RECODEEX
inputvarlist = outputvarlist
/RECODES "(input value(s) = recode) ... (else={COPY*|SYSMIS})"
[/OPTIONS [STRINGSIZE=n] [VALUELABELS={YES*|NO}] [COPYVARIABLELABELS={YES*|NO}]
[SUFFIX="value"] [PREFIX="value"]]
Recode variables into other variables with optional variable and value label generation.
Examples:
RECODEEX fatherage motherage = fatheragerc motheragerc
/RECODES "(LO THRU 50=1) (51 thru 75=2) (ELSE=COPY)"
/OPTIONS SUFFIX="rc".
RECODEEX bdate = bdaterc
/RECODES "(LO THRU 1950-12-31=1)(1951-01-01 THRU 1990-12-31=2)".
RECODEEX duration = durationrc
/RECODES "(LO THRU 10 12:00:00=1)(10 12:00:00 THRU HIGH=2)".
This command extends the built-in RECODE command in several ways.
- Date or time constants are used for variables of these types
- Value labels can be automatically generated for the outputs
- Variable labels can be copied
- Variable types can be changed for the output variables.
inputvarlist specifies the variables to be recoded. They must all have the same type
(numeric, string, a date format, or a time format).
MOYR, WKYR and WKDAY formats are not supported.
outputvarlist specifies an equal number of variables for the results. If STRINGSIZE is specified,
the output variables will all be made strings of that length. The type of any existing variables will be
changed to match if necessary. If STRINGSIZE is not specified, no variable types
will be changed, and any new variables will be numeric.
A variable cannot be used as both an input and output variable.
Recode specifications have the same general form as for the RECODE command:
(input-values = output-value)
See the RECODE command for details.
THE ENTIRE RECODE SPECIFICATION must be enclosed in quotes.
Input or output string values must also be quoted.
If the variables have a date format, recode values have the form yyyy-mm-dd.
If the values have a time format, recode values have the form hh:mm, hh:mm:ss.ss
or these forms preceded by days, e.g., 10 08:03.
VALUELABELS specifies whether value labels should be created for the output values.
They will consist of the input values that are mapped to each output with two caveats:
An else specification does not contribute to the labels.
If an input value is mapped to more than one output value, it will appear in each corresponding
value label even though the RECODE command processes from left to right.
If COPYVARIABLELABELS=YES, the variable label, if any, of each input variable
will be copied to the output variable. PREFIX and SUFFIX can specify text to be
prepended or appended to the label with a separating blank.
/HELP displays this help and does nothing else.
"""
# MOYR, WKYR and WKDAY formats are not supported
datefmts = set(["DATE", "ADATE", "EDATE", "JDATE", "SDATE", "QYR", "DATETIME"])
timefmts = set(["TIME", "DTIME"])
numfmts = set(["F", "N", "E"])
strfmts = set(["A", "AHEX"])
def Run(args):
"""Execute the SPSSINC RECODEEX extension command"""
# debugging
# makes debug apply only to the current thread
#try:
#import wingdbstub
#if wingdbstub.debugger != None:
#import time
#wingdbstub.debugger.StopDebug()
#time.sleep(2)
#wingdbstub.debugger.StartDebug()
#import thread
#wingdbstub.debugger.SetDebugThreads({thread.get_ident(): 1}, default_policy=0)
#except:
#pass
args = args[list(args.keys())[0]]
oobj = Syntax([
Template("", subc="", ktype="literal", var="varlist", islist=True),
Template("", subc="RECODES", ktype="literal", var="recodes", islist=True),
Template("STRINGSIZE", subc="OPTIONS", ktype="int", var="stringsize", vallist=[1, 32767]),
Template("VALUELABELS", subc="OPTIONS", ktype="bool", var="makevaluelabels"),
Template("USEINPUTVALLABELS", subc="OPTIONS", ktype="bool",
var="useinputvallabels"),
Template("COPYVARIABLELABELS", subc="OPTIONS", ktype="bool", var="copyvariablelabels"),
Template("SUFFIX", subc="OPTIONS", ktype="literal", var="suffix"),
Template("PREFIX", subc="OPTIONS", ktype="literal", var="prefix"),
Template("HELP", subc="", ktype="bool")])
#enable localization
global _
try:
_("---")
except:
def _(msg):
return msg
# A HELP subcommand overrides all else
if "HELP" in args:
#print helptext
helper()
else:
processcmd(oobj, args, recode)
def helper():
"""open html help in default browser window
The location is computed from the current module name"""
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = "file://" + path + os.path.sep + \
"markdown.html"
# webbrowser.open seems not to work well
browser = webbrowser.get()
if not browser.open_new(helpspec):
print(("Help file not found:" + helpspec))
try: #override
from extension import helper
except:
pass
def recode(varlist, recodes, stringsize=None, makevaluelabels=True, copyvariablelabels=True, useinputvallabels=False,
suffix="", prefix=""):
vardict = spssaux.VariableDict(caseless=True)
isutf8 = spss.PyInvokeSpss.IsUTF8mode()
ecutf8 = codecs.getencoder("utf_8")
inputlist, outputlist, vartype = parsevarlist(varlist, vardict)
if len(recodes) > 1:
raise ValueError(_("The RECODES subcommand must consist of a single, quoted specification"))
# recodespec is a list of textual recode syntax, one item per value set
# vldefs is a dictionary with keys the target values
# and values the input codes
# inputdict is a dictionary with keys the target values
# and values a list of the input codes
recodespec, vldefs, inputdict = parserecodes(recodes[0], vartype, stringsize)
valuelabelmessage = checklabelconsistency(inputlist, vardict)
if stringsize:
alter = []
create = []
for v in outputlist:
try:
if vardict[v].VariableType != stringsize:
alter.append(v)
except:
create.append(v)
if create:
spss.Submit("STRING %s (A%s)." % (" ".join(create), stringsize))
if alter:
spss.Submit("ALTER TYPE %s (A%s)" % (" ".join(alter), stringsize))
spss.Submit("""RECODE %s %s INTO %s.""" % (" ".join(inputlist), " ".join(recodespec), " ".join(outputlist)))
# generate variable labels if requested
if copyvariablelabels:
if prefix and not prefix.endswith(" "):
prefix = prefix + " "
if suffix and not suffix.startswith(" "):
suffix = " " + suffix
for vin, vout in zip(inputlist, outputlist):
spss.Submit("""VARIABLE LABEL %s %s.""" % \
(vout, _smartquote(prefix + vardict[vin].VariableLabel + suffix, True)))
# generate value labels if requested
# all values for given target are merged but else clause is omitted
# VALUE LABELS syntax quotes values regardless of variable type
# vldefs is a dictionary with keys of the output values and
# values a string listing the input values. If copying value labels
# the first input variable is used as the source.
if makevaluelabels:
if useinputvallabels:
vldefs = makevallabels(vldefs, inputdict,
vardict[inputlist[0]].ValueLabels, isutf8, ecutf8)
# ensure that copy as target does not generate a value label
copyset = set()
for target in vldefs:
if target.lower() == "copy":
copyset.add(target)
for c in copyset:
del(vldefs[c])
#spss.Submit(r"""VALUE LABELS %s %s.""" % (" ".join(outputlist), \
#" ".join([_smartquote(val, vartype == 2) + " " + _smartquote(label, True) for val, label in vldefs.items()])))
spss.Submit(r"""VALUE LABELS %s %s.""" % (" ".join(outputlist), \
" ".join([val + " " + _smartquote(label, True) for val, label in list(vldefs.items())])))
if valuelabelmessage:
print(valuelabelmessage)
def makevallabels(vldefs, inputlabels, valuelabels,
isutf8, ecutf8):
"""convert values to value labels where available up to length limit
vldefs is a list of target values
value is string listing input values
valuelabels is a dictionary of labels
inputlabels is a dictionary of input values to the recode
The input values are a list preceded by the join sequence
"""
for target in vldefs:
labels = [valuelabels.get(val, val) for val in inputlabels[target]]
labels = ", ".join(labels)
vldefs[target] = (truncatestring(labels, isutf8, 120, ecutf8))
return vldefs
def truncatestring(name, unicodemode, maxlength, ecutf8):
"""Return a name truncated to no more than maxlength BYTES.
name is the candidate string
unicodemode identifies whether in Unicode mode or not
maxlength is the maximum byte count allowed. It must be a positive integer
ecutf8 is a utf-8 codec
If name is a (code page) string, truncation is straightforward. If it is Unicode utf-8,
the utf-8 byte representation must be used to figure this out but still truncate on a character
boundary."""
if not unicodemode:
if len(name) > maxlength:
name = name[:maxlength-3] + "..."
else:
newname = []
nnlen = 0
# In Unicode mode, length must be calculated in terms of utf-8 bytes
for c in name:
c8 = ecutf8(c)[0] # one character in utf-8
nnlen += len(c8)
if nnlen <= maxlength:
newname.append(c)
else:
newname = newname[:-4]
newname.append("...")
break
name = "".join(newname)
return name
def parsevarlist(varlist, vardict):
"""return input variable list, output variable list, and basic type
varlist is a list whose combined elements have the "var var var = var var var"
vardict is a variable dictionary
In return, type is coded as
1 = numeric
2 = string
3 = date
4 = time
type constraints are enforced here but no attempt is made to check output variable types"""
try:
sepindex = varlist.index("=")
inputv = varlist[:sepindex]
outputv = varlist[sepindex+1:]
except:
raise ValueError(_("Variable list must have the form inputvars = outputvars"))
if len(inputv) != len(outputv):
raise ValueError(_("The number of input and output variables differ"))
if set(inputv).intersection(set(outputv)):
raise ValueError(_("Input and Output variable lists must be distinct"))
fmts = [vardict[v].VariableFormat.rstrip("0123456789.") for v in inputv]
    fmtypes = [f in numfmts and 1 or f in strfmts and 2 or f in datefmts and 3\
        or f in timefmts and 4 or 0 for f in fmts]
if len(set(fmtypes)) > 1:
raise ValueError(_("All input variables must have the same basic type"))
if fmtypes[0] == 0:
raise ValueError(_("Unsupported format type: %s") % fmts[0])
return inputv, outputv, fmtypes[0]
def parserecodes(recodes, vartype, stringsize):
"""Return list of recode specs for values
recodes is the text of the RECODES subcommand. Expected form is
(input values = outputvalue) ...
where input values could be a list of values, including THRU , HIGH, HIGHEST etc
For dates, expected form is yyyy-mm-dd
For times, expected form is hh:mm:ss.fraction where all parts after hh are optional
Else spec is returned as is (RECODE will check it)
vartype is 1 - 4 as above"""
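    # Example (hypothetical spec): parserecodes('(LO THRU 50=1) (51 THRU 75=2)', 1, None)
    # would return (['(LO THRU 50=1)', '(51 THRU 75=2)'],
    #               {'1': 'LO THRU 50', '2': '51 THRU 75'},
    #               {'1': ['LO THRU 50'], '2': ['51 THRU 75']})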
# first, process out all ( and ) characters embedded inside a literal (only matters for string variables)
recodes = protected(recodes)
allmappings = re.findall(r"\(.+?\)", recodes) # find all parenthesized mappings
if not allmappings:
raise ValueError(_("The recode specification did not include any parenthesized specifications."))
recodespec = []
recodetargets = {}
inputlist = {}
for item in allmappings:
itemcopy = copy.copy(item)
if vartype == 3:
item, count = re.subn(r"\d+-\d+-\d+(\s+\d+:\d+(:[.0-9]+)*)*", yrmodamo, item) # convert date or date/time expressions
if count == 0:
raise ValueError(_("A date variable recode specification did not include a date value: %s") % item)
elif vartype == 2:
item = re.sub(r"\02", "(", item)
item = re.sub(r"\03", ")", item)
itemcopy = copy.copy(item)
elif vartype == 4:
item, count = re.subn(r"(\d+\s+)*\d+:\d+(:[0-9.]+)*", timemo, item)
if count == 0:
raise ValueError(_("A time variable recode specification did not include a time value: %s") % item)
recodespec.append(item)
parts = mapdef(itemcopy) # get input, target for recode target value
if not parts[0] == "else":
try:
recodetargets[parts[1]] = recodetargets[parts[1]] + "," + parts[0]
except: # new target value
recodetargets[parts[1]] = parts[0]
inputlist[parts[1]] = splitter(parts[0])
return recodespec, recodetargets, inputlist
# characters legal in recode spec keywords
# string.letters is affected by local setting so need to subset
letters = string.ascii_letters[:52]
def splitter(pplus):
"""split string according to SPSS Statistics rules and return as list
pplus is the string to split
If the recode spec contains RECODE keywords,
return the expression as a list of length 1"""
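    # e.g. splitter('1 2 "a b"') -> ['1', '2', 'a b'], while splitter('LO THRU 50')
    # contains unquoted keywords and so comes back whole as ['LO THRU 50']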
quo = None
pplus = list(pplus +" ")
i = 0
pplusout = []
recodekeyword = False
while i < len(pplus) -1:
ch = pplus[i]
if ch == quo:
if pplus[i+1] == quo:
i+=1
pplusout.append(ch)
else:
quo = None
else:
if ch in ['"', "'"]:
quo = ch
else:
pplusout.append(ch)
if quo and ch == " ":
#pplus[i] = "\a"
pplusout[-1] = "\a"
if not quo and ch in letters: # plain alphabetics
recodekeyword = True
i += 1
inputs = "".join(pplusout).split()
inputs = [item.replace("\a", " ") for item in inputs]
if recodekeyword:
inputs = [" ".join(inputs)] # Can't find a label for this
return inputs
def checklabelconsistency(varnames, vardict):
"""Print warning message if value labels for varnames are inconsistent
varnames is a list of variable names to check
vardict is a VariableDict object"""
if len(varnames) <= 1:
return
clashes = []
for i,var in enumerate(varnames):
vallabels = set([(k.lower(), v) for k, v in list(vardict[var].ValueLabels.items())])
if i == 0:
refset = copy.copy(vallabels)
else:
if refset and not vallabels.issubset(refset):
clashes.append(var)
if clashes:
return _("""Warning: The following variables have value labels sets inconsistent with the
first variable being recoded (%s). The coding may be inconsistent.
If generating labels from the input value labels, the labels from
the first input variable are used to label the output values
for all the output variables.
%s""") % (varnames[0], " ".join(clashes))
else:
return None
def mapdef(spec):
"""return target value and inputs as a duple
spec has form (inputs = target)"""
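    # e.g. mapdef("('x'='y')") -> ("'x'", "'y'"): the split happens on the '='
    # that lies outside any quoted literal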
# can't simply look for = not surrounded by quotes because ('x'='y') is legit :-(
litranges = []
for ch in ["'", '"']: #single quote or double quote
pat = """%(ch)s[^%(ch)s]*%(ch)s""" % locals() # quote non-quote-of-same-type* quote
moit = re.finditer(pat, spec)
# for each literal found, replace ( and )
for m in moit:
litranges.append(m.span())
for i in range(len(spec), 0, -1):
pos = i-1
if spec[pos] == "=":
inlit = False
for r in litranges:
if r[0] <= pos < r[1]:
inlit = True
break
if inlit:
continue
return (spec[1:pos].strip(), spec[pos+1:-1].strip())
else:
raise ValueError(_("Invalid recode specification: %s") % spec)
# break expression into input and target separated by unquoted =
###return (parts[0][1:].strip(), parts[1][:-1].strip())
def protected(astr):
"""Return a string where all ( or ) characters embedded in quotes are converted to x02 or x03
astr is the text to search"""
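    # e.g. protected('("(a)"=1)') -> '("\x02a\x03"=1)': parentheses inside the
    # quoted literal are masked so they cannot be mistaken for mapping delimiters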
# astr will always be pretty short in practice
for ch in ["'", '"']: #single quote or double quote
pat = """%(ch)s[^%(ch)s]*%(ch)s""" % locals() # quote non-quote-of-same-type* quote
moit = re.finditer(pat, astr)
# for each literal found, replace ( and )
for m in moit:
st, end = m.start(), m.end()
astr = astr[:st] + re.sub(r"\(", "\x02", astr[st:end]) + astr[end:]
astr = astr[:st] + re.sub(r"\)", "\x03", astr[st:end]) + astr[end:]
return astr
def yrmodamo(mo):
"""convert a date expression with an optional time portion to a number for recode
mo is the match object"""
# input like
#2005-03-31 or
#2005-03-31 8:30 or
#2005-03-31 8:30:05.2
parts = mo.group().split() # break up date and time portions on white space
date = parts[0].split("-")
timeseconds = 0.
dateval = yrmoda(date)
# time portion, if any. hours and minutes are required; seconds are optional.
if len(parts) ==2:
timeparts = parts[1].split(":") # either 2 or 3 parts
timeparts = [float(t) for t in timeparts]
        timeseconds = (timeparts[0] * 60. + timeparts[1]) * 60.
if len(timeparts) == 3:
timeseconds = timeseconds + timeparts[2]
return str(dateval + timeseconds)
def timemo(mo):
"""convert a time expression to a number for recode
mo is the match object"""
# input like
#d hh:mm
#d hh:mm:ss.ss
#hh:mm
#hh:mm:ss.ss
parts = mo.group().split() # days and time
# time portion
t = [float(v) for v in parts[-1].split(":")]
t0 = (t[0] * 60. + t[1]) * 60. # hours and minutes
if len(t) == 3:
t0 = t0 + t[2] # and seconds
if len(parts) == 2: # day portion?
t0 = t0 + float(parts[0]) * 86400.
return str(t0)
def _smartquote(s, quoteit=True, qchar='"'):
""" smartquote a string so that internal quotes are distinguished from surrounding
quotes for SPSS and return that string with the surrounding quotes. qchar is the
character to use for surrounding quotes.
if quoteit is True, s is a string that needs quoting; otherwise it does not
"""
if quoteit:
return qchar + s.replace(qchar, qchar+qchar) + qchar
else:
return s
def yrmoda(ymd):
"""compute SPSS internal date value from four digit year, month, and day.
ymd is a list of numbers in that order. The parts will be truncated to integers.
The result is equivalent to the SPSS subroutine yrmoda result converted to seconds"""
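    # Example: yrmoda(["1582", "10", "15"]) returns 86400, i.e. one day (in seconds)
    # after 1582-10-14, the zero point of this formula.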
if len(ymd) != 3:
raise ValueError("date specification must have the form yyyy-mm-dd")
year = int(ymd[0])
month = int(ymd[1])
day = int(ymd[2])
if year < 1582 or month < 1 or month > 13 or day <0 or day > 31:
raise ValueError((_("Invalid date value: %d %d %d")) % (year, month, day))
yrmo = year * 365 + (year+3)//4 - (year+99)//100 + (year + 399)//400 \
+ 3055 *(month+2)//100 - 578192
if month > 2:
yrmo-= 2
if (year%4 == 0 and (year%100 != 0 or year%400 ==0)):
yrmo+= 1
return (yrmo + day) * 86400 #24 * 60 * 60
| apache-2.0 | 8,460,138,943,254,883,000 | 37.214936 | 129 | 0.614585 | false |
Fireforge/AcronatorServer | acronization.py | 1 | 5783 | import json
import argparse
import random
import sys
import requests
BIGHUGELABS_API_KEY = 'f79909b74265ba8593daf87741f3c874'
buzzWords = ['alignment','bot', 'collusion', 'derivative', 'engagement', 'focus', 'gathering' ,'housing','liability','management','nomenclature','operation','procedure','reduction','strategic','technology','undertaking','vision','widget','yardbird']
forbiddenWords = ['who','what','when','where','why','were','am','and','there','their']
class AcronymLetter:
def __init__(self, letter, word_list):
self.letter = letter.upper()
self.words = word_list
def __str__(self):
outString = ''
for word in self.words:
if len(outString) == 0:
outString = self.letter + " - " + str(word)
else:
outString = outString + ", " + str(word)
return outString
class Word:
def __init__(self, word, priority):
self.word = word
self.priority = priority
def __str__(self):
return self.word + " : " + str(self.priority)
def acronym_finder(inputAcronym, inputGeneralKeywords, numOutputs=5, minWordLength=2):
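    # Builds up to numOutputs candidate expansions of inputAcronym: for each letter,
    # candidate words come from Big Huge Thesaurus synonyms of the given keywords
    # (weighted by how often they recur) plus random Wordnik words as filler.
    # Returns the candidates as a list of strings.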
# holds letter objects
acronym = []
inputError = False
if minWordLength < 2:
print('You dun goofed. Minimum word length must be greater than 1')
inputError = True
if numOutputs < 1:
print('WTF! How does it make sense to print any less than 1 output?')
inputError = True
if inputError:
sys.exit()
# Generate possible word names from the synonym API
for keyword in inputGeneralKeywords:
thesaurusList_url = "http://words.bighugelabs.com/api/2/" + BIGHUGELABS_API_KEY + "/" + keyword + "/json"
thesaurusResponse = requests.get(thesaurusList_url)
if thesaurusResponse.status_code == 200:
thesaurusJson = json.loads(thesaurusResponse.text)
# this is normal for some words.
elif thesaurusResponse.status_code == 404:
continue
else:
print("Shit: " + str(thesaurusResponse.status_code))
letters = []
for i, c in enumerate(inputAcronym):
letters.append(c)
distinctLetters = list(set(letters))
# Rank possible synonym words for each letter in the acronym
for letter in distinctLetters:
firstLetter = letter.lower()
wordList = []
if thesaurusResponse.status_code == 200:
for wordType in thesaurusJson.keys():
for meaningType in thesaurusJson[wordType].keys():
for word in thesaurusJson[wordType][meaningType]:
if word[0] == firstLetter and word.count(' ') == 0 and len(word) >= minWordLength:
for w in wordList:
if w.word == word:
priority = w.priority + 1
wordList.remove(w)
wordList.insert(0,Word(word,priority))
break
else:
wordList.append(Word(word,1))
randomWords_url = "http://api.wordnik.com:80/v4/words.json/search/" + firstLetter + "?caseSensitive=false&includePartOfSpeech=noun&minCorpusCount=5&maxCorpusCount=-1&minDictionaryCount=1&maxDictionaryCount=-1&minLength=" + str(minWordLength) + "&maxLength=-1&skip=0&limit=" + str(4 * minWordLength * minWordLength * minWordLength) + "&api_key=a2a73e7b926c924fad7001ca3111acd55af2ffabf50eb4ae5"
randomWordsResponse = requests.get(randomWords_url)
if randomWordsResponse.status_code == 200:
randomWordsJson = json.loads(randomWordsResponse.text)
for entry in randomWordsJson["searchResults"]:
word = entry["word"]
if word[0] == firstLetter and len(word) >= minWordLength and word.count(' ') == 0:
wordList.append(Word(word,0))
            wordList.sort(key=lambda word: word.priority, reverse=True) # highest-priority words first
acronym.append(AcronymLetter(firstLetter,wordList))
# Generate possible acronym results
winners = []
for x in range (0,numOutputs):
winner = ''
for i, c in enumerate(inputAcronym):
for letter in acronym:
if letter.letter == c:
try:
word = letter.words[0]
if len(winner) == 0:
winner = word.word
letter.words.remove(word)
else:
winner = winner + ' ' + word.word
letter.words.remove(word)
except IndexError:
print("Can't get all {} words".format(len(acronym)))
# Sanity Check if the winner is a valid acronym
#if len(winner.split(' ')) == len(acronym):
winners.append(winner)
return winners
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='De-Generate Acronym')
parser.add_argument('acronym', metavar='ACR',help='the acronym')
parser.add_argument('--numOutputs', metavar='NOU',type=int, help='number of outputs', default=1)
parser.add_argument('--minLength', metavar='MIN',type=int, help='minimum length of words used', default=2)
parser.add_argument('keywords', metavar='KEY', nargs='+',help='some keywords')
args = parser.parse_args()
winner_list = acronym_finder(
inputAcronym=args.acronym,
numOutputs=args.numOutputs,
inputGeneralKeywords=args.keywords,
minWordLength=args.minLength)
print('\n'.join(winner_list))
# Test call
# print(acronym_finder('hello', ['world'], numOutputs=5))
| mit | 8,642,745,247,605,629,000 | 38.882759 | 402 | 0.582743 | false |
vlegoff/tsunami | src/primaires/salle/commandes/etendue/creer.py | 1 | 2733 | # -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'créer' de la commande 'étendue'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmCreer(Parametre):
"""Commande 'etendue créer'.
"""
def __init__(self):
"""Constructeur du paramètre"""
Parametre.__init__(self, "créer", "create")
self.schema = "<cle>"
self.aide_courte = "crée une étendue d'eau"
self.aide_longue = \
"Permet de créer une nouvelle étendue d'eau. Cette commande " \
"prend en paramètre la clé de l'étendue à créer (ne doit pas " \
"déjà exister)."
def interpreter(self, personnage, dic_masques):
"""Interprétation du paramètre"""
cle = dic_masques["cle"].cle
# On vérifie que cette étendue n'existe pas
if cle in type(self).importeur.salle.etendues.keys():
personnage << "|err|Cette clé {} existe déjà.|ff|".format(
repr(cle))
return
type(self).importeur.salle.creer_etendue(cle)
personnage << "L'étendue {} a bien été créée.".format(repr(cle))
| bsd-3-clause | 6,137,721,465,769,932,000 | 43.327869 | 79 | 0.698225 | false |
overdev/easygl-0.1.0-alpha1 | easygl/display/window.py | 1 | 11847 | # !/usr/bin/python
# -*- coding: utf-8 -*-
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Jorge A. Gomes (jorgegomes83 at hotmail dot com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import datetime as dt
import OpenGL.GL as GL
import pygame as pg
import pygame.locals as co
from enum import Enum
from typing import Union, Optional
from contextlib import contextmanager
from easygl.structures import Vec4, Vec2
from .events import *
__all__ = [
'BlendMode',
'DisplayError',
'GLWindow',
'Multisamples',
'Projection',
]
class DisplayError(Exception):
pass
class Multisamples(Enum):
none = 0
double = 1
triple = 2
quad = 3
class BlendMode(Enum):
none = 0
add = 1
alpha = 2
multiply = 3
class Projection(Enum):
custom = 0
ortho_up = 1
ortho_down = 2
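# With Projection.ortho_up the y axis points upward (origin at the bottom-left),
# so mouse coordinates reported below are flipped from pygame's top-left origin.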
class GLWindow(object):
_current = None # type: GLWindow
def __init__(self, width, height, title, multisamples, blendmode, projection, **kwargs):
# type: (int, int, str, Multisamples, BlendMode, Projection) -> None
if self.__class__._current is not None:
raise DisplayError("Display already initialized. Call reset() method to change settings.")
color = kwargs.get('clear_color', (0., 0., 0., 1.))
size = width, height
flags = co.OPENGL
flags |= co.RESIZABLE if kwargs.get('resizable', False) else 0
flags |= co.DOUBLEBUF if kwargs.get('doublebuf', False) else 0
flags |= co.FULLSCREEN if kwargs.get('fullscreen', False) else 0
flags |= co.HWSURFACE if kwargs.get('hwsurface', False) else 0
pg.init()
if multisamples is not Multisamples.none:
samples = {
Multisamples.double: 2,
Multisamples.triple: 3,
Multisamples.quad: 4
}.get(multisamples, 2)
pg.display.gl_set_attribute(pg.GL_MULTISAMPLESAMPLES, samples)
surface = pg.display.set_mode(size, flags)
# print(surface)
width, height = surface.get_size()
pg.display.set_caption(title, title)
if multisamples is not Multisamples.none:
GL.glEnable(GL.GL_MULTISAMPLE)
GL.glEnable(GL.GL_BLEND)
GL.glClearColor(*color)
GL.glViewport(0, 0, width, height)
self._handling_input = False
self._rendering = False
self._close_request = False
self._blend_mode = None
self.blend_mode = blendmode
self._projection = projection
self._flip_time = 0
self._input_time = 0
self._render_time = 0
self._delta = 1
@property
def projection(self):
return self._projection
@property
def title(self):
return pg.display.get_caption()
@title.setter
def title(self, value):
pg.display.set_caption(repr(value), repr(value))
@property
def should_close(self):
# type: () -> bool
return self._close_request
@property
def resolution(self):
# type: () -> tuple
return pg.display.get_surface().get_size()
@property
def width(self):
# type: () -> int
return pg.display.get_surface().get_width()
@property
def height(self):
# type: () -> int
return pg.display.get_surface().get_height()
@property
def blend_mode(self):
# type: () -> BlendMode
return self._blend_mode
@blend_mode.setter
def blend_mode(self, value):
# type: (BlendMode) -> None
if value is not self._blend_mode:
self._blend_mode = value
if value is BlendMode.none:
GL.glBlendFunc(GL.GL_ONE, GL.GL_ZERO)
elif value is BlendMode.add:
try:
GL.glBlendFuncSeparate(GL.GL_SRC_ALPHA, GL.GL_ONE, GL.GL_ONE, GL.GL_ONE)
except (NameError, Exception):
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE)
elif value is BlendMode.alpha:
try:
GL.glBlendFuncSeparate(
GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA, GL.GL_ONE, GL.GL_ONE_MINUS_SRC_ALPHA)
except (NameError, Exception):
GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)
elif value is BlendMode.multiply:
try:
                    # GL_ALPHA is not a valid blend factor; assume destination-color multiply was intended
                    GL.glBlendFuncSeparate(GL.GL_DST_COLOR, GL.GL_ZERO, GL.GL_ONE, GL.GL_ZERO)
except (NameError, Exception):
GL.glBlendFunc(GL.GL_DST_COLOR, GL.GL_ZERO)
@property
def mouse_pos(self):
# type: () -> Vec2
x, y = pg.mouse.get_pos()
if self._projection is Projection.ortho_up:
y = pg.display.get_surface().get_height() - y
return Vec2(x, y)
@property
def mouse_motion(self):
# type: () -> Vec2
x, y = pg.mouse.get_rel()
if self._projection is Projection.ortho_up:
y = -y
return Vec2(x, y)
@property
def frame_delta(self):
# type: () -> int
return self._delta
@contextmanager
def input(self, raw=False):
# type: () -> None
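        # Minimal usage sketch (assumed), for a GLWindow instance `window`:
        #     with window.input() as (delta, events, keys, mouse_pos, mouse_rel):
        #         for event in events:
        #             ...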
if self._handling_input:
raise RuntimeError("Can't nest input processing contexts.")
self._handling_input = True
time = pg.time.get_ticks()
delta = time - self._input_time
self._input_time = time
if raw:
yield delta, pg.event.get(), pg.key.get_pressed(), Vec2(*pg.mouse.get_pos()), Vec2(*pg.mouse.get_rel())
else:
events = []
for event in pg.event.get():
if event.type == co.ACTIVEEVENT:
events.append(Focus(event.gain, event.state, pg.time.get_ticks()))
elif event.type == co.QUIT:
now = dt.datetime.now()
ms = pg.time.get_ticks()
self._close_request = True
events.append(CloseWindow(ms, now))
elif event.type == co.KEYDOWN:
                    ctrl = event.mod & co.KMOD_CTRL != 0
shift = event.mod & co.KMOD_SHIFT != 0
alt = event.mod & co.KMOD_ALT != 0
events.append(KeyDown(event.key, event.unicode, event.mod, ctrl, shift, alt))
elif event.type == co.KEYUP:
                    ctrl = event.mod & co.KMOD_CTRL != 0
shift = event.mod & co.KMOD_SHIFT != 0
alt = event.mod & co.KMOD_ALT != 0
events.append(KeyUp(event.key, event.mod, ctrl, shift, alt))
elif event.type == co.MOUSEMOTION:
height = pg.display.get_surface().get_height()
x, y = event.pos
mx, my = event.rel
if self._projection is Projection.ortho_up:
y = height - y
my = -my
lbutton, mbutton, rbutton = event.buttons
events.append(MouseMotion(Vec2(x, y), Vec2(mx, my), lbutton, mbutton, rbutton))
elif event.type == co.MOUSEBUTTONDOWN:
height = pg.display.get_surface().get_height()
x, y = event.pos
if self._projection is Projection.ortho_up:
y = height - y
if event.button == 1:
events.append(LeftButtonDown(Vec2(x, y), x, y))
elif event.button == 2:
events.append(MiddleButtonDown(Vec2(x, y), x, y))
elif event.button == 3:
events.append(RightButtonDown(Vec2(x, y), x, y))
elif event.button == 4:
events.append(MouseWheelUp(Vec2(x, y), x, y))
else:
events.append(MouseWheelDown(Vec2(x, y), x, y))
elif event.type == co.MOUSEBUTTONUP:
height = pg.display.get_surface().get_height()
x, y = event.pos
if self._projection is Projection.ortho_up:
y = height - y
if event.button == 1:
events.append(LeftButtonUp(Vec2(x, y), x, y))
elif event.button == 2:
events.append(MiddleButtonUp(Vec2(x, y), x, y))
else:
events.append(RightButtonUp(Vec2(x, y), x, y))
elif event.type == co.VIDEORESIZE:
events.append(VideoResize(event.w, event.h, event.size))
elif event.type == co.VIDEOEXPOSE:
now = dt.datetime.now()
ms = pg.time.get_ticks()
events.append(VideoExpose(ms, now))
elif event.type == co.JOYAXISMOTION:
events.append(JoyAxis(event.joy, event.axis, event.value))
elif event.type == co.JOYBALLMOTION:
events.append(JoyBall(event.joy, event.ball, event.rel))
elif event.type == co.JOYHATMOTION:
events.append(JoyHat(event.joy, event.hat, event.value))
elif event.type == co.JOYBUTTONDOWN:
events.append(JoyButtonDown(event.joy, event.button))
elif event.type == co.JOYBUTTONUP:
events.append(JoyButtonUp(event.joy, event.button))
keys = pg.key.get_pressed()
mouse_pos = Vec2(*pg.mouse.get_pos())
mouse_rel = Vec2(*pg.mouse.get_rel())
if self._projection is Projection.ortho_up:
mouse_pos.y = self.height - mouse_pos.y
mouse_rel.y = -mouse_rel.y
yield delta, events, keys, mouse_pos, mouse_rel
self._handling_input = False
@contextmanager
def rendering(self, clear_color=None):
# type: (Optional[Union[Vec4, tuple, list]]) -> None
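        # Minimal usage sketch (assumed): draw inside the context; the buffer is flipped and
        # then cleared when the block exits.
        #     with window.rendering(clear_color=(0., 0., 0., 1.)) as delta:
        #         draw_scene(delta)  # hypothetical drawing call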
if self._rendering:
raise RuntimeError("Can't nest GLWindow rendering contexts.")
self._rendering = True
time = pg.time.get_ticks()
delta = time - self._render_time
self._render_time = time
yield delta
pg.display.flip()
time = pg.time.get_ticks()
self._delta = time - self._flip_time
self._flip_time = time
if clear_color is not None:
GL.glClearColor(*clear_color)
GL.glClear(GL.GL_COLOR_BUFFER_BIT)
self._rendering = False
def close(self):
# type: () -> None
self._close_request = True
| mit | 6,686,868,297,029,935,000 | 34.364179 | 115 | 0.543682 | false |
testalt/electrum-ppc-server | src/utils.py | 1 | 6099 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from itertools import imap
import threading
import time
import hashlib
import sys
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
global PUBKEY_ADDRESS
global SCRIPT_ADDRESS
PUBKEY_ADDRESS = 138
SCRIPT_ADDRESS = 5
def rev_hex(s):
return s.decode('hex')[::-1].encode('hex')
def int_to_hex(i, length=1):
s = hex(i)[2:].rstrip('L')
s = "0"*(2*length - len(s)) + s
return rev_hex(s)
def var_int(i):
if i < 0xfd:
return int_to_hex(i)
elif i <= 0xffff:
return "fd" + int_to_hex(i, 2)
elif i <= 0xffffffff:
return "fe" + int_to_hex(i, 4)
else:
return "ff" + int_to_hex(i, 8)
Hash = lambda x: hashlib.sha256(hashlib.sha256(x).digest()).digest()
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
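# header_to_string / header_from_string below (de)serialize the 80-byte block header:
# version (4 bytes) | prev_block_hash (32) | merkle_root (32) | timestamp (4) | bits (4) | nonce (4)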
def header_to_string(res):
pbh = res.get('prev_block_hash')
if pbh is None:
pbh = '0'*64
return int_to_hex(res.get('version'), 4) \
+ rev_hex(pbh) \
+ rev_hex(res.get('merkle_root')) \
+ int_to_hex(int(res.get('timestamp')), 4) \
+ int_to_hex(int(res.get('bits')), 4) \
+ int_to_hex(int(res.get('nonce')), 4)
def hex_to_int(s):
return int('0x' + s[::-1].encode('hex'), 16)
def header_from_string(s):
return {
'version': hex_to_int(s[0:4]),
'prev_block_hash': hash_encode(s[4:36]),
'merkle_root': hash_encode(s[36:68]),
'timestamp': hex_to_int(s[68:72]),
'bits': hex_to_int(s[72:76]),
'nonce': hex_to_int(s[76:80]),
}
############ functions from pywallet #####################
def hash_160(public_key):
try:
md = hashlib.new('ripemd160')
md.update(hashlib.sha256(public_key).digest())
return md.digest()
except:
import ripemd
md = ripemd.new(hashlib.sha256(public_key).digest())
return md.digest()
def public_key_to_pubkey_address(public_key):
return hash_160_to_pubkey_address(hash_160(public_key))
def public_key_to_bc_address(public_key):
""" deprecated """
return public_key_to_pubkey_address(public_key)
def hash_160_to_pubkey_address(h160, addrtype=None):
""" deprecated """
if not addrtype:
addrtype = PUBKEY_ADDRESS
return hash_160_to_address(h160, addrtype)
def hash_160_to_pubkey_address(h160):
return hash_160_to_address(h160, PUBKEY_ADDRESS)
def hash_160_to_script_address(h160):
return hash_160_to_address(h160, SCRIPT_ADDRESS)
def hash_160_to_address(h160, addrtype=0):
    """ Checks that the provided hash is 160 bits (20 bytes) long and returns the address, else None
    """
    if h160 is None or len(h160) != 20:
        return None
vh160 = chr(addrtype) + h160
h = Hash(vh160)
addr = vh160 + h[0:4]
return b58encode(addr)
def bc_address_to_hash_160(addr):
    if addr is None or len(addr) == 0:
return None
bytes = b58decode(addr, 25)
return bytes[1:21] if bytes is not None else None
def b58encode(v):
"""encode v, which is a string of bytes, to base58."""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += (256**i) * ord(c)
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Bitcoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == '\0':
nPad += 1
else:
break
return (__b58chars[0]*nPad) + result
def b58decode(v, length):
""" decode v into a string of len bytes."""
long_value = 0L
for (i, c) in enumerate(v[::-1]):
long_value += __b58chars.find(c) * (__b58base**i)
result = ''
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
else:
break
result = chr(0)*nPad + result
if length is not None and len(result) != length:
return None
return result
def EncodeBase58Check(vchIn):
hash = Hash(vchIn)
return b58encode(vchIn + hash[0:4])
def DecodeBase58Check(psz):
vchRet = b58decode(psz, None)
key = vchRet[0:-4]
csum = vchRet[-4:]
hash = Hash(key)
cs32 = hash[0:4]
if cs32 != csum:
return None
else:
return key
########### end pywallet functions #######################
def random_string(length):
with open("/dev/urandom", 'rb') as f:
return b58encode( f.read(length) )
def timestr():
return time.strftime("[%d/%m/%Y-%H:%M:%S]")
### logger
import logging
import logging.handlers
logger = logging.getLogger('electrum-ppc')
def init_logger(logfile):
hdlr = logging.handlers.WatchedFileHandler(logfile)
formatter = logging.Formatter('%(asctime)s %(message)s', "[%d/%m/%Y-%H:%M:%S]")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def print_log(*args):
logger.info(" ".join(imap(str, args)))
def print_warning(message):
logger.warning(message)
| agpl-3.0 | -1,488,351,987,294,061,300 | 23.396 | 107 | 0.610756 | false |
rmit-ir/SummaryRank | summaryrank/svmlight_tools.py | 1 | 11505 | """
SVMLight tools
"""
import sys
import argparse
import gzip
import math
import numpy as np
import random
import re
PROG = 'python svmlight_format.py'
ID_NAME_PATTERN = re.compile(r'^#\s*(\d+)\s*:\s*(\S+.*)\s*$')
class AutoHelpArgumentParser(argparse.ArgumentParser):
""" """
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
def _open(filename):
if filename.endswith('.gz'):
return gzip.open(filename)
else:
return file(filename)
def _get_between_text(s, head, tail):
b = s.index(head) + len(head)
e = s.index(tail, b)
return s[b:e]
def get_rows(iterable, with_preamble=False):
""" Return (optionally) a preamble and a sequence of lines. """
preamble = []
firstline = None
for line in iterable:
if not line.startswith('#'):
firstline = line
break
preamble.append(line)
if with_preamble:
yield preamble
if firstline is not None:
yield firstline
for line in iterable:
yield line
def get_preamble_features(preamble):
""" Return a sequence of (fid, name) pairs. """
for line in preamble:
m = ID_NAME_PATTERN.match(line)
if m:
yield int(m.group(1)), m.group(2)
def get_preamble_lines(preamble, fids, mapping):
""" Return a filtered list of preamble lines according to selected fids. """
for line in preamble:
m = ID_NAME_PATTERN.match(line)
if not m:
yield line
continue
fid, name = int(m.group(1)), m.group(2)
if fid not in fids:
continue
yield '# {}: {}\n'.format(mapping[fid], name)
def get_vectors(lines):
""" Return a sequence of (vector, metadata) pairs. """
for line in lines:
body, comment = line.split('# ', 1)
fields = body.split()
rel = int(fields[0])
qid = fields[1].split(':')[1]
docno = comment.split()[0].split(':', 1)[1]
vector = dict()
for field in fields[2:]:
fid, val = field.split(':')
vector[int(fid)] = float(val)
yield vector, {'rel': rel, 'qid': qid, 'docno': docno}
def write_preamble(out, features):
""" Print preamble """
print >>out, '# Features in use'
for fid, cls in enumerate(features, 1):
print >>out, '# {}: {}'.format(fid, cls)
def write_vectors_columnwise(out, qids, rels, docnos, columns):
""" Print feature vectors, assuming columnwise input """
nrows = len(qids)
assert nrows == len(rels) == len(docnos)
assert all([len(column) == nrows for column in columns])
for i in range(nrows):
row_values = [column[i] for column in columns]
row = ' '.join(['{}:{}'.format(fid, val) for fid, val in enumerate(row_values, 1)])
print >>out, '{} qid:{} {} # docno:{}'.format(rels[i], qids[i], row, docnos[i])
def describe(argv):
""" Print the preamble """
parser = AutoHelpArgumentParser(prog='describe')
parser.add_argument('vector_file',
help='the input vector file')
args = parser.parse_args(argv)
rows = get_rows(_open(args.vector_file), with_preamble=True)
preamble = next(rows)
for line in preamble:
print line,
def cut(argv):
""" Cut and print a select subset of features """
parser = AutoHelpArgumentParser(prog='cut')
parser.add_argument('-f', dest='fields', metavar='LIST',
help='select only these fields')
parser.add_argument('--renumbering', action='store_true',
help='renumber the feature IDs')
parser.add_argument('vector_file',
help='the input vector file')
args = parser.parse_args(argv)
selector = set()
if args.fields:
for comp in args.fields.split(','):
if comp.find('-') >= 0:
l, u = map(int, comp.split('-'))
selector.update(range(l, u + 1))
else:
selector.add(int(comp))
if len(selector) == 0:
print >>sys.stderr, 'must specify a list of fields'
return 1
fids = sorted(selector)
mapped = dict((fid, fid) for fid in fids)
if args.renumbering:
mapped.update((fid, i) for i, fid in enumerate(fids, 1))
rows = get_rows(_open(args.vector_file), with_preamble=True)
preamble = next(rows)
for line in get_preamble_lines(preamble, selector, mapped):
print line,
for vector, metadata in get_vectors(rows):
row = ' '.join(['{}:{}'.format(mapped[fid], vector[fid]) for fid in fids])
print '{} qid:{} {} # docno:{}'.format(
metadata['rel'], metadata['qid'], row, metadata['docno'])
def join(argv):
""" Merge multiple sets of features """
parser = AutoHelpArgumentParser(prog='join')
parser.add_argument('vector_files', metavar='vector_file', type=str, nargs='+',
help='input vector files')
args = parser.parse_args(argv)
if len(args.vector_files) < 2:
print >>sys.stderr, 'must specify at least two vector files'
return 1
rows_list = [get_rows(_open(name), with_preamble=True) for name in args.vector_files]
preamble_list = [next(rows) for rows in rows_list]
features_list = [get_preamble_features(preamble) for preamble in preamble_list]
trans_list = []
fid_to_name = []
new_fid = 0
for features in features_list:
trans = dict()
for fid, name in features:
new_fid += 1
fid_to_name.append((new_fid, name))
trans[fid] = new_fid
trans_list.append(trans)
print '# Features in use'
for fid, name in fid_to_name:
print '# {}: {}'.format(fid, name)
vectors_list = [get_vectors(rows) for rows in rows_list]
while True:
vm_list = [next(vectors, None) for vectors in vectors_list]
if not all(vm_list):
assert not any(vm_list)
break
m_list = [m for _, m in vm_list]
assert m_list.count(m_list[0]) == len(m_list)
metadata = m_list[0]
v_list = [v for v, _ in vm_list]
buf = []
for i in range(len(v_list)):
for fid in sorted(v_list[i]):
buf.append('{}:{}'.format(trans_list[i][fid], v_list[i][fid]))
print '{} qid:{} {} # docno:{}'.format(
metadata['rel'], metadata['qid'], ' '.join(buf), metadata['docno'])
def shuffle(argv):
""" Shuffle the data on query topic """
parser = AutoHelpArgumentParser(prog='shuffle')
parser.add_argument('-seed',
help='use a custom seed instead of the system default')
parser.add_argument('vector_file',
help='input vector file')
args = parser.parse_args(argv)
if args.seed is not None:
random.seed(args.seed)
# scan through to get all the qids, have everything buffered
rows = get_rows(_open(args.vector_file), with_preamble=True)
preamble = next(rows)
buf = dict()
for line in rows:
qid = _get_between_text(line, 'qid:', ' ')
if qid not in buf:
buf[qid] = []
buf[qid].append(line)
qids = sorted(buf)
random.shuffle(qids)
# produce output
print ''.join(preamble),
for qid in qids:
for line in buf[qid]:
print line,
def split(argv):
""" Split data into a select number of folds """
parser = AutoHelpArgumentParser(prog='split')
parser.add_argument('-k', type=int,
help='number of folds (default: %(default)s)')
parser.add_argument('--prefix', type=str,
help='prefix of output files (default: name of vector_file)')
parser.add_argument('-r', '--random', action='store_true',
help='use random partition rather than sequential')
parser.add_argument('-c', '--complete', action='store_true',
help='output training sets as well')
parser.add_argument('vector_file',
help='input vector file')
parser.set_defaults(k=5)
args = parser.parse_args(argv)
prefix = args.prefix or args.vector_file
# Scan through to get all qids
rows = get_rows(_open(args.vector_file))
seen_qids = set()
for line in rows:
seen_qids.add(_get_between_text(line, 'qid:', ' '))
qids = list(seen_qids)
if args.random:
random.shuffle(qids)
# Assign fold numbers, the lowest being 0 internally
fold_number = dict()
fold_size = int(math.ceil(float(len(qids)) / args.k))
for k in range(args.k):
fold_number.update(
[(qid, k) for qid in qids[k * fold_size:(k + 1) * fold_size]])
# Second pass
rows = get_rows(_open(args.vector_file), with_preamble=True)
preamble = next(rows)
test_files = ['{}.fold-{}_test'.format(prefix, k + 1) for k in range(args.k)]
test_outputs = [file(name, "w") for name in test_files]
for output in test_outputs:
output.writelines(preamble)
if args.complete:
training_files = ['{}.fold-{}_training'.format(prefix, k + 1) for k in range(args.k)]
training_outputs = [file(name, "w") for name in training_files]
for output in training_outputs:
output.writelines(preamble)
full_set = set(range(args.k))
if args.complete:
for line in rows:
qid = _get_between_text(line, 'qid:', ' ')
test_outputs[fold_number[qid]].write(line)
for fold in full_set - set([fold_number[qid]]):
training_outputs[fold].write(line)
else:
for line in rows:
qid = _get_between_text(line, 'qid:', ' ')
test_outputs[fold_number[qid]].write(line)
# FIXME
def normalize(argv):
""" Normalize feature values. """
parser = AutoHelpArgumentParser(prog='normalize')
parser.add_argument('-m',
                        help='normalization method name')
parser.add_argument('vector_file',
help='input vector file')
args = parser.parse_args(argv)
def get_vector_groups(rows):
qid = None
group = dict()
for vector, m in get_vectors(rows):
if m['qid'] != qid:
if group:
yield qid, group
qid = m['qid']
                group = {fid: [] for fid in vector}
for fid, val in vector.items():
group[fid].append(val)
if group:
yield qid, group
# first pass over data to collect means
rows = get_rows(_open(args.vector_file))
# means = dict()
min_values, gaps = dict(), dict()
for qid, group in get_vector_groups(rows):
# means[qid] = dict((fid, np.mean(values)) for fid, values in group)
min_values[qid] = {fid: min(values) for fid, values in group.items()}
gaps[qid] = {fid: max(values) - min(values) for fid, values in group.items()}
# second pass
rows = get_rows(_open(args.vector_file))
preamble = next(rows)
print ''.join(preamble),
for vector, m in get_vectors(rows):
buf = []
for fid in sorted(vector):
new_value = float(vector[fid] - min_values[m['qid']][fid]) / gaps[m['qid']][fid]
buf.append('{}:{}'.format(fid, new_value))
row = ' '.join(buf)
print '{} qid:{} {} # docno:{}'.format(m['rel'], m['qid'], row, m['docno'])
| mit | -3,312,483,875,548,357,000 | 31.226891 | 93 | 0.567492 | false |
BladeSun/NliWithKnowledge | session2/dam_bk_1.py | 1 | 59183 | '''
Build a decomposable attention model with background knowledge for natural language inference
(soft-attention NMT building blocks are also kept in this file)
'''
import theano
import theano.tensor as tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
import cPickle as pkl
import ipdb
import numpy
import copy
import os
import warnings
import sys
import time
import logging
from collections import OrderedDict
from data_iterator_bk import TextIterator
profile = False
# push parameters to Theano shared variables
def zipp(params, tparams):
for kk, vv in params.iteritems():
tparams[kk].set_value(vv)
# pull parameters from Theano shared variables
def unzip(zipped):
new_params = OrderedDict()
for kk, vv in zipped.iteritems():
new_params[kk] = vv.get_value()
return new_params
# get the list of parameters: Note that tparams must be OrderedDict
def itemlist(tparams):
return [vv for kk, vv in tparams.iteritems()]
# dropout
def dropout_layer(state_before, use_noise, trng):
proj = tensor.switch(
use_noise,
state_before * trng.binomial(state_before.shape, p=0.5, n=1,
dtype=state_before.dtype),
state_before * 0.5)
return proj
# make prefix-appended name
def _p(pp, name):
return '%s_%s' % (pp, name)
# initialize Theano shared variables according to the initial parameters
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
# load parameters
def load_params(path, params):
pp = numpy.load(path)
for kk, vv in params.iteritems():
if kk not in pp:
warnings.warn('%s is not in the archive' % kk)
continue
params[kk] = pp[kk]
return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'gru_cond': ('param_init_gru_cond', 'gru_cond_layer'),
'funcf_layer': ('param_init_funcf_layer', 'funcf_layer'),
}
def get_layer(name):
fns = layers[name]
return (eval(fns[0]), eval(fns[1]))
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def relu(x):
return tensor.nnet.relu(x)
def tanh(x):
return tensor.tanh(x)
def linear(x):
return x
def concatenate(tensor_list, axis=0):
"""
Alternative implementation of `theano.tensor.concatenate`.
This function does exactly the same thing, but contrary to Theano's own
implementation, the gradient is implemented on the GPU.
Backpropagating through `theano.tensor.concatenate` yields slowdowns
because the inverse operation (splitting) needs to be done on the CPU.
This implementation does not have that problem.
:usage:
>>> x, y = theano.tensor.matrices('x', 'y')
>>> c = concatenate([x, y], axis=1)
:parameters:
- tensor_list : list
list of Theano tensor expressions that should be concatenated.
- axis : int
the tensors will be joined along this axis.
:returns:
- out : tensor
the concatenated tensor expression.
"""
concat_size = sum(tt.shape[axis] for tt in tensor_list)
output_shape = ()
for k in range(axis):
output_shape += (tensor_list[0].shape[k],)
output_shape += (concat_size,)
for k in range(axis + 1, tensor_list[0].ndim):
output_shape += (tensor_list[0].shape[k],)
out = tensor.zeros(output_shape)
offset = 0
for tt in tensor_list:
indices = ()
for k in range(axis):
indices += (slice(None),)
indices += (slice(offset, offset + tt.shape[axis]),)
for k in range(axis + 1, tensor_list[0].ndim):
indices += (slice(None),)
out = tensor.set_subtensor(out[indices], tt)
offset += tt.shape[axis]
return out
# batch preparation
def prepare_data(seqs_x, seqs_x_syn, seqs_y, seqs_y_syn, label, maxlen=None, n_words_src=30000,
n_words=30000, bk_for_x=None, bk_for_y=None, bk_dim=10):
# x: a list of sentences
lengths_x = [len(s) for s in seqs_x]
lengths_y = [len(s) for s in seqs_y]
if maxlen is not None:
new_seqs_x = []
new_seqs_x_syn = []
new_seqs_y = []
new_seqs_y_syn = []
new_lengths_x = []
new_lengths_y = []
new_label = []
for l_x, s_x, s_x_syn, l_y, s_y, s_y_syn, ll in zip(lengths_x, seqs_x, seqs_x_syn, lengths_y, seqs_y, seqs_y_syn, label):
if l_x < maxlen and l_y < maxlen:
new_seqs_x.append(s_x)
new_seqs_x_syn.append(s_x_syn)
new_lengths_x.append(l_x)
new_seqs_y.append(s_y)
new_seqs_y_syn.append(s_y_syn)
new_lengths_y.append(l_y)
new_label.append(ll)
lengths_x = new_lengths_x
seqs_x = new_seqs_x
seqs_x_syn = new_seqs_x_syn
lengths_y = new_lengths_y
seqs_y = new_seqs_y
seqs_y_syn = new_seqs_y_syn
label = new_label
if len(lengths_x) < 1 or len(lengths_y) < 1:
return None, None, None, None, None, None, None
n_samples = len(seqs_x)
maxlen_x = numpy.max(lengths_x) + 2
maxlen_y = numpy.max(lengths_y) + 2
x = numpy.zeros((maxlen_x, n_samples)).astype('int64')
x_syn = numpy.zeros((maxlen_x, n_samples)).astype('int64')
y = numpy.zeros((maxlen_y, n_samples)).astype('int64')
y_syn = numpy.zeros((maxlen_y, n_samples)).astype('int64')
flabel = numpy.array(label).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
y_mask = numpy.zeros((maxlen_y, n_samples)).astype('float32')
for idx, [s_x, s_x_syn, s_y, s_y_syn] in enumerate(zip(seqs_x, seqs_x_syn, seqs_y, seqs_y_syn)):
x[0, idx] = 1
x[lengths_x[idx]+1, idx] = 2
x[1:lengths_x[idx] + 1, idx] = s_x
x_mask[:lengths_x[idx] + 2, idx] = 1.
x_syn[0, idx] = 3 # 3 for none
x_syn[lengths_x[idx]+1, idx] = 3
x_syn[1:lengths_x[idx] + 1, idx] = s_x_syn
y[0, idx] = 1
y[lengths_y[idx]+1, idx] = 2
y[1:lengths_y[idx] + 1, idx] = s_y
y_mask[:lengths_y[idx] + 2, idx] = 1.
y_syn[0, idx] = 3 # 3 for none
y_syn[lengths_y[idx]+1, idx] = 3
y_syn[1:lengths_y[idx] + 1, idx] = s_y_syn
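    # Background-knowledge lookup (shapes inferred from the code below): for every tag id in
    # x_syn, getbk collects a bk_dim feature vector against each tag id in y_syn (zeros when no
    # entry exists), and symmetrically for y_syn against x_syn, giving
    #   bk_x: (maxlen_x, n_samples, maxlen_y, bk_dim),  bk_y: (maxlen_y, n_samples, maxlen_x, bk_dim).
    # bk_x is then reduced to three relation channels (columns 0, 11, 12), which matches the
    # default op_num=3 used in train().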
getbk = lambda sid, batch_id, target, bkdict: numpy.array([numpy.array(bkdict[sid][tid]).astype('float32') if tid in bkdict[sid] else numpy.zeros(bk_dim).astype('float32') for tid in target[:, batch_id]])
bk_x = numpy.array([getbk(z[0], z[1], y_syn, bk_for_x) if z[0] in bk_for_x else numpy.zeros((maxlen_y,bk_dim)).astype('float32') for z in zip(x_syn.reshape(-1).tolist(), range(n_samples) * maxlen_x) ]).reshape(maxlen_x, n_samples, maxlen_y, bk_dim)
bk_y = numpy.array([getbk(z[0], z[1], x_syn, bk_for_y) if z[0] in bk_for_y else numpy.zeros((maxlen_x,bk_dim)).astype('float32') for z in zip(y_syn.reshape(-1).tolist(), range(n_samples) * maxlen_y) ]).reshape(maxlen_y, n_samples, maxlen_x, bk_dim)
bk_x = bk_x[:,:,:,(0,11,12)]
return x, x_mask, bk_x, y, y_mask, bk_y, flabel
# feedforward layer: affine transformation + point-wise nonlinearity
def param_init_fflayer(options, params, prefix='ff', nin=None, nout=None,
ortho=True):
if nin is None:
nin = options['dim_proj']
if nout is None:
nout = options['dim_proj']
params[_p(prefix, 'W')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix, 'b')] = numpy.zeros((nout,)).astype('float32')
return params
def fflayer(tparams, state_below, options, prefix='rconv',
activ='lambda x: tensor.tanh(x)', **kwargs):
return eval(activ)(
tensor.dot(state_below, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
# functionF layer
def param_init_funcf_layer(options, params, prefix='funcF', nin=None, nout=None,
ortho=True):
if nin is None:
nin = options['dim_word']
if nout is None:
nout = options['dim_proj']
params[_p(prefix, 'W1')] = norm_weight(nin, nout, scale=0.01, ortho=ortho)
params[_p(prefix, 'b1')] = numpy.zeros((nout,)).astype('float32')
params[_p(prefix, 'W2')] = norm_weight(nout, nout, scale=0.01, ortho=ortho)
params[_p(prefix, 'b2')] = numpy.zeros((nout,)).astype('float32')
return params
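# funcf_layer is a two-layer feed-forward projection of word embeddings, apparently intended as
# the F transformation of the decomposable attention model; it is currently only referenced in
# commented-out code, and build_dam compares raw embeddings directly.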
def funcf_layer(tparams, state_below, options, prefix='funcF',
activ='lambda x: tensor.tanh(x)', **kwargs):
emb_proj = (tensor.dot(state_below, tparams[_p(prefix, 'W1')]) +
tparams[_p(prefix, 'b1')])
return eval(activ)(
tensor.dot(emb_proj, tparams[_p(prefix, 'W2')]) +
tparams[_p(prefix, 'b2')])
# GRU layer
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
if nin is None:
nin = options['dim_proj']
if dim is None:
dim = options['dim_proj']
# embedding to gates transformation weights, biases
W = numpy.concatenate([norm_weight(nin, dim),
norm_weight(nin, dim)], axis=1)
params[_p(prefix, 'W')] = W
params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
# recurrent transformation weights for gates
U = numpy.concatenate([ortho_weight(dim),
ortho_weight(dim)], axis=1)
params[_p(prefix, 'U')] = U
# embedding to hidden state proposal weights, biases
Wx = norm_weight(nin, dim)
params[_p(prefix, 'Wx')] = Wx
params[_p(prefix, 'bx')] = numpy.zeros((dim,)).astype('float32')
# recurrent transformation weights for hidden state proposal
Ux = ortho_weight(dim)
params[_p(prefix, 'Ux')] = Ux
return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None,
**kwargs):
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
dim = tparams[_p(prefix, 'Ux')].shape[1]
if mask is None:
mask = tensor.alloc(1., state_below.shape[0], 1)
# utility function to slice a tensor
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
# state_below is the input word embeddings
# input to the gates, concatenated
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
tparams[_p(prefix, 'b')]
# input to compute the hidden state proposal
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
tparams[_p(prefix, 'bx')]
# step function to be used by scan
# arguments | sequences |outputs-info| non-seqs
def _step_slice(m_, x_, xx_, h_, U, Ux):
preact = tensor.dot(h_, U)
preact += x_
# reset and update gates
r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
# compute the hidden state proposal
preactx = tensor.dot(h_, Ux)
preactx = preactx * r
preactx = preactx + xx_
# hidden state proposal
h = tensor.tanh(preactx)
# leaky integrate and obtain next hidden state
h = u * h_ + (1. - u) * h
h = m_[:, None] * h + (1. - m_)[:, None] * h_
return h
# prepare scan arguments
seqs = [mask, state_below_, state_belowx]
init_states = [tensor.alloc(0., n_samples, dim)]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Ux')]]
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info=init_states,
non_sequences=shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
rval = [rval]
return rval
# Conditional GRU layer with Attention
def param_init_gru_cond(options, params, prefix='gru_cond',
nin=None, dim=None, dimctx=None,
nin_nonlin=None, dim_nonlin=None):
if nin is None:
nin = options['dim']
if dim is None:
dim = options['dim']
if dimctx is None:
dimctx = options['dim']
if nin_nonlin is None:
nin_nonlin = nin
if dim_nonlin is None:
dim_nonlin = dim
W = numpy.concatenate([norm_weight(nin, dim),
norm_weight(nin, dim)], axis=1)
params[_p(prefix, 'W')] = W
params[_p(prefix, 'b')] = numpy.zeros((2 * dim,)).astype('float32')
U = numpy.concatenate([ortho_weight(dim_nonlin),
ortho_weight(dim_nonlin)], axis=1)
params[_p(prefix, 'U')] = U
Wx = norm_weight(nin_nonlin, dim_nonlin)
params[_p(prefix, 'Wx')] = Wx
Ux = ortho_weight(dim_nonlin)
params[_p(prefix, 'Ux')] = Ux
params[_p(prefix, 'bx')] = numpy.zeros((dim_nonlin,)).astype('float32')
U_nl = numpy.concatenate([ortho_weight(dim_nonlin),
ortho_weight(dim_nonlin)], axis=1)
params[_p(prefix, 'U_nl')] = U_nl
params[_p(prefix, 'b_nl')] = numpy.zeros((2 * dim_nonlin,)).astype('float32')
Ux_nl = ortho_weight(dim_nonlin)
params[_p(prefix, 'Ux_nl')] = Ux_nl
params[_p(prefix, 'bx_nl')] = numpy.zeros((dim_nonlin,)).astype('float32')
# context to LSTM
Wc = norm_weight(dimctx, dim * 2)
params[_p(prefix, 'Wc')] = Wc
Wcx = norm_weight(dimctx, dim)
params[_p(prefix, 'Wcx')] = Wcx
# attention: combined -> hidden
W_comb_att = norm_weight(dim, dimctx)
params[_p(prefix, 'W_comb_att')] = W_comb_att
# attention: context -> hidden
Wc_att = norm_weight(dimctx)
params[_p(prefix, 'Wc_att')] = Wc_att
# attention: hidden bias
b_att = numpy.zeros((dimctx,)).astype('float32')
params[_p(prefix, 'b_att')] = b_att
# attention:
U_att = norm_weight(dimctx, 1)
params[_p(prefix, 'U_att')] = U_att
c_att = numpy.zeros((1,)).astype('float32')
params[_p(prefix, 'c_tt')] = c_att
return params
def gru_cond_layer(tparams, state_below, options, prefix='gru',
mask=None, context=None, one_step=False,
init_memory=None, init_state=None,
context_mask=None,
**kwargs):
assert context, 'Context must be provided'
if one_step:
assert init_state, 'previous state must be provided'
nsteps = state_below.shape[0]
if state_below.ndim == 3:
n_samples = state_below.shape[1]
else:
n_samples = 1
# mask
if mask is None:
mask = tensor.alloc(1., state_below.shape[0], 1)
dim = tparams[_p(prefix, 'Wcx')].shape[1]
# initial/previous state
if init_state is None:
init_state = tensor.alloc(0., n_samples, dim)
# projected context
assert context.ndim == 3, \
'Context must be 3-d: #annotation x #sample x dim'
pctx_ = tensor.dot(context, tparams[_p(prefix, 'Wc_att')]) + \
tparams[_p(prefix, 'b_att')]
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
# projected x
state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + \
tparams[_p(prefix, 'bx')]
state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + \
tparams[_p(prefix, 'b')]
def _step_slice(m_, x_, xx_, h_, ctx_, alpha_, pctx_, cc_,
U, Wc, W_comb_att, U_att, c_tt, Ux, Wcx,
U_nl, Ux_nl, b_nl, bx_nl):
preact1 = tensor.dot(h_, U)
preact1 += x_
preact1 = tensor.nnet.sigmoid(preact1)
r1 = _slice(preact1, 0, dim)
u1 = _slice(preact1, 1, dim)
preactx1 = tensor.dot(h_, Ux)
preactx1 *= r1
preactx1 += xx_
h1 = tensor.tanh(preactx1)
h1 = u1 * h_ + (1. - u1) * h1
h1 = m_[:, None] * h1 + (1. - m_)[:, None] * h_
# attention
pstate_ = tensor.dot(h1, W_comb_att)
pctx__ = pctx_ + pstate_[None, :, :]
# pctx__ += xc_
pctx__ = tensor.tanh(pctx__)
alpha = tensor.dot(pctx__, U_att) + c_tt
alpha = alpha.reshape([alpha.shape[0], alpha.shape[1]])
alpha = tensor.exp(alpha)
if context_mask:
alpha = alpha * context_mask
alpha = alpha / alpha.sum(0, keepdims=True)
ctx_ = (cc_ * alpha[:, :, None]).sum(0) # current context
preact2 = tensor.dot(h1, U_nl) + b_nl
preact2 += tensor.dot(ctx_, Wc)
preact2 = tensor.nnet.sigmoid(preact2)
r2 = _slice(preact2, 0, dim)
u2 = _slice(preact2, 1, dim)
preactx2 = tensor.dot(h1, Ux_nl) + bx_nl
preactx2 *= r2
preactx2 += tensor.dot(ctx_, Wcx)
h2 = tensor.tanh(preactx2)
h2 = u2 * h1 + (1. - u2) * h2
h2 = m_[:, None] * h2 + (1. - m_)[:, None] * h1
return h2, ctx_, alpha.T # pstate_, preact, preactx, r, u
seqs = [mask, state_below_, state_belowx]
# seqs = [mask, state_below_, state_belowx, state_belowc]
_step = _step_slice
shared_vars = [tparams[_p(prefix, 'U')],
tparams[_p(prefix, 'Wc')],
tparams[_p(prefix, 'W_comb_att')],
tparams[_p(prefix, 'U_att')],
tparams[_p(prefix, 'c_tt')],
tparams[_p(prefix, 'Ux')],
tparams[_p(prefix, 'Wcx')],
tparams[_p(prefix, 'U_nl')],
tparams[_p(prefix, 'Ux_nl')],
tparams[_p(prefix, 'b_nl')],
tparams[_p(prefix, 'bx_nl')]]
if one_step:
rval = _step(*(seqs + [init_state, None, None, pctx_, context] +
shared_vars))
else:
rval, updates = theano.scan(_step,
sequences=seqs,
outputs_info=[init_state,
tensor.alloc(0., n_samples,
context.shape[2]),
tensor.alloc(0., n_samples,
context.shape[0])],
non_sequences=[pctx_, context] + shared_vars,
name=_p(prefix, '_layers'),
n_steps=nsteps,
profile=profile,
strict=True)
return rval
'''
def init_params(options):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
# encoder: bidirectional RNN
params = get_layer(options['encoder'])[0](options, params,
prefix='encoder',
nin=options['dim_word'],
dim=options['dim'])
params = get_layer(options['encoder'])[0](options, params,
prefix='encoder_r',
nin=options['dim_word'],
dim=options['dim'])
ctxdim = 2 * options['dim']
# init_state, init_cell
params = get_layer('ff')[0](options, params, prefix='ff_state',
nin=ctxdim, nout=options['dim'])
# decoder
params = get_layer(options['decoder'])[0](options, params,
prefix='decoder',
nin=options['dim_word'],
dim=options['dim'],
dimctx=ctxdim)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit_lstm',
nin=options['dim'], nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_prev',
nin=options['dim_word'],
nout=options['dim_word'], ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_ctx',
nin=ctxdim, nout=options['dim_word'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim_word'],
nout=options['n_words'])
return params
'''
def init_params(options):
params = OrderedDict()
# embedding
#params['Wemb'] = norm_weight(options['dict_size'], options['dim_word'])
params['Wemb'] = options['allembs']
params['op_weights'] = norm_weight(options['op_num'] * options['op_dim'], options['op_dim'])
params['op_V'] = numpy.random.randn(options['op_num']).astype('float32')
# params['Wemb_dec'] = norm_weight(options['n_words'], options['dim_word'])
params = get_layer('ff')[0](options, params,
prefix='projOp',
nin=options['dim_word'],
nout=options['op_dim'])
# funcf
#params = get_layer('funcf_layer')[0](options, params,
# prefix='funcf',
# nin=options['dim_word'],
# nout=options['dim'])
# funcG
#params = get_layer('funcf_layer')[0](options, params,
# prefix='funcG',
# nin=options['dim_word'] * 2,
# nout=options['dim'])
#params = get_layer('ff')[0](options, params, prefix='bkProj',
# nin=options['dim'] + options['bk_dim'], nout=options['dim'],
# ortho=False)
#params = get_layer('ff')[0](options, params, prefix='WeightW',
# nin=options['bk_dim'], nout=1,
# ortho=False)
params = get_layer('ff')[0](options, params, prefix='funcG',
nin=options['dim'] * 2, nout=options['dim'],
ortho=False)
# readout
params = get_layer('ff')[0](options, params, prefix='ff_logit',
nin=options['dim'] * 2, nout=options['dim'],
ortho=False)
params = get_layer('ff')[0](options, params, prefix='ff_logit_linear',
nin=options['dim'], nout=options['class_num'],
ortho=False)
return params
def build_dam(tparams, options):
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
bk_x = tensor.tensor4('x_bk', dtype='float32')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
bk_y = tensor.tensor4('y_bk', dtype='float32')
y_mask = tensor.matrix('y_mask', dtype='float32')
#all_embs = tensor.matrix('emb', dtype='float32')
label = tensor.vector('label', dtype='int64')
n_timesteps_h = x.shape[0]
n_timesteps_t = y.shape[0]
n_samples = x.shape[1]
emb_h = tparams['Wemb'][x.flatten()]
emb_h = emb_h.reshape([n_timesteps_h, n_samples, options['dim_word']])
if options['use_dropout']:
emb_h = dropout_layer(emb_h, use_noise, trng)
emb_t = tparams['Wemb'][y.flatten()]
emb_t = emb_t.reshape([n_timesteps_t, n_samples, options['dim_word']])
if options['use_dropout']:
emb_t = dropout_layer(emb_t, use_noise, trng)
#proj_h = get_layer('funcf_layer')[1](tparams, emb_h, options,
# prefix='funcf')
#proj_t = get_layer('funcf_layer')[1](tparams, emb_t, options,
# prefix='funcf')
weight_matrix = tensor.batched_dot(emb_h.dimshuffle(1, 0, 2), emb_t.dimshuffle(1, 2, 0))
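    # Unnormalized attention scores: after the dimshuffles, emb_h is (n_samples, len_h, dim) and
    # emb_t is (n_samples, dim, len_t), so batched_dot yields a (n_samples, len_h, len_t) matrix
    # of dot products between every token pair of the two sentences (the funcf projection that
    # would transform the embeddings first is commented out above).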
# bk_x
bk_x = bk_x.dimshuffle(1,0,2,3)
#bk_x = bk_x[:,:,:,(0,1,11,12)]
bk_m = theano.tensor.repeat(bk_x, repeats=options['op_dim'], axis=3)
bk_op = bk_m[:,:,:,:,None] * tparams['op_weights'][None,None,None,None,:,:]
bk_op = bk_op.reshape([n_samples, n_timesteps_h, n_timesteps_t, options['op_num'] * options['op_dim'],options['op_dim']])
bk_op = bk_op.dimshuffle(0,1,2,4,3)
emb_h_tmp = emb_h.dimshuffle(1,0,'x',2) + tensor.zeros([n_samples,n_timesteps_h,n_timesteps_t,options['dim']])
emb_h_tmp = emb_h_tmp.reshape([-1, options['dim_word']])
emb_h_tmp = get_layer('ff')[1](tparams, emb_h_tmp, options,prefix='projOp', activ='relu')
bk_op = bk_op.reshape([-1, options['op_dim'], options['op_num'] * options['op_dim']])
#emb_h_tmp.dimshuffle(0, 'x', 1) * r_hop.reshape [-1, options['op_num'], options['dim']
#r_hop = tensor.batched_dot(emb_h_tmp, bk_op)
bk_op = tensor.batched_dot(emb_h_tmp, bk_op)
emb_t_tmp = emb_t.dimshuffle(1,'x',0,2) + tensor.zeros([n_samples,n_timesteps_h,n_timesteps_t,options['dim']])
emb_t_tmp = emb_t_tmp.reshape([-1, options['dim_word']])
emb_t_tmp = get_layer('ff')[1](tparams, emb_t_tmp, options,prefix='projOp', activ='relu')
weight_bk = (bk_op.reshape([-1, options['op_num'], options['op_dim']]) * emb_t_tmp.dimshuffle(0, 'x', 1)).sum(2)
weight_bk = tensor.dot(tparams['op_V'], weight_bk.T)
weight_matrix = weight_matrix + weight_bk.reshape([n_samples, n_timesteps_h, n_timesteps_t])
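    # The knowledge-based score weight_bk, built from the bk_x relation features through the
    # learned op_weights/op_V parameters and the projOp projection, is added to the embedding
    # dot-product scores before the two attention softmaxes below.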
weight_matrix_1 = tensor.exp(weight_matrix - weight_matrix.max(1, keepdims=True)).dimshuffle(1,2,0)
weight_matrix_2 = tensor.exp(weight_matrix - weight_matrix.max(2, keepdims=True)).dimshuffle(1,2,0)
# lenH * lenT * batchSize
alpha_weight = weight_matrix_1 * x_mask.dimshuffle(0, 'x', 1)/ weight_matrix_1.sum(0, keepdims=True)
beta_weight = weight_matrix_2 * y_mask.dimshuffle('x', 0, 1)/ weight_matrix_2.sum(1, keepdims=True)
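    # Two softmax normalizations of the same (max-shifted) score matrix: alpha_weight normalizes
    # over the tokens of x (axis 0, masked by x_mask) and beta_weight over the tokens of y
    # (masked by y_mask).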
##bk_y = bk_y.dimshuffle(2, 0, 1, 3)
#emb_h_bk = theano.tensor.repeat(emb_h[:,None,:,:],repeats=n_timesteps_t, axis=1)
#emb_h_bk = theano.tensor.concatenate([emb_h_bk,bk_y.dimshuffle(2,0,1,3)], axis=3)
#emb_h_bk = get_layer('ff')[1](tparams, emb_h_bk, options,prefix='bkProj', activ='relu')
## lenH * lenT * bachSize * dim
##bk_x = bk_x.dimshuffle(0, 2, 1, 3)
#emb_t_bk = theano.tensor.repeat(emb_t[None,:,:,:],repeats=n_timesteps_h, axis=0)
#emb_t_bk = concatenate([emb_t_bk,bk_x.dimshuffle(0,2,1,3)], axis=3)
#emb_t_bk = get_layer('ff')[1](tparams, emb_t_bk, options,prefix='bkProj', activ='relu')
alpha = (emb_h.dimshuffle(0, 'x', 1, 2) * alpha_weight.dimshuffle(0, 1, 2, 'x')).sum(0)
beta = (emb_t.dimshuffle('x', 0, 1, 2) * beta_weight.dimshuffle(0, 1, 2, 'x')).sum(1)
#alpha = (emb_h_bk * alpha_weight.dimshuffle(0, 1, 2, 'x')).sum(0)
#beta = (emb_t_bk * beta_weight.dimshuffle(0, 1, 2, 'x')).sum(1)
v1 = concatenate([emb_h, beta], axis=2)
v2 = concatenate([emb_t, alpha], axis=2)
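    # Compare step: each token is concatenated with its soft-aligned summary of the other
    # sentence (v1 = [emb_h; beta], v2 = [emb_t; alpha]) and passed through the shared
    # feed-forward layer funcG (a single ReLU layer here).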
proj_v1 = get_layer('ff')[1](tparams, v1, options,prefix='funcG', activ='relu')
proj_v2 = get_layer('ff')[1](tparams, v2, options, prefix='funcG', activ='relu')
logit1 = (proj_v1 * x_mask[:, :, None]).sum(0)
logit2 = (proj_v2 * y_mask[:, :, None]).sum(0)
logit = concatenate([logit1, logit2], axis=1)
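    # Aggregate step: the compared vectors are summed over the masked token positions of each
    # sentence and concatenated into one fixed-size vector per pair for the classifier below.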
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit', activ='tanh')
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options, prefix='ff_logit_linear', activ='linear')
probs = tensor.nnet.softmax(logit)
    predict_label = probs.argmax(axis=1)
#cost = -tensor.log(probs)[tensor.arange(label.shape[0]), label]
cost = tensor.nnet.categorical_crossentropy(probs, label)
return trng, use_noise, x, x_mask, bk_x, y, y_mask, bk_y, label, predict_label, cost
# build a training model
def build_model(tparams, options):
opt_ret = dict()
trng = RandomStreams(1234)
use_noise = theano.shared(numpy.float32(0.))
# description string: #words x #samples
x = tensor.matrix('x', dtype='int64')
x_mask = tensor.matrix('x_mask', dtype='float32')
y = tensor.matrix('y', dtype='int64')
y_mask = tensor.matrix('y_mask', dtype='float32')
# for the backward rnn, we just need to invert x and x_mask
xr = x[::-1]
xr_mask = x_mask[::-1]
n_timesteps = x.shape[0]
n_timesteps_trg = y.shape[0]
n_samples = x.shape[1]
# word embedding for forward rnn (source)
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder',
mask=x_mask)
# word embedding for backward rnn (source)
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r',
mask=xr_mask)
# context will be the concatenation of forward and backward rnns
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim - 1)
# mean of the context (across time) will be used to initialize decoder rnn
ctx_mean = (ctx * x_mask[:, :, None]).sum(0) / x_mask.sum(0)[:, None]
# or you can use the last state of forward + backward encoder rnns
# ctx_mean = concatenate([proj[0][-1], projr[0][-1]], axis=proj[0].ndim-2)
# initial decoder state
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
# word embedding (target), we will shift the target sequence one time step
# to the right. This is done because of the bi-gram connections in the
# readout and decoder rnn. The first target will be all zeros and we will
# not condition on the last output.
emb = tparams['Wemb_dec'][y.flatten()]
emb = emb.reshape([n_timesteps_trg, n_samples, options['dim_word']])
emb_shifted = tensor.zeros_like(emb)
emb_shifted = tensor.set_subtensor(emb_shifted[1:], emb[:-1])
emb = emb_shifted
# decoder - pass through the decoder conditional gru with attention
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=y_mask, context=ctx,
context_mask=x_mask,
one_step=False,
init_state=init_state)
# hidden states of the decoder gru
proj_h = proj[0]
# weighted averages of context, generated by attention module
ctxs = proj[1]
# weights (alignment matrix)
opt_ret['dec_alphas'] = proj[2]
# compute word probabilities
logit_lstm = get_layer('ff')[1](tparams, proj_h, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
logit_shp = logit.shape
probs = tensor.nnet.softmax(logit.reshape([logit_shp[0] * logit_shp[1],
logit_shp[2]]))
# cost
y_flat = y.flatten()
y_flat_idx = tensor.arange(y_flat.shape[0]) * options['n_words'] + y_flat
cost = -tensor.log(probs.flatten()[y_flat_idx])
cost = cost.reshape([y.shape[0], y.shape[1]])
cost = (cost * y_mask).sum(0)
return trng, use_noise, x, x_mask, y, y_mask, opt_ret, cost
# build a sampler
def build_sampler(tparams, options, trng, use_noise):
x = tensor.matrix('x', dtype='int64')
xr = x[::-1]
n_timesteps = x.shape[0]
n_samples = x.shape[1]
# word embedding (source), forward and backward
emb = tparams['Wemb'][x.flatten()]
emb = emb.reshape([n_timesteps, n_samples, options['dim_word']])
embr = tparams['Wemb'][xr.flatten()]
embr = embr.reshape([n_timesteps, n_samples, options['dim_word']])
# encoder
proj = get_layer(options['encoder'])[1](tparams, emb, options,
prefix='encoder')
projr = get_layer(options['encoder'])[1](tparams, embr, options,
prefix='encoder_r')
# concatenate forward and backward rnn hidden states
ctx = concatenate([proj[0], projr[0][::-1]], axis=proj[0].ndim - 1)
# get the input for decoder rnn initializer mlp
ctx_mean = ctx.mean(0)
# ctx_mean = concatenate([proj[0][-1],projr[0][-1]], axis=proj[0].ndim-2)
init_state = get_layer('ff')[1](tparams, ctx_mean, options,
prefix='ff_state', activ='tanh')
print 'Building f_init...',
outs = [init_state, ctx]
f_init = theano.function([x], outs, name='f_init', profile=profile)
print 'Done'
# x: 1 x 1
y = tensor.vector('y_sampler', dtype='int64')
init_state = tensor.matrix('init_state', dtype='float32')
# if it's the first word, emb should be all zero and it is indicated by -1
emb = tensor.switch(y[:, None] < 0,
tensor.alloc(0., 1, tparams['Wemb_dec'].shape[1]),
tparams['Wemb_dec'][y])
# apply one step of conditional gru with attention
proj = get_layer(options['decoder'])[1](tparams, emb, options,
prefix='decoder',
mask=None, context=ctx,
one_step=True,
init_state=init_state)
# get the next hidden state
next_state = proj[0]
# get the weighted averages of context for this target word y
ctxs = proj[1]
logit_lstm = get_layer('ff')[1](tparams, next_state, options,
prefix='ff_logit_lstm', activ='linear')
logit_prev = get_layer('ff')[1](tparams, emb, options,
prefix='ff_logit_prev', activ='linear')
logit_ctx = get_layer('ff')[1](tparams, ctxs, options,
prefix='ff_logit_ctx', activ='linear')
logit = tensor.tanh(logit_lstm + logit_prev + logit_ctx)
if options['use_dropout']:
logit = dropout_layer(logit, use_noise, trng)
logit = get_layer('ff')[1](tparams, logit, options,
prefix='ff_logit', activ='linear')
# compute the softmax probability
next_probs = tensor.nnet.softmax(logit)
# sample from softmax distribution to get the sample
next_sample = trng.multinomial(pvals=next_probs).argmax(1)
# compile a function to do the whole thing above, next word probability,
# sampled word for the next target, next hidden state to be used
print 'Building f_next..',
inps = [y, ctx, init_state]
outs = [next_probs, next_sample, next_state]
f_next = theano.function(inps, outs, name='f_next', profile=profile)
print 'Done'
return f_init, f_next
# generate sample, either with stochastic sampling or beam search. Note that
# this function iteratively calls the f_init and f_next functions.
def gen_sample(tparams, f_init, f_next, x, options, trng=None, k=1, maxlen=30,
stochastic=True, argmax=False):
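    # Rough usage sketch (assumed): f_init and f_next come from build_sampler, e.g.
    #     sample, score = gen_sample(tparams, f_init, f_next, x, options, trng=trng,
    #                                k=5, maxlen=30, stochastic=False)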
# k is the beam size we have
if k > 1:
assert not stochastic, \
'Beam search does not support stochastic sampling'
sample = []
sample_score = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
# get initial state of decoder rnn and encoder context
ret = f_init(x)
next_state, ctx0 = ret[0], ret[1]
next_w = -1 * numpy.ones((1,)).astype('int64') # bos indicator
for ii in xrange(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
inps = [next_w, ctx, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score -= numpy.log(next_p[0, nw])
if nw == 0:
break
else:
cand_scores = hyp_scores[:, None] - numpy.log(next_p)
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k - dead_k)]
voc_size = next_p.shape[1]
trans_indices = ranks_flat / voc_size
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k - dead_k).astype('float32')
new_hyp_states = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti] + [wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append(copy.copy(next_state[ti]))
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
for idx in xrange(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == 0:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in xrange(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
return sample, sample_score
# calculate the log probabilities (per-sample loss) and accuracy on a given corpus
def pred_probs(f_log_probs, prepare_data, options, iterator, verbose=False):
probs = []
n_done = 0
correct_num = 0
all_num = 0.
for x, x_syn, y, y_syn, label in iterator:
n_done += len(x)
all_num += len(label)
x, x_mask, bk_x, y, y_mask, bk_y, label = prepare_data(x, x_syn, y, y_syn, label,
n_words_src=options['n_words_src'], bk_for_x=options['bk_for_x'],
bk_for_y=options['bk_for_y'], bk_dim=options['bk_dim'],
maxlen= options['maxlen'],n_words=options['n_words'])
pprobs, predict_label = f_log_probs(x, x_mask, bk_x, y, y_mask, bk_y, label)
for pp in pprobs:
probs.append(pp)
if numpy.isnan(numpy.mean(probs)):
ipdb.set_trace()
if verbose:
print >> sys.stderr, '%d samples computed' % (n_done)
correct_num += (label == predict_label).sum()
print 'correct ', correct_num, 'all ', all_num
return numpy.array(probs), correct_num/all_num
# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
def adam(lr, tparams, grads, inp, cost, beta1=0.9, beta2=0.999, e=1e-8):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup, profile=profile)
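    # Two-function pattern used by the training loop: f_grad_shared computes the cost and stores
    # the gradients in shared variables; f_update then applies one Adam step with the given
    # learning rate using those stored gradients.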
updates = []
t_prev = theano.shared(numpy.float32(0.))
t = t_prev + 1.
lr_t = lr * tensor.sqrt(1. - beta2 ** t) / (1. - beta1 ** t)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0., p.name + '_mean')
v = theano.shared(p.get_value() * 0., p.name + '_variance')
m_t = beta1 * m + (1. - beta1) * g
v_t = beta2 * v + (1. - beta2) * g ** 2
step = lr_t * m_t / (tensor.sqrt(v_t) + e)
p_t = p - step
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((t_prev, t))
upreturn = [ item for sublist in updates for item in sublist]
f_update = theano.function([lr], upreturn, updates=updates,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def adadelta(lr, tparams, grads, inp, cost):
print 'adadelta'
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rup2' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup + rg2up,
profile=profile)
updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads, running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def rmsprop(lr, tparams, grads, inp, cost):
zipped_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_grads = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rgup = [(rg, 0.95 * rg + 0.05 * g) for rg, g in zip(running_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function(inp, cost, updates=zgup + rgup + rg2up,
profile=profile)
updir = [theano.shared(p.get_value() * numpy.float32(0.),
name='%s_updir' % k)
for k, p in tparams.iteritems()]
updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
for ud, zg, rg, rg2 in zip(updir, zipped_grads, running_grads,
running_grads2)]
param_up = [(p, p + udn[1])
for p, udn in zip(itemlist(tparams), updir_new)]
f_update = theano.function([lr], [], updates=updir_new + param_up,
on_unused_input='ignore', profile=profile)
return f_grad_shared, f_update
def sgd(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0.,
name='%s_grad' % k)
for k, p in tparams.iteritems()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup,
profile=profile)
pup = [(p, p - lr * g) for p, g in zip(itemlist(tparams), gshared)]
f_update = theano.function([lr], [], updates=pup, profile=profile)
return f_grad_shared, f_update
def train(dim_word=100, # word vector dimensionality
dim=1000, # the number of LSTM units
bk_dim=13,
class_num=3,
op_num=3,
op_dim=50,
encoder='gru',
decoder='gru_cond',
patience=1000000, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
dispFreq=100,
decay_c=0., # L2 regularization penalty
alpha_c=0., # alignment regularization
clip_c=-1., # gradient clipping threshold
lrate=0.01, # learning rate
n_words_src=100000, # source vocabulary size
n_words=100000, # target vocabulary size
maxlen=100, # maximum length of the description
optimizer='rmsprop',
batch_size=16,
valid_batch_size=16,
saveto='modelOp.npz',
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
sampleFreq=100, # generate some samples after every sampleFreq
train_datasets=[
'../data/train_h_fix.tok',
'../data/train_t_fix.tok',
'../data/train_label.tok',
'../data/train_syn_h.syntok',
'../data/train_syn_t.syntok'],
valid_datasets=[
'../data/dev_h_fix.tok',
'../data/dev_t_fix.tok',
'../data/dev_label.tok',
'../data/dev_syn_h.syntok',
'../data/dev_syn_t.syntok'],
test_datasets=[
'../data/test_h_fix.tok',
'../data/test_t_fix.tok',
'../data/test_label.tok',
'../data/test_syn_h.syntok',
'../data/test_syn_t.syntok'],
dictionaries=[
'../data/snli_dict_fix.pkl',
'../data/bk_dict.pkl'],
embedings=[
'../data/snli_emb_300_fix.pkl'],
bk_dicts=[
'../data/bk_for_x.pkl',
'../data/bk_for_y.pkl'],
use_dropout=False,
reload_=False,
overwrite=False):
# Model options
model_options = locals().copy()
log = logging.getLogger(os.path.basename(__file__).split('.')[0])
# load dictionaries and invert them
worddicts = [None] * len(dictionaries)
worddicts_r = [None] * len(dictionaries)
for ii, dd in enumerate(dictionaries):
with open(dd, 'rb') as f:
worddicts[ii] = pkl.load(f)
worddicts_r[ii] = dict()
for kk, vv in worddicts[ii].iteritems():
worddicts_r[ii][vv] = kk
print 'Loading embedings ...'
with open(embedings[0], 'rb') as f:
pretrained_embs = pkl.load(f)
#pretrained_embs = theano.shared(pretrained_embs, name='pretrained_embs')
print 'Done'
model_options['allembs'] = pretrained_embs
print 'Loading bks ...'
with open(bk_dicts[0], 'rb') as f:
bk_for_x = pkl.load(f)
model_options['bk_for_x'] = bk_for_x
with open(bk_dicts[1], 'rb') as f:
bk_for_y = pkl.load(f)
model_options['bk_for_y'] = bk_for_x
print 'Done'
# reload options
if reload_ and os.path.exists(saveto):
print 'Reloading model options'
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
print 'Loading data'
train = TextIterator(train_datasets[0], train_datasets[1],
train_datasets[2], train_datasets[3], train_datasets[4],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=batch_size,
maxlen=maxlen)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
valid_datasets[2],valid_datasets[3],valid_datasets[4],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
test = TextIterator(test_datasets[0], test_datasets[1],
test_datasets[2], test_datasets[3], test_datasets[4],
dictionaries[0], dictionaries[1],
n_words_source=n_words_src, n_words_target=n_words,
batch_size=valid_batch_size,
maxlen=maxlen)
print 'Building model'
params = init_params(model_options)
# reload parameters
if reload_ and os.path.exists(saveto):
print 'Reloading model parameters'
params = load_params(saveto, params)
tparams = init_tparams(params)
trng, use_noise, \
x, x_mask, bk_x, y, y_mask, bk_y, label, predict_label, \
cost = \
build_dam(tparams, model_options)
inps = [x, x_mask, bk_x, y, y_mask, bk_y, label]
# print 'Building sampler'
# f_init, f_next = build_sampler(tparams, model_options, trng, use_noise)
# before any regularizer
print 'Building f_log_probs...',
f_log_probs = theano.function(inps, [cost, predict_label], profile=profile)
print 'Done'
cost = cost.mean()
# apply L2 regularization on weights
# if decay_c > 0.:
# decay_c = theano.shared(numpy.float32(decay_c), name='decay_c')
# weight_decay = 0.
# for kk, vv in tparams.iteritems():
# weight_decay += (vv ** 2).sum()
# weight_decay *= decay_c
# cost += weight_decay
## regularize the alpha weights
#if alpha_c > 0. and not model_options['decoder'].endswith('simple'):
# alpha_c = theano.shared(numpy.float32(alpha_c), name='alpha_c')
# alpha_reg = alpha_c * (
# (tensor.cast(y_mask.sum(0) // x_mask.sum(0), 'float32')[:, None] -
# opt_ret['dec_alphas'].sum(0)) ** 2).sum(1).mean()
# cost += alpha_reg
# after all regularizers - compile the computational graph for cost
print 'Building f_cost...',
f_cost = theano.function(inps, cost, profile=profile)
print 'Done'
print 'Computing gradient...',
grads = tensor.grad(cost, wrt=itemlist(tparams))
print 'Done'
# apply gradient clipping here
if clip_c > 0.:
g2 = 0.
for g in grads:
g2 += (g ** 2).sum()
new_grads = []
for g in grads:
new_grads.append(tensor.switch(g2 > (clip_c ** 2),
g / tensor.sqrt(g2) * clip_c,
g))
grads = new_grads
# compile the optimizer, the actual computational graph is compiled here
lr = tensor.scalar(name='lr')
print 'Building optimizers...',
f_grad_shared, f_update = eval(optimizer)(lr, tparams, grads, inps, cost)
print 'Done'
print 'Optimization'
best_p = None
bad_counter = 0
bad_counter_acc = 0
uidx = 0
estop = False
history_errs = []
history_accs = []
epoch_accs = []
# reload history
if reload_ and os.path.exists(saveto):
rmodel = numpy.load(saveto)
history_errs = list(rmodel['history_errs'])
if 'uidx' in rmodel:
uidx = rmodel['uidx']
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
#if sampleFreq == -1:
# sampleFreq = len(train[0]) / batch_size
for eidx in xrange(max_epochs):
n_samples = 0
for x, x_syn, y, y_syn, label in train:
n_samples += len(x)
uidx += 1
use_noise.set_value(1.)
try:
x, x_mask, bk_x, y, y_mask, bk_y, label = prepare_data(x, x_syn, y, y_syn, label, maxlen=maxlen,
n_words_src=n_words_src, bk_for_x=model_options['bk_for_x'],
bk_for_y=model_options['bk_for_y'], bk_dim=model_options['bk_dim'],
n_words=n_words)
except ValueError:
print prepare_data(x, x_syn, y, y_syn, label, maxlen=maxlen,
n_words_src=n_words_src, bk_for_x=model_options['bk_for_x'],
bk_for_y=model_options['bk_for_y'], bk_dim=model_options['bk_dim'],
n_words=n_words)
raise
if x is None:
print 'Minibatch with zero sample under length ', maxlen
uidx -= 1
continue
ud_start = time.time()
# compute cost, grads and copy grads to shared variables
cost = f_grad_shared(x, x_mask, bk_x, y, y_mask, bk_y, label)
# do the update on parameters
#print 'Befor:'
#print tparams['ff_logit_W'].get_value()
f_update(lrate)
#print 'After:'
#print tparams['ff_logit_W'].get_value()
#update = f_update(lrate)
#print update
ud = time.time() - ud_start
# check for bad numbers, usually we remove non-finite elements
# and continue training - but not done here
if numpy.isnan(cost) or numpy.isinf(cost):
print 'NaN detected'
return 1., 1., 1.
# verbose
if numpy.mod(uidx, dispFreq) == 0:
log.info('Epoch: %d Update: %d Cost: %f UD: %f'%(eidx, uidx, cost, ud))
# save the best model so far, in addition, save the latest model
# into a separate file with the iteration number for external eval
if numpy.mod(uidx, saveFreq) == 0:
print 'Saving the best model...',
if best_p is not None:
params = best_p
else:
params = unzip(tparams)
numpy.savez(saveto, history_errs=history_errs, uidx=uidx, **params)
pkl.dump(model_options, open('%s.pkl' % saveto, 'wb'))
print 'Done'
# save with uidx
if not overwrite:
print 'Saving the model at iteration {}...'.format(uidx),
saveto_uidx = '{}.iter{}.npz'.format(
os.path.splitext(saveto)[0], uidx)
numpy.savez(saveto_uidx, history_errs=history_errs,
uidx=uidx, **unzip(tparams))
print 'Done'
# validate model on validation set and early stop if necessary
if numpy.mod(uidx, validFreq) == 0:
use_noise.set_value(0.)
#print 'Here:'
#print tparams['ff_logit_W'].get_value()
#print unzip(tparams)
valid_errs, valid_acc = pred_probs(f_log_probs, prepare_data,
model_options, valid)
valid_err = valid_errs.mean()
history_errs.append(valid_err)
test_errs, test_acc = pred_probs(f_log_probs, prepare_data,
model_options, test)
test_err = test_errs.mean()
history_accs.append(test_acc)
if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_p = unzip(tparams)
bad_counter = 0
if len(history_errs) > patience and valid_err >= \
numpy.array(history_errs)[:-patience].min():
bad_counter += 1
if bad_counter > patience:
print 'Early Stop!'
#estop = True
#break
if numpy.isnan(valid_err):
ipdb.set_trace()
log.info('Epoch: %d Update: %d ValidAcc: %f TestAcc: %f' % (eidx, uidx, valid_acc, test_acc))
# finish after this many updates
if uidx >= finish_after:
print 'Finishing after %d iterations!' % uidx
estop = True
break
print 'Seen %d samples' % n_samples
if len(history_accs) > 0:
epoch_accs.append(history_accs[-1])
if len(epoch_accs) > 1 and epoch_accs[-1] <= numpy.array(epoch_accs)[:-1].max():
bad_counter_acc += 1
if bad_counter_acc > 2:
print 'Early Stop Acc!'
#estop = True
#break
if estop:
break
if best_p is not None:
zipp(best_p, tparams)
use_noise.set_value(0.)
test_err, acc = pred_probs(f_log_probs, prepare_data,
model_options, test)
print 'Test acc ', acc
params = copy.copy(best_p)
numpy.savez(saveto, zipped_params=best_p,
history_errs=history_errs,
uidx=uidx,
**params)
return valid_err
if __name__ == '__main__':
pass
| bsd-3-clause | 2,461,992,773,011,421,700 | 36.937821 | 252 | 0.529831 | false |
spesmilo/electrum | electrum/gui/qt/amountedit.py | 1 | 4862 | # -*- coding: utf-8 -*-
from decimal import Decimal
from typing import Union
from PyQt5.QtCore import pyqtSignal, Qt, QSize
from PyQt5.QtGui import QPalette, QPainter
from PyQt5.QtWidgets import (QLineEdit, QStyle, QStyleOptionFrame, QSizePolicy)
from .util import char_width_in_lineedit, ColorScheme
from electrum.util import (format_satoshis_plain, decimal_point_to_base_unit_name,
FEERATE_PRECISION, quantize_feerate)
class FreezableLineEdit(QLineEdit):
frozen = pyqtSignal()
def setFrozen(self, b):
self.setReadOnly(b)
self.setFrame(not b)
self.frozen.emit()
class SizedFreezableLineEdit(FreezableLineEdit):
def __init__(self, *, width: int, parent=None):
super().__init__(parent)
self._width = width
self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed)
def sizeHint(self) -> QSize:
sh = super().sizeHint()
return QSize(self._width, sh.height())
class AmountEdit(SizedFreezableLineEdit):
shortcut = pyqtSignal()
def __init__(self, base_unit, is_int=False, parent=None):
# This seems sufficient for hundred-BTC amounts with 8 decimals
width = 16 * char_width_in_lineedit()
super().__init__(width=width, parent=parent)
self.base_unit = base_unit
self.textChanged.connect(self.numbify)
self.is_int = is_int
self.is_shortcut = False
self.extra_precision = 0
def decimal_point(self):
return 8
def max_precision(self):
return self.decimal_point() + self.extra_precision
def numbify(self):
text = self.text().strip()
if text == '!':
self.shortcut.emit()
return
pos = self.cursorPosition()
chars = '0123456789'
if not self.is_int: chars +='.'
s = ''.join([i for i in text if i in chars])
if not self.is_int:
if '.' in s:
p = s.find('.')
s = s.replace('.','')
s = s[:p] + '.' + s[p:p+self.max_precision()]
self.setText(s)
# setText sets Modified to False. Instead we want to remember
# if updates were because of user modification.
self.setModified(self.hasFocus())
self.setCursorPosition(pos)
def paintEvent(self, event):
QLineEdit.paintEvent(self, event)
if self.base_unit:
panel = QStyleOptionFrame()
self.initStyleOption(panel)
textRect = self.style().subElementRect(QStyle.SE_LineEditContents, panel, self)
textRect.adjust(2, 0, -10, 0)
painter = QPainter(self)
painter.setPen(ColorScheme.GRAY.as_color())
painter.drawText(textRect, int(Qt.AlignRight | Qt.AlignVCenter), self.base_unit())
def get_amount(self) -> Union[None, Decimal, int]:
try:
return (int if self.is_int else Decimal)(str(self.text()))
except:
return None
def setAmount(self, x):
self.setText("%d"%x)
class BTCAmountEdit(AmountEdit):
def __init__(self, decimal_point, is_int=False, parent=None):
AmountEdit.__init__(self, self._base_unit, is_int, parent)
self.decimal_point = decimal_point
def _base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point())
def get_amount(self):
# returns amt in satoshis
try:
x = Decimal(str(self.text()))
except:
return None
# scale it to max allowed precision, make it an int
power = pow(10, self.max_precision())
max_prec_amount = int(power * x)
# if the max precision is simply what unit conversion allows, just return
if self.max_precision() == self.decimal_point():
return max_prec_amount
# otherwise, scale it back to the expected unit
amount = Decimal(max_prec_amount) / pow(10, self.max_precision()-self.decimal_point())
return Decimal(amount) if not self.is_int else int(amount)
def setAmount(self, amount_sat):
if amount_sat is None:
self.setText(" ") # Space forces repaint in case units changed
else:
self.setText(format_satoshis_plain(amount_sat, decimal_point=self.decimal_point()))
self.repaint() # macOS hack for #6269
class FeerateEdit(BTCAmountEdit):
def __init__(self, decimal_point, is_int=False, parent=None):
super().__init__(decimal_point, is_int, parent)
self.extra_precision = FEERATE_PRECISION
def _base_unit(self):
return 'sat/byte'
def get_amount(self):
sat_per_byte_amount = BTCAmountEdit.get_amount(self)
return quantize_feerate(sat_per_byte_amount)
def setAmount(self, amount):
amount = quantize_feerate(amount)
super().setAmount(amount)
| mit | -6,846,242,850,368,492,000 | 32.531034 | 95 | 0.613122 | false |
gjwajda/Computational-Tools-For-Big-Data | Exercise11/exercise11_2.py | 1 | 3455 | #!C:/Users/Greg/Anaconda/Python
import json
from time import time
from os import listdir
import re
import numpy as np
from random import shuffle
from pprint import pprint
#Folder with json files
path = "./json/"
# Function to load all json files into python
def merge_json(path):
merged_json = []
for filename in listdir(path):
with open(path + filename) as json_file:
json_data = json.load(json_file)
# Filter out any articles that don't contain topics or body
json_data = filter(lambda x: "topics" in x.keys() and "body" in x.keys(), json_data)
merged_json += json_data
return merged_json
#Function for creating 2D matrix of size x*y
def declareMatrix(x,y):
matrix = [[0]*y for i in range(x)]
return matrix;
#Bag of words function with json files and desired element to access
def bagOfWords(merge_list,des_string):
#To Count how many lines we are reading
line_count = 0
#To store the list of words in each line
lines = []
#To store the unique words
word_uniq = []
# Look in first 100 articles
for json in merge_list[:100]:
body = json[des_string]
line_count += 1
#Collect string, make lowercase, remove digits, remove
#punctuation, remove email addresses, remove websites
#and split into words for easier access
text = body.lower()
text = re.sub('[\[\]!~*\-,><}{;)(:#$"&%.?]',' ',text)
text = text.replace("\\n",' ')
text = text.split()
for word in text:
if word in word_uniq: #If word is in list of unique words, do nothing
next
else:
word_uniq.append(word) #Add to unique word list
#Add the line's words to a line list
lines.append(text)
#Declare Bag of Words Matrix
bag_matrix = declareMatrix(line_count,len(word_uniq))
#Fill in Bag of Words Matrix
for l in xrange(len(lines)):
for w in lines[l]:
bag_matrix[l][word_uniq.index(w)] += 1
#Print off dimensions of matrix
print "%d * %d are the dimensions of bag of words matrix" % (len(bag_matrix), len(bag_matrix[0]))
return np.array(bag_matrix)
def minhash(bag, numHashes):
# Transpose the bag of words so columns are different articles
# and rows are different words
bag = zip(*bag)
# Find how many rows there are to help with permutations
permutation_length = len(bag)
# Create output matrix
minhash_output = declareMatrix(numHashes, len(bag[0]))
for hashNum in xrange(numHashes):
# Create row permutation array
permutation = [i for i in range(permutation_length)]
shuffle(permutation)
# Go through each column, finding first non-zero
for column in xrange(len(bag[0])):
# Go through shuffled rows to find first nonzero
for i in xrange(len(bag)):
# Find current row in permutation
curr_row = permutation[i]
curr_item = bag[curr_row][column]
# For first nonzero item, store iteration in which it was found
if curr_item != 0:
minhash_output[hashNum][column] = i+1
break
return minhash_output
######################################
start_time = time()
merged_json = merge_json(path)
data = bagOfWords( merged_json, "body" )
print data
print("------ %s seconds ------" % (time() - start_time))
time2 = time()
minhashed = ( minhash(data, 10) )
s = [[str(e) for e in row] for row in minhashed]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
table = [fmt.format(*row) for row in s]
print '\n'.join(table)
print("------ %s seconds ------" % (time() - time2))
| mit | -7,924,714,258,189,322,000 | 26.204724 | 98 | 0.665702 | false |
tboyce1/home-assistant | homeassistant/components/climate/econet.py | 1 | 7250 | """
Support for Rheem EcoNet water heaters.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.econet/
"""
import datetime
import logging
import voluptuous as vol
from homeassistant.components.climate import (
DOMAIN, PLATFORM_SCHEMA, STATE_ECO, STATE_ELECTRIC, STATE_GAS,
STATE_HEAT_PUMP, STATE_HIGH_DEMAND, STATE_OFF, STATE_PERFORMANCE,
SUPPORT_OPERATION_MODE, SUPPORT_TARGET_TEMPERATURE, ClimateDevice)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_TEMPERATURE, CONF_PASSWORD, CONF_USERNAME,
TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['pyeconet==0.0.4']
_LOGGER = logging.getLogger(__name__)
ATTR_VACATION_START = 'next_vacation_start_date'
ATTR_VACATION_END = 'next_vacation_end_date'
ATTR_ON_VACATION = 'on_vacation'
ATTR_TODAYS_ENERGY_USAGE = 'todays_energy_usage'
ATTR_IN_USE = 'in_use'
ATTR_START_DATE = 'start_date'
ATTR_END_DATE = 'end_date'
SUPPORT_FLAGS_HEATER = (SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE)
SERVICE_ADD_VACATION = 'econet_add_vacation'
SERVICE_DELETE_VACATION = 'econet_delete_vacation'
ADD_VACATION_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(ATTR_START_DATE): cv.positive_int,
vol.Required(ATTR_END_DATE): cv.positive_int,
})
DELETE_VACATION_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
ECONET_DATA = 'econet'
HA_STATE_TO_ECONET = {
STATE_ECO: 'Energy Saver',
STATE_ELECTRIC: 'Electric',
STATE_HEAT_PUMP: 'Heat Pump',
STATE_GAS: 'gas',
STATE_HIGH_DEMAND: 'High Demand',
STATE_OFF: 'Off',
STATE_PERFORMANCE: 'Performance'
}
ECONET_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_ECONET.items()}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the EcoNet water heaters."""
from pyeconet.api import PyEcoNet
hass.data[ECONET_DATA] = {}
hass.data[ECONET_DATA]['water_heaters'] = []
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
econet = PyEcoNet(username, password)
water_heaters = econet.get_water_heaters()
hass_water_heaters = [
EcoNetWaterHeater(water_heater) for water_heater in water_heaters]
add_devices(hass_water_heaters)
hass.data[ECONET_DATA]['water_heaters'].extend(hass_water_heaters)
def service_handle(service):
"""Handle the service calls."""
entity_ids = service.data.get('entity_id')
all_heaters = hass.data[ECONET_DATA]['water_heaters']
_heaters = [
x for x in all_heaters
if not entity_ids or x.entity_id in entity_ids]
for _water_heater in _heaters:
if service.service == SERVICE_ADD_VACATION:
start = service.data.get(ATTR_START_DATE)
end = service.data.get(ATTR_END_DATE)
_water_heater.add_vacation(start, end)
if service.service == SERVICE_DELETE_VACATION:
for vacation in _water_heater.water_heater.vacations:
vacation.delete()
_water_heater.schedule_update_ha_state(True)
hass.services.register(DOMAIN, SERVICE_ADD_VACATION, service_handle,
schema=ADD_VACATION_SCHEMA)
hass.services.register(DOMAIN, SERVICE_DELETE_VACATION, service_handle,
schema=DELETE_VACATION_SCHEMA)
class EcoNetWaterHeater(ClimateDevice):
"""Representation of an EcoNet water heater."""
def __init__(self, water_heater):
"""Initialize the water heater."""
self.water_heater = water_heater
@property
def name(self):
"""Return the device name."""
return self.water_heater.name
@property
def available(self):
"""Return if the the device is online or not."""
return self.water_heater.is_connected
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def device_state_attributes(self):
"""Return the optional device state attributes."""
data = {}
vacations = self.water_heater.get_vacations()
if vacations:
data[ATTR_VACATION_START] = vacations[0].start_date
data[ATTR_VACATION_END] = vacations[0].end_date
data[ATTR_ON_VACATION] = self.water_heater.is_on_vacation
todays_usage = self.water_heater.total_usage_for_today
if todays_usage:
data[ATTR_TODAYS_ENERGY_USAGE] = todays_usage
data[ATTR_IN_USE] = self.water_heater.in_use
return data
@property
def current_operation(self):
"""
Return current operation as one of the following.
["eco", "heat_pump", "high_demand", "electric_only"]
"""
current_op = ECONET_STATE_TO_HA.get(self.water_heater.mode)
return current_op
@property
def operation_list(self):
"""List of available operation modes."""
op_list = []
modes = self.water_heater.supported_modes
for mode in modes:
ha_mode = ECONET_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = "Invalid operation mode mapping. " + mode + \
" doesn't map. Please report this."
_LOGGER.error(error)
return op_list
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATER
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
if target_temp is not None:
self.water_heater.set_target_set_point(target_temp)
else:
_LOGGER.error("A target temperature must be provided")
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
op_mode_to_set = HA_STATE_TO_ECONET.get(operation_mode)
if op_mode_to_set is not None:
self.water_heater.set_mode(op_mode_to_set)
else:
_LOGGER.error("An operation mode must be provided")
def add_vacation(self, start, end):
"""Add a vacation to this water heater."""
if not start:
start = datetime.datetime.now()
else:
start = datetime.datetime.fromtimestamp(start)
end = datetime.datetime.fromtimestamp(end)
self.water_heater.set_vacation_mode(start, end)
def update(self):
"""Get the latest date."""
self.water_heater.update_state()
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.water_heater.set_point
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.water_heater.min_set_point
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.water_heater.max_set_point
| apache-2.0 | 5,136,632,818,618,651,000 | 31.657658 | 78 | 0.638897 | false |
KirillShaman/escalate_gspread | app/channels/models.py | 1 | 5656 | # The MIT License (MIT)
# Escalate Copyright (c) [2014] [Chris Smith]
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from app import flask_app, datetime, db
import re
from multiprocessing.pool import ThreadPool as Pool
import requests
import bs4
from peewee import *
from app.gspreadsheet import Gspreadsheet
from app import GUser, werkzeug_cache
# ________________________________________________________________________
class ChannelCounter(Model):
name = CharField()
runnable = CharField()
gspread_link = CharField()
channel = CharField(null=True)
created_at = DateTimeField(null=True)
updated_at = DateTimeField(null=True)
class Meta:
database = db
db_table = 'channel_counters'
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def is_runnable(self):
return self.runnable == 'yes'
def __repr__(self):
return 'id={}, name={}'.format(self.id, self.name)
# ________________________________________________________________________
class Channel(Model):
ROOT_URL_PREFIX = 'http://www.youtube.com/user/'
ROOT_URL_SUFFIX = '/videos'
name = CharField()
channel = CharField()
url = CharField()
title = CharField(null=True)
views = IntegerField(null=True)
likes = IntegerField(null=True)
dislikes = IntegerField(null=True)
timestamp = DateTimeField(null=True)
class Meta:
database = db
db_table = 'channel_counts'
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def __repr__(self):
return 'id={}, name={}, url={}, title={}'.format(self.id, self.name, self.url, self.title)
@classmethod
def get_video_page_urls(cls, channel):
response = requests.get(Channel.ROOT_URL_PREFIX + channel + Channel.ROOT_URL_SUFFIX)
soup = bs4.BeautifulSoup(response.text)
urls = []
for title in soup.findAll('h3', attrs={'class': 'yt-lockup-title'}):
urls.append("https://www.youtube.com%s" % title.find('a')['href'])
return urls
@classmethod
def get_video_data(cls, video_page_url):
video_data = {}
video_data['url'] = video_page_url
video_data['title'] = ""
video_data['views'] = 0
video_data['likes'] = 0
video_data['dislikes'] = 0
try:
response = requests.get(
video_data['url'],
headers={'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36'}
)
soup = bs4.BeautifulSoup(response.text)
video_data['title'] = soup.select('span.watch-title')[0].get_text().strip()
video_data['views'] = int(re.sub('[^0-9]', '', soup.select('.watch-view-count')[0].get_text().split()[0]))
video_data['likes'] = int(re.sub('[^0-9]', '',
soup.select('#watch-like-dislike-buttons span.yt-uix-button-content')[0].get_text().split()[0]))
video_data['dislikes'] = int(re.sub('[^0-9]', '',
soup.select('#watch-like-dislike-buttons span.yt-uix-button-content')[2].get_text().split()[0]))
except Exception as e:
# some or all of the channels could not be scraped
print("Error: Channel:get_video_data: %s"%e)
pass
return video_data
@staticmethod
def scrape(video_counter):
guser = werkzeug_cache.get('guser')
gs = Gspreadsheet(guser.gmail, guser.gpassword, None)
gs.login()
ss = gs.gclient.open_by_url(video_counter.gspread_link)
ws = ss.sheet1
urls = gs.col_one(ws)
results = []
try:
pool = Pool(flask_app.config['MULTIPROCESSING_POOL_SIZE'])
# video_page_urls = Channel.get_video_page_urls(channel)
# results = pool.map(Channel.get_video_data, video_page_urls)
results = pool.map(Channel.get_video_data, urls)
now_timestamp = datetime.utcnow()
nrow = 2
for i in range(len(results)):
# gspread update cells in row:
acells = ws.range("B%s:E%s" % (nrow, nrow))
acells[0].value = results[i]['title']
acells[1].value = results[i]['views']
acells[2].value = results[i]['likes']
acells[3].value = results[i]['dislikes']
ws.update_cells(acells)
c = Channel.create(
name=video_counter.name,
channel='',
url=results[i]['url'],
title=results[i]['title'],
views=results[i]['views'],
likes=results[i]['likes'],
dislikes=results[i]['dislikes'],
timestamp=now_timestamp
)
nrow += 1
except Exception as e:
print("Error: Channel:channel_scrape:\n%s" % e)
return len(results)
| mit | 1,731,833,847,790,810,600 | 34.572327 | 138 | 0.637553 | false |
jgasteiz/fuzzingtheweb | settings.py | 1 | 3336 | # -*- coding: utf-8 -*-
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Javi Manzano', '[email protected]'),
)
MANAGERS = ADMINS
ALLOWED_HOSTS = ['176.58.120.22', 'fuzzingtheweb.com']
TIME_ZONE = 'Europe/London'
USE_TZ = True
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
MEDIA_ROOT = '/home/ubuntu/media/'
MEDIA_URL = '/static/media/'
STATIC_ROOT = '/static/'
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MARKITUP_FILTER = ('markdown.markdown', {'safe_mode': True})
SECRET_KEY = '%3maeu=guk3p#67j-2--drhy$*^vx+=l9r9bltk-n-^cw4#nic'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
'/home/ubuntu/django_apps/fuzzopress/blog/templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.markup',
'blogadmin',
'markitup',
'south',
'blog',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
MARKITUP_SET = 'markitup/sets/markdown'
# Settings for main blog app
FUZZOPRESS_SETTINGS = {
'contact': [
{
'name': 'github',
'icon': 'icon-github-alt',
'url': 'https://github.com/jgasteiz',
'show': True,
},
{
'name': 'twitter',
'icon': 'icon-twitter',
'url': 'https://twitter.com/jgasteiz',
'show': True,
},
{
'name': 'googleplus',
'icon': 'icon-google-plus-sign',
'url': 'https://plus.google.com/u/0/104971241169939266087/posts',
'show': True,
},
{
'name': 'email',
'icon': 'icon-envelope-alt',
'url': 'mailto:[email protected]',
'show': True,
}
],
'analytics': {
'show': True,
'code': 'UA-23612418-1'
},
'tags': {
'show': True
},
'archive': {
'show': True
},
'finder': {
'show': True
},
'entries_per_page': 5
}
try:
from local_settings import *
except ImportError:
pass
| mit | -5,936,774,613,336,678,000 | 22.492958 | 77 | 0.57554 | false |
Paczesiowa/youtube-dl | youtube_dl/extractor/dailymotion.py | 1 | 10862 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
orderedSet,
str_to_int,
unescapeHTML,
)
class DailymotionBaseInfoExtractor(InfoExtractor):
@staticmethod
def _build_request(url):
"""Build a request with the family filter disabled"""
request = compat_urllib_request.Request(url)
request.add_header('Cookie', 'family_filter=off; ff=off')
return request
class DailymotionIE(DailymotionBaseInfoExtractor):
"""Information Extractor for Dailymotion"""
_VALID_URL = r'(?i)(?:https?://)?(?:(www|touch)\.)?dailymotion\.[a-z]{2,3}/(?:(embed|#)/)?video/(?P<id>[^/?_]+)'
IE_NAME = 'dailymotion'
_FORMATS = [
('stream_h264_ld_url', 'ld'),
('stream_h264_url', 'standard'),
('stream_h264_hq_url', 'hq'),
('stream_h264_hd_url', 'hd'),
('stream_h264_hd1080_url', 'hd180'),
]
_TESTS = [
{
'url': 'https://www.dailymotion.com/video/x2iuewm_steam-machine-models-pricing-listed-on-steam-store-ign-news_videogames',
'md5': '2137c41a8e78554bb09225b8eb322406',
'info_dict': {
'id': 'x2iuewm',
'ext': 'mp4',
'uploader': 'IGN',
'title': 'Steam Machine Models, Pricing Listed on Steam Store - IGN News',
'upload_date': '20150306',
}
},
# Vevo video
{
'url': 'http://www.dailymotion.com/video/x149uew_katy-perry-roar-official_musi',
'info_dict': {
'title': 'Roar (Official)',
'id': 'USUV71301934',
'ext': 'mp4',
'uploader': 'Katy Perry',
'upload_date': '20130905',
},
'params': {
'skip_download': True,
},
'skip': 'VEVO is only available in some countries',
},
# age-restricted video
{
'url': 'http://www.dailymotion.com/video/xyh2zz_leanna-decker-cyber-girl-of-the-year-desires-nude-playboy-plus_redband',
'md5': '0d667a7b9cebecc3c89ee93099c4159d',
'info_dict': {
'id': 'xyh2zz',
'ext': 'mp4',
'title': 'Leanna Decker - Cyber Girl Of The Year Desires Nude [Playboy Plus]',
'uploader': 'HotWaves1012',
'age_limit': 18,
}
}
]
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'https://www.dailymotion.com/video/%s' % video_id
# Retrieve video webpage to extract further information
request = self._build_request(url)
webpage = self._download_webpage(request, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
# It may just embed a vevo video:
m_vevo = re.search(
r'<link rel="video_src" href="[^"]*?vevo.com[^"]*?video=(?P<id>[\w]*)',
webpage)
if m_vevo is not None:
vevo_id = m_vevo.group('id')
self.to_screen('Vevo video detected: %s' % vevo_id)
return self.url_result('vevo:%s' % vevo_id, ie='Vevo')
age_limit = self._rta_search(webpage)
video_upload_date = None
mobj = re.search(r'<meta property="video:release_date" content="([0-9]{4})-([0-9]{2})-([0-9]{2}).+?"/>', webpage)
if mobj is not None:
video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)
embed_url = 'https://www.dailymotion.com/embed/video/%s' % video_id
embed_request = self._build_request(embed_url)
embed_page = self._download_webpage(
embed_request, video_id, 'Downloading embed page')
info = self._search_regex(r'var info = ({.*?}),$', embed_page,
'video info', flags=re.MULTILINE)
info = json.loads(info)
if info.get('error') is not None:
msg = 'Couldn\'t get video, Dailymotion says: %s' % info['error']['title']
raise ExtractorError(msg, expected=True)
formats = []
for (key, format_id) in self._FORMATS:
video_url = info.get(key)
if video_url is not None:
m_size = re.search(r'H264-(\d+)x(\d+)', video_url)
if m_size is not None:
width, height = map(int_or_none, (m_size.group(1), m_size.group(2)))
else:
width, height = None, None
formats.append({
'url': video_url,
'ext': 'mp4',
'format_id': format_id,
'width': width,
'height': height,
})
if not formats:
raise ExtractorError('Unable to extract video URL')
# subtitles
video_subtitles = self.extract_subtitles(video_id, webpage)
view_count = str_to_int(self._search_regex(
r'video_views_count[^>]+>\s+([\d\.,]+)',
webpage, 'view count', fatal=False))
title = self._og_search_title(webpage, default=None)
if title is None:
title = self._html_search_regex(
r'(?s)<span\s+id="video_title"[^>]*>(.*?)</span>', webpage,
'title')
return {
'id': video_id,
'formats': formats,
'uploader': info['owner.screenname'],
'upload_date': video_upload_date,
'title': title,
'subtitles': video_subtitles,
'thumbnail': info['thumbnail_url'],
'age_limit': age_limit,
'view_count': view_count,
}
def _get_subtitles(self, video_id, webpage):
try:
sub_list = self._download_webpage(
'https://api.dailymotion.com/video/%s/subtitles?fields=id,language,url' % video_id,
video_id, note=False)
except ExtractorError as err:
self._downloader.report_warning('unable to download video subtitles: %s' % compat_str(err))
return {}
info = json.loads(sub_list)
if (info['total'] > 0):
sub_lang_list = dict((l['language'], [{'url': l['url'], 'ext': 'srt'}]) for l in info['list'])
return sub_lang_list
self._downloader.report_warning('video doesn\'t have subtitles')
return {}
class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
IE_NAME = 'dailymotion:playlist'
_VALID_URL = r'(?:https?://)?(?:www\.)?dailymotion\.[a-z]{2,3}/playlist/(?P<id>.+?)/'
_MORE_PAGES_INDICATOR = r'(?s)<div class="pages[^"]*">.*?<a\s+class="[^"]*?icon-arrow_right[^"]*?"'
_PAGE_TEMPLATE = 'https://www.dailymotion.com/playlist/%s/%s'
_TESTS = [{
'url': 'http://www.dailymotion.com/playlist/xv4bw_nqtv_sport/1#video=xl8v3q',
'info_dict': {
'title': 'SPORT',
'id': 'xv4bw_nqtv_sport',
},
'playlist_mincount': 20,
}]
def _extract_entries(self, id):
video_ids = []
for pagenum in itertools.count(1):
request = self._build_request(self._PAGE_TEMPLATE % (id, pagenum))
webpage = self._download_webpage(request,
id, 'Downloading page %s' % pagenum)
video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
break
return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
for video_id in orderedSet(video_ids)]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
playlist_id = mobj.group('id')
webpage = self._download_webpage(url, playlist_id)
return {
'_type': 'playlist',
'id': playlist_id,
'title': self._og_search_title(webpage),
'entries': self._extract_entries(playlist_id),
}
class DailymotionUserIE(DailymotionPlaylistIE):
IE_NAME = 'dailymotion:user'
_VALID_URL = r'https?://(?:www\.)?dailymotion\.[a-z]{2,3}/(?:(?:old/)?user/)?(?P<user>[^/]+)$'
_PAGE_TEMPLATE = 'http://www.dailymotion.com/user/%s/%s'
_TESTS = [{
'url': 'https://www.dailymotion.com/user/nqtv',
'info_dict': {
'id': 'nqtv',
'title': 'Rémi Gaillard',
},
'playlist_mincount': 100,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
user = mobj.group('user')
webpage = self._download_webpage(
'https://www.dailymotion.com/user/%s' % user, user)
full_user = unescapeHTML(self._html_search_regex(
r'<a class="nav-image" title="([^"]+)" href="/%s">' % re.escape(user),
webpage, 'user'))
return {
'_type': 'playlist',
'id': user,
'title': full_user,
'entries': self._extract_entries(user),
}
class DailymotionCloudIE(DailymotionBaseInfoExtractor):
_VALID_URL = r'http://api\.dmcloud\.net/embed/[^/]+/(?P<id>[^/?]+)'
_TEST = {
# From http://www.francetvinfo.fr/economie/entreprises/les-entreprises-familiales-le-secret-de-la-reussite_933271.html
# Tested at FranceTvInfo_2
'url': 'http://api.dmcloud.net/embed/4e7343f894a6f677b10006b4/556e03339473995ee145930c?auth=1464865870-0-jyhsm84b-ead4c701fb750cf9367bf4447167a3db&autoplay=1',
'only_matching': True,
}
@classmethod
def _extract_dmcloud_url(self, webpage):
mobj = re.search(r'<iframe[^>]+src=[\'"](http://api\.dmcloud\.net/embed/[^/]+/[^\'"]+)[\'"]', webpage)
if mobj:
return mobj.group(1)
mobj = re.search(r'<input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=[\'"](http://api\.dmcloud\.net/embed/[^/]+/[^\'"]+)[\'"]', webpage)
if mobj:
return mobj.group(1)
def _real_extract(self, url):
video_id = self._match_id(url)
request = self._build_request(url)
webpage = self._download_webpage(request, video_id)
title = self._html_search_regex(r'<title>([^>]+)</title>', webpage, 'title')
video_info = self._parse_json(self._search_regex(
r'var\s+info\s*=\s*([^;]+);', webpage, 'video info'), video_id)
# TODO: parse ios_url, which is in fact a manifest
video_url = video_info['mp4_url']
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': video_info.get('thumbnail_url'),
}
| unlicense | -8,050,191,618,828,455,000 | 35.816949 | 167 | 0.532179 | false |
joegomes/deepchem | deepchem/models/autoencoder_models/test_tensorflowEncoders.py | 1 | 1452 | from unittest import TestCase
from nose.tools import assert_equals
from rdkit import Chem
import deepchem as dc
from deepchem.data import DiskDataset
from deepchem.feat.one_hot import zinc_charset
from deepchem.models.autoencoder_models.autoencoder import TensorflowMoleculeEncoder, TensorflowMoleculeDecoder
class TestTensorflowEncoders(TestCase):
def test_fit(self):
tf_enc = TensorflowMoleculeEncoder.zinc_encoder()
smiles = [
"Cn1cnc2c1c(=O)n(C)c(=O)n2C", "O=C(O)[C@@H]1/C(=C/CO)O[C@@H]2CC(=O)N21",
"Cn1c2nncnc2c(=O)n(C)c1=O", "Cn1cnc2c1c(=O)[nH]c(=O)n2C",
"NC(=O)c1ncc[nH]c1=O", "O=C1OCc2c1[nH]c(=O)[nH]c2=O",
"Cn1c(N)c(N)c(=O)n(C)c1=O", "CNc1nc2c([nH]1)c(=O)[nH]c(=O)n2C",
"CC(=O)N1CN(C(C)=O)[C@@H](O)[C@@H]1O",
"CC(=O)N1CN(C(C)=O)[C@H](O)[C@H]1O", "Cc1[nH]c(=O)[nH]c(=O)c1CO",
"O=C1NCCCc2c1no[n+]2[O-]", "Cc1nc(C(N)=O)c(N)n1CCO",
"O=c1[nH]cc(N2CCOCC2)c(=O)[nH]1"
]
featurizer = dc.feat.one_hot.OneHotFeaturizer(zinc_charset, 120)
mols = [Chem.MolFromSmiles(x) for x in smiles]
features = featurizer.featurize(mols)
dataset = DiskDataset.from_numpy(features, features)
prediction = tf_enc.predict_on_batch(dataset.X)
tf_de = TensorflowMoleculeDecoder.zinc_decoder()
one_hot_decoded = tf_de.predict_on_batch(prediction)
decoded_smiles = featurizer.untransform(one_hot_decoded)
assert_equals(len(decoded_smiles), len(smiles))
| mit | 627,227,895,701,837,200 | 38.243243 | 111 | 0.664601 | false |
limodou/uliweb | uliweb/utils/setup.py | 1 | 5756 | from setuptools import setup
from setuptools.command import build_py as b
import os,sys
import glob
#remove build and dist directory
import shutil
#if os.path.exists('build'):
# shutil.rmtree('build')
#if os.path.exists('dist'):
# shutil.rmtree('dist')
def copy_dir(self, package, src, dst):
self.mkpath(dst)
for r in os.listdir(src):
if r in ['.svn', '_svn']:
continue
fpath = os.path.join(src, r)
if os.path.isdir(fpath):
copy_dir(self, package + '.' + r, fpath, os.path.join(dst, r))
else:
ext = os.path.splitext(fpath)[1]
if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
continue
target = os.path.join(dst, r)
self.copy_file(fpath, target)
def find_dir(self, package, src):
for r in os.listdir(src):
if r in ['.svn', '_svn']:
continue
fpath = os.path.join(src, r)
if os.path.isdir(fpath):
for f in find_dir(self, package + '.' + r, fpath):
yield f
else:
ext = os.path.splitext(fpath)[1]
if ext in ['.pyc', '.pyo', '.bak', '.tmp']:
continue
yield fpath
def build_package_data(self):
for package in self.packages or ():
src_dir = self.get_package_dir(package)
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
copy_dir(self, package, src_dir, build_dir)
setattr(b.build_py, 'build_package_data', build_package_data)
def get_source_files(self):
filenames = []
for package in self.packages or ():
src_dir = self.get_package_dir(package)
filenames.extend(list(find_dir(self, package, src_dir)))
return filenames
setattr(b.build_py, 'get_source_files', get_source_files)
from setuptools.command.develop import develop
from distutils import sysconfig
unlink = os.unlink
def rm(obj):
import shutil
if os.path.exists(obj):
try:
if os.path.isdir(obj):
if os.path.islink(obj):
unlink(obj)
else:
shutil.rmtree(obj)
else:
if os.path.islink(obj):
unlink(obj)
else:
os.remove(obj)
except:
import traceback
traceback.print_exc()
raise
__CSL = None
def symlink(source, link_name):
'''symlink(source, link_name)
Creates a symbolic link pointing to source named link_name
copys from http://stackoverflow.com/questions/1447575/symlinks-on-windows/7924557
'''
global __CSL
if __CSL is None:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
__CSL = csl
flags = 0
if source is not None and os.path.isdir(source):
flags = 1
if __CSL(link_name, source, flags) == 0:
raise ctypes.WinError()
def pre_run(func):
def _f(self):
global unlink
if self.distribution.package_dir and sys.platform == 'win32':
try:
import ntfslink
except:
print 'You need to install ntfslink package first in windows platform.'
print 'You can find it at https://github.com/juntalis/ntfslink-python'
sys.exit(1)
if not hasattr(os, 'symlink'):
os.symlink = symlink
os.path.islink = ntfslink.symlink.check
unlink = ntfslink.symlink.unlink
func(self)
return _f
develop.run = pre_run(develop.run)
def post_install_for_development(func):
def _f(self):
func(self)
packages = self.distribution.packages
package_dir = self.distribution.package_dir
libpath = sysconfig.get_python_lib()
if not package_dir: return
for p in sorted(packages):
#if the package is something like 'x.y.z'
#then create site-packages/x/y
#then create symlink to z to src directory
ps = p.split('.')
if len(ps)>1:
path = libpath
for x in ps[:-1]:
path = os.path.join(path, x)
if not os.path.exists(path):
os.makedirs(path)
inifile = os.path.join(path, '__init__.py')
if not os.path.exists(inifile):
with open(inifile, 'w') as f:
f.write('\n')
pkg = os.path.join(libpath, *ps)
d = package_dir.get(p, None)
if d is None:
print "Error: the package %s directory can't be found in package_dir, please config it first" % p
sys.exit(1)
src = os.path.abspath(os.path.join(os.getcwd(), d))
print 'Linking ', src, 'to', pkg
rm(pkg)
os.symlink(src, pkg)
return _f
develop.install_for_development = post_install_for_development(develop.install_for_development)
def post_uninstall_link(func):
def _f(self):
func(self)
packages = self.distribution.packages
package_dir = self.distribution.package_dir
if not package_dir: return
libpath = sysconfig.get_python_lib()
for p in sorted(packages, reverse=True):
print 'Unlink... %s' % p
pkg = os.path.join(libpath, p.replace('.', '/'))
rm(pkg)
return _f
develop.uninstall_link = post_uninstall_link(develop.uninstall_link)
| bsd-2-clause | 7,548,749,908,409,896,000 | 31.519774 | 113 | 0.539611 | false |
ayushagrawal288/zamboni | mkt/extensions/tests/test_models.py | 1 | 3724 | # -*- coding: utf-8 -*-
import mock
from nose.tools import eq_, ok_
from django.forms import ValidationError
from mkt.extensions.models import Extension
from mkt.files.tests.test_models import UploadCreationMixin, UploadTest
from mkt.site.storage_utils import private_storage
from mkt.site.tests import TestCase
class TestExtensionUpload(UploadCreationMixin, UploadTest):
# Expected manifest, to test zip file parsing.
expected_manifest = {
'description': u'A Dummÿ Extension',
'default_locale': 'en_GB',
'icons': {
'128': '/icon.png'
},
'version': '0.1',
'author': 'Mozilla',
'name': u'My Lîttle Extension'
}
def create_extension(self):
extension = Extension.objects.create(
default_language='fr', version='0.9', manifest={})
return extension
def test_upload_new(self):
eq_(Extension.objects.count(), 0)
upload = self.upload('extension')
extension = Extension.from_upload(upload)
eq_(extension.version, '0.1')
eq_(extension.name, u'My Lîttle Extension')
eq_(extension.default_language, 'en-GB')
eq_(extension.slug, u'my-lîttle-extension')
eq_(extension.filename, 'extension-%s.zip' % extension.version)
ok_(extension.filename in extension.file_path)
ok_(extension.file_path.startswith(extension.path_prefix))
ok_(private_storage.exists(extension.file_path))
eq_(extension.manifest, self.expected_manifest)
eq_(Extension.objects.count(), 1)
@mock.patch('mkt.extensions.utils.ExtensionParser.manifest_contents')
def test_upload_no_version(self, manifest_mock):
manifest_mock.__get__ = mock.Mock(return_value={'name': 'lol'})
upload = self.upload('extension')
with self.assertRaises(ValidationError):
Extension.from_upload(upload)
@mock.patch('mkt.extensions.utils.ExtensionParser.manifest_contents')
def test_upload_no_name(self, manifest_mock):
manifest_mock.__get__ = mock.Mock(return_value={'version': '0.1'})
upload = self.upload('extension')
with self.assertRaises(ValidationError):
Extension.from_upload(upload)
def test_upload_existing(self):
extension = self.create_extension()
upload = self.upload('extension')
with self.assertRaises(NotImplementedError):
Extension.from_upload(upload, instance=extension)
class TestExtensionDeletion(TestCase):
def test_delete_with_file(self):
"""Test that when a Extension instance is deleted, the corresponding
file on the filesystem is also deleted."""
extension = Extension.objects.create(version='0.1')
file_path = extension.file_path
with private_storage.open(file_path, 'w') as f:
f.write('sample data\n')
assert private_storage.exists(file_path)
try:
extension.delete()
assert not private_storage.exists(file_path)
finally:
if private_storage.exists(file_path):
private_storage.delete(file_path)
def test_delete_no_file(self):
"""Test that the Extension instance can be deleted without the file
being present."""
extension = Extension.objects.create(version='0.1')
filename = extension.file_path
assert (not private_storage.exists(filename),
'File exists at: %s' % filename)
extension.delete()
def test_delete_signal(self):
"""Test that the Extension instance can be deleted with the filename
field being empty."""
extension = Extension.objects.create()
extension.delete()
| bsd-3-clause | -2,164,110,055,124,655,900 | 37.75 | 76 | 0.64543 | false |
henryroe/Py2MASS | py2mass/__main__.py | 1 | 2088 | from __future__ import absolute_import
import pickle
import sys
from .py2mass import set_2mass_path, fetch_2mass_xsc_box, fetch_2mass_psc_box, __version__
def main():
show_help = False
if len(sys.argv) == 1 or "help" in sys.argv:
show_help = True
else:
if sys.argv[1] == 'psc':
try:
ra_range = [float(sys.argv[2]), float(sys.argv[3])]
dec_range = [float(sys.argv[4]), float(sys.argv[5])]
except:
raise Error("Expected 4 numbers after radec_range: \n\t" +
"RA_low_deg RA_high_deg DEC_low_deg DEC_high_deg ")
stars = fetch_2mass_psc_box(ra_range, dec_range)
if 'pickle' in sys.argv:
pickle.dump(stars, sys.stdout)
else:
sys.stdout.write(stars.to_string() + '\n')
elif sys.argv[1] == 'xsc':
try:
ra_range = [float(sys.argv[2]), float(sys.argv[3])]
dec_range = [float(sys.argv[4]), float(sys.argv[5])]
except:
raise Error("Expected 4 numbers after radec_range: \n\t" +
"RA_low_deg RA_high_deg DEC_low_deg DEC_high_deg ")
sources = fetch_2mass_xsc_box(ra_range, dec_range)
if 'pickle' in sys.argv:
pickle.dump(sources, sys.stdout)
else:
sys.stdout.write(sources.to_string() + '\n')
else:
show_help = True
if show_help:
print "Usage:"
print "py2mass [psc|xsc] minRA maxRA minDEC maxDEC [pickle]"
print "----"
print " psc - 2MASS Point Source Catalog"
print " xsc - 2MASS Extended Source Catalog"
print " Default output is a nicely formatted text table."
print " Optional keyword (pickle) will dump a pickle of that table, "
print " which can then be read back in from file with, e.g.:"
print " import pickle"
print " stars = pickle.load(open(filename, 'r'))"
if __name__ == '__main__':
main() | mit | -1,982,956,073,703,998,700 | 39.173077 | 90 | 0.527299 | false |
graphql-python/graphql-core | tests/validation/test_unique_directive_names.py | 1 | 2732 | from functools import partial
from graphql.utilities import build_schema
from graphql.validation.rules.unique_directive_names import UniqueDirectiveNamesRule
from .harness import assert_sdl_validation_errors
assert_errors = partial(assert_sdl_validation_errors, UniqueDirectiveNamesRule)
assert_valid = partial(assert_errors, errors=[])
def describe_validate_unique_directive_names():
def no_directive():
assert_valid(
"""
type Foo
"""
)
def one_directive():
assert_valid(
"""
directive @foo on SCHEMA
"""
)
def many_directives():
assert_valid(
"""
directive @foo on SCHEMA
directive @bar on SCHEMA
directive @baz on SCHEMA
"""
)
def directive_and_non_directive_definitions_named_the_same():
assert_valid(
"""
query foo { __typename }
fragment foo on foo { __typename }
type foo
directive @foo on SCHEMA
"""
)
def directives_named_the_same():
assert_errors(
"""
directive @foo on SCHEMA
directive @foo on SCHEMA
""",
[
{
"message": "There can be only one directive named '@foo'.",
"locations": [(2, 24), (4, 24)],
}
],
)
def adding_new_directive_to_existing_schema():
schema = build_schema("directive @foo on SCHEMA")
assert_valid("directive @bar on SCHEMA", schema=schema)
def adding_new_directive_with_standard_name_to_existing_schema():
schema = build_schema("type foo")
assert_errors(
"directive @skip on SCHEMA",
[
{
"message": "Directive '@skip' already exists in the schema."
" It cannot be redefined.",
"locations": [(1, 12)],
}
],
schema,
)
def adding_new_directive_to_existing_schema_with_same_named_type():
schema = build_schema("type foo")
assert_valid("directive @foo on SCHEMA", schema=schema)
def adding_conflicting_directives_to_existing_schema():
schema = build_schema("directive @foo on SCHEMA")
assert_errors(
"directive @foo on SCHEMA",
[
{
"message": "Directive '@foo' already exists in the schema."
" It cannot be redefined.",
"locations": [(1, 12)],
}
],
schema,
)
| mit | 1,620,604,357,516,946,200 | 26.049505 | 84 | 0.503294 | false |
valohai/valohai-cli | valohai_cli/git.py | 1 | 2207 | import os
import subprocess
from typing import Sequence
from valohai_cli.exceptions import NoCommit, NoGitRepo
def check_git_output(args: Sequence[str], directory: str) -> bytes:
try:
return subprocess.check_output(
args=args,
cwd=directory,
shell=False,
stderr=subprocess.STDOUT,
env=dict(os.environ, LC_ALL='C'),
)
except subprocess.CalledProcessError as cpe:
if cpe.returncode == 128:
output_text = cpe.output.decode().lower()
if 'not a git repository' in output_text:
raise NoGitRepo(directory)
if 'bad revision' in output_text:
raise NoCommit(directory)
raise
def get_current_commit(directory: str) -> str:
"""
(Try to) get the current commit of the Git working copy in `directory`.
:param directory: Directory path.
:return: Commit SHA
"""
return check_git_output(['git', 'rev-parse', 'HEAD'], directory).strip().decode()
def describe_current_commit(directory: str) -> str:
"""
(Try to) describe the lineage and status of the Git working copy in `directory`.
:param directory: Directory path.
:return: Git description string
"""
return check_git_output(['git', 'describe', '--always', '--long', '--dirty', '--all'], directory).strip().decode()
def get_file_at_commit(directory: str, commit: str, path: str) -> bytes:
"""
Get the contents of repository `path` at commit `commit` given the
Git working directory `directory`.
:param directory: Git working directory.
:param commit: Commit ID
:param path: In-repository path
:return: File contents as bytes
"""
args = ['git', 'show', f'{commit}:{path}']
return check_git_output(args, directory)
def expand_commit_id(directory: str, commit: str) -> str:
"""
Expand the possibly abbreviated (or otherwise referred to, i.e. "HEAD")
commit ID, and verify it exists.
:param directory: Git working directory
:param commit: Commit ID
:return: Expanded commit ID.
"""
return check_git_output(['git', 'rev-parse', '--verify', commit], directory).decode().strip()
| mit | -3,123,302,948,295,166,000 | 31.455882 | 118 | 0.63208 | false |
maartenbreddels/ipyvolume | ipyvolume/utils.py | 1 | 9843 | from __future__ import print_function
import os
import io
import time
import functools
import collections
import collections.abc
import numpy as np
import requests
import IPython
import zmq
# https://stackoverflow.com/questions/14267555/find-the-smallest-power-of-2-greater-than-n-in-python
def next_power_of_2(x):
    """Return the smallest power of two that is >= x (returns 1 for x == 0)."""
return 1 if x == 0 else 2 ** (x - 1).bit_length()
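# Illustrative sketch (not part of the original module): boundary behaviour of
# next_power_of_2, e.g. when padding array or texture sizes.
#
#     next_power_of_2(0)    # -> 1
#     next_power_of_2(1)    # -> 1
#     next_power_of_2(100)  # -> 128
#     next_power_of_2(128)  # -> 128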
# original from http://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth
def dict_deep_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
r = dict_deep_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
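# Small illustration of the recursive merge performed above:
#   dict_deep_update({'a': {'x': 1}, 'b': 2}, {'a': {'y': 3}, 'c': 4})
#   # -> {'a': {'x': 1, 'y': 3}, 'b': 2, 'c': 4}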
def nested_setitem(obj, dotted_name, value):
items = dotted_name.split(".")
for item in items[:-1]:
if item not in obj:
obj[item] = {}
obj = obj[item]
obj[items[-1]] = value
def download_to_bytes(url, chunk_size=1024 * 1024 * 10, loadbar_length=10):
"""Download a url to bytes.
if chunk_size is not None, prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook)
:param url: str or url
:param chunk_size: None or int in bytes
:param loadbar_length: int length of load bar
:return: (bytes, encoding)
"""
stream = False if chunk_size is None else True
print("Downloading {0:s}: ".format(url), end="")
response = requests.get(url, stream=stream)
# raise error if download was unsuccessful
response.raise_for_status()
encoding = response.encoding
total_length = response.headers.get('content-length')
if total_length is not None:
total_length = float(total_length)
if stream:
print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="")
else:
print("{0:.2f}Mb ".format(total_length / (1024 * 1024)), end="")
if stream:
print("[", end="")
chunks = []
loaded = 0
loaded_size = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
# print our progress bar
if total_length is not None:
while loaded < loadbar_length * loaded_size / total_length:
print("=", end='')
loaded += 1
loaded_size += chunk_size
chunks.append(chunk)
if total_length is None:
print("=" * loadbar_length, end='')
else:
while loaded < loadbar_length:
print("=", end='')
loaded += 1
content = b"".join(chunks)
print("] ", end="")
else:
content = response.content
print("Finished")
response.close()
return content, encoding
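# Illustrative call (the URL is a placeholder):
#   content, encoding = download_to_bytes('https://example.com/data.bin')
#   # `content` holds the raw bytes; `encoding` is taken from the HTTP response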
def download_yield_bytes(url, chunk_size=1024 * 1024 * 10):
"""Yield a downloaded url as byte chunks.
:param url: str or url
:param chunk_size: None or int in bytes
:yield: byte chunks
"""
response = requests.get(url, stream=True)
# raise error if download was unsuccessful
response.raise_for_status()
total_length = response.headers.get('content-length')
if total_length is not None:
total_length = float(total_length)
length_str = "{0:.2f}Mb ".format(total_length / (1024 * 1024))
else:
length_str = ""
print("Yielding {0:s} {1:s}".format(url, length_str))
for chunk in response.iter_content(chunk_size=chunk_size):
yield chunk
response.close()
def download_to_file(url, filepath, resume=False, overwrite=False, chunk_size=1024 * 1024 * 10, loadbar_length=10):
"""Download a url.
prints a simple loading bar [=*loadbar_length] to show progress (in console and notebook)
:type url: str
:type filepath: str
:param filepath: path to download to
:param resume: if True resume download from existing file chunk
:param overwrite: if True remove any existing filepath
:param chunk_size: None or int in bytes
:param loadbar_length: int length of load bar
:return:
"""
resume_header = None
loaded_size = 0
write_mode = 'wb'
if os.path.exists(filepath):
if overwrite:
os.remove(filepath)
elif resume:
# if we want to resume, first try and see if the file is already complete
loaded_size = os.path.getsize(filepath)
clength = requests.head(url).headers.get('content-length')
if clength is not None:
if int(clength) == loaded_size:
return None
# give the point to resume at
resume_header = {'Range': 'bytes=%s-' % loaded_size}
write_mode = 'ab'
else:
return None
stream = False if chunk_size is None else True
# start printing with no return character, so that we can have everything on one line
print("Downloading {0:s}: ".format(url), end="")
response = requests.get(url, stream=stream, headers=resume_header)
# raise error if download was unsuccessful
response.raise_for_status()
# get the size of the file if available
total_length = response.headers.get('content-length')
if total_length is not None:
total_length = float(total_length) + loaded_size
print("{0:.2f}Mb/{1:} ".format(total_length / (1024 * 1024), loadbar_length), end="")
print("[", end="")
parent = os.path.dirname(filepath)
if not os.path.exists(parent) and parent:
os.makedirs(parent)
with io.open(filepath, write_mode) as f:
loaded = 0
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk: # filter out keep-alive new chunks
# print our progress bar
if total_length is not None and chunk_size is not None:
while loaded < loadbar_length * loaded_size / total_length:
print("=", end='')
loaded += 1
loaded_size += chunk_size
f.write(chunk)
if total_length is None:
print("=" * loadbar_length, end='')
else:
while loaded < loadbar_length:
print("=", end='')
loaded += 1
print("] Finished")
def reduce_size(data, max_size, extent):
new_extent = []
for axis in range(3):
shape = data.shape
xmin, xmax = extent[2 - axis]
while shape[axis] > max_size:
slices1 = [slice(None, None, None)] * 3
slices1[axis] = slice(0, -1, 2)
slices2 = [slice(None, None, None)] * 3
slices2[axis] = slice(1, None, 2)
# print(data.shape, data.__getitem__(slices1).shape, data.__getitem__(slices2).shape)
            data = (data[tuple(slices1)] + data[tuple(slices2)]) / 2  # index with a tuple so newer numpy accepts it
if shape[axis] % 2:
width = xmax - xmin
xmax = xmin + width / shape[axis] * (shape[axis] - 1)
shape = data.shape
new_extent.append((xmin, xmax))
return data, new_extent[::-1]
def grid_slice(amin, amax, shape, bmin, bmax):
"""Give a slice such that [amin, amax] is in [bmin, bmax].
Given a grid with shape, and begin and end coordinates amin, amax, what slice
do we need to take such that it minimally covers bmin, bmax.
amin, amax = 0, 1; shape = 4
0 0.25 0.5 0.75 1
| | | | |
bmin, bmax = 0.5, 1.0 should give 2,4, 0.5, 1.0
bmin, bmax = 0.4, 1.0 should give 1,4, 0.25, 1.0
bmin, bmax = -1, 1.0 should give 0,4, 0, 1.0
what about negative bmin and bmax ?
It will just flip bmin and bmax
bmin, bmax = 1.0, 0.5 should give 2,4, 0.5, 1.5
amin, amax = 1, 0; shape = 4
1 0.75 0.5 0.25 0
| | | | |
bmin, bmax = 0.5, 1.0 should give 0,2, 1.0, 0.5
bmin, bmax = 0.4, 1.0 should give 0,3, 1.0, 0.25
"""
width = amax - amin
bmin, bmax = min(bmin, bmax), max(bmin, bmax)
# normalize the coordinates
nmin = (bmin - amin) / width
nmax = (bmax - amin) / width
# grid indices
if width < 0:
imin = max(0, int(np.floor(nmax * shape)))
imax = min(shape, int(np.ceil(nmin * shape)))
else:
imin = max(0, int(np.floor(nmin * shape)))
imax = min(shape, int(np.ceil(nmax * shape)))
# transform back to the coordinate system of x
nmin = imin / shape
nmax = imax / shape
# if width < 0:
# return imin, imax, amin + nmax * width, amin + nmin * width
# else:
return (imin, imax), (amin + nmin * width, amin + nmax * width)
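# Worked example, matching the first case in the docstring:
#   grid_slice(0, 1, 4, 0.5, 1.0)  ->  ((2, 4), (0.5, 1.0))
# i.e. grid indices [2:4] minimally cover the requested 0.5..1.0 range.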
def get_ioloop():
ipython = IPython.get_ipython()
if ipython and hasattr(ipython, 'kernel'):
return zmq.eventloop.ioloop.IOLoop.instance()
def debounced(delay_seconds=0.5, method=False):
def wrapped(f):
counters = collections.defaultdict(int)
@functools.wraps(f)
def execute(*args, **kwargs):
if method: # if it is a method, we want to have a counter per instance
key = args[0]
else:
key = None
counters[key] += 1
def debounced_execute(counter=counters[key]):
if counter == counters[key]: # only execute if the counter wasn't changed in the meantime
f(*args, **kwargs)
ioloop = get_ioloop()
def thread_safe():
ioloop.add_timeout(time.time() + delay_seconds, debounced_execute)
if ioloop is None: # we live outside of IPython (e.g. unittest), so execute directly
debounced_execute()
else:
ioloop.add_callback(thread_safe)
return execute
return wrapped
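# Illustrative usage of the decorator above (function and class names are hypothetical):
#   @debounced(delay_seconds=0.3)
#   def _refresh_view():
#       ...  # bursts of calls collapse into a single delayed execution
#
# For instance methods, pass method=True so the debounce counter is kept per object:
#   class Viewer:
#       @debounced(delay_seconds=0.3, method=True)
#       def redraw(self):
#           ...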
| mit | -3,361,862,247,141,486,600 | 32.253378 | 120 | 0.574215 | false |
thom-at-redhat/cfme_tests | scripts/template_upload_rhevm.py | 1 | 16744 | #!/usr/bin/env python2
"""This script takes various parameters specified in
cfme_data['template_upload']['template_upload_rhevm'] and/or by command-line arguments.
Parameters specified by command-line have higher priority, and override data in cfme_data.
This script is designed to run either as a standalone rhevm template uploader, or it can be used
together with template_upload_all script. This is why all the function calls, which would
normally be placed in main function, are located in function run(**kwargs).
"""
import argparse
import fauxfactory
import sys
from ovirtsdk.api import API
from ovirtsdk.xml import params
from utils.conf import cfme_data
from utils.conf import credentials
from utils.ssh import SSHClient
from utils.wait import wait_for
# temporary vm name (this vm will be deleted)
TEMP_VM_NAME = 'auto-vm-%s' % fauxfactory.gen_alphanumeric(8)
# temporary template name (this template will be deleted)
TEMP_TMP_NAME = 'auto-tmp-%s' % fauxfactory.gen_alphanumeric(8)
def parse_cmd_line():
parser = argparse.ArgumentParser(argument_default=None)
parser.add_argument("--image_url", dest="image_url",
help="URL of ova file to upload", default=None)
parser.add_argument("--template_name", dest="template_name",
help="Name of the new template", default=None)
parser.add_argument("--edomain", dest="edomain",
help="Export domain for the remplate", default=None)
parser.add_argument("--sdomain", dest="sdomain",
help="Storage domain for vm and disk", default=None)
parser.add_argument("--cluster", dest="cluster",
help="Set cluster to operate in", default=None)
parser.add_argument("--disk_size", dest="disk_size",
help="Size of the second (database) disk, in B",
default=None, type=int)
parser.add_argument("--disk_format", dest="disk_format",
help="Format of the second (database) disk", default=None)
parser.add_argument("--disk_interface", dest="disk_interface",
help="Interface of second (database) disk", default=None)
parser.add_argument("--provider", dest="provider",
help="Rhevm provider (to look for in cfme_data)", default=None)
args = parser.parse_args()
return args
def make_ssh_client(rhevip, sshname, sshpass):
connect_kwargs = {
'username': sshname,
'password': sshpass,
'hostname': rhevip
}
return SSHClient(**connect_kwargs)
def get_ova_name(ovaurl):
"""Returns ova filename."""
return ovaurl.split("/")[-1]
def download_ova(ssh_client, ovaurl):
"""Downloads ova file using ssh_client and url
Args:
ssh_client: :py:class:`utils.ssh.SSHClient` instance
ovaurl: URL of ova file
"""
command = 'curl -O %s' % ovaurl
exit_status, output = ssh_client.run_command(command)
if exit_status != 0:
print "RHEVM: There was an error while downloading ova file:"
print output
sys.exit(127)
def template_from_ova(api, username, password, rhevip, edomain, ovaname, ssh_client):
"""Uses rhevm-image-uploader to make a template from ova file.
Args:
api: API for RHEVM.
username: Username to chosen RHEVM provider.
password: Password to chosen RHEVM provider.
rhevip: IP of chosen RHEVM provider.
edomain: Export domain of selected RHEVM provider.
ovaname: Name of ova file.
ssh_client: :py:class:`utils.ssh.SSHClient` instance
"""
if api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME) is not None:
print "RHEVM: Warning: found another template with this name."
print "RHEVM: Skipping this step. Attempting to continue..."
return
command = ['rhevm-image-uploader']
command.append("-u %s" % username)
command.append("-p %s" % password)
command.append("-r %s:443" % rhevip)
command.append("-N %s" % TEMP_TMP_NAME)
command.append("-e %s" % edomain)
command.append("upload %s" % ovaname)
command.append("-m --insecure")
exit_status, output = ssh_client.run_command(' '.join(command))
if exit_status != 0:
print "RHEVM: There was an error while making template from ova file:"
print output
sys.exit(127)
def import_template(api, edomain, sdomain, cluster):
"""Imports template from export domain to storage domain.
Args:
api: API to RHEVM instance.
edomain: Export domain of selected RHEVM provider.
sdomain: Storage domain of selected RHEVM provider.
cluster: Cluster to save imported template on.
"""
if api.templates.get(TEMP_TMP_NAME) is not None:
print "RHEVM: Warning: found another template with this name."
print "RHEVM: Skipping this step, attempting to continue..."
return
actual_template = api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME)
actual_storage_domain = api.storagedomains.get(sdomain)
actual_cluster = api.clusters.get(cluster)
import_action = params.Action(async=False, cluster=actual_cluster,
storage_domain=actual_storage_domain)
actual_template.import_template(action=import_action)
# Check if the template is really there
if not api.templates.get(TEMP_TMP_NAME):
print "RHEVM: The template failed to import"
sys.exit(127)
def make_vm_from_template(api, cluster):
"""Makes temporary VM from imported template. This template will be later deleted.
It's used to add a new disk and to convert back to template.
Args:
api: API to chosen RHEVM provider.
cluster: Cluster to save the temporary VM on.
"""
if api.vms.get(TEMP_VM_NAME) is not None:
print "RHEVM: Warning: found another VM with this name."
print "RHEVM: Skipping this step, attempting to continue..."
return
actual_template = api.templates.get(TEMP_TMP_NAME)
actual_cluster = api.clusters.get(cluster)
params_vm = params.VM(name=TEMP_VM_NAME, template=actual_template, cluster=actual_cluster)
api.vms.add(params_vm)
# we must wait for the vm do become available
def check_status():
status = api.vms.get(TEMP_VM_NAME).get_status()
if status.state != 'down':
return False
return True
wait_for(check_status, fail_condition=False, delay=5)
# check, if the vm is really there
if not api.vms.get(TEMP_VM_NAME):
print "RHEVM: VM could not be provisioned"
sys.exit(127)
def check_disks(api):
disks = api.vms.get(TEMP_VM_NAME).disks.list()
for disk in disks:
if disk.get_status().state != "ok":
return False
return True
# sometimes, rhevm is just not cooperative. This function is used to wait for the
# template on the export domain to become unlocked
def check_edomain_template(api, edomain):
template = api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME)
if template.get_status().state != "ok":
return False
return True
def add_disk_to_vm(api, sdomain, disk_size, disk_format, disk_interface):
"""Adds second disk to a temporary VM.
Args:
api: API to chosen RHEVM provider.
sdomain: Storage domain to save new disk onto.
disk_size: Size of the new disk (in B).
disk_format: Format of the new disk.
disk_interface: Interface of the new disk.
"""
if len(api.vms.get(TEMP_VM_NAME).disks.list()) > 1:
print "RHEVM: Warning: found more than one disk in existing VM."
print "RHEVM: Skipping this step, attempting to continue..."
return
actual_sdomain = api.storagedomains.get(sdomain)
temp_vm = api.vms.get(TEMP_VM_NAME)
params_disk = params.Disk(storage_domain=actual_sdomain, size=disk_size,
interface=disk_interface, format=disk_format)
temp_vm.disks.add(params_disk)
wait_for(check_disks, [api], fail_condition=False, delay=5, num_sec=900)
# check, if there are two disks
if len(api.vms.get(TEMP_VM_NAME).disks.list()) < 2:
print "RHEVM: Disk failed to add"
sys.exit(127)
def templatize_vm(api, template_name, cluster):
"""Templatizes temporary VM. Result is template with two disks.
Args:
api: API to chosen RHEVM provider.
template_name: Name of the final template.
cluster: Cluster to save the final template onto.
"""
if api.templates.get(template_name) is not None:
print "RHEVM: Warning: found finished template with this name."
print "RHEVM: Skipping this step, attempting to continue..."
return
temporary_vm = api.vms.get(TEMP_VM_NAME)
actual_cluster = api.clusters.get(cluster)
new_template = params.Template(name=template_name, vm=temporary_vm, cluster=actual_cluster)
api.templates.add(new_template)
wait_for(check_disks, [api], fail_condition=False, delay=5, num_sec=900)
# check, if template is really there
if not api.templates.get(template_name):
print "RHEVM: VM failed to templatize"
sys.exit(127)
def cleanup(api, edomain, ssh_client, ovaname):
"""Cleans up all the mess that the previous functions left behind.
Args:
api: API to chosen RHEVM provider.
        edomain: Export domain of chosen RHEVM provider.
        ssh_client: :py:class:`utils.ssh.SSHClient` instance used to remove the ova file.
        ovaname: Name of the downloaded ova file to delete.
    """
command = 'rm %s' % ovaname
exit_status, output = ssh_client.run_command(command)
temporary_vm = api.vms.get(TEMP_VM_NAME)
if temporary_vm is not None:
temporary_vm.delete()
temporary_template = api.templates.get(TEMP_TMP_NAME)
if temporary_template is not None:
temporary_template.delete()
# waiting for template on export domain
wait_for(check_edomain_template, [api, edomain], fail_condition=False, delay=5)
unimported_template = api.storagedomains.get(edomain).templates.get(TEMP_TMP_NAME)
if unimported_template is not None:
unimported_template.delete()
def api_params_resolution(item_list, item_name, item_param):
"""Picks and prints info about parameter obtained by api call.
Args:
item_list: List of possible candidates to pick from.
item_name: Name of parameter obtained by api call.
item_param: Name of parameter representing data in the script.
"""
if len(item_list) == 0:
print "RHEVM: Cannot find %s (%s) automatically." % (item_name, item_param)
print "Please specify it by cmd-line parameter '--%s' or in cfme_data." % item_param
return None
elif len(item_list) > 1:
print "RHEVM: Found multiple instances of %s. Picking '%s'." % (item_name, item_list[0])
else:
print "RHEVM: Found %s '%s'." % (item_name, item_list[0])
return item_list[0]
def get_edomain(api):
"""Discovers suitable export domain automatically.
Args:
api: API to RHEVM instance.
"""
edomain_names = []
for domain in api.storagedomains.list(status=None):
if domain.get_type() == 'export':
edomain_names.append(domain.get_name())
return api_params_resolution(edomain_names, 'export domain', 'edomain')
def get_sdomain(api):
"""Discovers suitable storage domain automatically.
Args:
api: API to RHEVM instance.
"""
sdomain_names = []
for domain in api.storagedomains.list(status=None):
if domain.get_type() == 'data':
sdomain_names.append(domain.get_name())
return api_params_resolution(sdomain_names, 'storage domain', 'sdomain')
def get_cluster(api):
"""Discovers suitable cluster automatically.
Args:
api: API to RHEVM instance.
"""
cluster_names = []
for cluster in api.clusters.list():
for host in api.hosts.list():
if host.get_cluster().id == cluster.id:
cluster_names.append(cluster.get_name())
return api_params_resolution(cluster_names, 'cluster', 'cluster')
def check_kwargs(**kwargs):
for key, val in kwargs.iteritems():
if val is None:
print "RHEVM: please supply required parameter '%s'." % key
sys.exit(127)
def update_params_api(api, **kwargs):
"""Updates parameters with ones determined from api call.
Args:
api: API to RHEVM instance.
kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm']
"""
if kwargs.get('edomain') is None:
kwargs['edomain'] = get_edomain(api)
if kwargs.get('sdomain') is None:
kwargs['sdomain'] = get_sdomain(api)
if kwargs.get('cluster') is None:
kwargs['cluster'] = get_cluster(api)
return kwargs
def make_kwargs(args, cfme_data, **kwargs):
"""Assembles all the parameters in case of running as a standalone script.
Makes sure, that the parameters given by command-line arguments have higher priority.
Makes sure, that all the needed parameters have proper values.
Args:
args: Arguments given from command-line.
cfme_data: Data in cfme_data.yaml
kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm']
"""
args_kwargs = dict(args._get_kwargs())
    if len(kwargs) == 0:
return args_kwargs
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs.update({'template_name': template_name})
for kkey, kval in kwargs.iteritems():
for akey, aval in args_kwargs.iteritems():
if aval is not None:
if kkey == akey:
if kval != aval:
kwargs[akey] = aval
for akey, aval in args_kwargs.iteritems():
if akey not in kwargs.iterkeys():
kwargs[akey] = aval
return kwargs
def run(**kwargs):
"""Calls all the functions needed to upload new template to RHEVM.
This is called either by template_upload_all script, or by main function.
Args:
**kwargs: Kwargs generated from cfme_data['template_upload']['template_upload_rhevm'].
"""
ovaname = get_ova_name(kwargs.get('image_url'))
mgmt_sys = cfme_data['management_systems'][kwargs.get('provider')]
rhevurl = mgmt_sys['hostname']
rhevm_credentials = mgmt_sys['credentials']
username = credentials[rhevm_credentials]['username']
password = credentials[rhevm_credentials]['password']
ssh_rhevm_creds = mgmt_sys['hosts'][0]['credentials']
sshname = credentials[ssh_rhevm_creds]['username']
sshpass = credentials[ssh_rhevm_creds]['password']
rhevip = mgmt_sys['ipaddress']
apiurl = 'https://%s:443/api' % rhevurl
ssh_client = make_ssh_client(rhevip, sshname, sshpass)
api = API(url=apiurl, username=username, password=password,
insecure=True, persistent_auth=False)
template_name = kwargs.get('template_name', None)
if template_name is None:
template_name = cfme_data['basic_info']['appliance_template']
kwargs = update_params_api(api, **kwargs)
check_kwargs(**kwargs)
if api.templates.get(template_name) is not None:
print "RHEVM: Found finished template with this name."
print "RHEVM: The script will now end."
else:
print "RHEVM: Downloading .ova file..."
download_ova(ssh_client, kwargs.get('image_url'))
try:
print "RHEVM: Templatizing .ova file..."
template_from_ova(api, username, password, rhevip, kwargs.get('edomain'),
ovaname, ssh_client)
print "RHEVM: Importing new template..."
import_template(api, kwargs.get('edomain'), kwargs.get('sdomain'),
kwargs.get('cluster'))
print "RHEVM: Making a temporary VM from new template..."
make_vm_from_template(api, kwargs.get('cluster'))
print "RHEVM: Adding disk to created VM..."
add_disk_to_vm(api, kwargs.get('sdomain'), kwargs.get('disk_size'),
kwargs.get('disk_format'), kwargs.get('disk_interface'))
print "RHEVM: Templatizing VM..."
templatize_vm(api, template_name, kwargs.get('cluster'))
finally:
cleanup(api, kwargs.get('edomain'), ssh_client, ovaname)
ssh_client.close()
api.disconnect()
print "RHEVM: Done."
if __name__ == "__main__":
args = parse_cmd_line()
kwargs = cfme_data['template_upload']['template_upload_rhevm']
final_kwargs = make_kwargs(args, cfme_data, **kwargs)
run(**final_kwargs)
| gpl-2.0 | 5,226,702,199,609,717,000 | 35.242424 | 96 | 0.643992 | false |
TheCamusean/DLRCev3 | scripts/Computer_vision_files/pose_estimation.py | 1 | 2128 | import numpy as np
import cv2
import glob
import math as m
import time
import timeout_decorator as tm
def draw(img, corners, imgpts):
corner = tuple(corners[0].ravel())
img = cv2.line(img, corner, tuple(imgpts[0].ravel()), (255,0,0), 5)
img = cv2.line(img, corner, tuple(imgpts[1].ravel()), (0,255,0), 5)
img = cv2.line(img, corner, tuple(imgpts[2].ravel()), (0,0,255), 5)
return img
def silent_timeout(t, *args):
try:
return tm.timeout(t)(cv2.findChessboardCorners)(*args)
except tm.timeout_decorator.TimeoutError:
print("Timed out")
return (False, False)
# LOAD THE PARAMETERS
data = np.load('camera_parameters.npz')
mtx=data["cam_matrix"]
dist=data["dist_coeff"]
# Now you have the camera calibration parameters
#FROM HERE IS TO PLOT THE AXIS IN THE CHESSBOARD
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((9*6,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
axis = np.float32([[3,0,0], [0,3,0], [0,0,-3]]).reshape(-1,3)
## TO DRAW THE AXIS
cap = cv2.VideoCapture(1)
#img = cv2.imread('Chessboard_9.jpg')
while True:
t0=time.time()
while (time.time()-t0<0.1):
ret,img=cap.read()
tinit=time.time()
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
ret, corners = silent_timeout(0.07,gray, (9,6),None)
if ret == True:
corners2 = cv2.cornerSubPix(gray,corners,(11,11),(-1,-1),criteria)
# Find the rotation and translation vectors.
retval,rvecs2, tvecs2, inliers2= cv2.solvePnPRansac(objp, corners2, mtx, dist)
tvecs2=2.5*tvecs2
print("translation x:{},y:{},z:{}".format(tvecs2[0],tvecs2[1],tvecs2[2]))
#print("rotation x:{},y:{},z:{}".format(rvecs2[0],rvecs2[1],rvecs2[2]))
# project 3D points to image plane
imgpts, jac = cv2.projectPoints(axis, rvecs2, tvecs2, mtx, dist)
img = draw(img,corners2,imgpts)
print("retard",time.time()-tinit)
cv2.imshow('img',img)
    if (cv2.waitKey(1) & 0xFF) == 32:  # exit when the spacebar is pressed
        break
| mit | -4,619,761,552,942,554,000 | 29.4 | 86 | 0.629229 | false |
pauliacomi/pyGAPS | tests/characterisation/test_t_plot.py | 1 | 2958 | """
This test module has tests relating to t-plots
All functions in /calculations/tplot.py are tested here.
The purposes are:
- testing the user-facing API function (tplot)
- testing individual low level functions against known results.
Functions are tested against pre-calculated values on real isotherms.
All pre-calculated data for characterisation can be found in the
/.conftest file together with the other isotherm parameters.
"""
import pytest
from matplotlib.testing.decorators import cleanup
from numpy import isclose
import pygaps
import pygaps.utilities.exceptions as pgEx
from .conftest import DATA
from .conftest import DATA_N77_PATH
@pytest.mark.characterisation
class TestTPlot():
"""Tests t-plot calculations."""
def test_alphas_checks(self, basic_pointisotherm):
"""Checks for built-in safeguards."""
# Will raise a "no suitable model exception"
with pytest.raises(pgEx.ParameterError):
pygaps.t_plot(basic_pointisotherm, thickness_model='random')
@pytest.mark.parametrize('sample', [sample for sample in DATA])
def test_tplot(self, sample):
"""Test calculation with several model isotherms."""
sample = DATA[sample]
# exclude datasets where it is not applicable
if sample.get('t_area', None):
filepath = DATA_N77_PATH / sample['file']
isotherm = pygaps.isotherm_from_json(filepath)
res = pygaps.t_plot(isotherm)
results = res.get('results')
err_relative = 0.1 # 10 percent
err_absolute_area = 0.1 # units
err_absolute_volume = 0.01 # units
assert isclose(
results[-1].get('adsorbed_volume'), sample['t_pore_volume'],
err_relative, err_absolute_area
)
assert isclose(
results[0].get('area'), sample['t_area'], err_relative,
err_absolute_volume
)
def test_tplot_choice(self):
"""Test choice of points."""
sample = DATA['MCM-41']
filepath = DATA_N77_PATH / sample['file']
isotherm = pygaps.isotherm_from_json(filepath)
res = pygaps.t_plot(isotherm, limits=[0.7, 1.0])
results = res.get('results')
err_relative = 0.1 # 10 percent
err_absolute_area = 0.1 # units
err_absolute_volume = 0.01 # units
assert isclose(
results[-1].get('adsorbed_volume'), sample['t_pore_volume'],
err_relative, err_absolute_area
)
assert isclose(
results[-1].get('area'), sample['s_t_area'], err_relative,
err_absolute_volume
)
@cleanup
def test_tplot_output(self):
"""Test verbosity."""
sample = DATA['MCM-41']
filepath = DATA_N77_PATH / sample['file']
isotherm = pygaps.isotherm_from_json(filepath)
pygaps.t_plot(isotherm, 'Halsey', verbose=True)
| mit | 8,951,880,220,444,265,000 | 31.505495 | 76 | 0.620352 | false |
Xiol/CVEChecker | old/rhsa.py | 1 | 5144 | #!/usr/bin/env python -OO
# This Source Code Form is subject to the terms of the Mozilla
# Public License, v. 2.0. If a copy of the MPL was not distributed
# with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# CVE -> RHSA Report Generator
#
# Requires Beautiful Soup: http://www.crummy.com/software/BeautifulSoup/
# Currently only tested with Python 2.6, but no reason it shouldn't work
# with older Python versions (minimum 2.3). Not compatible with Python 3.
#
# Use like: ./rhsa.py < cvelist.txt, where cvelist.txt is a whitespace
# separated list of CVE numbers in the format CVE-YYYY-XXXX.
#
# This will find the CVE on the CVE_BASE_URL site and scrape for the
# related RHSA. If it can't find the CVE, chances are it doesn't affect
# Red Hat or Linux. If it can't find an RHSA, then it'll be something
# they don't intend to fix, so output the statement from Red Hat.
# Otherwise, consider resolved and output the link to the RHSA.
# This of course assumes you ARE running the latest CentOS/RHEL release
# versions of the software you're checking the CVEs for.
#
# No guarantees anything this outputs is correct or proper.
import sys
import re
import urllib2
import sqlite3
import os
import snmp
from time import sleep
from BeautifulSoup import BeautifulSoup
CVE_BASE_URL = "https://www.redhat.com/security/data/cve/"
RHEL_VERSION = "5"
rhsa_r = re.compile(".*Red Hat Enterprise Linux version "+RHEL_VERSION+".*")
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
conn = sqlite3.connect(os.path.join(curdir, 'cache.db'), check_same_thread = False)
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS cache (id INTEGER PRIMARY KEY AUTOINCREMENT, timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, platform TEXT NOT NULL, cve TEXT NOT NULL, result TEXT NOT NULL)")
cur.execute("CREATE INDEX IF NOT EXISTS cve_idx ON cache (cve)")
conn.commit()
cur.close()
def get_cve_info(cve, platform='x86_64', host=None):
if platform not in ['x86_64','i386']:
return { 'cve': "Platform must be 'x86_64' or 'i386'.", 'verinfo': None }
if host:
snmpq = snmp.SNMPQueryTool(host)
snmpq.get_installed_packages()
cve = cve.strip()
#cachechk = _retr_cve(cve, platform)
#if cachechk is not None:
# return cachechk
cveurl = CVE_BASE_URL + cve + ".html"
try:
html = urllib2.urlopen(cveurl).read()
except urllib2.HTTPError:
# 404 or general screwup, don't cache in case it turns up later
return { 'cve': cve + " -- !!FIX!! Not found on Red Hat's website. Google it, might be Windows only or bad CVE reference.", 'verinfo': None }
except urllib2.URLError:
return { 'cve': "There was a problem with the URL.", 'verinfo': None }
soup = BeautifulSoup(html)
if soup.find(text=rhsa_r) is not None:
# If we've found the above, we have an RHSA (in theory!)
rhsa = soup.find(text=rhsa_r).findNext('a')['href']
rhsa_soup = BeautifulSoup(urllib2.urlopen(rhsa).read())
ver = rhsa_soup.find('a',attrs={"name": "Red Hat Enterprise Linux (v. "+RHEL_VERSION+" server)"}).findNext(text="SRPMS:").findNext('td').contents[0]
ver = ver.replace(".src.", '.'+platform+'.')
result = "Resolved in version "+ver+": " + rhsa
_add_cve(cve, result, platform)
return { 'cve': cve + " -- " + result, 'verinfo': None }
elif soup.find(text="Statement"):
statement = ' '.join([text for text in soup.find(text="Statement").findNext('p').findAll(text=True)])
result = "Red Hat Statement: \""+ statement + "\" - " + cveurl
_add_cve(cve, result, platform)
return { 'cve': cve + " -- " + result, 'verinfo': None }
elif soup.find(text="CVE not found"):
# They changed their website! This is needed to pick up the lack of a CVE now.
result = "!!FIX!! Not found on Red Hat's website. Google it, might be Windows only or bad CVE reference."
return { 'cve': cve + " -- " + result, 'verinfo': None }
else:
result = "!!FIX!! No RHSA for version "+RHEL_VERSION+", no statement either. See: " + cveurl
#_add_cve(cve, result, platform)
return { 'cve': cve + " -- " + result, 'verinfo': None }
def _add_cve(cve, result, platform):
cur = conn.cursor()
cur.execute("""INSERT INTO cache(cve, result, platform) VALUES (?, ?, ?)""", (cve, result, platform))
conn.commit()
cur.close()
def _retr_cve(cve, platform):
cur = conn.cursor()
cur.execute("""SELECT cve,result FROM cache WHERE cve=? AND platform=? LIMIT 1""", (cve, platform))
result = cur.fetchone()
cur.close()
if result is not None:
result = ' -- '.join([t for t in result if t is not None])
return result
if __name__ == '__main__':
rawdata = ""
if sys.stdin.isatty():
print "No input detected. You need to pipe a whitespace separated list of CVEs in!"
print "e.g. `./rhsa.py < cvelist.txt` or your preferred method."
sys.exit(1)
else:
rawdata = sys.stdin.read()
cves = rawdata.split()
for cve in cves:
print get_cve_info(cve)['cve']
| mpl-2.0 | -8,972,298,139,212,128,000 | 40.483871 | 197 | 0.648523 | false |
jszymon/pacal | pacal/examples/central_limit_demo.py | 1 | 2068 | #!===============================
#! Demo of central limit theorem
#!===============================
from __future__ import print_function
import sys
from pylab import *
from pacal import *
from pacal import params
import time
params.general.warn_on_dependent = False
if __name__ == "__main__":
colors = "kbgrcmy"
def central_limit_demo(X, N = 5, xmin = None, xmax = None, ymax = None, **args):
tic=time.time()
figure()
title("Limit of averages of " + X.getName())
X.plot(linewidth = 4, color = "c", **args)
Y = X
print("Limit of averages of " + X.getName() + ": ", end=' ')
for i in range(N-1):
print(i+2, end=' ')
sys.stdout.flush()
Y += X
(Y/(i+2)).plot(color = colors[i%len(colors)], **args)
if xmin is not None:
xlim(xmin = xmin)
if xmax is not None:
xlim(xmax = xmax)
ylim(ymin = 0)
if ymax is not None:
ylim(ymax = ymax)
print()
print("time===", time.time()-tic)
#show()
#!----------------------
#! uniform distribution
#!----------------------
X = UniformDistr(0,1)
central_limit_demo(X, xmin=-0.1, xmax=1.1)
#!----------------------
#! Chi^2_1
#!----------------------
X = ChiSquareDistr(1)
central_limit_demo(X, N=5, ymax=1.5, xmax=3)
#!----------------------
#! Student T w. 2df
#!----------------------
X = StudentTDistr(2)
central_limit_demo(X, N = 5, xmin=-5, xmax=5)
#!----------------------
#! a ratio distribution
#!----------------------
X = UniformDistr(1,3) / UniformDistr(-2,1)
central_limit_demo(X, N = 5, xmin=-5, xmax=5)
#!----------------------
#! Cauchy distribution
#!----------------------
X = CauchyDistr()
central_limit_demo(X, xmin = -10, xmax = 10)
#!----------------------
#! Levy distribution
#!----------------------
X = LevyDistr()
central_limit_demo(X, xmax=5, numberOfPoints = 10000)
show()
| gpl-3.0 | -3,555,198,765,811,452,400 | 26.210526 | 84 | 0.436654 | false |
educloudalliance/eca-auth-data | authdata/tests/test_forms.py | 1 | 2524 |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2014-2015 Haltu Oy, http://haltu.fi
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# pylint: disable=locally-disabled, no-member
from django.test import TestCase
import authdata.models
import authdata.forms
from authdata.tests import factories as f
class TestUserCreationForm(TestCase):
def test_clean_username(self):
obj = authdata.forms.UserCreationForm({'username': 'foo'})
self.assertTrue(obj.is_valid())
username = obj.clean_username()
self.assertEqual(username, 'foo')
def test_clean_username_error_duplicate(self):
f.UserFactory(username='foo')
obj = authdata.forms.UserCreationForm({'username': 'foo'})
self.assertFalse(obj.is_valid())
self.assertEqual(obj.errors, {'username': [u'A user with that username already exists.']})
def test_save(self):
self.assertEqual(authdata.models.User.objects.count(), 0)
obj = authdata.forms.UserCreationForm({'username': 'foo'})
self.assertTrue(obj.is_valid())
user = obj.save(commit=True)
self.assertTrue(user)
self.assertEqual(authdata.models.User.objects.count(), 1)
class TestUserChangeForm(TestCase):
def test_clean_password(self):
user_obj = f.UserFactory(username='foo', password='originalpass1')
obj = authdata.forms.UserChangeForm({'password': 'bar'}, instance=user_obj)
password = obj.clean_password()
self.assertEqual(password, 'originalpass1')
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
| mit | -6,247,756,150,725,642,000 | 37.242424 | 94 | 0.742472 | false |
anacode/anacode-toolkit | anacode/api/writers.py | 1 | 20217 | # -*- coding: utf-8 -*-
import os
import csv
import datetime
import pandas as pd
from itertools import chain
from functools import partial
from anacode import codes
def backup(root, files):
"""Backs up `files` from `root` directory and return list of backed up
file names. Backed up files will have datetime suffix appended to original
file name.
:param root: Absolute path to folder where files to backup are located
:type root: str
    :param files: Names of files that need backing up
    :type files: list
:return: list -- List of backed up file names
"""
backed_up = []
join = os.path.join
root_contents = os.listdir(root)
dt_str = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
for file_name in files:
if file_name not in root_contents:
continue
new_name = file_name + '_' + dt_str
os.rename(join(root, file_name), join(root, new_name))
backed_up.append(new_name)
return backed_up
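# Illustrative call (assumes 'categories.csv' already exists in /tmp/out):
#   backup('/tmp/out', ['categories.csv'])
#   # -> ['categories.csv_20170301120000']   (the datetime suffix varies)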
HEADERS = {
'categories': [u'doc_id', u'text_order', u'category', u'probability'],
'concepts': [u'doc_id', u'text_order', u'concept', u'freq',
u'relevance_score', u'concept_type'],
'concepts_surface_strings': [u'doc_id', u'text_order', u'concept',
u'surface_string', u'text_span'],
'sentiments': [u'doc_id', u'text_order', u'sentiment_value'],
'absa_entities': [u'doc_id', u'text_order', u'entity_name', u'entity_type',
u'surface_string', u'text_span'],
'absa_normalized_texts': [u'doc_id', u'text_order', u'normalized_text'],
'absa_relations': [u'doc_id', u'text_order', u'relation_id',
u'opinion_holder', u'restriction', u'sentiment_value',
u'is_external', u'surface_string', u'text_span'],
'absa_relations_entities': [u'doc_id', u'text_order', u'relation_id',
u'entity_type', u'entity_name'],
'absa_evaluations': [u'doc_id', u'text_order', u'evaluation_id',
u'sentiment_value', u'surface_string', u'text_span'],
'absa_evaluations_entities': [u'doc_id', u'text_order', u'evaluation_id',
u'entity_type', u'entity_name'],
}
# `anacode.agg.aggregations.ApiDataset.from_path` depends
# on ordering of files defined in values here
CSV_FILES = {
'categories': ['categories.csv'],
'concepts': ['concepts.csv', 'concepts_surface_strings.csv'],
'sentiments': ['sentiments.csv'],
'absa': [
'absa_entities.csv', 'absa_normalized_texts.csv',
'absa_relations.csv', 'absa_relations_entities.csv',
'absa_evaluations.csv', 'absa_evaluations_entities.csv'
]
}
def categories_to_list(doc_id, analyzed, single_document=False):
"""Converts categories response to flat list with doc_id included.
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for categories call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'categories' pointing to flat list
of categories
"""
cat_list = []
for order, text_analyzed in enumerate(analyzed):
for result_dict in text_analyzed:
row = [doc_id, 0, result_dict.get('label'),
result_dict.get('probability')]
if single_document:
row[1] += order
else:
row[0] += order
cat_list.append(row)
return {'categories': cat_list}
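# Small illustration of the flattening (payload shape taken from the code above):
#   categories_to_list(3, [[{'label': 'auto', 'probability': 0.92}]])
#   # -> {'categories': [[3, 0, 'auto', 0.92]]}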
def concepts_to_list(doc_id, analyzed, single_document=False):
"""Converts concepts response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for concepts call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with two keys: 'concepts' pointing to flat list
of found concepts and their metadata and 'concepts_surface_strings'
pointing to flat list of strings realizing found concepts
"""
con_list, exp_list = [], []
for order, text_analyzed in enumerate(analyzed):
for concept in text_analyzed or []:
row = [doc_id, 0, concept.get('concept'),
concept.get('freq'), concept.get('relevance_score'),
concept.get('type')]
if single_document:
row[1] += order
else:
row[0] += order
con_list.append(row)
for string in concept.get('surface', []):
surface_str, span = string['surface_string'], string['span']
exp_list.append([row[0], row[1], concept.get('concept'),
surface_str, '-'.join(map(str, span))])
return {'concepts': con_list, 'concepts_surface_strings': exp_list}
def sentiments_to_list(doc_id, analyzed, single_document=False):
"""Converts sentiments response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for sentiment call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with one key 'sentiments' pointing to flat list
of sentiment probabilities
"""
sen_list = []
for order, sentiment in enumerate(analyzed):
row = [doc_id, 0, sentiment['sentiment_value']]
if single_document:
# this should not happen
row[1] += order
else:
row[0] += order
sen_list.append(row)
return {'sentiments': sen_list}
def _absa_entities_to_list(doc_id, order, entities):
ent_list = []
for entity_dict in entities:
text_span = '-'.join(map(str, entity_dict['surface']['span']))
surface_string = entity_dict['surface']['surface_string']
for semantics in entity_dict['semantics']:
row = [doc_id, order, semantics['value'], semantics['type'],
surface_string, text_span]
ent_list.append(row)
return ent_list
def _absa_normalized_text_to_list(doc_id, order, normalized_text):
return [[doc_id, order, normalized_text]]
def _absa_relations_to_list(doc_id, order, relations):
rel_list, ent_list = [], []
for rel_index, rel in enumerate(relations):
rel_row = [doc_id, order, rel_index,
rel['semantics']['opinion_holder'],
rel['semantics']['restriction'],
rel['semantics']['sentiment_value'],
rel['external_entity'],
rel['surface']['surface_string'],
'-'.join(map(str, rel['surface']['span']))]
rel_list.append(rel_row)
for ent in rel['semantics'].get('entity', []):
ent_row = [doc_id, order, rel_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return rel_list, ent_list
def _absa_evaluations_to_list(doc_id, order, evaluations):
eval_list, ent_list = [], []
for eval_index, evaluation in enumerate(evaluations):
eval_row = [doc_id, order, eval_index,
evaluation['semantics']['sentiment_value'],
evaluation['surface']['surface_string'],
'-'.join(map(str, evaluation['surface']['span']))]
eval_list.append(eval_row)
for ent in evaluation['semantics'].get('entity', []):
ent_row = [doc_id, order, eval_index, ent['type'], ent['value']]
ent_list.append(ent_row)
return eval_list, ent_list
def absa_to_list(doc_id, analyzed, single_document=False):
"""Converts ABSA response to flat lists with doc_id included
:param doc_id: Will be inserted to each row as first element
:param analyzed: Response json from anacode api for ABSA call
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
:return: dict -- Dictionary with six keys: 'absa_entities' pointing to flat
list of found entities with metadata, 'absa_normalized_texts' pointing to
flat list of normalized chinese texts, 'absa_relations' pointing to found
entity relations with metadata, 'absa_relations_entities' pointing to flat
list of entities that belong to absa relations, 'absa_evaluations'
pointing to flat list of entity evaluations with metadata and
'absa_evaluations_entities' specifying entities in absa_evaluations
"""
absa = {
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': []
}
for order, text_analyzed in enumerate(analyzed):
if single_document:
current_id = doc_id
text_order = order
else:
current_id = doc_id + order
text_order = 0
entities = text_analyzed['entities']
ents = _absa_entities_to_list(current_id, text_order, entities)
text = text_analyzed['normalized_text']
texts = _absa_normalized_text_to_list(current_id, text_order, text)
relations = text_analyzed['relations']
rels, rel_ents = _absa_relations_to_list(current_id, text_order,
relations)
evaluations = text_analyzed['evaluations']
evals, eval_ents = _absa_evaluations_to_list(current_id, text_order,
evaluations)
absa['absa_entities'].extend(ents)
absa['absa_normalized_texts'].extend(texts)
absa['absa_relations'].extend(rels)
absa['absa_relations_entities'].extend(rel_ents)
absa['absa_evaluations'].extend(evals)
absa['absa_evaluations_entities'].extend(eval_ents)
return absa
class Writer(object):
"""Base "abstract" class containing common methods that are
needed by all implementations of Writer interface.
The writer interface consists of init, close and write_bulk methods.
"""
def __init__(self):
self.ids = {'scrape': 0, 'analyze': 0}
def write_row(self, call_type, call_result):
"""Decides what kind of data it got and calls appropriate write method.
:param call_type: Library's ID of anacode call
:type call_type: int
:param call_result: JSON response from Anacode API
:type call_result: list
"""
if call_type == codes.SCRAPE:
self.write_scrape(call_result)
if call_type == codes.ANALYZE:
self.write_analysis(call_result)
def _add_new_data_from_dict(self, new_data):
"""Not implemented here!
Used by write methods to submit new Anacode API response data for storage.
:param new_data: dict; keys are data sets names and values are
flat lists of rows
:type new_data: dict
"""
pass
def write_scrape(self, scraped):
self.ids['scrape'] += 1
def write_analysis(self, analyzed):
"""Inspects analysis result for performed analysis and delegates
persisting of results to appropriate write methods.
:param analyzed: JSON object analysis response
:type: dict
"""
single_document = analyzed.get('single_document', False)
analyzed_length = 1
if 'categories' in analyzed:
categories = analyzed['categories']
self.write_categories(categories, single_document=single_document)
if not single_document:
analyzed_length = len(categories)
if 'concepts' in analyzed:
concepts = analyzed['concepts']
self.write_concepts(concepts, single_document=single_document)
if not single_document:
analyzed_length = len(concepts)
if 'sentiment' in analyzed:
sentiment = analyzed['sentiment']
self.write_sentiment(sentiment, single_document=single_document)
if not single_document:
analyzed_length = len(sentiment)
if 'absa' in analyzed:
absa = analyzed['absa']
self.write_absa(analyzed['absa'], single_document=single_document)
if not single_document:
analyzed_length = len(absa)
self.ids['analyze'] += analyzed_length
def write_categories(self, analyzed, single_document=False):
"""Converts categories analysis result to flat lists and stores them.
:param analyzed: JSON categories analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = categories_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_concepts(self, analyzed, single_document=False):
"""Converts concepts analysis result to flat lists and stores them.
:param analyzed: JSON concepts analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = concepts_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_sentiment(self, analyzed, single_document=False):
"""Converts sentiment analysis result to flat lists and stores them.
:param analyzed: JSON sentiment analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = sentiments_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_absa(self, analyzed, single_document=False):
"""Converts absa analysis result to flat lists and stores them.
:param analyzed: JSON absa analysis result
:type analyzed: list
:param single_document: Is analysis describing just one document
:type single_document: bool
"""
doc_id = self.ids['analyze']
new_data = absa_to_list(doc_id, analyzed, single_document)
self._add_new_data_from_dict(new_data)
def write_bulk(self, results):
"""Stores multiple anacode api's JSON responses marked with call IDs as
tuples (call_id, call_result). Both scrape and analyze call IDs
are defined in anacode.codes module.
:param results: List of anacode responses with IDs of calls used
:type results: list
"""
for call_type, call_result in results:
self.write_row(call_type, call_result)
def init(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def close(self):
"""Not implemented here! Each subclass should decide what to do here."""
pass
def __enter__(self):
self.init()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class DataFrameWriter(Writer):
"""Writes Anacode API output into pandas.DataFrame instances."""
def __init__(self, frames=None):
"""Initializes dictionary of result frames. Alternatively uses given
frames dict for storage.
:param frames: Might be specified to use this instead of new dict
:type frames: dict
"""
super(DataFrameWriter, self).__init__()
self.frames = {} if frames is None else frames
self._row_data = {}
def init(self):
"""Initialized empty lists for each possible data frame."""
self._row_data = {
'categories': [],
'concepts': [],
'concepts_surface_strings': [],
'sentiments': [],
'absa_entities': [],
'absa_normalized_texts': [],
'absa_relations': [],
'absa_relations_entities': [],
'absa_evaluations': [],
'absa_evaluations_entities': [],
}
def close(self):
"""Creates pandas data frames to self.frames dict and clears internal
state.
"""
for name, row in self._row_data.items():
if len(row) > 0:
self.frames[name] = pd.DataFrame(row, columns=HEADERS[name])
self._row_data = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
:param new_data: Anacode api result
        :type new_data: dict
"""
for name, row_list in new_data.items():
self._row_data[name].extend(row_list)
class CSVWriter(Writer):
def __init__(self, target_dir='.'):
"""Initializes Writer to store Anacode API analysis results in target_dir in
csv files.
:param target_dir: Path to directory where to store csv files
:type target_dir: str
"""
super(CSVWriter, self).__init__()
self.target_dir = os.path.abspath(os.path.expanduser(target_dir))
self._files = {}
self.csv = {}
def _open_csv(self, csv_name):
path = partial(os.path.join, self.target_dir)
try:
return open(path(csv_name), 'w', newline='')
except TypeError:
return open(path(csv_name), 'wb')
def init(self):
"""Opens all csv files for writing and writes headers to them."""
self.close()
backup(self.target_dir, chain.from_iterable(CSV_FILES.values()))
self._files = {
'categories': self._open_csv('categories.csv'),
'concepts': self._open_csv('concepts.csv'),
'concepts_surface_strings': self._open_csv(
'concepts_surface_strings.csv'
),
'sentiments': self._open_csv('sentiments.csv'),
'absa_entities': self._open_csv('absa_entities.csv'),
'absa_normalized_texts': self._open_csv(
'absa_normalized_texts.csv'
),
'absa_relations': self._open_csv('absa_relations.csv'),
'absa_relations_entities': self._open_csv(
'absa_relations_entities.csv'
),
'absa_evaluations': self._open_csv('absa_evaluations.csv'),
'absa_evaluations_entities': self._open_csv(
'absa_evaluations_entities.csv'
),
}
self.csv = {name: csv.writer(fp) for name, fp in self._files.items()}
for name, writer in self.csv.items():
writer.writerow(HEADERS[name])
def _csv_has_content(self, csv_path):
if not os.path.isfile(csv_path):
return False
with open(csv_path) as fp:
for line_count, line in enumerate(fp):
                if line_count == 1 and line.strip() != '':
return True
return False
def close(self):
"""Closes all csv files and removes empty ones."""
for name, file in self._files.items():
try:
file.close()
except (IOError, AttributeError):
print('Problem closing "{}"'.format(name))
for file_list in CSV_FILES.values():
for file_name in file_list:
path = os.path.join(self.target_dir, file_name)
if os.path.isfile(path) and not self._csv_has_content(path):
os.unlink(path)
self._files = {}
self.csv = {}
def _add_new_data_from_dict(self, new_data):
"""Stores anacode api result converted to flat lists.
:param new_data: Anacode api result
        :type new_data: dict
"""
for name, row_list in new_data.items():
self.csv[name].writerows(row_list)
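# Minimal usage sketch (the analysis payload is illustrative and the target
# directory is assumed to exist):
#   with CSVWriter(target_dir='out') as writer:
#       writer.write_analysis({'sentiment': [{'sentiment_value': 0.7}]})
#   # -> out/sentiments.csv with header "doc_id,text_order,sentiment_value" and row "0,0,0.7"
# DataFrameWriter works the same way but exposes the results in its `frames` dict.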
| bsd-3-clause | -7,003,819,235,267,185,000 | 37.729885 | 84 | 0.598012 | false |
sony/nnabla | python/src/nnabla/backward_function/dropout.py | 1 | 1383 | # Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
from .utils import no_grad, get_output
def dropout_backward(inputs, p=0.5, seed=-1, output_mask=False):
"""
Args:
inputs (list of nn.Variable): Incomming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
if not output_mask:
raise ValueError(
"dropout_backward is supported for output_mask=True.")
dy0 = inputs[0]
dy1 = inputs[1]
x0 = inputs[2]
y1 = get_output(x0, "Dropout", nth_output=1)
m0 = y1.get_unlinked_variable() # mask
dx0 = dy0 * m0 / (1 - p)
return dx0
| apache-2.0 | 1,812,822,212,141,513,200 | 32.731707 | 86 | 0.702097 | false |
mosra/m.css | documentation/test_doxygen/test_doxyfile.py | 1 | 6508 | #
# This file is part of m.css.
#
# Copyright © 2017, 2018, 2019, 2020 Vladimír Vondruš <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
import copy
import os
import shutil
import subprocess
import unittest
from doxygen import parse_doxyfile, State, default_config
from . import BaseTestCase
class Doxyfile(unittest.TestCase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
# Display ALL THE DIFFS
self.maxDiff = None
expected_doxyfile = {
'DOT_FONTNAME': 'Helvetica',
'DOT_FONTSIZE': 10,
'HTML_OUTPUT': 'html',
'OUTPUT_DIRECTORY': '',
'PROJECT_BRIEF': 'is cool',
'PROJECT_LOGO': '',
'PROJECT_NAME': 'My Pet Project',
'SHOW_INCLUDE_FILES': True,
'XML_OUTPUT': 'xml'
}
expected_config = {
'DOXYFILE': 'Doxyfile',
'FAVICON': ('favicon-dark.png', 'image/png'),
'LINKS_NAVBAR1': [(None, 'Pages', 'pages.html', 'pages', []),
(None, 'Modules', 'modules.html', 'modules', [])],
# different order
'LINKS_NAVBAR2': [(None, 'Files', 'files.html', 'files', []),
(None, 'Classes', 'annotated.html', 'annotated', [])],
'FINE_PRINT': 'this is "quotes"',
'THEME_COLOR': '#22272e',
'STYLESHEETS': ['a.css', 'b.css'],
'HTML_HEADER': None,
'EXTRA_FILES': ['css', 'another.png', 'hello'],
'PAGE_HEADER': 'this is "quotes" \'apostrophes\'',
'CLASS_INDEX_EXPAND_LEVELS': 1,
'CLASS_INDEX_EXPAND_INNER': False,
'FILE_INDEX_EXPAND_LEVELS': 1,
'M_CODE_FILTERS_PRE': {},
'M_CODE_FILTERS_POST': {},
'M_MATH_CACHE_FILE': 'm.math.cache',
'SEARCH_DISABLED': False,
'SEARCH_DOWNLOAD_BINARY': False,
'SEARCH_BASE_URL': None,
'SEARCH_EXTERNAL_URL': None,
'SEARCH_HELP':
"""<p class="m-noindent">Search for symbols, directories, files, pages or
modules. You can omit any prefix from the symbol or file path; adding a
<code>:</code> or <code>/</code> suffix lists all members of given symbol or
directory.</p>
<p class="m-noindent">Use <span class="m-label m-dim">↓</span>
/ <span class="m-label m-dim">↑</span> to navigate through the list,
<span class="m-label m-dim">Enter</span> to go.
<span class="m-label m-dim">Tab</span> autocompletes common prefix, you can
copy a link to the result using <span class="m-label m-dim">⌘</span>
<span class="m-label m-dim">L</span> while <span class="m-label m-dim">⌘</span>
<span class="m-label m-dim">M</span> produces a Markdown link.</p>
""",
'SHOW_UNDOCUMENTED': False,
'VERSION_LABELS': False,
}
def test(self):
# Basically mirroring what's in the Doxyfile-legacy. It's silly because
# we don't need to check most of these here anyway but whatever. To
# make this a bit saner, all existing tests are using the
# "legacy Doxyfile" config anyway, so it should be tested more than
# enough... until we port away from that. This should get then further
# extended to cover the cases that are no longer tested by other code.
state = State({**copy.deepcopy(default_config), **{
'EXTRA_FILES': ['css', 'another.png', 'hello'],
'STYLESHEETS': ['a.css', 'b.css'],
'PAGE_HEADER': 'this is "quotes" \'apostrophes\'',
'FINE_PRINT': 'this is "quotes"',
'LINKS_NAVBAR1': [(None, 'pages', []),
(None, 'modules', [])],
'LINKS_NAVBAR2': [(None, 'files', []),
(None, 'annotated', [])]
}})
parse_doxyfile(state, 'test_doxygen/doxyfile/Doxyfile')
self.assertEqual(state.doxyfile, self.expected_doxyfile)
self.assertEqual(state.config, self.expected_config)
def test_legacy(self):
state = State(copy.deepcopy(default_config))
parse_doxyfile(state, 'test_doxygen/doxyfile/Doxyfile-legacy')
self.assertEqual(state.doxyfile, self.expected_doxyfile)
self.assertEqual(state.config, self.expected_config)
def test_subdirs(self):
state = State(copy.deepcopy(default_config))
with self.assertRaises(NotImplementedError):
parse_doxyfile(state, 'test_doxygen/doxyfile/Doxyfile-subdirs')
class UpgradeCustomVariables(BaseTestCase):
def test(self):
# Copy the Doxyfile to a new location because it gets overwritten
shutil.copyfile(os.path.join(self.path, 'Doxyfile'),
os.path.join(self.path, 'Doxyfile-upgrade'))
subprocess.run(['doxygen', '-u', 'Doxyfile-upgrade'], cwd=self.path, check=True)
with open(os.path.join(self.path, 'Doxyfile-upgrade'), 'r') as f:
contents = f.read()
self.assertFalse('UNKNOWN_VARIABLE' in contents)
self.assertFalse('COMMENTED_OUT_VARIABLE' in contents)
self.assertTrue('## HASHED_COMMENTED_VARIABLE = 2' in contents)
self.assertTrue('##! HASHED_BANG_COMMENTED_VARIABLE = 3 \\' in contents)
self.assertTrue('##! HASHED_BANG_COMMENTED_VARIABLE_CONT' in contents)
self.assertTrue('##!HASHED_BANG_COMMENTED_VARIABLE_NOSPACE = 4' in contents)
self.assertTrue('INPUT = 5' in contents)
self.assertTrue('##! HASHED_BANG_COMMENTED_VARIABLE_END = 6' in contents)
| mit | 1,598,037,354,109,257,000 | 42.34 | 88 | 0.627903 | false |
tBaxter/activity-monitor | activity_monitor/models.py | 1 | 4354 | from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.functional import cached_property
from .managers import ActivityItemManager
class Activity(models.Model):
"""
Stores an action that occurred that is being tracked
according to ACTIVITY_MONITOR settings.
"""
actor = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name="subject",
        on_delete=models.CASCADE
)
timestamp = models.DateTimeField()
verb = models.CharField(blank=True, null=True, max_length=255, editable=False)
override_string = models.CharField(blank=True, null=True, max_length=255, editable=False)
target = models.CharField(blank=True, null=True, max_length=255, editable=False)
actor_name = models.CharField(blank=True, null=True, max_length=255, editable=False)
content_object = GenericForeignKey()
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
objects = ActivityItemManager()
class Meta:
ordering = ['-timestamp']
unique_together = [('content_type', 'object_id')]
get_latest_by = 'timestamp'
verbose_name_plural = 'actions'
def __unicode__(self):
return "{0}: {1}".format(self.content_type.model_class().__name__, self.content_object)
def save(self, *args, **kwargs):
"""
Store a string representation of content_object as target
and actor name for fast retrieval and sorting.
"""
if not self.target:
self.target = str(self.content_object)
if not self.actor_name:
self.actor_name = str(self.actor)
        super(Activity, self).save(*args, **kwargs)
def get_absolute_url(self):
"""
Use original content object's
get_absolute_url method.
"""
return self.content_object.get_absolute_url()
@cached_property
def short_action_string(self):
"""
Returns string with actor and verb, allowing target/object
to be filled in manually.
Example:
[actor] [verb] or
"Joe cool posted a comment"
"""
output = "{0} ".format(self.actor)
if self.override_string:
output += self.override_string
else:
output += self.verb
return output
@cached_property
def full_action_string(self):
"""
Returns full string with actor, verb and target content object.
Example:
[actor] [verb] [content object/target] or
Joe cool posted a new topic: "my new topic"
"""
output = "{} {}".format(self.short_action_string, self.content_object)
return output
@cached_property
def image(self):
"""
Attempts to provide a representative image from a content_object based on
the content object's get_image() method.
If there is a another content.object, as in the case of comments and other GFKs,
then it will follow to that content_object and then get the image.
Requires get_image() to be defined on the related model even if it just
returns object.image, to avoid bringing back images you may not want.
Note that this expects the image only. Anything related (caption, etc) should be stripped.
"""
obj = self.content_object
# First, try to get from a get_image() helper method
try:
image = obj.get_image()
except AttributeError:
try:
image = obj.content_object.get_image()
except:
image = None
# if we didn't find one, try to get it from foo.image
# This allows get_image to take precedence for greater control.
if not image:
try:
image = obj.image
except AttributeError:
try:
image = obj.content_object.image
except:
return None
# Finally, ensure we're getting an image, not an image object
# with caption and byline and other things.
try:
return image.image
except AttributeError:
return image
| mit | 5,448,646,263,097,085,000 | 32.236641 | 98 | 0.616904 | false |
DistributedSystemsGroup/zoe | zoe_master/scheduler/elastic_scheduler.py | 1 | 14941 | # Copyright (c) 2017, Daniele Venzano
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The Elastic scheduler is the implementation of the scheduling algorithm presented in this paper:
https://arxiv.org/abs/1611.09528
"""
import logging
import threading
import time
from zoe_lib.state import Execution, SQLManager, Service # pylint: disable=unused-import
from zoe_master.exceptions import ZoeException
from zoe_master.backends.interface import terminate_execution, terminate_service, start_elastic, start_essential, update_service_resource_limits
from zoe_master.scheduler.simulated_platform import SimulatedPlatform
from zoe_master.exceptions import UnsupportedSchedulerPolicyError
from zoe_master.stats import NodeStats # pylint: disable=unused-import
from zoe_master.metrics.base import StatsManager # pylint: disable=unused-import
log = logging.getLogger(__name__)
SELF_TRIGGER_TIMEOUT = 60 # the scheduler will trigger itself periodically in case platform resources have changed outside its control
def catch_exceptions_and_retry(func):
"""Decorator to catch exceptions in threaded functions."""
def wrapper(self):
"""The wrapper."""
while True:
try:
func(self)
except BaseException: # pylint: disable=broad-except
log.exception('Unmanaged exception in thread loop')
else:
log.debug('Thread terminated')
break
return wrapper
class ExecutionProgress:
"""Additional data for tracking execution sizes while in the queue."""
def __init__(self):
self.last_time_scheduled = 0
self.progress_sequence = []
class ZoeElasticScheduler:
    """The Scheduler class for size-based scheduling. Policy can be "FIFO", "SIZE" or "DYNSIZE"."""
def __init__(self, state: SQLManager, policy, metrics: StatsManager):
if policy not in ('FIFO', 'SIZE', 'DYNSIZE'):
raise UnsupportedSchedulerPolicyError
self.metrics = metrics
self.trigger_semaphore = threading.Semaphore(0)
self.policy = policy
self.queue = []
self.queue_running = []
self.queue_termination = []
self.additional_exec_state = {}
self.loop_quit = False
self.loop_th = threading.Thread(target=self.loop_start_th, name='scheduler')
self.core_limit_recalc_trigger = threading.Event()
self.core_limit_th = threading.Thread(target=self._adjust_core_limits, name='adjust_core_limits')
self.state = state
for execution in self.state.executions.select(status='running'):
if execution.all_services_running:
self.queue_running.append(execution)
else:
self.queue.append(execution)
self.additional_exec_state[execution.id] = ExecutionProgress()
self.loop_th.start()
self.core_limit_th.start()
def trigger(self):
"""Trigger a scheduler run."""
self.trigger_semaphore.release()
def incoming(self, execution: Execution):
"""
This method adds the execution to the end of the queue and triggers the scheduler.
:param execution: The execution
:return:
"""
exec_data = ExecutionProgress()
self.additional_exec_state[execution.id] = exec_data
self.queue.append(execution)
self.trigger()
def terminate(self, execution: Execution) -> None:
"""
Inform the master that an execution has been terminated. This can be done asynchronously.
:param execution: the terminated execution
:return: None
"""
execution.set_cleaning_up()
self.queue_termination.append(execution)
def _terminate_executions(self):
while len(self.queue_termination) > 0:
execution = self.queue_termination.pop(0)
try:
self.queue.remove(execution)
except ValueError:
try:
self.queue_running.remove(execution)
except ValueError:
log.warning('Execution {} is not in any queue, attempting termination anyway'.format(execution.id))
try:
del self.additional_exec_state[execution.id]
except KeyError:
pass
terminate_execution(execution)
log.info('Execution {} terminated successfully'.format(execution.id))
def _refresh_execution_sizes(self):
if self.policy == "FIFO":
return
elif self.policy == "SIZE":
return
elif self.policy == "DYNSIZE":
for execution in self.queue: # type: Execution
try:
exec_data = self.additional_exec_state[execution.id]
except KeyError:
continue
if exec_data.last_time_scheduled == 0:
continue
elif execution.size <= 0:
execution.set_size(execution.total_reservations.cores.min * execution.total_reservations.memory.min)
continue
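                # Aging: shrink the stored size in proportion to the time spent
                # waiting since the last scheduling attempt, so long-waiting
                # executions move toward the front of the SIZE-ordered queue;
                # the 256 * 1024**2 factor is an arbitrary decay rate.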
new_size = execution.size - (time.time() - exec_data.last_time_scheduled) * (256 * 1024 ** 2) # to be tuned
execution.set_size(new_size)
def _pop_all(self):
out_list = []
for execution in self.queue: # type: Execution
            if execution.status != Execution.TERMINATED_STATUS and execution.status != Execution.CLEANING_UP_STATUS:
out_list.append(execution)
else:
log.debug('While popping, throwing away execution {} that is in status {}'.format(execution.id, execution.status))
return out_list
def _requeue(self, execution: Execution):
self.additional_exec_state[execution.id].last_time_scheduled = time.time()
if execution not in self.queue: # sanity check: the execution should be in the queue
log.warning("Execution {} wants to be re-queued, but it is not in the queue".format(execution.id))
@catch_exceptions_and_retry
def loop_start_th(self): # pylint: disable=too-many-locals
"""The Scheduler thread loop."""
auto_trigger = SELF_TRIGGER_TIMEOUT
while True:
ret = self.trigger_semaphore.acquire(timeout=1)
if not ret: # Semaphore timeout, do some cleanup
auto_trigger -= 1
if auto_trigger == 0:
auto_trigger = SELF_TRIGGER_TIMEOUT
self.trigger()
continue
if self.loop_quit:
break
self._check_dead_services()
self._terminate_executions()
if len(self.queue) == 0:
log.debug("Scheduler loop has been triggered, but the queue is empty")
self.core_limit_recalc_trigger.set()
continue
log.debug("Scheduler loop has been triggered")
while True: # Inner loop will run until no new executions can be started or the queue is empty
self._refresh_execution_sizes()
if self.policy == "SIZE" or self.policy == "DYNSIZE":
self.queue.sort(key=lambda execution: execution.size)
jobs_to_attempt_scheduling = self._pop_all()
log.debug('Scheduler inner loop, jobs to attempt scheduling:')
for job in jobs_to_attempt_scheduling:
log.debug("-> {} ({})".format(job, job.size))
try:
platform_state = self.metrics.current_stats
except ZoeException:
log.error('Cannot retrieve platform state, cannot schedule')
for job in jobs_to_attempt_scheduling:
self._requeue(job)
break
cluster_status_snapshot = SimulatedPlatform(platform_state)
jobs_to_launch = []
free_resources = cluster_status_snapshot.aggregated_free_memory()
# Try to find a placement solution using a snapshot of the platform status
for job in jobs_to_attempt_scheduling: # type: Execution
jobs_to_launch_copy = jobs_to_launch.copy()
# remove all elastic services from the previous simulation loop
for job_aux in jobs_to_launch: # type: Execution
cluster_status_snapshot.deallocate_elastic(job_aux)
job_can_start = False
if not job.is_running:
job_can_start = cluster_status_snapshot.allocate_essential(job)
if job_can_start or job.is_running:
jobs_to_launch.append(job)
# Try to put back the elastic services
for job_aux in jobs_to_launch:
cluster_status_snapshot.allocate_elastic(job_aux)
current_free_resources = cluster_status_snapshot.aggregated_free_memory()
if current_free_resources >= free_resources:
jobs_to_launch = jobs_to_launch_copy
break
free_resources = current_free_resources
placements = cluster_status_snapshot.get_service_allocation()
log.info('Allocation after simulation: {}'.format(placements))
# We port the results of the simulation into the real cluster
for job in jobs_to_launch: # type: Execution
if not job.essential_services_running:
ret = start_essential(job, placements)
if ret == "fatal":
jobs_to_attempt_scheduling.remove(job)
self.queue.remove(job)
continue # trow away the execution
elif ret == "requeue":
self._requeue(job)
continue
elif ret == "ok":
job.set_running()
assert ret == "ok"
start_elastic(job, placements)
if job.all_services_active:
log.info('execution {}: all services are active'.format(job.id))
jobs_to_attempt_scheduling.remove(job)
self.queue.remove(job)
self.queue_running.append(job)
self.core_limit_recalc_trigger.set()
for job in jobs_to_attempt_scheduling:
self._requeue(job)
if len(self.queue) == 0:
log.debug('empty queue, exiting inner loop')
break
if len(jobs_to_launch) == 0:
log.debug('No executions could be started, exiting inner loop')
break
def quit(self):
"""Stop the scheduler thread."""
self.loop_quit = True
self.trigger()
self.core_limit_recalc_trigger.set()
self.loop_th.join()
self.core_limit_th.join()
def stats(self):
"""Scheduler statistics."""
if self.policy == "SIZE":
queue = sorted(self.queue, key=lambda execution: execution.size)
else:
queue = self.queue
return {
'queue_length': len(self.queue),
'running_length': len(self.queue_running),
'termination_queue_length': len(self.queue_termination),
'queue': [s.id for s in queue],
'running_queue': [s.id for s in self.queue_running],
'termination_queue': [s.id for s in self.queue_termination]
}
@catch_exceptions_and_retry
def _adjust_core_limits(self):
self.core_limit_recalc_trigger.clear()
while not self.loop_quit:
self.core_limit_recalc_trigger.wait()
if self.loop_quit:
break
stats = self.metrics.current_stats
for node in stats.nodes: # type: NodeStats
new_core_allocations = {}
node_services = self.state.services.select(backend_host=node.name, backend_status=Service.BACKEND_START_STATUS)
if len(node_services) == 0:
continue
for service in node_services:
new_core_allocations[service.id] = service.resource_reservation.cores.min
if node.cores_reserved < node.cores_total:
cores_free = node.cores_total - node.cores_reserved
cores_to_add = cores_free / len(node_services)
else:
cores_to_add = 0
for service in node_services:
update_service_resource_limits(service, cores=new_core_allocations[service.id] + cores_to_add)
self.core_limit_recalc_trigger.clear()
def _check_dead_services(self):
# Check for executions that are no longer viable since an essential service died
for execution in self.queue_running:
for service in execution.services:
if service.essential and service.backend_status == service.BACKEND_DIE_STATUS:
log.info("Essential service {} ({}) of execution {} died, terminating execution".format(service.id, service.name, execution.id))
service.restarted()
execution.set_error_message("Essential service {} died".format(service.name))
self.terminate(execution)
break
# Check for executions that need to be re-queued because one of the elastic components died
# Do it in two loops to prevent rescheduling executions that need to be terminated
for execution in self.queue_running:
for service in execution.services:
if not service.essential and service.backend_status == service.BACKEND_DIE_STATUS:
log.info("Elastic service {} ({}) of execution {} died, rescheduling".format(service.id, service.name, execution.id))
terminate_service(service)
service.restarted()
self.queue_running.remove(execution)
self.queue.append(execution)
break
| apache-2.0 | -5,541,096,387,478,391,000 | 41.810888 | 148 | 0.581554 | false |
jonathanslenders/pymux | pymux/pipes/win32_client.py | 1 | 1071 | from __future__ import unicode_literals
from .win32 import read_message_from_pipe, write_message_to_pipe, connect_to_pipe
from ctypes import windll
from prompt_toolkit.eventloop import From, Return
import six
__all__ = [
'PipeClient',
]
class PipeClient(object):
r"""
Windows pipe client.
:param pipe_name: Name of the pipe. E.g. \\.\pipe\pipe_name
"""
def __init__(self, pipe_name):
assert isinstance(pipe_name, six.text_type)
self.pipe_handle = connect_to_pipe(pipe_name)
def write_message(self, text):
"""
(coroutine)
Write message into the pipe.
"""
yield From(write_message_to_pipe(self.pipe_handle, text))
def read_message(self):
"""
(coroutine)
Read one single message from the pipe and return as text.
"""
message = yield From(read_message_from_pipe(self.pipe_handle))
raise Return(message)
def close(self):
"""
Close the connection.
"""
windll.kernel32.CloseHandle(self.pipe_handle)
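
# Minimal usage sketch (the pipe name and message text are hypothetical; this
# assumes it runs inside a prompt_toolkit coroutine so ``yield From(...)`` is
# available):
#
#     client = PipeClient(r'\\.\pipe\pymux-example')
#     yield From(client.write_message('hello'))
#     reply = yield From(client.read_message())
#     client.close()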
| bsd-3-clause | -6,415,395,963,983,889,000 | 25.121951 | 81 | 0.615313 | false |
praba230890/PYPOWER | pypower/t/t_scale_load.py | 2 | 21996 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Tests for code in C{scale_load}.
"""
from os.path import dirname, join
from numpy import array, zeros, in1d, vstack, flatnonzero as find
from pypower.loadcase import loadcase
from pypower.isload import isload
from pypower.scale_load import scale_load, ScalingError
from pypower.idx_bus import PD, QD, BUS_AREA
from pypower.idx_gen import GEN_BUS, QG, PMIN, QMIN, QMAX
from pypower.t.t_begin import t_begin
from pypower.t.t_is import t_is
from pypower.t.t_ok import t_ok
from pypower.t.t_end import t_end
def t_scale_load(quiet=False):
"""Tests for code in C{scale_load}.
@author: Ray Zimmerman (PSERC Cornell)
"""
n_tests = 275
t_begin(n_tests, quiet)
ppc = loadcase(join(dirname(__file__), 't_auction_case'))
ppc['gen'][7, GEN_BUS] = 2 ## multiple d. loads per area, same bus as gen
ppc['gen'][7, [QG, QMIN, QMAX]] = array([3, 0, 3])
## put it load before gen in matrix
ppc['gen'] = vstack([ppc['gen'][7, :], ppc['gen'][:7, :], ppc['gen'][8, :]])
ld = find(isload(ppc['gen']))
a = [None] * 3
lda = [None] * 3
for k in range(3):
a[k] = find(ppc['bus'][:, BUS_AREA] == k + 1) ## buses in area k
tmp = find( in1d(ppc['gen'][ld, GEN_BUS] - 1, a[k]) )
lda[k] = ld[tmp] ## disp loads in area k
area = [None] * 3
for k in range(3):
area[k] = {'fixed': {}, 'disp': {}, 'both': {}}
area[k]['fixed']['p'] = sum(ppc['bus'][a[k], PD])
area[k]['fixed']['q'] = sum(ppc['bus'][a[k], QD])
area[k]['disp']['p'] = -sum(ppc['gen'][lda[k], PMIN])
area[k]['disp']['qmin'] = -sum(ppc['gen'][lda[k], QMIN])
area[k]['disp']['qmax'] = -sum(ppc['gen'][lda[k], QMAX])
area[k]['disp']['q'] = area[k]['disp']['qmin'] + area[k]['disp']['qmax']
area[k]['both']['p'] = area[k]['fixed']['p'] + area[k]['disp']['p']
area[k]['both']['q'] = area[k]['fixed']['q'] + area[k]['disp']['q']
total = {'fixed': {}, 'disp': {}, 'both': {}}
total['fixed']['p'] = sum(ppc['bus'][:, PD])
total['fixed']['q'] = sum(ppc['bus'][:, QD])
total['disp']['p'] = -sum(ppc['gen'][ld, PMIN])
total['disp']['qmin'] = -sum(ppc['gen'][ld, QMIN])
total['disp']['qmax'] = -sum(ppc['gen'][ld, QMAX])
total['disp']['q'] = total['disp']['qmin'] + total['disp']['qmax']
total['both']['p'] = total['fixed']['p'] + total['disp']['p']
total['both']['q'] = total['fixed']['q'] + total['disp']['q']
##----- single load zone, one scale factor -----
load = array([2])
t = 'all fixed loads (PQ) * 2 : '
bus, _ = scale_load(load, ppc['bus'])
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all fixed loads (P) * 2 : '
opt = {'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (PQ) * 2 : '
bus, gen = scale_load(load, ppc['bus'], ppc['gen'])
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load * total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), load * total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), load * total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (P) * 2 : '
opt = {'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load * total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (PQ) * 2 : '
opt = {'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), load * total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), load * total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (P) * 2 : '
opt = {'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load * total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
##----- single load zone, one scale quantity -----
load = array([200.0])
t = 'all fixed loads (PQ) => total = 200 : '
opt = {'scale': 'QUANTITY'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
t_is(sum(bus[:, PD]), load, 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load / total['fixed']['p'] * total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'scale': 'QUANTITY', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load - total['disp']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), (load - total['disp']['p'])/total['fixed']['p']*total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all fixed loads (P) => total = 200 : '
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
t_is(sum(bus[:, PD]), load, 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load - total['disp']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (PQ) => total = 200 : '
opt = {'scale': 'QUANTITY'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load / total['both']['p']*total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), load / total['both']['p']*total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load / total['both']['p']*total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), load / total['both']['p']*total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), load / total['both']['p']*total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all loads (P) => total = 200 : '
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), load / total['both']['p']*total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load / total['both']['p']*total['disp']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (PQ) => total = 200 : '
opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load - total['fixed']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), (load - total['fixed']['p'])/total['disp']['p']*total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), (load - total['fixed']['p'])/total['disp']['p']*total['disp']['qmax'], 8, [t, 'total disp Qmax'])
t = 'all disp loads (P) => total = 200 : '
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
t_is(sum(bus[:, PD]), total['fixed']['p'], 8, [t, 'total fixed P'])
t_is(sum(bus[:, QD]), total['fixed']['q'], 8, [t, 'total fixed Q'])
t_is(-sum(gen[ld, PMIN]), load - total['fixed']['p'], 8, [t, 'total disp P'])
t_is(-sum(gen[ld, QMIN]), total['disp']['qmin'], 8, [t, 'total disp Qmin'])
t_is(-sum(gen[ld, QMAX]), total['disp']['qmax'], 8, [t, 'total disp Qmax'])
##----- 3 zones, area scale factors -----
t = 'area fixed loads (PQ) * [3 2 1] : '
load = array([3, 2, 1])
bus, _ = scale_load(load, ppc['bus'])
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area fixed loads (P) * [3 2 1] : '
load = array([3, 2, 1])
opt = {'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (PQ) * [3 2 1] : '
bus, gen = scale_load(load, ppc['bus'], ppc['gen'])
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), load[k] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), load[k] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (P) * [3 2 1] : '
opt = {'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (PQ) * [3 2 1] : '
opt = {'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), load[k] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), load[k] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (P) * [3 2 1] : '
opt = {'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
##----- 3 zones, area scale quantities -----
t = 'area fixed loads (PQ) => total = [100 80 60] : '
load = array([100, 80, 60], float)
opt = {'scale': 'QUANTITY'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] / area[k]['fixed']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'scale': 'QUANTITY', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] - area[k]['disp']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), (load[k] - area[k]['disp']['p']) / area[k]['fixed']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area fixed loads (P) => total = [100 80 60] : '
load = array([100, 80, 60], float)
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, _ = scale_load(load, ppc['bus'], None, None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'FIXED'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k]-area[k]['disp']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (PQ) => total = [100 80 60] : '
opt = {'scale': 'QUANTITY'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), load[k] / area[k]['both']['p'] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'all area loads (P) => total = [100 80 60] : '
opt = {'scale': 'QUANTITY', 'pq': 'P'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), load[k] / area[k]['both']['p'] * area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k] / area[k]['both']['p'] * area[k]['disp']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (PQ) => total = [100 80 60] : throws expected exception'
load = array([100, 80, 60], float)
opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
err = 0
try:
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
except ScalingError as e:
expected = 'scale_load: impossible to make zone 2 load equal 80 by scaling non-existent dispatchable load'
err = expected not in str(e)
t_ok(err, t)
t = 'area disp loads (PQ) => total = [100 74.3941 60] : '
load = array([100, area[1]['fixed']['p'], 60], float)
opt = {'scale': 'QUANTITY', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k]-area[k]['fixed']['p'], 8, '%s area %d disp P' % (t, k))
if k == 1:
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
else:
t_is(-sum(gen[lda[k], QMIN]), (load[k] - area[k]['fixed']['p']) / area[k]['disp']['p'] * area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), (load[k] - area[k]['fixed']['p']) / area[k]['disp']['p'] * area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
t = 'area disp loads (P) => total = [100 74.3941 60] : '
opt = {'scale': 'QUANTITY', 'pq': 'P', 'which': 'DISPATCHABLE'}
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], None, opt)
for k in range(len(load)):
t_is(sum(bus[a[k], PD]), area[k]['fixed']['p'], 8, '%s area %d fixed P' % (t, k))
t_is(sum(bus[a[k], QD]), area[k]['fixed']['q'], 8, '%s area %d fixed Q' % (t, k))
t_is(-sum(gen[lda[k], PMIN]), load[k]-area[k]['fixed']['p'], 8, '%s area %d disp P' % (t, k))
t_is(-sum(gen[lda[k], QMIN]), area[k]['disp']['qmin'], 8, '%s area %d disp Qmin' % (t, k))
t_is(-sum(gen[lda[k], QMAX]), area[k]['disp']['qmax'], 8, '%s area %d disp Qmax' % (t, k))
##----- explict single load zone -----
t = 'explicit single load zone'
load_zone = zeros(ppc['bus'].shape[0])
load_zone[[2, 3]] = 1
load = array([2.0])
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], load_zone)
Pd = ppc['bus'][:, PD]
Pd[[2, 3]] = load * Pd[[2, 3]]
t_is( bus[:, PD], Pd, 8, t)
##----- explict multiple load zone -----
t = 'explicit multiple load zone'
load_zone = zeros(ppc['bus'].shape[0])
load_zone[[2, 3]] = 1
load_zone[[6, 7]] = 2
load = array([2, 0.5])
bus, gen = scale_load(load, ppc['bus'], ppc['gen'], load_zone)
Pd = ppc['bus'][:, PD]
Pd[[2, 3]] = load[0] * Pd[[2, 3]]
Pd[[6, 7]] = load[1] * Pd[[6, 7]]
t_is( bus[:, PD], Pd, 8, t)
t_end()
if __name__ == '__main__':
t_scale_load(quiet=False)
| bsd-3-clause | 6,540,660,341,247,444,000 | 55.544987 | 161 | 0.496636 | false |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/tools/rosgraph/test/test_rosenv.py | 1 | 2781 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
def test_vars():
import rosgraph.rosenv
assert 'ROS_MASTER_URI' == rosgraph.rosenv.ROS_MASTER_URI
assert rosgraph.rosenv.ROS_IP == 'ROS_IP'
assert rosgraph.rosenv.ROS_HOSTNAME == 'ROS_HOSTNAME'
assert rosgraph.rosenv.ROS_NAMESPACE == 'ROS_NAMESPACE'
def test_get_master_uri():
from rosgraph.rosenv import get_master_uri
val = get_master_uri()
if 'ROS_MASTER_URI' in os.environ:
assert val == os.environ['ROS_MASTER_URI']
# environment override
val = get_master_uri(env=dict(ROS_MASTER_URI='foo'))
assert val == 'foo'
# argv override precedence, first arg wins
val = get_master_uri(env=dict(ROS_MASTER_URI='foo'), argv=['__master:=bar', '__master:=bar2'])
assert val == 'bar'
# empty env
assert None == get_master_uri(env={})
# invalid argv
try:
val = get_master_uri(argv=['__master:='])
assert False, "should have failed"
except ValueError:
pass
# invalid argv
try:
val = get_master_uri(argv=['__master:=foo:=bar'])
assert False, "should have failed"
except ValueError:
pass
| bsd-3-clause | -1,259,702,739,431,144,000 | 35.116883 | 98 | 0.705142 | false |
possnfiffer/py-emde | py-emde-wind-direction.py | 1 | 5361 | ndata = []
with open('data/winddir_20160202.dat') as f:
for line in f:
ndata.append(line.split(" "))
flist = []
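# Keep only the 15-minute samples and map each wind direction in degrees onto
# a 16-point compass label, building one fixed-format DAVAD/ZZCOUCAR report
# line per sample (station id 32534); all other observation fields stay 'X'.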
for l in ndata:
if l[4].endswith('00') or l[4].endswith('15') or l[4].endswith('30') or l[4].endswith('45'):
if float(l[-1].strip()) == 0 or float(l[-1].strip()) == 360:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'N'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
# NNE = 22.5º
elif float(l[-1].strip()) > 0 and float(l[-1].strip()) <= 22.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NNE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#NE = 45º
elif float(l[-1].strip()) > 22.5 and float(l[-1].strip()) <= 45:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#ENE = 67.5º
elif float(l[-1].strip()) > 45 and float(l[-1].strip()) <= 67.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'ENE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#E = 90º
elif float(l[-1].strip()) > 67.5 and float(l[-1].strip()) <= 90:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'E'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#ESE = 112.5º
elif float(l[-1].strip()) > 90 and float(l[-1].strip()) <= 112.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'ESE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SE = 135º
elif float(l[-1].strip()) > 112.5 and float(l[-1].strip()) <= 135:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SSE = 157.5º
elif float(l[-1].strip()) > 135 and float(l[-1].strip()) <= 157.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SSE'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#S = 180º
elif float(l[-1].strip()) > 157.5 and float(l[-1].strip()) <= 180:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'S'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SSW = 202.5º
elif float(l[-1].strip()) > 180 and float(l[-1].strip()) <= 202.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SSW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#SW = 225º
elif float(l[-1].strip()) > 202.5 and float(l[-1].strip()) <= 225:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'SW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#WSW = 247.5º
elif float(l[-1].strip()) > 225 and float(l[-1].strip()) <= 247.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'WSW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#W = 270º
elif float(l[-1].strip()) > 247.5 and float(l[-1].strip()) <= 270:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'W'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#WNW = 292.5º
elif float(l[-1].strip()) > 270 and float(l[-1].strip()) <= 292.5:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'WNW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#NW = 315º
elif float(l[-1].strip()) > 292.5 and float(l[-1].strip()) <= 315:
flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
#NNW = 337.5º
        elif float(l[-1].strip()) > 315 and float(l[-1].strip()) <= 337.5:
            flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'NNW'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
        #N = 360º
        elif float(l[-1].strip()) > 337.5 and float(l[-1].strip()) < 360:
            flist.append('DAVAD ZZCOUCAR SITE_ID:32534' + ' ' + l[2]+ l[0]+ l[1]+ l[3]+ l[4]+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'N'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X'+ ' ' + 'X')
fstring = ''
for l in flist:
fstring += l + ' '
data = '//AA ' + fstring + '//ZZ'
from sparkpost import SparkPost
# Send email using the SparkPost api
sp = SparkPost() # uses environment variable named SPARKPOST_API_KEY
response = sp.transmission.send(
recipients=['[email protected]'],
bcc=['[email protected]'],
text=data,
from_email='[email protected]',
subject='DATA'
)
print(response)
| bsd-2-clause | 7,669,188,134,167,574,000 | 68.428571 | 205 | 0.378601 | false |
eLvErDe/methlab | pymethlab/updatehelper.py | 1 | 1970 | # methlab - A music library application
# Copyright (C) 2007 Ingmar K. Steen ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__all__ = ['UpdateHelper']
import threading
class UpdateHelper:
def __init__(self, db, scanner_class):
self.db = db
self.scanner_class = scanner_class
self.scanner = None
self.lock = threading.Lock()
self.stop_flag = threading.Event()
self.stopped_flag = threading.Event()
self.stopped_flag.set()
def set_scanner_class(self, scanner_class):
self.lock.acquire()
if self.scanner:
self.lock.release()
self.stop()
self.lock.acquire()
self.scanner_class = scanner_class
self.lock.release()
def stop(self):
self.stop_flag.set()
self.stopped_flag.wait()
def update(self, callback):
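    # Run one library scan on a background thread. Returns False if a scan is
    # already in progress; otherwise the stop/stopped flags are reset so that
    # stop() and set_scanner_class() can interrupt the scanner and wait for it.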
def run_scanner():
self.scanner.update()
self.lock.acquire()
self.scanner = None
self.lock.release()
self.stopped_flag.set()
callback()
if not self.stopped_flag.isSet():
return False
self.lock.acquire()
self.stopped_flag.clear()
self.stop_flag.clear()
self.scanner = self.scanner_class(self.db, lambda: not self.stop_flag.isSet())
threading.Thread(target = run_scanner).start()
self.lock.release()
return True
| gpl-2.0 | -6,294,964,913,357,571,000 | 30.774194 | 82 | 0.682741 | false |
sugarsweetrobotics/wasanbon | wasanbon/core/plugins/admin/wsconverter_plugin/host/outport_converter.py | 1 | 2057 | import os, sys
from common_converter import *
_template = """
import yaml, traceback
import RTC
import OpenRTM_aist
_data = $CONSTRUCTOR
_port = OpenRTM_aist.OutPort("$NAME", _data)
def convert(data, d_list):
it = iter(d_list)
$CODE
print 'converted:', data
return data
def _sendData(d_list):
convert(_data, d_list)
_port.write()
def execute(comp, webSocketSender):
comp.addOutPort("$NAME", _port)
webSocketSender.outports[u"$NAME"] = _sendData
"""
def create_outport_converter_module(parser, name, typename, verbose=False):
module_dir = 'modules'
if not os.path.isdir(module_dir):
os.mkdir(module_dir)
global_module = parser.global_module
typs = global_module.find_types(typename)
if len(typs) == 0:
print 'Invalid Type Name (%s)' % typename
raise InvalidDataTypeException()
module_name = typs[0].parent.name
copy_idl_and_compile(parser, typs[0].filepath)
filename = '%s_OutPort_%s.py' % (name, typename.replace('::', '_').strip())
f = open(os.path.join(module_dir, filename), 'w')
import value_dic as vd
value_dic = vd.generate_value_dic(global_module, typename, root_name='data', verbose=verbose)
#if verbose:
# print '-------value-------'
# import yaml
# print yaml.dump(value_dic, default_flow_style=False)
#import inport_converter as ip
global _template
output = "%s" % _template
code = create_fromlist_converter(value_dic, list_name='d_list', indent = ' ')
if verbose:
print '------data to list-----'
print code
output = output.replace('$NAME', name)
typs = global_module.find_types(typename)
output = output.replace('$CONSTRUCTOR', parser.generate_constructor_python(typs[0]))
output = output.replace('$CODE', code)
#import outport_converter as op
#code = op.create_converter(value_dic)
#print '------list to data-----'
#print code
output = 'import %s\n' % module_name + output
f.write(output)
f.close()
| gpl-3.0 | -3,378,357,359,583,994,400 | 26.065789 | 97 | 0.628099 | false |
spmjc/plugin.video.freplay | resources/lib/channels/rmcd.py | 1 | 1233 | #-*- coding: utf-8 -*-
import urllib2
import re
import CommonFunctions
common = CommonFunctions
from resources.lib import utils
from resources.lib import globalvar
import json
title=['RMC Decouverte']
img=['rmcd']
readyForUse=True
url_video_json='https://edge.api.brightcove.com/playback/v1/accounts/%s/videos/%s'
def list_shows(channel,page):
shows = []
shows.append([channel,'empty', 'Toutes les videos','','shows'])
return shows
def list_videos(channel,page):
videos=[]
filePath=utils.downloadCatalog('http://rmcdecouverte.bfmtv.com/mediaplayer-replay/' ,'rmcd.html',False,{})
html=open(filePath).read().replace('\n', ' ').replace('\r', '')
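    # Each replay video is published as a <figure> block in the page; the regex
    # below pulls out its link, title and lazy-loaded thumbnail (data-original).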
match = re.compile(r'<figure class="figure modulx1-5-inside-bloc">(.*?)<a href="(.*?)" title="(.*?)">(.*?)data-original="(.*?)" alt=',re.DOTALL).findall(html)
for a,url,title,b,img in match:
title=utils.formatName(title)
infoLabels = {"Title": title.encode('utf-8')}
videos.append( [channel, url.replace('\t','').encode('utf-8') , title.encode('utf-8') , img.encode('utf-8'),infoLabels,'play'] )
return videos
def getVideoURL(channel,url):
url='http://rmcdecouverte.bfmtv.com' + url
return utils.getExtURL(url)
| gpl-2.0 | 8,747,088,300,629,931,000 | 32.324324 | 161 | 0.664234 | false |
jplana/python-etcd | setup.py | 1 | 1239 | from setuptools import setup, find_packages
import sys, os
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.rst')).read()
NEWS = open(os.path.join(here, 'NEWS.txt')).read()
version = '0.4.5'
install_requires = [
'urllib3>=1.7.1',
'dnspython>=1.13.0'
]
test_requires = [
'mock',
'nose',
'pyOpenSSL>=0.14'
]
setup(
name='python-etcd',
version=version,
description="A python client for etcd",
long_description=README + '\n\n' + NEWS,
classifiers=[
"Topic :: System :: Distributed Computing",
"Topic :: Software Development :: Libraries",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Database :: Front-Ends",
],
keywords='etcd raft distributed log api client',
author='Jose Plana',
author_email='[email protected]',
url='http://github.com/jplana/python-etcd',
license='MIT',
packages=find_packages('src'),
package_dir = {'': 'src'},
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=test_requires,
test_suite='nose.collector',
)
| mit | 6,686,755,014,281,602,000 | 25.361702 | 54 | 0.619048 | false |
GreenGear5/planet-wars | bots/ml-rfc/ml-rfc.py | 1 | 4276 | #!/usr/bin/env python
"""
Uses the Random Forest classifier
"""
from api import State, util
import random, os
from sklearn.externals import joblib
DEFAULT_MODEL = os.path.dirname(os.path.realpath(__file__)) + '/model.pkl'
class Bot:
__max_depth = -1
__randomize = True
__model = None
def __init__(self, randomize=True, depth=12, model_file=DEFAULT_MODEL):
print(model_file)
self.__randomize = randomize
self.__max_depth = depth
# Load the model
self.__model = joblib.load(model_file)
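        # The pickled model is assumed to be a scikit-learn classifier trained
        # offline on the feature vectors produced by features() below, with the
        # class labels 'won' and 'lost' used by heuristic().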
def get_move(self, state):
val, move = self.value(state)
return move
def value(self, state, alpha=float('-inf'), beta=float('inf'), depth=0):
"""
Return the value of this state and the associated move
:param state:
:param alpha: The highest score that the maximizing player can guarantee given current knowledge
:param beta: The lowest score that the minimizing player can guarantee given current knowledge
:param depth: How deep we are in the tree
:return: val, move: the value of the state, and the best move.
"""
if state.finished():
return (1.0, None) if state.winner() == 1 else (-1.0, None)
if depth == self.__max_depth:
return self.heuristic(state), None
best_value = float('-inf') if maximizing(state) else float('inf')
best_move = None
moves = state.moves()
if self.__randomize:
random.shuffle(moves)
for move in moves:
next_state = state.next(move)
value, m = self.value(next_state, alpha, beta, depth + 1)
if maximizing(state):
if value > best_value:
best_value = value
best_move = move
alpha = best_value
else:
if value < best_value:
best_value = value
best_move = move
beta = best_value
# Prune the search tree
# We know this state will never be chosen, so we stop evaluating its children
if alpha < beta:
break
return best_value, best_move
def heuristic(self, state):
# Convert the state to a feature vector
feature_vector = [features(state)]
# These are the classes: ('won', 'lost')
classes = list(self.__model.classes_)
# Ask the model for a prediction
# This returns a probability for each class
prob = self.__model.predict_proba(feature_vector)[0]
# print prob
# print('{} {} {}'.format(classes, prob, util.ratio_ships(state, 1)))
# Weigh the win/loss outcomes (-1 and 1) by their probabilities
res = -1.0 * prob[classes.index('lost')] + 1.0 * prob[classes.index('won')]
return res
def maximizing(state):
"""
Whether we're the maximizing player (1) or the minimizing player (2).
:param state:
:return:
"""
return state.whose_turn() == 1
def features(state):
# type: (State) -> tuple[float, ...]
"""
Extract features from this state. Remember that every feature vector returned should have the same length.
:param state: A state to be converted to a feature vector
:return: A tuple of floats: a feature vector representing this state.
"""
my_id = state.whose_turn()
opponent_id = 1 if my_id == 0 else 0
# How many ships does p1 have in garrisons?
p1_garrisons = 0.0
# How many ships does p2 have in garrisons?
p2_garrisons = 0.0
p1_planets = 0
p2_planets = 0
for planet in state.planets(my_id):
p1_garrisons += state.garrison(planet)
p1_planets += 1
for planet in state.planets(opponent_id):
p2_garrisons += state.garrison(planet)
p2_planets += 1
# How many ships does p1 have in fleets?
p1_fleets = 0.0
# How many ships does p2 have in fleets?
p2_fleets = 0.0
for fleet in state.fleets():
if fleet.owner() == my_id:
            p1_fleets += fleet.size()
else:
p2_fleets += fleet.size()
return p1_garrisons, p2_garrisons, p1_fleets, p2_fleets, p1_planets, p2_planets
| mit | -1,888,951,242,847,442,400 | 27.697987 | 110 | 0.58232 | false |
Flavsditz/projects | eyeTracking/pupil/pupil_src/capture/recorder.py | 1 | 4963 | import os, sys
import cv2
import atb
import numpy as np
from plugin import Plugin
from time import strftime,localtime,time,gmtime
from ctypes import create_string_buffer
from git_version import get_tag_commit
class Recorder(Plugin):
"""Capture Recorder"""
def __init__(self, session_str, fps, img_shape, shared_record, eye_tx):
Plugin.__init__(self)
self.session_str = session_str
self.base_path = os.path.join(os.path.abspath(__file__).rsplit('pupil_src', 1)[0], "recordings")
self.shared_record = shared_record
self.frame_count = 0
self.timestamps = []
self.eye_tx = eye_tx
self.start_time = time()
# set up base folder called "recordings"
try:
os.mkdir(self.base_path)
except:
print "recordings folder already exists, using existing."
session = os.path.join(self.base_path, self.session_str)
try:
os.mkdir(session)
except:
print "recordings session folder already exists, using existing."
# set up self incrementing folder within session folder
counter = 0
while True:
self.path = os.path.join(self.base_path, session, "%03d/" % counter)
try:
os.mkdir(self.path)
break
except:
print "We dont want to overwrite data, incrementing counter & trying to make new data folder"
counter += 1
self.meta_info_path = os.path.join(self.path, "info.csv")
with open(self.meta_info_path, 'w') as f:
f.write("Pupil Recording Name:\t"+self.session_str+ "\n")
f.write("Start Date: \t"+ strftime("%d.%m.%Y", localtime(self.start_time))+ "\n")
f.write("Start Time: \t"+ strftime("%H:%M:%S", localtime(self.start_time))+ "\n")
video_path = os.path.join(self.path, "world.avi")
self.writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), fps, (img_shape[1], img_shape[0]))
self.height = img_shape[0]
self.width = img_shape[1]
        # pass the recording path to the eye process
self.shared_record.value = True
self.eye_tx.send(self.path)
atb_pos = (10, 540)
self._bar = atb.Bar(name = self.__class__.__name__, label='REC: '+session_str,
help="capture recording control", color=(220, 0, 0), alpha=150,
text='light', position=atb_pos,refresh=.3, size=(300, 80))
self._bar.rec_name = create_string_buffer(512)
self._bar.add_var("rec time",self._bar.rec_name, getter=lambda: create_string_buffer(self.get_rec_time_str(),512), readonly=True)
self._bar.add_button("stop", self.stop_and_destruct, key="s", help="stop recording")
self._bar.define("contained=true")
def get_rec_time_str(self):
rec_time = gmtime(time()-self.start_time)
return strftime("%H:%M:%S", rec_time)
def update(self, frame):
self.frame_count += 1
self.timestamps.append(frame.timestamp)
self.writer.write(frame.img)
def stop_and_destruct(self):
try:
camera_matrix = np.load("camera_matrix.npy")
dist_coefs = np.load("dist_coefs.npy")
cam_path = os.path.join(self.path, "camera_matrix.npy")
dist_path = os.path.join(self.path, "dist_coefs.npy")
np.save(cam_path, camera_matrix)
np.save(dist_path, dist_coefs)
except:
print "no camera intrinsics found, will not copy them into recordings folder"
timestamps_path = os.path.join(self.path, "timestamps.npy")
np.save(timestamps_path,np.array(self.timestamps))
try:
with open(self.meta_info_path, 'a') as f:
f.write("Duration Time: \t"+ self.get_rec_time_str()+ "\n")
f.write("World Camera Frames: \t"+ str(self.frame_count)+ "\n")
f.write("World Camera Resolution: \t"+ str(self.width)+"x"+str(self.height)+"\n")
f.write("Capture Software Version: \t"+ get_tag_commit()+ "\n")
f.write("user:\t"+os.getlogin()+"\n")
try:
sysname, nodename, release, version, machine = os.uname()
except:
sysname, nodename, release, version, machine = sys.platform,None,None,None,None
f.write("Platform:\t"+sysname+"\n")
f.write("Machine:\t"+nodename+"\n")
f.write("Release:\t"+release+"\n")
f.write("Version:\t"+version+"\n")
except:
print "Could not save metadata. Please report this bug!"
print "Stopping recording"
self.shared_record.value = False
self.alive = False
def __del__(self):
"""incase the plugin get deleted while recording
"""
self.stop_and_destruct()
def get_auto_name():
return strftime("%Y_%m_%d", localtime())
| gpl-2.0 | -8,185,987,220,556,389,000 | 39.024194 | 137 | 0.576667 | false |
li282886931/apistore | server/controllers/novelhandler.py | 1 | 8379 | # -*- coding: utf-8 -*-
import json
import datetime
import urllib
import hashlib
import tornado.gen
import tornado.web
import tornado.httpclient
from controllers.basehandler import BaseHandler
from models.novelmodel import NovelModel
import utils
from utils import json_success, json_failed, cache_error
import const
from settings import BASEURL, appsecret, NOVELSEARCH
class Novel(BaseHandler):
def initialize(self):
super(Novel, self).initialize()
self.novel = NovelModel()
def get(self):
return self.post()
class GetTagList(Novel):
"""获取小说分类"""
@cache_error
@utils.checkSign
def post(self):
first = self.get_argument("first", None)
second = self.get_argument("second", None)
if first == None and second == None:
tag_list = self.novel.loadAllTag()
elif first != None and second == None:
tag_list = self.novel.loadAllSecondTag(first)
elif first == None and second != None:
tag_list = self.novel.loadAllFirstTag(second)
else:
tag_list = self.novel.loadFirstSecondTag(first, second)
result = [{'first': v['first'], 'second': v['id'], 'name': v['second']} for v in tag_list]
self.write(json_success(result))
class GetNovelList(Novel):
"""获取某分类下的小说列表"""
@cache_error
@utils.checkSign
def post(self):
first = self.get_argument("first", None)
second = self.get_argument("second", None)
page = self.get_argument("page", None)
page = 1 if not page else int(page)
limit = self.get_argument("limit", None)
limit = const.NOVEL_LIMIT if not limit else int(limit)
if not first or not second:
raise ValueError(1)
else:
novel_list = self.novel.loadNovelList(first, second, page, limit)
result = [{'novelid': v['id'],
'title': v['title'],
'novelpv': v['novelpv'],
'author': v['author'],
'introduction': "".join(v['introduction'].split()),
'picture': "/static/spider/" + v['picture']} for v in novel_list]
self.write(json_success(result))
class GetNovelIntroduction(Novel):
"""获取小说简介"""
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
if not novelid:
raise ValueError(1)
else:
intro = self.novel.loadNovelIntroduction(int(novelid))
if intro.__len__() != 1:
raise ValueError(500)
else:
result = {
'title': intro[0]['title'],
'novelid': intro[0]['id'],
'author': intro[0]['author'],
'picture': "/static/spider/"+intro[0]['picture'],
'introduction': "".join(intro[0]['introduction'].split()),
}
self.write(json_success(result))
class GetNovelChapter(Novel):
"""获取小说的章节列表"""
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
if not novelid:
raise ValueError(401)
else:
chapter_list = self.novel.loadNovelChapter(int(novelid))
result = [{'subtitle': v['subtitle'], 'chapter': i, 'chapterid': v['id']} for i, v in enumerate(chapter_list, 1)]
self.write(json_success(result))
class GetNovelContent(Novel):
"""获取小说的内容"""
@cache_error
@utils.checkSign
def post(self):
chapterid = self.get_argument("chapterid", None)
if not chapterid:
raise ValueError(401)
else:
c = self.novel.loadNovelContent(int(chapterid))
if len(c) != 1:
raise ValueError(500)
else:
result = {'title': c[0]['title'], 'subtitle': c[0]['subtitle'], 'novelid': c[0]['novelid'],
'content': c[0]['text'].encode("utf-8"), 'chapterid': c[0]['id'],
'prev': self.novel.loadPrevNext(int(c[0]['chapter']), int(c[0]['novelid']))[0],
'next': self.novel.loadPrevNext(int(c[0]['chapter']), int(c[0]['novelid']))[1]}
                # fetch the previous and next chapters
self.write(json_success(result))
class NovelClick(Novel):
"""计算小说点击数"""
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
novelid = int(novelid) if novelid else None
if not novelid:
raise ValueError(401)
else:
if self.novel.loadNovelIntroduction(novelid).__len__() != 1:
raise ValueError(406)
n = self.novel.addNovelPv(novelid)[0]
result = {'novelid': n['id'], 'novelpv': n['novelpv']}
self.write(json_success(result))
class GetNovelRank(Novel):
"""获取小说排名"""
@cache_error
@utils.checkSign
def post(self):
page = self.get_argument("page", None)
page = 1 if not page else int(page)
limit = self.get_argument("limit", None)
limit = const.NOVEL_LIMIT if not limit else int(limit)
novel_list = self.novel.loadNovelRank(page, limit)
result = [{
'novelid': v['novelid'],
'title': v['title'],
'introduction': "".join(v['introduction'].split()),
'novelpv': v['novelpv'],
'author': v['author'],
'first': v['first'],
'second': v['second'],
'picture': "/static/spider/" + v['picture'],
'rank': (page-1)*limit + i} for i, v in enumerate(novel_list, 1)]
self.write(json_success(result))
class NovelSearch(Novel):
"""获取小说的搜索结果"""
def initialize(self):
super(NovelSearch, self).initialize()
self.uri = NOVELSEARCH + "/search/"
self.method = "POST"
self.headers = self.request.headers
self.body = None
@cache_error
@utils.checkSign
@tornado.gen.coroutine
def post(self):
wd = self.get_argument("wd", None)
sign_method = self.get_argument("sign_method", None)
if not wd:
raise ValueError(401)
        # assemble the request body
data = {
"appid": self.appid,
"sign_method": sign_method,
"text": wd,
"docids": "0-" + str(self.novel.getNovelDocMaxId(self.appid)),
}
sign = utils.md5sign(appsecret, data)
data["sign"] = sign
self.body = urllib.urlencode(data)
resp = yield self.client()
result = []
try:
jsonret = json.loads(resp.body)
if jsonret["code"] == 200:
if len(jsonret["result"]["docs"]) != 0:
novellist = self.novel.getNovelListById(jsonret["result"]["docs"])
for v in novellist:
result.append({
'id': v['id'],
'picture': "/static/spider/" + v['picture'],
'introduction': "".join(v['introduction'].split()),
'title': v['title'],
'first': v['first'],
'second': v['second'],
'novelv': v['novelpv'],
'author': v['author'],
})
self.write(json_success(result))
else:
self.write(json_success([]))
else:
self.write(json_failed(int(jsonret["code"])))
except Exception as e:
self.write(json_failed(500))
class NovelDownload(Novel):
"""小说下载地址"""
def initialize(self):
super(NovelDownload, self).initialize()
@cache_error
@utils.checkSign
def post(self):
novelid = self.get_argument("novelid", None)
print novelid
if not novelid:
raise ValueError(401)
md5novelid = hashlib.md5(novelid).hexdigest()
self.write(json_success({"novelid": int(novelid), "novelsrc": BASEURL + "/static/novel/" + md5novelid + ".txt"}))
| gpl-2.0 | -1,168,748,543,770,488,600 | 33.103734 | 125 | 0.528045 | false |
goblinhack/MundusMeus | python/things/rock.py | 1 | 1729 | import tp
import mm
def thing_init(t):
return
def rock_init(name, short_name, tiles=[]):
x = tp.Tp(name)
x.set_is_shadow_caster_soft(True)
x.set_is_shadow_caster(True)
x.set_short_name(short_name)
x.set_is_movement_blocking(True)
x.set_is_rock(True)
x.set_z_depth(mm.Z_DEPTH_ROCK)
x.set_is_solid_ground(True)
x.set_blit_top_off(1)
x.set_blit_bot_off(1)
if tiles is not None:
for t in tiles:
x.set_tile(t, delay_ms=150)
else:
x.set_tile(tile=name, delay_ms=150)
x.thing_init = thing_init
def init():
rock_init(name="rock",
short_name="A boring rock",
tiles=[
"rock.1",
"rock.2",
"rock.3",
"rock.4",
"rock.5",
"rock.6",
"rock.7",
"rock.8",
"rock.9",
"rock.10",
"rock.11",
"rock.12",
"rock.13",
"rock.14",
"rock.15",
"rock.16",
"rock.17",
"rock.18",
"rock.19",
"rock.20",
"rock.21",
"rock.22",
"rock.23",
"rock.24",
"rock.25",
"rock.26",
"rock.27",
"rock.28",
"rock.29",
"rock.30",
"rock.31",
"rock.32",
])
init()
| lgpl-3.0 | -4,111,445,784,893,475,000 | 24.057971 | 43 | 0.338924 | false |
jeremi/couchdbkit | couchdbkit/ext/django/loading.py | 1 | 5298 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008-2009 Benoit Chesneau <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Maintain registry of documents used in your django project
and manage db sessions
"""
import sys
import os
import urllib
import urlparse
from couchdbkit import Server, contain, ResourceConflict
from couchdbkit.loaders import FileSystemDocLoader
from couchdbkit.resource import PreconditionFailed
from django.conf import settings
from django.db.models import signals, get_app
from django.core.exceptions import ImproperlyConfigured
from django.utils.datastructures import SortedDict
from restkit.httpc import HttpClient, BasicAuth
COUCHDB_DATABASES = getattr(settings, "COUCHDB_DATABASES", [])
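# Expected COUCHDB_DATABASES layout (illustrative values only, not part of the original settings):
#
#   COUCHDB_DATABASES = (
#       ('someapp', 'http://user:[email protected]:5984/someapp_db'),
#       ('otherapp', ('http://127.0.0.1:5984', 'otherapp_db')),  # (server uri, dbname) tuple form
#   )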
class CouchdbkitHandler(object):
""" The couchdbkit handler for django """
# share state between instances
__shared_state__ = dict(
_databases = {},
app_schema = SortedDict()
)
def __init__(self, databases, transport=None):
""" initialize couchdbkit handler with COUCHDB_DATABASES
settings """
self.__dict__ = self.__shared_state__
if transport is None:
self.transport = HttpClient()
# create databases sessions
for app_name, uri in databases:
if isinstance(uri, tuple):
# case when you want to specify server uri
# and database name specifically. usefull
# when you proxy couchdb on some path
server_part, dbname = uri
parts = urlparse.urlsplit(urllib.unquote(server_part))
else:
parts = urlparse.urlsplit(urllib.unquote(uri))
dbname = parts[2].split("/")[1]
if parts[0] != 'http' and parts[0] != 'https':
raise ValueError('Invalid dbstring')
if "@" in parts[1]:
server_parts = parts[1].split('@')
if ":" in server_parts[0]:
username, password = server_parts[0].split(":")
else:
username = server_parts[0]
password = ''
server_uri = "%s://%s" % (parts[0], server_parts[1])
else:
server_uri = '%s://%s' % (parts[0], parts[1])
username = password = ""
if username:
self.transport.add_authorization(BasicAuth(username, password))
server = Server(server_uri, self.transport)
app_label = app_name.split('.')[-1]
self._databases[app_label] = server[dbname]
def sync(self, app, verbosity=2):
""" used to sync views of all applications and eventually create
database """
app_name = app.__name__.rsplit('.', 1)[0]
app_label = app_name.split('.')[-1]
if app_label in self._databases:
if verbosity >=1:
print "sync `%s` in CouchDB" % app_name
db = self._databases[app_label]
try:
db.server.create_db(db.dbname)
except:
pass
app_path = os.path.abspath(os.path.join(sys.modules[app.__name__].__file__, ".."))
loader = FileSystemDocLoader(app_path, "_design", design_name=app_label)
loader.sync(db)
def get_db(self, app_label):
""" retrieve db session for a django application """
return self._databases[app_label]
def register_schema(self, app_label, *schema):
""" register a Document object"""
for s in schema:
            schema_name = s.__name__.lower()
schema_dict = self.app_schema.setdefault(app_label, SortedDict())
if schema_name in schema_dict:
fname1 = os.path.abspath(sys.modules[s.__module__].__file__)
fname2 = os.path.abspath(sys.modules[schema_dict[schema_name].__module__].__file__)
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
schema_dict[schema_name] = s
s._db = self.get_db(app_label)
def get_schema(self, app_label, schema_name):
""" retriev Document object from its name and app name """
return self.app_schema.get(app_label, SortedDict()).get(schema_name.lower())
couchdbkit_handler = CouchdbkitHandler(COUCHDB_DATABASES)
register_schema = couchdbkit_handler.register_schema
get_schema = couchdbkit_handler.get_schema
get_db = couchdbkit_handler.get_db
| isc | 7,504,999,183,328,596,000 | 39.442748 | 99 | 0.601737 | false |
insilichem/ommprotocol | ommprotocol/md.py | 1 | 30554 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ommprotocol: A command line application to launch
# MD protocols with OpenMM
# By Jaime RGP <@jaimergp>
"""
ommprotocol.md
--------------
All the logic that governs the simulation of a MD protocol,
stage by stage.
A protocol is a chained list of stages, which are Stage instances.
"""
# Stdlib
from __future__ import print_function, division
import os
import sys
from contextlib import contextmanager
import logging
# 3rd party
import numpy as np
from simtk import unit as u
from simtk import openmm as mm
from simtk.openmm import app
from mdtraj import Topology as MDTrajTopology
# Own
from .io import REPORTERS, ProgressBarReporter, SerializedReporter, prepare_system_options
from .utils import (random_string, assert_not_exists, timed_input,
available_platforms, warned_getattr)
logger = logging.getLogger(__name__)
OPENMM_VERSION = tuple(map(int, mm.__version__.split('.')))
if sys.version_info.major == 3:
basestring = str
###########################
# Defaults
###########################
FORCEFIELDS = ['amber99sbildn.xml', 'tip3p.xml']
SELECTORS = {
None: 'none',
'protein_no_H': 'protein and element != H',
'calpha': 'name == CA'
}
PRECISION = {
'CPU': None,
'CUDA': 'CudaPrecision',
'OpenCL': 'OpenCLPrecision'
}
SYSTEM_OPTIONS = {
'nonbondedMethod': app.NoCutoff,
}
DEFAULT_OPTIONS = {
'system_options': SYSTEM_OPTIONS,
}
def protocol(handler, cfg):
"""
Run all the stages in protocol
Parameters
----------
handler : SystemHandler
Container of initial conditions of simulation
cfg : dict
Imported YAML file.
"""
# Stages
if 'stages' not in cfg:
raise ValueError('Protocol must include stages of simulation')
pos, vel, box = handler.positions, handler.velocities, handler.box
stages = cfg.pop('stages')
for stage_options in stages:
options = DEFAULT_OPTIONS.copy()
options.update(cfg)
stage_system_options = prepare_system_options(stage_options)
options.update(stage_options)
options['system_options'].update(stage_system_options)
stage = Stage(handler, positions=pos, velocities=vel, box=box,
total_stages=len(stages), **options)
pos, vel, box = stage.run()
del stage
class Stage(object):
"""
Controls a simulation stage from a SystemHandler instance. It will handle
the actual OpenMM system and then the Simulation object. Integrators,
barostat and restraints are all easily handled too.
Using it is easy: instantiate with a SystemHandler object and then call
`run()`. However, you can also use it as an OpenMM high-level controller.
Parameters
----------
handler : simtk.openmm.Topology
The topology input file (PRMTOP, PDB)
positions : simtk.Quantity, optional
The starting coordinates of this stage. Only needed if
handler is a PRMTOP file.
steps : int, optional
Number of MD steps to simulate. If 0, no MD will take place
timestep : float, optional
Integration timestep, in fs. Defaults to 1.0.
forcefields : list of str or file-like, optional
Forcefields to apply in PDB inputs.
velocities : simtk.unit.Quantity, optional
The initial velocities of this stage. If None, they will be set
to the requested temperature
box_vectors : simtk.unit.Quantity, optional
Replacement periodic box vectors, instead of handler's.
barostat : bool, optional
True for NPT @ 1 atmosphere. False for NVT
restrained_atoms, constrained_atoms : str or None, optional
Parts of the system that should remain restrained or constrained
during the stage. Available values in SELECTORS dict.
If None, no atoms will be fixed.
distance_restrained_atoms : list of lists
Pairs of atom indices that will be distance restrained
distance_restraint_length : float or list of floats
Distances at which ``distance_restrained_atoms`` should be. It can be
a single value (all pairs will be restrained at this distance), or n
values, n being the number of pairs to be assigned to. If the value is
        'current', the distance found in the starting coordinates is used.
distance_restraint_strength : float or list of floats
Force constants for ``distance_restrained_atoms``. It can be
a single value (all pairs will be restrained at this distance), or n
values, n being the number of pairs to be assigned to.
minimization : bool, optional
If True, minimize before MD
minimization_tolerance : float, optional, default=10 kJ/mol
Threshold value minimization should converge to
minimization_max_iterations : int, optional, default=10000
Limit minimization iterations up to this value. If zero, don't limit.
temperature : float, optional
Target temperature of system in Kelvin, defaults to 300K
trajectory : 'PDB' or 'DCD', optional
Output format of trajectory file, if desired.
trajectory_every : int, optional
Frequency of trajectory write, in number of simulation steps
trajectory_new_every : int, optional
Create a new file for trajectory (only DCD) every n steps.
trajectory_atom_subset : int, optional
Save information for just these atoms (only DCD).
restart_every : int, optional
Frequencty of restart file creation. Defaults to 1E6 steps (1ns)
report_every : int, optional
Frequency of stdout print, in number of simulation steps
verbose : bool, optional
Whether to report information to stdout or not
project_name : str, optional
Name of the essay (common for several stages). If not set,
five random characters will be used.
name : str, optional
Name of the stage, used as a suffix for the output files generated
by this stage. If not supplied, a random string will be used.
output : str, optional
Location of output files. Working directory by default.
platform : str, optional
Which platform to use ('CPU', 'CUDA', 'OpenCL'). If not set,
OpenMM will choose the fastest available.
platform_properties : dict, optional
Additional options to be passed to the platform constructor.
system_options : dict, optional
Set of options to configure the system. See SYSTEM_OPTIONS dict
for defaults.
restraint_strength : float, optional
If restraints are in use, the strength of the applied force in
kJ/mol. Defaults to 5.0.
pressure : float, optional
Barostat pressure, in bar. Defaults to 1.01325.
integrator : simtk.openmm.Integrator, optional
Which integrator to use. Defaults to LangevinIntegrator.
friction : float, optional
Friction coefficient for LangevinIntegrator, in 1/ps. Defaults to 1.0.
barostat_interval : float, optional
Interval of steps at which barostat updates. Defaults to 25 steps.
save_state_at_end : bool, optional
Whether to create a state.xml file at the end of the stage or not.
attempt_rescue : bool, optional
Whether to try to generate an emergency state file if an exception
is raised.
total_stages : int, optional
"""
_PROJECTNAME = random_string(length=5)
_stage_index = [0]
def __init__(self, handler, positions=None, velocities=None, box=None,
steps=0, minimization=False, barostat=False, temperature=300,
timestep=1.0, pressure=1.01325, integrator='LangevinIntegrator',
barostat_interval=25, system_options=None, platform=None,
platform_properties=None, trajectory=None, trajectory_every=2000,
outputpath='.', trajectory_atom_subset=None, trajectory_new_every=0,
restart=None, restart_every=1000000, report=True, report_every=1000,
project_name=None, name=None, restrained_atoms=None,
restraint_strength=5, constrained_atoms=None, friction=1.0,
distance_restrained_atoms=None, distance_restraint_length=2,
distance_restraint_strength=5, total_stages=None, verbose=True,
minimization_tolerance=10, minimization_max_iterations=10000,
save_state_at_end=True, attempt_rescue=True,
**kwargs):
for k in kwargs:
if not k.startswith('_'):
logger.warning('Option %s not recognized!', k)
# System properties
self.handler = handler
self.positions = positions
self.velocities = velocities
self.box = box
self.system_options = system_options if system_options else {}
self.restrained_atoms = restrained_atoms
self.restraint_strength = restraint_strength
self.constrained_atoms = constrained_atoms
self.distance_restrained_atoms = np.reshape(distance_restrained_atoms, (-1, 2)) \
if distance_restrained_atoms else None
self.distance_restraint_length = distance_restraint_length
self.distance_restraint_strength = distance_restraint_strength
# Simulation conditions
self.steps = int(steps)
self.minimization = minimization
self.minimization_tolerance = minimization_tolerance
self.minimization_max_iterations = minimization_max_iterations
self.barostat = barostat
self.temperature = temperature
self.timestep = timestep
self.pressure = pressure
self._integrator_name = integrator
self.friction = friction
self.barostat_interval = int(barostat_interval)
# Hardware
self._platform = platform
self.platform_properties = {} if platform_properties is None else platform_properties
# Output parameters
self.project_name = project_name if project_name is not None else self._PROJECTNAME
self.name = name if name is not None else random_string(length=5)
self.outputpath = outputpath
self.verbose = verbose
self.trajectory = trajectory
self.trajectory_every = int(trajectory_every)
self.trajectory_new_every = int(trajectory_new_every)
self.trajectory_atom_subset = self.subset(trajectory_atom_subset) if trajectory_atom_subset else None
self.restart = restart
self.restart_every = int(restart_every)
self.report = report
self.report_every = int(report_every)
self.save_state_at_end = save_state_at_end
self.attempt_rescue = attempt_rescue
self.total_stages = total_stages
# Private attributes
self._system = None
self._simulation = None
self._integrator = None
self._progress_reporter = None
self._log_reporter = None
self._trajectory_reporter = None
self._restart_reporter = None
self._mass_options = {}
self._stage_index[0] += 1
def run(self):
"""
Launch MD simulation, which may consist of:
1. Optional minimization
2. Actual MD simulation, with n steps.
This method also handles reporters.
Returns
-------
positions, velocities : unit.Quantity([natoms, 3])
Position, velocity of each atom in the system
box : unit.Quantity([1, 3])
Periodic conditions box vectors
"""
if self.verbose:
status = '#{}'.format(self.stage_index)
if self.total_stages is not None:
status += '/{}'.format(self.total_stages)
status += ': {}'.format(self.name)
pieces = []
if self.restrained_atoms is not None:
pieces.append('restrained {}'.format(self.restrained_atoms))
if self.constrained_atoms is not None:
pieces.append('constrained {}'.format(self.constrained_atoms))
if self.distance_restrained_atoms is not None:
pieces.append('distance restrained for {} atom pairs'.format(len(self.distance_restrained_atoms)))
if pieces:
status += ' [{}]'.format(', '.join(pieces))
logger.info(status)
# Add forces
self.apply_restraints()
self.apply_constraints()
if self.barostat:
self.apply_barostat()
if self.minimization:
if self.verbose:
logger.info(' Minimizing...')
self.minimize()
uses_pbc = self.system.usesPeriodicBoundaryConditions()
if self.steps:
# Stdout progress
if self.report and self.progress_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.progress_reporter)
# Log report
if self.report and self.log_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.log_reporter)
# Trajectory / movie files
if self.trajectory and self.trajectory_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.trajectory_reporter)
# Checkpoint or restart files
if self.restart and self.restart_reporter not in self.simulation.reporters:
self.simulation.reporters.append(self.restart_reporter)
# MD simulation
if self.verbose:
pbc = 'PBC ' if uses_pbc else ''
conditions = 'NPT' if self.barostat else 'NVT'
logger.info(' Running {}MD for {} steps @ {}K, {}'.format(pbc, self.steps,
self.temperature,
conditions))
with self.handle_exceptions():
self.simulate()
if self.save_state_at_end:
path = self.new_filename(suffix='.state')
self.simulation.saveState(path)
# Save and return state
state = self.simulation.context.getState(getPositions=True, getVelocities=True,
enforcePeriodicBox=uses_pbc)
return state.getPositions(), state.getVelocities(), state.getPeriodicBoxVectors()
def minimize(self, tolerance=None, max_iterations=None):
"""
Minimize energy of the system until meeting `tolerance` or
performing `max_iterations`.
"""
if tolerance is None:
tolerance = self.minimization_tolerance
if max_iterations is None:
max_iterations = self.minimization_max_iterations
self.simulation.minimizeEnergy(tolerance * u.kilojoules_per_mole, max_iterations)
def simulate(self, steps=None):
"""
Advance simulation n steps
"""
if steps is None:
steps = self.steps
self.simulation.step(steps)
@property
def system(self):
if self._system is None:
if self.constrained_atoms and self.system_options.pop('constraints', None):
logger.warning(' Warning: `constraints` and `constrained_atoms` are incompatible. '
'Removing `constraints` option for this stage.')
self._system = self.handler.create_system(**self.system_options)
return self._system
@system.deleter
def system(self):
del self._system
self._system = None
@property
def simulation(self):
if self._simulation is None:
platform = self.platform
try:
sim = self._simulation = app.Simulation(self.handler.topology, self.system,
self.integrator, *platform)
except Exception as e:
template = '{}. Try with: {}.'
if 'Illegal property name' in str(e):
msg = template.format(e, ', '.join(platform[0].getPropertyNames()))
raise ValueError(msg)
elif 'There is no registered Platform' in str(e):
msg = template.format(e, ', '.join(available_platforms()))
raise ValueError(msg)
raise e
# Box vectors
box = self.box if self.box is not None else self.handler.box
if box is not None:
sim.context.setPeriodicBoxVectors(*box)
# Positions
pos = self.positions if self.positions is not None else self.handler.positions
if pos is None:
raise ValueError('Positions must be set to start a simulation.')
sim.context.setPositions(pos)
# Velocities
vel = self.velocities if self.velocities is not None else self.handler.velocities
if vel is not None:
sim.context.setVelocities(vel)
else:
sim.context.setVelocitiesToTemperature(self.temperature*u.kelvin)
return self._simulation
@simulation.deleter
def simulation(self):
del self._simulation
self._simulation = None
@property
def integrator(self):
if self._integrator is None:
try:
i = getattr(mm, self._integrator_name)
except (TypeError, AttributeError):
raise NotImplementedError('Integrator {} not found'
.format(self._integrator_name))
self._integrator = i(self.temperature * u.kelvin,
self.friction / u.picoseconds,
self.timestep * u.femtoseconds)
return self._integrator
@integrator.deleter
def integrator(self):
del self._integrator
self._integrator = None
@property
def platform(self):
if self._platform is None:
return None,
platform = mm.Platform.getPlatformByName(self._platform)
if self._platform.upper() == 'CUDA' and OPENMM_VERSION < (7, 2, 3) \
and 'DisablePmeStream' not in self.platform_properties:
self.platform_properties['DisablePmeStream'] = 'true'
# Patch to allow env-defined GPUs
device = self.platform_properties.get('DeviceIndex', '')
if str(device).startswith('ENV_'):
envvar = os.environ.get(device[4:], None)
if envvar is not None:
logger.warning('Setting DeviceIndex from env var %s to %s', device[4:], envvar)
self.platform_properties['DeviceIndex'] = envvar
return platform, self.platform_properties
def reporter(self, name):
try:
return REPORTERS[name.upper()]
except KeyError:
raise NotImplementedError('Reporter {} not found'.format(name))
def apply_barostat(self):
if not self.system.usesPeriodicBoundaryConditions():
raise ValueError('Barostat can only be used with PBC conditions.')
self.system.addForce(mm.MonteCarloBarostat(self.pressure*u.bar,
self.temperature*u.kelvin,
self.barostat_interval))
def apply_constraints(self):
if self.constrained_atoms is not None:
indices = self.subset(self.constrained_atoms)
system = self.system
for i in indices:
system.setParticleMass(int(i), 0.0)
def apply_restraints(self):
if self.restrained_atoms:
indices = self.subset(self.restrained_atoms)
r_force = self.restraint_force(indices, self.restraint_strength)
self.system.addForce(r_force)
if self.distance_restrained_atoms is not None:
atoms = self.distance_restrained_atoms
if isinstance(self.distance_restraint_length, (int, float, basestring)):
distances = [self.distance_restraint_length] * atoms.shape[0]
elif len(self.distance_restraint_length) == atoms.shape[0]:
distances = self.distance_restraint_length
else:
raise ValueError('Restraint distances do not match '
'number of distance restrained pairs.')
if isinstance(self.distance_restraint_strength, (int, float)):
strengths = [self.distance_restraint_strength] * atoms.shape[0]
elif len(self.distance_restraint_strength) == atoms.shape[0]:
strengths = self.distance_restraint_strength
else:
raise ValueError('Restraint distance strengths do not '
'match number of distance restrained pairs.')
d_force = self.distance_restraint_force(atoms, distances, strengths)
self.system.addForce(d_force)
def restraint_force(self, indices=None, strength=5.0):
"""
Force that restrains atoms to fix their positions, while allowing
tiny movement to resolve severe clashes and so on.
Returns
-------
force : simtk.openmm.CustomExternalForce
A custom force to restrain the selected atoms
"""
if self.system.usesPeriodicBoundaryConditions():
expression = 'k*periodicdistance(x, y, z, x0, y0, z0)^2'
else:
expression = 'k*((x-x0)^2 + (y-y0)^2 + (z-z0)^2)'
force = mm.CustomExternalForce(expression)
force.addGlobalParameter('k', strength*u.kilocalories_per_mole/u.angstroms**2)
force.addPerParticleParameter('x0')
force.addPerParticleParameter('y0')
force.addPerParticleParameter('z0')
positions = self.positions if self.positions is not None else self.handler.positions
if indices is None:
indices = range(self.handler.topology.getNumAtoms())
for index in indices:
force.addParticle(int(index), positions[index].value_in_unit(u.nanometers))
return force
def distance_restraint_force(self, atoms, distances, strengths):
"""
Parameters
----------
atoms : tuple of tuple of int or str
Pair of atom indices to be restrained, with shape (n, 2),
like ((a1, a2), (a3, a4)). Items can be str compatible with MDTraj DSL.
distances : tuple of float
Equilibrium distances for each pair
strengths : tuple of float
Force constant for each pair
"""
system = self.system
force = mm.HarmonicBondForce()
force.setUsesPeriodicBoundaryConditions(self.system.usesPeriodicBoundaryConditions())
for pair, distance, strength in zip(atoms, distances, strengths):
indices = []
for atom in pair:
if isinstance(atom, str):
index = self.subset(atom)
if len(index) != 1:
raise ValueError('Distance restraint for selection `{}` returns != 1 atom!: {}'
.format(atom, index))
indices.append(int(index[0]))
elif isinstance(atom, (int, float)):
indices.append(int(atom))
else:
raise ValueError('Distance restraint atoms must be int or str DSL selections')
if distance == 'current':
                pos = self.positions if self.positions is not None else self.handler.positions
distance = np.linalg.norm(pos[indices[0]] - pos[indices[1]])
force.addBond(indices[0], indices[1], distance*u.nanometers,
strength*u.kilocalories_per_mole/u.angstroms**2)
return force
def subset(self, selector):
"""
Returns a list of atom indices corresponding to a MDTraj DSL
query. Also will accept list of numbers, which will be coerced
to int and returned.
"""
if isinstance(selector, (list, tuple)):
return map(int, selector)
selector = SELECTORS.get(selector, selector)
mdtop = MDTrajTopology.from_openmm(self.handler.topology)
return mdtop.select(selector)
@property
def system_mass(self):
system_mass = sum(a.element.mass._value for a in self.handler.topology.atoms())
return system_mass * u.dalton
@property
def progress_reporter(self):
if self._progress_reporter is None:
if os.environ.get('OMMPROTOCOL_SLAVE'):
rep = SerializedReporter(sys.stdout, self.report_every)
else:
rep = ProgressBarReporter(sys.stdout, self.report_every, total_steps=self.steps)
self._progress_reporter = rep
return self._progress_reporter
@progress_reporter.deleter
def progress_reporter(self):
try:
self.simulation.reporters.remove(self._progress_reporter)
except ValueError:
pass
self._progress_reporter = None
@property
def log_reporter(self):
if self._log_reporter is None:
mass = {'systemMass': self.system_mass} if self.constrained_atoms else {}
path = self.new_filename(suffix='.log')
rep = app.StateDataReporter(path, self.report_every, step=True,
potentialEnergy=True, kineticEnergy=True,
temperature=True, volume=True, progress=True,
remainingTime=True, speed=True,
totalSteps=self.steps, separator='\t', **mass)
self._log_reporter = rep
return self._log_reporter
@log_reporter.deleter
def log_reporter(self):
try:
self.simulation.reporters.remove(self._log_reporter)
except ValueError:
pass
self._log_reporter = None
@property
def trajectory_reporter(self):
if self._trajectory_reporter is None:
suffix = '.{}'.format(self.trajectory.lower())
path = self.new_filename(suffix=suffix)
options = {}
if self.trajectory == 'DCD':
options.update({'new_every': self.trajectory_new_every,
'atomSubset': self.trajectory_atom_subset})
rep = self.reporter(self.trajectory)(path, self.trajectory_every, **options)
self._trajectory_reporter = rep
return self._trajectory_reporter
@trajectory_reporter.deleter
def trajectory_reporter(self):
try:
self.simulation.reporters.remove(self._trajectory_reporter)
except ValueError:
pass
self._trajectory_reporter = None
@property
def restart_reporter(self):
if self._restart_reporter is None:
suffix = '.{}.{}'.format(self.restart.lower(), self.restart_every)
path, ext_or_int, n = self.new_filename(suffix=suffix).rsplit('.', 2)
try:
ext_or_int = int(ext_or_int) # Is ext an integer?
except ValueError: # Ext is the actual file extension
path = '{}.{}'.format(path, ext_or_int)
else: # Ext is an int! Reformat
name, ext = os.path.splitext(path)
path = '{}.{}{}'.format(name, ext_or_int, ext)
rep = self.reporter(self.restart)(path, self.restart_every)
self._restart_reporter = rep
return self._restart_reporter
@restart_reporter.deleter
def restart_reporter(self):
try:
self.simulation.reporters.remove(self._restart_reporter)
except ValueError:
pass
self._restart_reporter = None
@property
def stage_index(self):
return self._stage_index[0]
@stage_index.setter
def stage_index(self, value):
self._stage_index[0] = value
def new_filename(self, suffix='', prefix='', avoid_overwrite=True):
fn_template = '{prefix}{project}_{index:0{index_len}d}_{stage}{suffix}'
filename = fn_template.format(prefix=prefix, project=self.project_name,
index=self.stage_index, stage=self.name,
suffix=suffix,
index_len=len(str(self.total_stages)))
path = os.path.join(self.outputpath, filename)
if avoid_overwrite:
path = assert_not_exists(path)
return path
@contextmanager
def handle_exceptions(self, verbose=True):
"""
Handle Ctrl+C and accidental exceptions and attempt to save
the current state of the simulation
"""
try:
yield
except (KeyboardInterrupt, Exception) as ex:
if not self.attempt_rescue:
raise ex
if isinstance(ex, KeyboardInterrupt):
reraise = False
answer = timed_input('\n\nDo you want to save current state? (y/N): ')
if answer and answer.lower() not in ('y', 'yes'):
if verbose:
sys.exit('Ok, bye!')
else:
reraise = True
logger.error('\n\nAn error occurred: %s', ex)
if verbose:
logger.info('Saving state...')
try:
self.backup_simulation()
except Exception:
if verbose:
logger.error('FAILED :(')
else:
if verbose:
logger.info('SUCCESS!')
finally:
if reraise:
raise ex
sys.exit()
def backup_simulation(self):
"""
Creates an emergency report run, .state included
"""
path = self.new_filename(suffix='_emergency.state')
self.simulation.saveState(path)
uses_pbc = self.system.usesPeriodicBoundaryConditions()
state_kw = dict(getPositions=True, getVelocities=True,
getForces=True, enforcePeriodicBox=uses_pbc,
getParameters=True, getEnergy=True)
state = self.simulation.context.getState(**state_kw)
for reporter in self.simulation.reporters:
if not isinstance(reporter, app.StateDataReporter):
reporter.report(self.simulation, state)
| lgpl-3.0 | -3,036,808,844,007,292,000 | 40.233468 | 114 | 0.600445 | false |
tgibson37/tiny-c | cairopy.py | 1 | 2552 | import sys
import os
import cairo
import shlex
def comment(cmt):
cmt = cmt[:-1]
#print "in comment: ", cmt
def draw(x):
global ctx
'''
hard way: very long list of if-elif's
if x[0] == "moveto":
ctx.move_to( int(x[1]), int(x[2]) )
elif x[0] == "lineto":
ctx.line_to( int(x[1]), int(x[2]) )
ctx.stroke()
easy way: do ALL cmds with int or float only args:
given x = [name,1,2] produce string = "ctx.<name>(1, 2)"
then eval(string)
'''
cmd=x[0]
argslist=x[1:]
args= ','.join(argslist)
string = "ctx." + cmd + "(" + args + ")"
#print "in draw: ", string
eval(string)
def window(x):
global surface, ctx, width, height
width = int(x[1])
height= int(x[2])
#print "in window: ", x
surface = cairo.ImageSurface(cairo.FORMAT_RGB24, width, height)
ctx = cairo.Context(surface)
ctx.rectangle(0,0,width,height)
ctx.set_source_rgb(1,1,1)
ctx.fill()
ctx.set_source_rgb(0,0,0)
def show():
global surface
#print "in show: ", pngname
ctx.stroke()
surface.write_to_png(pngname)
from subprocess import call
if len(show_app)>0:
call([show_app, pngname])
def usage():
print("Usage: ", sys.argv[0], "drawfile")
# It all starts here
if len(sys.argv)>1:
drawname = sys.argv[1]
drawfile = sys.argv[1] + ".draw"
pngname = sys.argv[1] + ".png"
else:
usage()
sys.exit()
if os.name == "nt":
show_app = "mspaint.exe"
else:
show_app = "display"
M_PI = 3.14159265358979323846
with open(drawfile) as f:
for line in f:
x = shlex.split(line)
if ord(line[:1])==35: # decimal 35 is hash
comment(line)
elif x[0]=="arc":
r1 = float(x[1])
r2 = float(x[2])
r3 = float(x[3])
d4 = int(x[4])
r4 = float(d4*(M_PI/180))
d5 = int(x[5])
r5 = float(d5*(M_PI/180))
ctx.arc(r1,r2,r3,r4,r5)
elif x[0]=="arcneg":
r1 = float(x[1])
r2 = float(x[2])
r3 = float(x[3])
d4 = int(x[4])
r4 = float(d4*(M_PI/180))
d5 = int(x[5])
r5 = float(d5*(M_PI/180))
ctx.arc_negative(r1,r2,r3,r4,r5)
elif x[0]=="setdash":
#print ""
#print x
d = x[1:-1]
id = [float(x) for x in d]
st = float(x[-1])
#print "id:", id
#print "st:", st
ctx.set_dash(id,st)
elif x[0]=="setrgb":
r1 = float(x[1])/256
r2 = float(x[2])/256
r3 = float(x[3])/256
ctx.set_source_rgb(r1,r2,r3)
elif x[0]=="show":
if len(x)>1:
image_display=x[1]
show()
elif x[0]=="showapp":
if len(x)>1:
show_app = x[1]
else:
show_app = ""
elif x[0]=="showtext":
ctx.show_text(x[1])
elif x[0]=="window":
window(x)
else:
draw(x)
'''
elif x[0]=="":
ctx.
'''
| gpl-3.0 | 6,053,136,622,952,715,000 | 20.266667 | 64 | 0.574843 | false |
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/server/cli.py | 1 | 14441 | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import re
import runpy
import sys
# debugpy.__main__ should have preloaded pydevd properly before importing this module.
# Otherwise, some stdlib modules above might have had imported threading before pydevd
# could perform the necessary detours in it.
assert "pydevd" in sys.modules
import pydevd
import debugpy
from debugpy.common import compat, fmt, log
from debugpy.server import api
TARGET = "<filename> | -m <module> | -c <code> | --pid <pid>"
HELP = """debugpy {0}
See https://aka.ms/debugpy for documentation.
Usage: debugpy --listen | --connect
[<host>:]<port>
[--wait-for-client]
[--configure-<name> <value>]...
[--log-to <path>] [--log-to-stderr]
{1}
[<arg>]...
""".format(
debugpy.__version__, TARGET
)
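# Example invocations (illustrative only):
#   python -m debugpy --listen 5678 --wait-for-client myscript.py --some-arg
#   python -m debugpy --connect 127.0.0.1:5678 -m mypackage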
class Options(object):
mode = None
address = None
log_to = None
log_to_stderr = False
target = None # unicode
target_kind = None
wait_for_client = False
adapter_access_token = None
options = Options()
options.config = {"qt": "none", "subProcess": True}
def in_range(parser, start, stop):
def parse(s):
n = parser(s)
if start is not None and n < start:
raise ValueError(fmt("must be >= {0}", start))
if stop is not None and n >= stop:
raise ValueError(fmt("must be < {0}", stop))
return n
return parse
pid = in_range(int, 0, None)
def print_help_and_exit(switch, it):
print(HELP, file=sys.stderr)
sys.exit(0)
def print_version_and_exit(switch, it):
print(debugpy.__version__)
sys.exit(0)
def set_arg(varname, parser=(lambda x: x)):
def do(arg, it):
value = parser(next(it))
setattr(options, varname, value)
return do
def set_const(varname, value):
def do(arg, it):
setattr(options, varname, value)
return do
def set_address(mode):
def do(arg, it):
if options.address is not None:
raise ValueError("--listen and --connect are mutually exclusive")
# It's either host:port, or just port.
value = next(it)
host, sep, port = value.partition(":")
if not sep:
host = "127.0.0.1"
port = value
try:
port = int(port)
except Exception:
port = -1
if not (0 <= port < 2 ** 16):
raise ValueError("invalid port number")
options.mode = mode
options.address = (host, port)
return do
def set_config(arg, it):
prefix = "--configure-"
assert arg.startswith(prefix)
name = arg[len(prefix) :]
value = next(it)
if name not in options.config:
raise ValueError(fmt("unknown property {0!r}", name))
expected_type = type(options.config[name])
try:
if expected_type is bool:
value = {"true": True, "false": False}[value.lower()]
else:
value = expected_type(value)
except Exception:
raise ValueError(fmt("{0!r} must be a {1}", name, expected_type.__name__))
options.config[name] = value
def set_target(kind, parser=(lambda x: x), positional=False):
def do(arg, it):
options.target_kind = kind
target = parser(arg if positional else next(it))
if isinstance(target, bytes):
# target may be the code, so, try some additional encodings...
try:
target = target.decode(sys.getfilesystemencoding())
except UnicodeDecodeError:
try:
target = target.decode("utf-8")
except UnicodeDecodeError:
import locale
target = target.decode(locale.getpreferredencoding(False))
options.target = target
return do
# fmt: off
switches = [
# Switch Placeholder Action
# ====== =========== ======
# Switches that are documented for use by end users.
("-(\\?|h|-help)", None, print_help_and_exit),
("-(V|-version)", None, print_version_and_exit),
("--log-to" , "<path>", set_arg("log_to")),
("--log-to-stderr", None, set_const("log_to_stderr", True)),
("--listen", "<address>", set_address("listen")),
("--connect", "<address>", set_address("connect")),
("--wait-for-client", None, set_const("wait_for_client", True)),
("--configure-.+", "<value>", set_config),
# Switches that are used internally by the client or debugpy itself.
("--adapter-access-token", "<token>", set_arg("adapter_access_token")),
# Targets. The "" entry corresponds to positional command line arguments,
# i.e. the ones not preceded by any switch name.
("", "<filename>", set_target("file", positional=True)),
("-m", "<module>", set_target("module")),
("-c", "<code>", set_target("code")),
("--pid", "<pid>", set_target("pid", pid)),
]
# fmt: on
def consume_argv():
while len(sys.argv) >= 2:
value = sys.argv[1]
del sys.argv[1]
yield value
def parse_argv():
seen = set()
it = consume_argv()
while True:
try:
arg = next(it)
except StopIteration:
raise ValueError("missing target: " + TARGET)
switch = compat.filename(arg)
if not switch.startswith("-"):
switch = ""
for pattern, placeholder, action in switches:
if re.match("^(" + pattern + ")$", switch):
break
else:
raise ValueError("unrecognized switch " + switch)
if switch in seen:
raise ValueError("duplicate switch " + switch)
else:
seen.add(switch)
try:
action(arg, it)
except StopIteration:
assert placeholder is not None
raise ValueError(fmt("{0}: missing {1}", switch, placeholder))
except Exception as exc:
raise ValueError(fmt("invalid {0} {1}: {2}", switch, placeholder, exc))
if options.target is not None:
break
if options.mode is None:
raise ValueError("either --listen or --connect is required")
if options.adapter_access_token is not None and options.mode != "connect":
raise ValueError("--adapter-access-token requires --connect")
if options.target_kind == "pid" and options.wait_for_client:
raise ValueError("--pid does not support --wait-for-client")
assert options.target is not None
assert options.target_kind is not None
assert options.address is not None
def start_debugging(argv_0):
# We need to set up sys.argv[0] before invoking either listen() or connect(),
# because they use it to report the "process" event. Thus, we can't rely on
# run_path() and run_module() doing that, even though they will eventually.
sys.argv[0] = compat.filename_str(argv_0)
log.debug("sys.argv after patching: {0!r}", sys.argv)
debugpy.configure(options.config)
if options.mode == "listen":
debugpy.listen(options.address)
elif options.mode == "connect":
debugpy.connect(options.address, access_token=options.adapter_access_token)
else:
raise AssertionError(repr(options.mode))
if options.wait_for_client:
debugpy.wait_for_client()
def run_file():
target = options.target
start_debugging(target)
target_as_str = compat.filename_str(target)
# run_path has one difference with invoking Python from command-line:
# if the target is a file (rather than a directory), it does not add its
# parent directory to sys.path. Thus, importing other modules from the
# same directory is broken unless sys.path is patched here.
if os.path.isfile(target_as_str):
dir = os.path.dirname(target_as_str)
sys.path.insert(0, dir)
else:
log.debug("Not a file: {0!r}", target)
log.describe_environment("Pre-launch environment:")
log.info("Running file {0!r}", target)
runpy.run_path(target_as_str, run_name=compat.force_str("__main__"))
def run_module():
# Add current directory to path, like Python itself does for -m. This must
# be in place before trying to use find_spec below to resolve submodules.
sys.path.insert(0, str(""))
# We want to do the same thing that run_module() would do here, without
# actually invoking it. On Python 3, it's exposed as a public API, but
# on Python 2, we have to invoke a private function in runpy for this.
# Either way, if it fails to resolve for any reason, just leave argv as is.
argv_0 = sys.argv[0]
target_as_str = compat.filename_str(options.target)
try:
if sys.version_info >= (3,):
from importlib.util import find_spec
spec = find_spec(target_as_str)
if spec is not None:
argv_0 = spec.origin
else:
_, _, _, argv_0 = runpy._get_module_details(target_as_str)
except Exception:
log.swallow_exception("Error determining module path for sys.argv")
start_debugging(argv_0)
# On Python 2, module name must be a non-Unicode string, because it ends up
# a part of module's __package__, and Python will refuse to run the module
# if __package__ is Unicode.
log.describe_environment("Pre-launch environment:")
log.info("Running module {0!r}", options.target)
# Docs say that runpy.run_module is equivalent to -m, but it's not actually
# the case for packages - -m sets __name__ to "__main__", but run_module sets
# it to "pkg.__main__". This breaks everything that uses the standard pattern
# __name__ == "__main__" to detect being run as a CLI app. On the other hand,
# runpy._run_module_as_main is a private function that actually implements -m.
try:
run_module_as_main = runpy._run_module_as_main
except AttributeError:
log.warning("runpy._run_module_as_main is missing, falling back to run_module.")
runpy.run_module(target_as_str, alter_sys=True)
else:
run_module_as_main(target_as_str, alter_argv=True)
def run_code():
# Add current directory to path, like Python itself does for -c.
sys.path.insert(0, str(""))
code = compile(options.target, str("<string>"), str("exec"))
start_debugging(str("-c"))
log.describe_environment("Pre-launch environment:")
log.info("Running code:\n\n{0}", options.target)
eval(code, {})
def attach_to_pid():
pid = options.target
log.info("Attaching to process with PID={0}", pid)
encode = lambda s: list(bytearray(s.encode("utf-8"))) if s is not None else None
script_dir = os.path.dirname(debugpy.server.__file__)
assert os.path.exists(script_dir)
script_dir = encode(script_dir)
setup = {
"mode": options.mode,
"address": options.address,
"wait_for_client": options.wait_for_client,
"log_to": options.log_to,
"adapter_access_token": options.adapter_access_token,
}
setup = encode(json.dumps(setup))
python_code = """
import codecs;
import json;
import sys;
decode = lambda s: codecs.utf_8_decode(bytearray(s))[0] if s is not None else None;
script_dir = decode({script_dir});
setup = json.loads(decode({setup}));
sys.path.insert(0, script_dir);
import attach_pid_injected;
del sys.path[0];
attach_pid_injected.attach(setup);
"""
python_code = (
python_code.replace("\r", "")
.replace("\n", "")
.format(script_dir=script_dir, setup=setup)
)
log.info("Code to be injected: \n{0}", python_code.replace(";", ";\n"))
# pydevd restriction on characters in injected code.
assert not (
{'"', "'", "\r", "\n"} & set(python_code)
), "Injected code should not contain any single quotes, double quotes, or newlines."
pydevd_attach_to_process_path = os.path.join(
os.path.dirname(pydevd.__file__), "pydevd_attach_to_process"
)
assert os.path.exists(pydevd_attach_to_process_path)
sys.path.append(pydevd_attach_to_process_path)
try:
import add_code_to_python_process # noqa
log.info("Injecting code into process with PID={0} ...", pid)
add_code_to_python_process.run_python_code(
pid,
python_code,
connect_debugger_tracing=True,
show_debug_info=int(os.getenv("DEBUGPY_ATTACH_BY_PID_DEBUG_INFO", "0")),
)
except Exception:
log.reraise_exception("Code injection into PID={0} failed:", pid)
log.info("Code injection into PID={0} completed.", pid)
def main():
original_argv = list(sys.argv)
try:
parse_argv()
except Exception as exc:
print(str(HELP) + str("\nError: ") + str(exc), file=sys.stderr)
sys.exit(2)
if options.log_to is not None:
debugpy.log_to(options.log_to)
if options.log_to_stderr:
debugpy.log_to(sys.stderr)
api.ensure_logging()
log.info(
str("sys.argv before parsing: {0!r}\n" " after parsing: {1!r}"),
original_argv,
sys.argv,
)
try:
run = {
"file": run_file,
"module": run_module,
"code": run_code,
"pid": attach_to_pid,
}[options.target_kind]
run()
except SystemExit as exc:
log.reraise_exception(
"Debuggee exited via SystemExit: {0!r}", exc.code, level="debug"
)
| mit | 5,915,127,247,936,287,000 | 30.234375 | 89 | 0.57226 | false |
nettrom/importance | python/wikiproject/confusion-matrix.py | 1 | 8866 | #!/usr/env/python
# -*- coding: utf-8 -*-
'''
Script to predict articles for an entire WikiProject using its trained
model and the entire snapshot dataset.
Copyright (c) 2017 Morten Wang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import re
import logging
import pickle
from yaml import load
import pandas as pd
import numpy as np
import scipy.stats as st
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import GradientBoostingClassifier as gbm
from sklearn.metrics import confusion_matrix
class WikiProjectPredictor:
def __init__(self):
self.config = None
self.model = None
self.le = None
def load_datasets(self):
'''
Read in the datasets for this WikiProject, join them into a combined
dataset and add the necessary columns.
'''
# read in snapshot
snapshot = pd.read_table(self.config['snapshot file'])
# read in dataset
dataset = pd.read_table(self.config['dataset'])
# read in clickstream
clickstream = pd.read_table(self.config['clickstream file'])
# read in disambiguations
disambiguations = pd.read_table(self.config['disambiguation file'])
# read in the list of side-chained articles
sidechained = pd.read_table(self.config['sidechain file'])
# Log-transform number of inlinks, views, and calculate prop_proj_inlinks
dataset['log_inlinks'] = np.log10(1 + dataset['num_inlinks'])
dataset['log_views'] = np.log10(1 + dataset['num_views'])
dataset['prop_proj_inlinks'] = 1 + dataset['num_proj_inlinks']/(1 + dataset['num_inlinks'])
# Calculate the proportion of clicks from articles
clickstream['prop_from_art'] = np.minimum(
1.0, clickstream['n_from_art']/(1 + clickstream['n_clicks']))
# Join the datasets
# snapshot[dataset[clickstream]]
res = pd.merge(snapshot,
pd.merge(dataset, clickstream,
on='page_id'),
left_on='art_page_id', right_on='page_id')
# filter out pages where the talk page is an archive
res = res[res.talk_is_archive == 0]
# filter out pages where the article is a redirect
res = res[res.art_is_redirect == 0]
# filter out pages where there is no corresponding article
res = res[res.art_page_id > 0]
# filter out disambiguations
res = res[res.art_page_id.isin(disambiguations.page_id) == False]
# filter out all side-chained articles
if not sidechained.empty:
res = res[res.art_page_id.isin(sidechained.page_id) == False]
# calculate proportion of active inlinks
res['prop_act_inlinks'] = np.minimum(
1.0, res['n_act_links']/(1 + res['num_inlinks']))
# add rank variables for views and inlinks, and make them percentiles
res['rank_links'] = res.num_inlinks.rank(method='min')
res['rank_links_perc'] = res.num_inlinks.rank(method='min', pct=True)
res['rank_views'] = res.num_views.rank(method='min')
res['rank_views_perc'] = res.num_views.rank(method='min', pct=True)
# make sure importance ratings are an ordered categorical variable
res['importance_rating'] = res.importance_rating.astype(
'category', categories=['Low', 'Mid', 'High', 'Top'], ordered=True)
self.dataset = res
return()
def predict_ratings(self):
'''
Trim the given dataset down to the right columns, make predictions
of the importance rating, and also probabilities for each rating.
:param dataset: the dataset to make predictions on
:type dataset: `pandas.DataFrame`
'''
X = self.dataset.loc[:, self.config['predictors']].as_matrix()
logging.info('predicting importance ratings')
classes = self.model.predict(X)
logging.info('predicting rating probabilities')
probabilities = self.model.predict_proba(X)
self.dataset['pred_rating'] = pd.Series(classes,
index=self.dataset.index)
for i in range(probabilities.shape[1]):
col_name = 'proba_{}'.format(self.le.inverse_transform(i))
self.dataset[col_name] = probabilities[:,i]
## Return the dataset with predictions and probabilities added
return()
def make_confusion_matrix(self, config_file, print_wikitable=False):
'''
Load in the datasets and models defined in the given configuration file,
then predict the importance of all articles in the datasets.
'''
logging.info('loading the configuration file')
# load in the configuration
with open(config_file) as infile:
self.config = load(infile)
logging.info('loading the model')
# load in the model
with open(self.config['model file'], 'rb') as infile:
self.model = pickle.load(infile)
logging.info('loading the label encoder')
# load in the label encoder
with open(self.config['label encoder file'], 'rb') as infile:
self.le = pickle.load(infile)
logging.info('reading in the datasets')
# read in the datasets
self.load_datasets()
# make predictions for all pages and print out a confusion matrix
logging.info('making predictions')
self.predict_ratings()
## Add a column with the name of the predicted rating
self.dataset['pred_rating_name'] = self.le.inverse_transform(
self.dataset['pred_rating'])
ratings = ['Top', 'High', 'Mid', 'Low'] # ratings in descending order
if print_wikitable:
conf_matrix = confusion_matrix(self.dataset['importance_rating'],
self.dataset['pred_rating_name'],
labels=ratings)
# print header
wikitable = '''{| class="wikitable sortable"
|-
|
'''
for rating in ratings:
wikitable = "{}! {}\n".format(wikitable, rating)
# print content
for (i, rating) in enumerate(ratings):
wikitable = "{}|-\n| {}\n".format(wikitable, rating)
for (j, rating) in enumerate(ratings):
wikitable = "{}| style='text-align:right;' | {{{{formatnum:{n}}}}}\n".format(wikitable, n=conf_matrix[i, j])
# print footer
print(wikitable + "|}")
else:
print(pd.crosstab(self.dataset['importance_rating'],
self.dataset['pred_rating_name'],
rownames=['True'],
colnames=['Predicted'],
margins=True))
return()
def main():
import argparse
cli_parser = argparse.ArgumentParser(
description="script to make predictions for all articles in a WikiProject")
# Verbosity option
cli_parser.add_argument('-v', '--verbose', action='store_true',
help='write informational output')
cli_parser.add_argument('-w', '--wikitable', action='store_true',
help='print the confusion matrix as a wikitable')
## YAML configuration file for the global model
cli_parser.add_argument('config_file',
help='path to the global model YAML configuration file')
args = cli_parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.INFO)
predictor = WikiProjectPredictor()
predictor.make_confusion_matrix(args.config_file, args.wikitable)
return()
if __name__ == '__main__':
main()
| mit | 8,025,864,012,591,268,000 | 37.051502 | 128 | 0.615385 | false |
MarkusHackspacher/unknown-horizons | horizons/gui/tabs/residentialtabs.py | 1 | 5161 | # ###################################################
# Copyright (C) 2008-2017 The Unknown Horizons Team
# [email protected]
# This file is part of Unknown Horizons.
#
# Unknown Horizons is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# ###################################################
from horizons.command.uioptions import SetTaxSetting
from horizons.component.namedcomponent import NamedComponent
from horizons.constants import SETTLER
from horizons.gui.util import create_resource_icon, get_happiness_icon_and_helptext
from horizons.i18n import gettext_lazy as LazyT
from horizons.messaging import SettlerUpdate
from horizons.util.loaders.actionsetloader import ActionSetLoader
from horizons.util.python.callback import Callback
from .overviewtab import OverviewTab
class SettlerOverviewTab(OverviewTab):
widget = 'overview_settler.xml'
helptext = LazyT("Settler overview")
def init_widget(self):
super().init_widget()
name = self.instance.settlement.get_component(NamedComponent).name
self.widget.findChild(name="headline").text = name
setup_tax_slider(self.widget.child_finder('tax_slider'),
self.widget.child_finder('tax_val_label'),
self.instance.settlement,
self.instance.level)
taxes = self.instance.settlement.tax_settings[self.instance.level]
self.widget.child_finder('tax_val_label').text = str(taxes)
action_set = ActionSetLoader.get_set(self.instance._action_set_id)
action_gfx = list(action_set.items())[0][1]
image = list(action_gfx[45].keys())[0]
self.widget.findChild(name="building_image").image = image
def on_settler_level_change(self, message):
assert isinstance(message, SettlerUpdate)
setup_tax_slider(self.widget.child_finder('tax_slider'),
self.widget.child_finder('tax_val_label'),
self.instance.settlement,
message.level)
taxes = self.instance.settlement.tax_settings[self.instance.level]
self.widget.child_finder('tax_val_label').text = str(taxes)
imgs = list(ActionSetLoader.get_set(self.instance._action_set_id).items())[0][1]
self.widget.findChild(name="building_image").image = list(imgs[45].keys())[0]
def show(self):
super().show()
SettlerUpdate.subscribe(self.on_settler_level_change, sender=self.instance)
def hide(self):
SettlerUpdate.discard(self.on_settler_level_change, sender=self.instance)
super().hide()
def refresh(self):
image, helptext = get_happiness_icon_and_helptext(self.instance.happiness, self.instance.session)
self.widget.child_finder('happiness_label').image = image
self.widget.child_finder('happiness_label').helptext = helptext
self.widget.child_finder('happiness').progress = self.instance.happiness
self.widget.child_finder('inhabitants').text = "{}/{}".format(
self.instance.inhabitants,
self.instance.inhabitants_max)
self.widget.child_finder('taxes').text = str(self.instance.last_tax_payed)
self.update_consumed_res()
name = self.instance.settlement.get_component(NamedComponent).name
self.widget.findChild(name="headline").text = name
events = {
'headline': Callback(self.instance.session.ingame_gui.show_change_name_dialog,
self.instance.settlement)
}
self.widget.mapEvents(events)
super().refresh()
def update_consumed_res(self):
"""Updates the container that displays the needed resources of the settler"""
container = self.widget.findChild(name="needed_res")
# remove icons from the container
container.removeAllChildren()
# create new ones
resources = self.instance.get_currently_not_consumed_resources()
for res in resources:
icon = create_resource_icon(res, self.instance.session.db)
icon.max_size = icon.min_size = icon.size = (32, 32)
container.addChild(icon)
container.adaptLayout()
def setup_tax_slider(slider, val_label, settlement, level):
"""Set up a slider to work as tax slider"""
step_count = int((SETTLER.TAX_SETTINGS_MAX - SETTLER.TAX_SETTINGS_MIN) / SETTLER.TAX_SETTINGS_STEP)
slider.steps = [SETTLER.TAX_SETTINGS_MIN + SETTLER.TAX_SETTINGS_STEP * i for i in
range(step_count)]
slider.value = settlement.tax_settings[level]
def on_slider_change():
val_label.text = str(slider.value)
if settlement.tax_settings[level] != slider.value:
SetTaxSetting(settlement, level, slider.value).execute(settlement.session)
slider.capture(on_slider_change)
| gpl-2.0 | -4,440,255,941,613,164,500 | 42.737288 | 100 | 0.708584 | false |
davidsminor/gaffer | python/GafferRenderManTest/RenderManShaderTest.py | 1 | 61179 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferRenderMan
import GafferRenderManTest
class RenderManShaderTest( GafferRenderManTest.RenderManTestCase ) :
def setUp( self ) :
GafferRenderManTest.RenderManTestCase.setUp( self )
GafferRenderMan.RenderManShader.shaderLoader().clear()
def test( self ) :
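		# Loading a standard shader should create one correctly typed
		# parameter plug per shader parameter, initialised to the shader's defaults.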
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
self.failUnless( isinstance( n["parameters"]["Ks"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["Kd"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["Ka"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["roughness"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( n["parameters"]["specularcolor"], Gaffer.Color3fPlug ) )
self.assertEqual( n["parameters"]["Ks"].getValue(), 0.5 )
self.assertEqual( n["parameters"]["Kd"].getValue(), 0.5 )
self.assertEqual( n["parameters"]["Ka"].getValue(), 1 )
self.assertAlmostEqual( n["parameters"]["roughness"].getValue(), 0.1 )
self.assertEqual( n["parameters"]["specularcolor"].getValue(), IECore.Color3f( 1 ) )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( "plastic" )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
st = s["n"].state()
self.assertEqual( len( st ), 1 )
self.assertEqual( st[0].type, "ri:surface" )
self.assertEqual( st[0].name, "plastic" )
self.failUnless( isinstance( s["n"]["parameters"]["Ks"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["Kd"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["Ka"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["roughness"], Gaffer.FloatPlug ) )
self.failUnless( isinstance( s["n"]["parameters"]["specularcolor"], Gaffer.Color3fPlug ) )
self.assertTrue( "parameters1" not in s["n"] )
def testShader( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
s = n.state()
self.assertEqual( len( s ), 1 )
self.assertEqual( s[0].type, "ri:surface" )
self.assertEqual( s[0].name, "plastic" )
self.assertEqual( s[0].parameters["Ks"], IECore.FloatData( .5 ) )
self.assertEqual( s[0].parameters["Kd"], IECore.FloatData( .5 ) )
self.assertEqual( s[0].parameters["Ka"], IECore.FloatData( 1 ) )
self.assertEqual( s[0].parameters["roughness"], IECore.FloatData( .1 ) )
self.assertEqual( s[0].parameters["specularcolor"], IECore.Color3fData( IECore.Color3f( 1 ) ) )
def testShaderHash( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "checker" )
h1 = n.stateHash()
n["parameters"]["Kd"].setValue( 0.25 )
self.assertNotEqual( n.stateHash(), h1 )
def testCoshaderHash( self ) :
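		# The state hash must take connected coshaders into account, so editing
		# an upstream coshader parameter changes the downstream shader's hash.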
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( "coshaderParameter" in shaderNode["parameters"] )
self.assertEqual( shaderNode["parameters"]["coshaderParameter"].typeId(), Gaffer.Plug.staticTypeId() )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
h1 = shaderNode.stateHash()
coshaderNode["parameters"]["floatParameter"].setValue( 0.25 )
self.assertNotEqual( shaderNode.stateHash(), h1 )
def testParameterOrdering( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
self.assertEqual( n["parameters"][0].getName(), "Ks" )
self.assertEqual( n["parameters"][1].getName(), "Kd" )
self.assertEqual( n["parameters"][2].getName(), "Ka" )
self.assertEqual( n["parameters"][3].getName(), "roughness" )
self.assertEqual( n["parameters"][4].getName(), "specularcolor" )
n = GafferRenderMan.RenderManShader()
n.loadShader( "matte" )
self.assertEqual( n["parameters"][0].getName(), "Ka" )
self.assertEqual( n["parameters"][1].getName(), "Kd" )
def testCoshader( self ) :
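		# Connecting a coshader should produce a two-shader state, with the
		# coshader's __handle referenced by the downstream shader's parameter.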
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( "coshaderParameter" in shaderNode["parameters"] )
self.assertEqual( shaderNode["parameters"]["coshaderParameter"].typeId(), Gaffer.Plug.staticTypeId() )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
s = shaderNode.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, shader )
self.assertEqual( s[0].parameters["__handle"], s[1].parameters["coshaderParameter"] )
def testInputAcceptance( self ) :
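		# Coshader parameters should accept only coshader outputs, and value
		# parameters should accept only value outputs.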
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
random = Gaffer.Random()
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderNode["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameter"].acceptsInput( random["outFloat"] ) )
self.assertTrue( shaderNode["parameters"]["floatParameter"].acceptsInput( random["outFloat"] ) )
self.assertFalse( shaderNode["parameters"]["floatParameter"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( coshaderNode["parameters"]["colorParameter"].acceptsInput( random["outColor"] ) )
self.assertFalse( coshaderNode["parameters"]["colorParameter"].acceptsInput( coshaderNode["out"] ) )
def testParameterDefaultValue( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertEqual( shaderNode["parameters"]["floatParameter"].defaultValue(), 1 )
def testParameterMinMax( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertEqual( shaderNode["parameters"]["floatParameter"].minValue(), -1 )
self.assertEqual( shaderNode["parameters"]["floatParameter"].maxValue(), 10 )
def testReload( self ) :
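		# Reloading should preserve existing values when keepExistingValues is True,
		# and revert everything to the shader defaults when it is False.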
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader1 )
shaderNode["parameters"]["float1"].setValue( 0.1 )
shaderNode["parameters"]["string1"].setValue( "test" )
shaderNode["parameters"]["color1"].setValue( IECore.Color3f( 1, 2, 3 ) )
self.assertAlmostEqual( shaderNode["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "test" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl" )
shaderNode.loadShader( shader2, keepExistingValues=True )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1", "float2", "string2", "color2" ] )
self.assertAlmostEqual( shaderNode["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "test" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
shaderNode.loadShader( shader1, keepExistingValues=True )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1" ] )
self.assertAlmostEqual( shaderNode["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "test" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
shaderNode.loadShader( shader1, keepExistingValues=False )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1" ] )
self.assertEqual( shaderNode["parameters"]["float1"].getValue(), 1 )
self.assertEqual( shaderNode["parameters"]["string1"].getValue(), "" )
self.assertEqual( shaderNode["parameters"]["color1"].getValue(), IECore.Color3f( 1, 1, 1 ) )
def testReloadRemovesOldParameters( self ) :
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader2 )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1", "float2", "string2", "color2" ] )
shader3 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version3.sl" )
shaderNode.loadShader( shader3 )
self.assertEqual( shaderNode["parameters"].keys(), [ "float1", "string1", "color1", "float2" ] )
def testAutomaticReloadOnScriptLoad( self ) :
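		# Reloading a script after the shader has been recompiled with extra
		# parameters should expose the new parameters while keeping the old values.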
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
s = Gaffer.ScriptNode()
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader1 )
s["shader"]["parameters"]["float1"].setValue( 0.1 )
s["shader"]["parameters"]["string1"].setValue( "test" )
s["shader"]["parameters"]["color1"].setValue( IECore.Color3f( 1, 2, 3 ) )
ss = s.serialise()
self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertEqual( s["shader"]["parameters"].keys(), [ "float1", "string1", "color1", "float2", "string2", "color2" ] )
self.assertAlmostEqual( s["shader"]["parameters"]["float1"].getValue(), 0.1 )
self.assertEqual( s["shader"]["parameters"]["string1"].getValue(), "test" )
self.assertEqual( s["shader"]["parameters"]["color1"].getValue(), IECore.Color3f( 1, 2, 3 ) )
def testReloadPreservesConnections( self ) :
n = GafferRenderMan.RenderManShader()
n.loadShader( "plastic" )
random = Gaffer.Random()
n["parameters"]["Ks"].setInput( random["outFloat"] )
n["parameters"]["specularcolor"].setInput( random["outColor"] )
n.loadShader( "plastic", keepExistingValues = True )
self.assertTrue( n["parameters"]["Ks"].getInput().isSame( random["outFloat"] ) )
self.assertTrue( n["parameters"]["specularcolor"].getInput().isSame( random["outColor"] ) )
def testReloadPreservesConnectionsWhenMinMaxOrDefaultChanges( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader1 )
self.assertFalse( n["parameters"]["float1"].hasMinValue() )
self.assertFalse( n["parameters"]["float1"].hasMaxValue() )
self.assertEqual( n["parameters"]["string1"].defaultValue(), "" )
nn = Gaffer.Node()
nn["outFloat"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out )
nn["outString"] = Gaffer.StringPlug( direction = Gaffer.Plug.Direction.Out )
n["parameters"]["float1"].setInput( nn["outFloat"] )
n["parameters"]["string1"].setInput( nn["outString"] )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
n.loadShader( shader1, keepExistingValues=True )
self.assertTrue( n["parameters"]["float1"].hasMinValue() )
self.assertTrue( n["parameters"]["float1"].hasMaxValue() )
self.assertEqual( n["parameters"]["float1"].minValue(), -1 )
self.assertEqual( n["parameters"]["float1"].maxValue(), 2 )
self.assertEqual( n["parameters"]["string1"].defaultValue(), "newDefaultValue" )
self.assertTrue( n["parameters"]["float1"].getInput().isSame( nn["outFloat"] ) )
self.assertTrue( n["parameters"]["string1"].getInput().isSame( nn["outString"] ) )
def testReloadPreservesPartialConnectionsWhenMinMaxOrDefaultChanges( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader1 )
nn = Gaffer.Node()
nn["outFloat"] = Gaffer.FloatPlug( direction = Gaffer.Plug.Direction.Out )
n["parameters"]["color1"][0].setInput( nn["outFloat"] )
n["parameters"]["color1"][1].setInput( nn["outFloat"] )
n["parameters"]["color1"][2].setValue( 0.75 )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
n.loadShader( shader1, keepExistingValues=True )
self.assertTrue( n["parameters"]["color1"][0].getInput().isSame( nn["outFloat"] ) )
self.assertTrue( n["parameters"]["color1"][1].getInput().isSame( nn["outFloat"] ) )
self.assertEqual( n["parameters"]["color1"][2].getValue(), 0.75 )
def testReloadPreservesValuesWhenMinMaxOrDefaultChanges( self ) :
shader1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version1.sl", shaderName = "unversioned" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader1 )
n["parameters"]["float1"].setValue( 0.25 )
n["parameters"]["string1"].setValue( "dog" )
n["parameters"]["color1"].setValue( IECore.Color3f( 0.1, 0.25, 0.5 ) )
shader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version2.sl", shaderName = "unversioned" )
GafferRenderMan.RenderManShader.shaderLoader().clear()
n.loadShader( shader1, keepExistingValues=True )
self.assertEqual( n["parameters"]["float1"].getValue(), 0.25 )
self.assertEqual( n["parameters"]["string1"].getValue(), "dog" )
self.assertEqual( n["parameters"]["color1"].getValue(), IECore.Color3f( 0.1, 0.25, 0.5 ) )
def testOutputParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/version3.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.failIf( "outputFloat" in n["parameters"].keys() )
def testAssignmentDirtyPropagation( self ) :
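		# Editing a connected coshader should dirty the output of a downstream
		# ShaderAssignment node.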
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
plane = GafferScene.Plane()
assignment = GafferScene.ShaderAssignment()
assignment["in"].setInput( plane["out"] )
assignment["shader"].setInput( shaderNode["out"] )
cs = GafferTest.CapturingSlot( assignment.plugDirtiedSignal() )
coshaderNode["parameters"]["floatParameter"].setValue( 12 )
dirtiedNames = [ x[0].fullName() for x in cs ]
self.assertEqual( len( dirtiedNames ), 3 )
self.assertEqual( dirtiedNames[0], "ShaderAssignment.shader" )
self.assertEqual( dirtiedNames[1], "ShaderAssignment.out.attributes" )
self.assertEqual( dirtiedNames[2], "ShaderAssignment.out" )
def testArrayParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/arrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
expected = {
"dynamicFloatArray" : IECore.FloatVectorData( [] ),
"fixedFloatArray" : IECore.FloatVectorData( [ 1, 2, 3, 4 ] ),
"dynamicStringArray" : IECore.StringVectorData( [ "dynamic", "arrays", "can", "still", "have", "defaults" ] ),
"fixedStringArray" : IECore.StringVectorData( [ "hello", "goodbye" ] ),
"dynamicColorArray" : IECore.Color3fVectorData( [ IECore.Color3f( 1 ), IECore.Color3f( 2 ) ] ),
"fixedColorArray" : IECore.Color3fVectorData( [ IECore.Color3f( 1 ), IECore.Color3f( 2 ) ] ),
"dynamicVectorArray" : IECore.V3fVectorData( [] ),
"fixedVectorArray" : IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 6 ) ] ),
"dynamicPointArray" : IECore.V3fVectorData( [] ),
"fixedPointArray" : IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 6 ) ] ),
"dynamicNormalArray" : IECore.V3fVectorData( [] ),
"fixedNormalArray" : IECore.V3fVectorData( [ IECore.V3f( x ) for x in range( 1, 6 ) ] ),
}
self.assertEqual( set( n["parameters"].keys() ), set( expected.keys() ) )
for name, value in expected.items() :
self.assertEqual( n["parameters"][name].defaultValue(), value )
self.assertEqual( n["parameters"][name].getValue(), value )
s = n.state()[0]
for name, value in expected.items() :
self.assertEqual( s.parameters[name], value )
def testFixedCoshaderArrayParameters( self ) :
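		# Fixed-length coshader array parameters should load as a CompoundPlug
		# with one child plug per array element.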
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.assertEqual( n["parameters"].keys(), [ "dynamicShaderArray", "fixedShaderArray" ] )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"], Gaffer.CompoundPlug ) )
self.assertEqual( len( n["parameters"]["fixedShaderArray"] ), 4 )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray0"], Gaffer.Plug ) )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray1"], Gaffer.Plug ) )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray2"], Gaffer.Plug ) )
self.assertTrue( isinstance( n["parameters"]["fixedShaderArray"]["fixedShaderArray3"], Gaffer.Plug ) )
state = n.state()
self.assertEqual( state[0].parameters["fixedShaderArray"], IECore.StringVectorData( [ "" ] * 4 ) )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
n["parameters"]["fixedShaderArray"]["fixedShaderArray0"].setInput( coshaderNode["out"] )
state = n.state()
self.assertEqual( state[1].parameters["fixedShaderArray"], IECore.StringVectorData( [ state[0].parameters["__handle"].value, "", "", "" ] ) )
def testCoshaderType( self ) :
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
self.assertEqual( coshaderNode.state()[0].type, "ri:shader" )
def testCantConnectSurfaceShaderIntoCoshaderInput( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
n1 = GafferRenderMan.RenderManShader()
n1.loadShader( shader )
n2 = GafferRenderMan.RenderManShader()
n2.loadShader( "plastic" )
self.assertFalse( n1["parameters"]["coshaderParameter"].acceptsInput( n2["out"] ) )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
n3 = GafferRenderMan.RenderManShader()
n3.loadShader( coshader )
self.assertTrue( n1["parameters"]["coshaderParameter"].acceptsInput( n3["out"] ) )
arrayShader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n4 = GafferRenderMan.RenderManShader()
n4.loadShader( arrayShader )
self.assertFalse( n4["parameters"]["fixedShaderArray"]["fixedShaderArray0"].acceptsInput( n2["out"] ) )
self.assertTrue( n4["parameters"]["fixedShaderArray"]["fixedShaderArray0"].acceptsInput( n3["out"] ) )
def testConnectionsBetweenParameters( self ) :
s = GafferRenderMan.RenderManShader()
s.loadShader( "plastic" )
s["parameters"]["Kd"].setValue( 0.25 )
s["parameters"]["Ks"].setInput( s["parameters"]["Kd"] )
shader = s.state()[0]
self.assertEqual( shader.parameters["Kd"].value, 0.25 )
self.assertEqual( shader.parameters["Ks"].value, 0.25 )
def testFixedCoshaderArrayParameterHash( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
h1 = n.stateHash()
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
n["parameters"]["fixedShaderArray"]["fixedShaderArray0"].setInput( coshaderNode["out"] )
h2 = n.stateHash()
self.assertNotEqual( h2, h1 )
n["parameters"]["fixedShaderArray"]["fixedShaderArray1"].setInput( coshaderNode["out"] )
h3 = n.stateHash()
self.assertNotEqual( h3, h2 )
self.assertNotEqual( h3, h1 )
n["parameters"]["fixedShaderArray"]["fixedShaderArray1"].setInput( None )
n["parameters"]["fixedShaderArray"]["fixedShaderArray2"].setInput( coshaderNode["out"] )
h4 = n.stateHash()
self.assertNotEqual( h4, h3 )
self.assertNotEqual( h4, h2 )
self.assertNotEqual( h4, h1 )
def testDisabling( self ) :
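		# Disabling the shader node itself should yield an empty state and a
		# different state hash.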
s = GafferRenderMan.RenderManShader()
s.loadShader( "plastic" )
stateHash = s.stateHash()
state = s.state()
self.assertEqual( len( state ), 1 )
self.assertEqual( state[0].name, "plastic" )
self.assertTrue( s["enabled"].isSame( s.enabledPlug() ) )
s["enabled"].setValue( False )
stateHash2 = s.stateHash()
self.assertNotEqual( stateHash2, stateHash )
state2 = s.state()
self.assertEqual( len( state2 ), 0 )
def testDisablingCoshaders( self ) :
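		# Disabling a connected coshader should remove it from the network and
		# from the downstream parameter, and change the state hash.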
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( coshaderNode["out"] )
s = shaderNode.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, shader )
h = shaderNode.stateHash()
coshaderNode["enabled"].setValue( False )
s2 = shaderNode.state()
self.assertEqual( len( s2 ), 1 )
self.assertEqual( s2[0].name, shader )
self.assertTrue( "coshaderParameter" not in s2[0].parameters )
self.assertNotEqual( shaderNode.stateHash(), h )
def testDisablingCoshaderArrayInputs( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode1 = GafferRenderMan.RenderManShader()
coshaderNode1.loadShader( coshader )
coshaderNode2 = GafferRenderMan.RenderManShader()
coshaderNode2.loadShader( coshader )
n["parameters"]["fixedShaderArray"][0].setInput( coshaderNode1["out"] )
n["parameters"]["fixedShaderArray"][2].setInput( coshaderNode2["out"] )
state = n.state()
h1 = n.stateHash()
self.assertEqual(
state[2].parameters["fixedShaderArray"],
IECore.StringVectorData( [
state[0].parameters["__handle"].value,
"",
state[1].parameters["__handle"].value,
""
] )
)
coshaderNode1["enabled"].setValue( False )
state = n.state()
self.assertEqual(
state[1].parameters["fixedShaderArray"],
IECore.StringVectorData( [
"",
"",
state[0].parameters["__handle"].value,
""
] )
)
h2 = n.stateHash()
self.assertNotEqual( h2, h1 )
coshaderNode2["enabled"].setValue( False )
state = n.state()
self.assertEqual(
state[0].parameters["fixedShaderArray"],
IECore.StringVectorData( [
"",
"",
"",
""
] )
)
self.assertNotEqual( n.stateHash(), h1 )
self.assertNotEqual( n.stateHash(), h2 )
def testCorrespondingInput( self ) :
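		# correspondingInput() should return the pass-through parameter for
		# coshaders that declare one, and None otherwise.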
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
self.assertEqual( coshaderNode.correspondingInput( coshaderNode["out"] ), None )
coshader2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
coshaderNode2 = GafferRenderMan.RenderManShader()
coshaderNode2.loadShader( coshader2 )
self.assertTrue( coshaderNode2.correspondingInput( coshaderNode2["out"] ).isSame( coshaderNode2["parameters"]["aColorIWillTint"] ) )
def testCoshaderPassThrough( self ) :
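		# Disabling a coshader with a pass-through should substitute its
		# corresponding input directly into the network.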
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
passThroughCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
passThroughCoshaderNode = GafferRenderMan.RenderManShader()
passThroughCoshaderNode.loadShader( passThroughCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shaderNode["parameters"]["coshaderParameter"].setInput( passThroughCoshaderNode["out"] )
passThroughCoshaderNode["parameters"]["aColorIWillTint"].setInput( coshaderNode["out"] )
h = shaderNode.stateHash()
s = shaderNode.state()
self.assertEqual( len( s ), 3 )
self.assertEqual( s[2].parameters["coshaderParameter"], s[1].parameters["__handle"] )
self.assertEqual( s[1].name, passThroughCoshader )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
passThroughCoshaderNode["enabled"].setValue( False )
s = shaderNode.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
def testSplineParameters( self ) :
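		# Spline parameters should load as SplineffPlug/SplinefColor3fPlug,
		# with default values taken from the shader.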
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.assertEqual( n["parameters"].keys(), [ "floatSpline", "colorSpline", "colorSpline2" ] )
self.assertTrue( isinstance( n["parameters"]["floatSpline"], Gaffer.SplineffPlug ) )
self.assertTrue( isinstance( n["parameters"]["colorSpline"], Gaffer.SplinefColor3fPlug ) )
self.assertEqual(
n["parameters"]["floatSpline"].defaultValue(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 1 ),
( 1, 1 ),
]
)
)
self.assertEqual(
n["parameters"]["colorSpline"].defaultValue(),
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
[
( 0, IECore.Color3f( 0 ) ),
( 0, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 1 ) ),
( 1, IECore.Color3f( 1 ) ),
]
)
)
floatValue = IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
)
colorValue = IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
[
( 0, IECore.Color3f( 0 ) ),
( 0, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( .5 ) ),
( 1, IECore.Color3f( .5 ) ),
]
)
n["parameters"]["floatSpline"].setValue( floatValue )
n["parameters"]["colorSpline"].setValue( colorValue )
s = n.state()[0]
self.assertEqual( s.parameters["floatSpline"].value, floatValue )
self.assertEqual( s.parameters["colorSpline"].value, colorValue )
def testSplineParameterSerialisationKeepsExistingValues( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["n"]["parameters"]["floatSpline"].setValue(
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
)
)
self.assertEqual(
s["n"]["parameters"]["floatSpline"].getValue(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
),
)
ss = s.serialise()
s2 = Gaffer.ScriptNode()
s2.execute( ss )
self.assertEqual(
s2["n"]["parameters"]["floatSpline"].getValue(),
IECore.Splineff(
IECore.CubicBasisf.catmullRom(),
[
( 0, 0 ),
( 0, 0 ),
( 1, 2 ),
( 1, 2 ),
]
),
)
def testSplineParameterDefaultValueAnnotation( self ) :
# because variable length parameters must be initialised
# with a zero length array, we have to pass the defaults we actually
# want via an annotation.
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/splineParameters.sl" )
n = GafferRenderMan.RenderManShader()
n.loadShader( shader )
self.assertEqual(
n["parameters"]["colorSpline2"].getValue(),
IECore.SplinefColor3f(
IECore.CubicBasisf.catmullRom(),
[
( 0, IECore.Color3f( 1 ) ),
( 0, IECore.Color3f( 1 ) ),
( 0.5, IECore.Color3f( 1, 0.5, 0.25 ) ),
( 1, IECore.Color3f( 0 ) ),
( 1, IECore.Color3f( 0 ) ),
]
),
)
def testCoshadersInBox( self ) :
s = Gaffer.ScriptNode()
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s["coshader"] = GafferRenderMan.RenderManShader()
s["coshader"].loadShader( coshader )
s["shader"]["parameters"]["coshaderParameter"].setInput( s["coshader"]["out"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["coshader"] ] ) )
self.assertTrue( s["shader"]["parameters"]["coshaderParameter"].getInput().parent().isSame( b ) )
s = s["shader"].state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
def testShaderInBoxWithExternalCoshader( self ) :
s = Gaffer.ScriptNode()
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
s["shader"] = GafferRenderMan.RenderManShader()
s["shader"].loadShader( shader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s["coshader"] = GafferRenderMan.RenderManShader()
s["coshader"].loadShader( coshader )
s["shader"]["parameters"]["coshaderParameter"].setInput( s["coshader"]["out"] )
b = Gaffer.Box.create( s, Gaffer.StandardSet( [ s["shader"] ] ) )
self.assertTrue( b["shader"]["parameters"]["coshaderParameter"].getInput().parent().isSame( b ) )
s = b["shader"].state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
self.assertEqual( s[0].name, coshader )
def testNumericTypeAnnotations( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/numericTypeAnnotations.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( isinstance( shaderNode["parameters"]["floatParameter1"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["floatParameter2"], Gaffer.FloatPlug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["intParameter"], Gaffer.IntPlug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["boolParameter"], Gaffer.BoolPlug ) )
self.assertEqual( shaderNode["parameters"]["floatParameter1"].defaultValue(), 1.25 )
self.assertEqual( shaderNode["parameters"]["floatParameter2"].defaultValue(), 1.5 )
self.assertEqual( shaderNode["parameters"]["intParameter"].defaultValue(), 10 )
self.assertEqual( shaderNode["parameters"]["boolParameter"].defaultValue(), True )
self.assertEqual( shaderNode["parameters"]["floatParameter1"].getValue(), 1.25 )
self.assertEqual( shaderNode["parameters"]["floatParameter2"].getValue(), 1.5 )
self.assertEqual( shaderNode["parameters"]["intParameter"].getValue(), 10 )
self.assertEqual( shaderNode["parameters"]["boolParameter"].getValue(), True )
def testCoshaderTypeAnnotations( self ) :
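		# Coshader type annotations should restrict which coshaders may be
		# connected to typed coshader parameters and array elements.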
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
coshaderType1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType1.sl" )
coshaderType1Node = GafferRenderMan.RenderManShader()
coshaderType1Node.loadShader( coshaderType1 )
coshaderType2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType2.sl" )
coshaderType2Node = GafferRenderMan.RenderManShader()
coshaderType2Node.loadShader( coshaderType2 )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/typedCoshaderParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderType1Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderNode["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderType2Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameter"]["coshaderArrayParameter0"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameter"]["coshaderArrayParameter0"].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameter"]["coshaderArrayParameter0"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType1"]["coshaderArrayParameterType1_0"].acceptsInput( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameterType1"]["coshaderArrayParameterType1_0"].acceptsInput( coshaderType1Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType1"]["coshaderArrayParameterType1_0"].acceptsInput( coshaderType2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType2"][0].acceptsInput( coshaderNode["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderArrayParameterType2"][0].acceptsInput( coshaderType1Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderArrayParameterType2"][0].acceptsInput( coshaderType2Node["out"] ) )
def testMultipleCoshaderTypeAnnotations( self ) :
coshaderType1And2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType1And2.sl" )
coshaderType1And2Node = GafferRenderMan.RenderManShader()
coshaderType1And2Node.loadShader( coshaderType1And2 )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/typedCoshaderParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( coshaderType1And2Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType1"].acceptsInput( coshaderType1And2Node["out"] ) )
self.assertTrue( shaderNode["parameters"]["coshaderParameterType2"].acceptsInput( coshaderType1And2Node["out"] ) )
self.assertFalse( shaderNode["parameters"]["coshaderParameterType3"].acceptsInput( coshaderType1And2Node["out"] ) )
def testSplitCoshaderPassThrough( self ) :
		# C ----S     S is connected to C both directly
		# |     |     and as a pass-through of the disabled
		# D ----      node D.
#
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
S = GafferRenderMan.RenderManShader()
S.loadShader( shader )
passThroughCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
D = GafferRenderMan.RenderManShader()
D.loadShader( passThroughCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
C = GafferRenderMan.RenderManShader()
C.loadShader( coshader )
S["parameters"]["fixedShaderArray"][0].setInput( C["out"] )
S["parameters"]["fixedShaderArray"][1].setInput( D["out"] )
D["parameters"]["aColorIWillTint"].setInput( C["out"] )
h = S.stateHash()
s = S.state()
self.assertEqual( len( s ), 3 )
self.assertEqual( s[2].parameters["fixedShaderArray"], IECore.StringVectorData( [ s[0].parameters["__handle"].value, s[1].parameters["__handle"].value, "", "" ] ) )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
self.assertEqual( s[1].name, passThroughCoshader )
D["enabled"].setValue( False )
self.assertNotEqual( S.stateHash(), h )
s = S.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["fixedShaderArray"], IECore.StringVectorData( [ s[0].parameters["__handle"].value, s[0].parameters["__handle"].value, "", "" ] ) )
self.assertEqual( s[0].name, coshader )
def testSerialDisabledShaders( self ) :
# C ----> D1 ----> D2 ----> S
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
S = GafferRenderMan.RenderManShader()
S.loadShader( shader )
passThroughCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
D1 = GafferRenderMan.RenderManShader()
D1.loadShader( passThroughCoshader )
D2 = GafferRenderMan.RenderManShader()
D2.loadShader( passThroughCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
C = GafferRenderMan.RenderManShader()
C.loadShader( coshader )
S["parameters"]["coshaderParameter"].setInput( D2["out"] )
D2["parameters"]["aColorIWillTint"].setInput( D1["out"] )
D1["parameters"]["aColorIWillTint"].setInput( C["out"] )
h1 = S.stateHash()
s = S.state()
self.assertEqual( len( s ), 4 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, passThroughCoshader )
self.assertEqual( s[2].name, passThroughCoshader )
self.assertEqual( s[3].name, shader )
self.assertEqual( s[3].parameters["coshaderParameter"], s[2].parameters["__handle"] )
self.assertEqual( s[2].parameters["aColorIWillTint"], s[1].parameters["__handle"] )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
D2["enabled"].setValue( False )
h2 = S.stateHash()
self.assertNotEqual( h1, h2 )
s = S.state()
self.assertEqual( len( s ), 3 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, passThroughCoshader )
self.assertEqual( s[2].name, shader )
self.assertEqual( s[2].parameters["coshaderParameter"], s[1].parameters["__handle"] )
self.assertEqual( s[1].parameters["aColorIWillTint"], s[0].parameters["__handle"] )
D1["enabled"].setValue( False )
h3 = S.stateHash()
self.assertNotEqual( h3, h2 )
self.assertNotEqual( h3, h1 )
s = S.state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[0].name, coshader )
self.assertEqual( s[1].name, shader )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
def testDynamicCoshaderArrayParameters( self ) :
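		# Dynamic coshader array parameters should grow and shrink automatically
		# as connections are made and broken.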
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
self.assertEqual( len( shaderNode["parameters"]["dynamicShaderArray"] ), 1 )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][0], Gaffer.Plug ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][0].getInput() is None )
shaderNode["parameters"]["dynamicShaderArray"][0].setInput( coshaderNode["out"] )
self.assertEqual( len( shaderNode["parameters"]["dynamicShaderArray"] ), 2 )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][0], Gaffer.Plug ) )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][1], Gaffer.Plug ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][0].getInput().isSame( coshaderNode["out"] ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][1].getInput() is None )
shaderNode["parameters"]["dynamicShaderArray"][0].setInput( None )
self.assertEqual( len( shaderNode["parameters"]["dynamicShaderArray"] ), 1 )
self.assertTrue( isinstance( shaderNode["parameters"]["dynamicShaderArray"][0], Gaffer.Plug ) )
self.assertTrue( shaderNode["parameters"]["dynamicShaderArray"][0].getInput() is None )
def testSerialiseDynamicCoshaderArrayParameters( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["dynamicShaderArray"][0].setInput( s["c"]["out"] )
s["n"]["parameters"]["dynamicShaderArray"][1].setInput( s["c"]["out"] )
s["n"]["parameters"]["dynamicShaderArray"][2].setInput( s["c"]["out"] )
s["n"]["parameters"]["dynamicShaderArray"][1].setInput( None )
self.assertEqual( len( s["n"]["parameters"]["dynamicShaderArray"] ), 4 )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["n"]["parameters"]["dynamicShaderArray"] ), 4 )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][0].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][1].getInput() is None )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][2].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][3].getInput() is None )
s2["n"]["parameters"]["dynamicShaderArray"][3].setInput( s2["c"]["out"] )
self.assertEqual( len( s2["n"]["parameters"]["dynamicShaderArray"] ), 5 )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][0].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][1].getInput() is None )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][2].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][3].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["dynamicShaderArray"][4].getInput() is None )
def testConvertFixedCoshaderArrayToDynamic( self ) :
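		# Reloading with a newer shader whose fixed coshader array has become
		# dynamic should preserve existing connections.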
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
shaderV2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParametersV2.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["fixedShaderArray"][0].setInput( s["c"]["out"] )
		self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 4 )
s["n"].loadShader( shaderV2, keepExistingValues = True )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput().isSame( s["c"]["out"] ) )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][1].getInput() is None )
s["n"]["parameters"]["fixedShaderArray"][0].setInput( None )
self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 1 )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
def testConvertFixedCoshaderArrayToDynamicWithFirstPlugUnconnected( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
shaderV2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParametersV2.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["fixedShaderArray"][1].setInput( s["c"]["out"] )
		self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 4 )
s["n"].loadShader( shaderV2, keepExistingValues = True )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][1].getInput().isSame( s["c"]["out"] ) )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
s["n"]["parameters"]["fixedShaderArray"][1].setInput( None )
self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 1 )
self.assertTrue( s["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
def testConvertFixedCoshaderArrayToDynamicDuringLoading( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s = Gaffer.ScriptNode()
s["n"] = GafferRenderMan.RenderManShader()
s["n"].loadShader( shader )
s["c"] = GafferRenderMan.RenderManShader()
s["c"].loadShader( coshader )
s["n"]["parameters"]["fixedShaderArray"][1].setInput( s["c"]["out"] )
		self.assertEqual( len( s["n"]["parameters"]["fixedShaderArray"] ), 4 )
GafferRenderMan.RenderManShader.shaderLoader().clear()
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParametersV2.sl", shaderName = "coshaderArrayParameters" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertTrue( s2["n"]["parameters"]["fixedShaderArray"][1].getInput().isSame( s2["c"]["out"] ) )
self.assertTrue( s2["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
s2["n"]["parameters"]["fixedShaderArray"][1].setInput( None )
self.assertEqual( len( s2["n"]["parameters"]["fixedShaderArray"] ), 1 )
self.assertTrue( s2["n"]["parameters"]["fixedShaderArray"][0].getInput() is None )
def testHashThroughBox( self ):
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
# box up an intermediate coshader:
b = Gaffer.Box()
b.addChild( Gaffer.Plug( "in" ) )
b.addChild( Gaffer.Plug( "out", direction = Gaffer.Plug.Direction.Out ) )
intermediateCoshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderWithPassThrough.sl" )
intermediateCoshaderNode = GafferRenderMan.RenderManShader()
intermediateCoshaderNode.loadShader( intermediateCoshader )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode = GafferRenderMan.RenderManShader()
coshaderNode.loadShader( coshader )
b["in"].setInput( coshaderNode["out"] )
intermediateCoshaderNode["parameters"]["aColorIWillTint"].setInput( b["in"] )
b["out"].setInput( intermediateCoshaderNode["out"] )
shaderNode["parameters"]["coshaderParameter"].setInput( b["out"] )
h1 = shaderNode.stateHash()
coshaderNode["parameters"]["floatParameter"].setValue( 0.25 )
self.assertNotEqual( shaderNode.stateHash(), h1 )
def testDanglingBoxConnection( self ):
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode1 = GafferRenderMan.RenderManShader()
shaderNode1.loadShader( shader )
shaderNode2 = GafferRenderMan.RenderManShader()
shaderNode2.loadShader( shader )
b = Gaffer.Box()
b.addChild( Gaffer.Plug( "in" ) )
b.addChild( Gaffer.Plug( "out", direction = Gaffer.Plug.Direction.Out ) )
b["shader1"] = shaderNode1
shaderNode1["parameters"]["coshaderParameter"].setInput( b["in"] )
shaderNode2["parameters"]["coshaderParameter"].setInput( b["out"] )
def testUnconnectedCustomBoxInput( self ) :
class CustomBox( Gaffer.Box ) :
def __init__( self, name = "CustomBox" ) :
Gaffer.Box.__init__( self, name )
IECore.registerRunTimeTyped( CustomBox )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
# create a box and put a shader in it
b = CustomBox()
b["s"] = GafferRenderMan.RenderManShader()
b["s"].loadShader( shader )
# create a plug on the outside of the box, and connect it into
# the shader.
b["in"] = b["s"]["parameters"]["coshaderParameter"].createCounterpart( "in", Gaffer.Plug.Direction.In )
b["s"]["parameters"]["coshaderParameter"].setInput( b["in"] )
s = b["s"].state()
self.assertEqual( len( s ), 1 )
self.assertEqual( s[0].name, shader )
self.assertTrue( b["s"]["parameters"]["coshaderParameter"].getInput().isSame( b["in"] ) )
# check that it is now possible to connect appropriate coshaders
# into the box plug, and that appropriate networks are generated that way.
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
c = GafferRenderMan.RenderManShader()
c.loadShader( coshader )
self.assertTrue( b["in"].acceptsInput( c["out"] ) )
b["in"].setInput( c["out"] )
s = b["s"].state()
self.assertEqual( len( s ), 2 )
self.assertEqual( s[1].parameters["coshaderParameter"], s[0].parameters["__handle"] )
# check that it's not possible to use the plug on the box to create rogue connections
# that the shader itself wouldn't have accepted directly.
n = Gaffer.Node()
n["out"] = b["in"].createCounterpart( "out", Gaffer.Plug.Direction.Out )
self.assertFalse( b["in"].acceptsInput( n["out"] ) )
self.assertRaises( RuntimeError, b["in"].setInput, n["out"] )
# and check that if we remove the internal connection to the shader, the exterior plug
# will start accepting new connections again.
b["s"]["parameters"]["coshaderParameter"].setInput( None )
self.assertTrue( b["in"].acceptsInput( n["out"] ) )
b["in"].setInput( n["out"] )
self.assertTrue( b["in"].getInput().isSame( n["out"] ) )
# and that the shader will reject connection to the plug with the dodgy input.
self.assertFalse( b["s"]["parameters"]["coshaderParameter"].acceptsInput( b["in"] ) )
self.assertRaises( RuntimeError, b["s"]["parameters"]["coshaderParameter"].setInput, b["in"] )
def testCoshaderSwitching( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
coshaderNode0 = GafferRenderMan.RenderManShader()
coshaderNode0.loadShader( coshader )
coshaderNode1 = GafferRenderMan.RenderManShader()
coshaderNode1.loadShader( coshader )
coshaderNode0["parameters"]["floatParameter"].setValue( 0 )
coshaderNode1["parameters"]["floatParameter"].setValue( 1 )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
switch = GafferScene.ShaderSwitch()
switch["in"].setInput( coshaderNode0["out"] )
switch["in1"].setInput( coshaderNode1["out"] )
shaderNode["parameters"]["coshaderParameter"].setInput( switch["out"] )
self.assertEqual( shaderNode.state()[0].parameters["floatParameter"].value, 0 )
switch["index"].setValue( 1 )
self.assertEqual( shaderNode.state()[0].parameters["floatParameter"].value, 1 )
switch["enabled"].setValue( False )
self.assertEqual( shaderNode.state()[0].parameters["floatParameter"].value, 0 )
def testCoshaderTypingPreventsNewInvalidSwitchInputs( self ) :
coshaderType1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType1.sl" )
coshaderType1Node = GafferRenderMan.RenderManShader()
coshaderType1Node.loadShader( coshaderType1 )
coshaderType2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderType2.sl" )
coshaderType2Node = GafferRenderMan.RenderManShader()
coshaderType2Node.loadShader( coshaderType2 )
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/typedCoshaderParameters.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
switch = GafferScene.ShaderSwitch()
switch["in"].setInput( coshaderType1Node["out"] )
shaderNode["parameters"]["coshaderParameterType1"].setInput( switch["out"] )
self.assertFalse( switch["in1"].acceptsInput( coshaderType2Node["out"] ) )
self.assertTrue( switch["in1"].acceptsInput( coshaderType1Node["out"] ) )
def testAcceptInputFromEmptySwitch( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
shaderNode = GafferRenderMan.RenderManShader()
shaderNode.loadShader( shader )
switch = GafferScene.ShaderSwitch()
self.assertTrue( shaderNode["parameters"]["coshaderParameter"].acceptsInput( switch["out"] ) )
def testCoshaderSwitchingInBox( self ) :
shader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
coshader = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
script = Gaffer.ScriptNode()
script["coshaderNode0"] = GafferRenderMan.RenderManShader()
script["coshaderNode0"].loadShader( coshader )
script["coshaderNode1"] = GafferRenderMan.RenderManShader()
script["coshaderNode1"].loadShader( coshader )
script["coshaderNode0"]["parameters"]["floatParameter"].setValue( 0 )
script["coshaderNode1"]["parameters"]["floatParameter"].setValue( 1 )
script["shaderNode"] = GafferRenderMan.RenderManShader()
script["shaderNode"].loadShader( shader )
script["switch"] = GafferScene.ShaderSwitch()
script["switch"]["in"].setInput( script["coshaderNode0"]["out"] )
script["switch"]["in1"].setInput( script["coshaderNode1"]["out"] )
script["shaderNode"]["parameters"]["coshaderParameter"].setInput( script["switch"]["out"] )
self.assertEqual( script["shaderNode"].state()[0].parameters["floatParameter"].value, 0 )
box = Gaffer.Box.create( script, Gaffer.StandardSet( script.children( Gaffer.Node.staticTypeId() ) ) )
self.assertEqual( box["shaderNode"].state()[0].parameters["floatParameter"].value, 0 )
promotedIndex = box.promotePlug( box["switch"]["index"] )
self.assertEqual( box["shaderNode"].state()[0].parameters["floatParameter"].value, 0 )
promotedIndex.setValue( 1 )
self.assertEqual( box["shaderNode"].state()[0].parameters["floatParameter"].value, 1 )
def testRepeatability( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
sn1 = GafferRenderMan.RenderManShader()
sn2 = GafferRenderMan.RenderManShader()
sn1.loadShader( s1 )
sn2.loadShader( s2 )
sn2["parameters"]["coshaderParameter"].setInput( sn1["out"] )
self.assertEqual( sn2.stateHash(), sn2.stateHash() )
self.assertEqual( sn2.state(), sn2.state() )
def testHandlesAreHumanReadable( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderParameter.sl" )
sn1 = GafferRenderMan.RenderManShader( "Shader1" )
sn2 = GafferRenderMan.RenderManShader( "Shader2" )
sn1.loadShader( s1 )
sn2.loadShader( s2 )
sn2["parameters"]["coshaderParameter"].setInput( sn1["out"] )
state = sn2.state()
self.assertTrue( "Shader1" in state[0].parameters["__handle"].value )
def testHandlesAreUniqueEvenIfNodeNamesArent( self ) :
s1 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshader.sl" )
s2 = self.compileShader( os.path.dirname( __file__ ) + "/shaders/coshaderArrayParameters.sl" )
script = Gaffer.ScriptNode()
script["in1"] = GafferRenderMan.RenderManShader()
script["in1"].loadShader( s1 )
script["in2"] = GafferRenderMan.RenderManShader()
script["in2"].loadShader( s1 )
script["shader"] = GafferRenderMan.RenderManShader()
script["shader"].loadShader( s2 )
script["shader"]["parameters"]["fixedShaderArray"][0].setInput( script["in1"]["out"] )
script["shader"]["parameters"]["fixedShaderArray"][1].setInput( script["in2"]["out"] )
box = Gaffer.Box.create( script, Gaffer.StandardSet( [ script["in1"] ] ) )
# because the nodes have different parents, we can give them the same name.
box["in1"].setName( "notUnique" )
script["in2"].setName( "notUnique" )
state = script["shader"].state()
self.assertNotEqual( state[0].parameters["__handle"], state[1].parameters["__handle"] )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | -2,309,061,230,335,838,000 | 38.419459 | 166 | 0.686167 | false |
iocube/Gimme-JSON-backend | app/fields.py | 1 | 1417 | import re
import json
from marshmallow import fields, ValidationError
from bson.objectid import ObjectId
class JSONStringField(fields.Field):
def _serialize(self, value, attr, obj):
return value
def _deserialize(self, value, attr, data):
try:
json.loads(value)
return value
except ValueError:
raise ValidationError('Please provide a valid JSON.')
class HTTPMethodField(fields.Field):
def _serialize(self, value, attr, obj):
return value
def _deserialize(self, value, attr, data):
if value in ['GET', 'POST', 'PUT', 'PATCH', 'DELETE']:
return value
raise ValidationError('\'{value}\' is not valid HTTP method'.format(value=value))
class ObjectIdField(fields.Field):
def _serialize(self, value, attr, data):
return str(value)
def _deserialize(self, value, attr, data):
if ObjectId.is_valid(value):
return {'$oid': value}
raise ValidationError('Not a valid object id.')
class EndpointField(fields.Field):
def _serialize(self, value, attr, obj):
return value
def _deserialize(self, value, attr, data):
if len(value) == 0:
raise ValidationError('Endpoint can not be empty.')
elif re.search(r'\s', value):
raise ValidationError('Endpoint should not contain space characters.')
return value
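# --- usage sketch (not part of the original module) ---
# A minimal, hedged example of how these custom fields could be wired into a
# marshmallow Schema, assuming the marshmallow 2.x-style API these fields
# follow. The schema name and field names below are hypothetical.
if __name__ == '__main__':
    from marshmallow import Schema

    class EndpointSchema(Schema):
        endpoint = EndpointField(required=True)
        method = HTTPMethodField(required=True)
        payload = JSONStringField()

    errors = EndpointSchema().validate(
        {'endpoint': '/users', 'method': 'GET', 'payload': '{"active": true}'})
    print(errors)  # expected: {} for valid input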
| mit | -4,832,808,094,186,189,000 | 27.918367 | 89 | 0.633028 | false |
hiseh/yinanan | tps/converter/formats.py | 1 | 2208 | #!/usr/bin/env python
class BaseFormat(object):
"""
Base format class.
Supported formats are: ogg, avi, mkv, webm, flv, mov, mp4, mpeg
"""
format_name = None
ffmpeg_format_name = None
def parse_options(self, opt):
if 'format' not in opt or opt.get('format') != self.format_name:
raise ValueError('invalid Format format')
return ['-f', self.ffmpeg_format_name]
class OggFormat(BaseFormat):
"""
Ogg container format, mostly used with Vorbis and Theora.
"""
format_name = 'ogg'
ffmpeg_format_name = 'ogg'
class AviFormat(BaseFormat):
"""
    Avi container format, often used with DivX video.
"""
format_name = 'avi'
ffmpeg_format_name = 'avi'
class MkvFormat(BaseFormat):
"""
Matroska format, often used with H.264 video.
"""
format_name = 'mkv'
ffmpeg_format_name = 'matroska'
class WebmFormat(BaseFormat):
"""
WebM is Google's variant of Matroska containing only
VP8 for video and Vorbis for audio content.
"""
format_name = 'webm'
ffmpeg_format_name = 'webm'
class FlvFormat(BaseFormat):
"""
Flash Video container format.
"""
format_name = 'flv'
ffmpeg_format_name = 'flv'
class MovFormat(BaseFormat):
"""
Mov container format, used mostly with H.264 video
content, often for mobile platforms.
"""
format_name = 'mov'
ffmpeg_format_name = 'mov'
class Mp4Format(BaseFormat):
"""
Mp4 container format, the default Format for H.264
video content.
"""
format_name = 'mp4'
ffmpeg_format_name = 'mp4'
class MpegFormat(BaseFormat):
"""
MPEG(TS) container, used mainly for MPEG 1/2 video codecs.
"""
format_name = 'mpg'
ffmpeg_format_name = 'mpegts'
class Mp3Format(BaseFormat):
"""
    Mp3 container, used for audio-only mp3 files
"""
format_name = 'mp3'
ffmpeg_format_name = 'mp3'
class FlacFormat(BaseFormat):
"""
Flac container
hiseh
"""
format_name = 'flac'
ffmpeg_format_name = 'flac'
format_list = [
OggFormat, AviFormat, MkvFormat, WebmFormat, FlvFormat,
MovFormat, Mp4Format, MpegFormat, Mp3Format, FlacFormat
]
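# --- usage sketch (not part of the original module) ---
# A hedged example of how a format class turns an options dict into ffmpeg
# '-f' arguments; the options dict shape is inferred from parse_options() above.
if __name__ == '__main__':
    print(Mp4Format().parse_options({'format': 'mp4'}))   # -> ['-f', 'mp4']
    print(MkvFormat().parse_options({'format': 'mkv'}))   # -> ['-f', 'matroska']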
| mit | 4,400,064,395,914,517,500 | 19.635514 | 72 | 0.625906 | false |
JazzeYoung/VeryDeepAutoEncoder | theano/gpuarray/tests/test_basic_ops.py | 1 | 17095 | from __future__ import absolute_import, print_function, division
import unittest
from theano.compat import izip
from six import iteritems
import numpy
import theano
import theano.tensor as T
from theano.tensor import TensorType
from theano.tensor.basic import alloc
# Don't import test classes otherwise they get tested as part of the file
from theano.tensor.tests import test_basic
from theano.tensor.tests.test_basic import rand, safe_make_node
from theano.tests import unittest_tools as utt
from ..type import (GpuArrayType, get_context,
gpuarray_shared_constructor)
from ..basic_ops import (
host_from_gpu, HostFromGpu, GpuFromHost, GpuReshape, GpuToGpu,
GpuAlloc, GpuAllocEmpty, GpuContiguous,
gpu_join, GpuJoin, GpuSplit, GpuEye, gpu_contiguous)
from ..subtensor import GpuSubtensor
from .config import mode_with_gpu, mode_without_gpu, test_ctx_name
from pygpu import gpuarray
utt.seed_rng()
rng = numpy.random.RandomState(seed=utt.fetch_seed())
def inplace_func(inputs, outputs, mode=None, allow_input_downcast=False,
on_unused_input='raise', name=None):
if mode is None:
mode = mode_with_gpu
return theano.function(inputs, outputs, mode=mode,
allow_input_downcast=allow_input_downcast,
accept_inplace=True,
on_unused_input=on_unused_input, name=name)
def fake_shared(value, name=None, strict=False, allow_downcast=None, **kwargs):
from theano.tensor.sharedvar import tensor_constructor, scalar_constructor
for c in (gpuarray_shared_constructor, tensor_constructor,
scalar_constructor):
try:
return c(value, name=name, strict=strict,
allow_downcast=allow_downcast, **kwargs)
except TypeError:
continue
def rand_gpuarray(*shape, **kwargs):
r = rng.rand(*shape) * 2 - 1
dtype = kwargs.pop('dtype', theano.config.floatX)
cls = kwargs.pop('cls', None)
if len(kwargs) != 0:
        raise TypeError('Unexpected argument %s' % list(kwargs.keys())[0])
return gpuarray.array(r, dtype=dtype, cls=cls,
context=get_context(test_ctx_name))
def makeTester(name, op, gpu_op, cases, checks=None, mode_gpu=mode_with_gpu,
mode_nogpu=mode_without_gpu, skip=False, eps=1e-10):
if checks is None:
checks = {}
_op = op
_gpu_op = gpu_op
_cases = cases
_skip = skip
_checks = checks
class Checker(unittest.TestCase, utt.TestOptimizationMixin):
op = staticmethod(_op)
gpu_op = staticmethod(_gpu_op)
cases = _cases
skip = _skip
checks = _checks
def setUp(self):
eval(self.__class__.__module__ + '.' + self.__class__.__name__)
def test_all(self):
if skip:
from nose.plugins.skip import SkipTest
raise SkipTest(skip)
for testname, inputs in iteritems(cases):
self.run_case(testname, inputs)
def run_case(self, testname, inputs):
inputs_ref = [theano.shared(inp) for inp in inputs]
inputs_tst = [theano.shared(inp) for inp in inputs]
try:
node_ref = safe_make_node(self.op, *inputs_ref)
node_tst = safe_make_node(self.op, *inputs_tst)
except Exception as exc:
err_msg = ("Test %s::%s: Error occured while making "
"a node with inputs %s") % (self.gpu_op, testname,
inputs)
exc.args += (err_msg,)
raise
try:
f_ref = inplace_func([], node_ref.outputs, mode=mode_nogpu)
f_tst = inplace_func([], node_tst.outputs, mode=mode_gpu)
except Exception as exc:
err_msg = ("Test %s::%s: Error occured while trying to "
"make a Function") % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
self.assertFunctionContains1(f_tst, self.gpu_op)
ref_e = None
try:
expecteds = f_ref()
except Exception as exc:
ref_e = exc
try:
variables = f_tst()
except Exception as exc:
if ref_e is None:
err_msg = ("Test %s::%s: exception when calling the "
"Function") % (self.gpu_op, testname)
exc.args += (err_msg,)
raise
else:
# if we raised an exception of the same type we're good.
if isinstance(exc, type(ref_e)):
return
else:
err_msg = ("Test %s::%s: exception raised during test "
"call was not the same as the reference "
"call (got: %s, expected %s)" %
(self.gpu_op, testname, type(exc),
type(ref_e)))
exc.args += (err_msg,)
raise
for i, (variable, expected) in \
enumerate(izip(variables, expecteds)):
if variable.dtype != expected.dtype or \
variable.shape != expected.shape or \
not TensorType.values_eq_approx(variable,
expected):
self.fail(("Test %s::%s: Output %s gave the wrong "
"value. With inputs %s, expected %s "
"(dtype %s), got %s (dtype %s)." %
(self.op, testname, i, inputs, expected,
expected.dtype, variable, variable.dtype)))
for description, check in iteritems(self.checks):
if not check(inputs, variables):
self.fail(("Test %s::%s: Failed check: %s "
"(inputs were %s, ouputs were %s)") %
(self.op, testname, description,
inputs, variables))
Checker.__name__ = name
return Checker
def test_transfer_cpu_gpu():
a = T.fmatrix('a')
g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')
av = numpy.asarray(rng.rand(5, 4), dtype='float32')
gv = gpuarray.array(av, context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert numpy.all(fv == av)
def test_transfer_gpu_gpu():
g = GpuArrayType(dtype='float32', broadcastable=(False, False),
context_name=test_ctx_name)()
av = numpy.asarray(rng.rand(5, 4), dtype='float32')
gv = gpuarray.array(av, context=get_context(test_ctx_name))
mode = mode_with_gpu.excluding('cut_gpua_host_transfers', 'local_cut_gpua_host_gpua')
f = theano.function([g], GpuToGpu(test_ctx_name)(g), mode=mode)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, GpuToGpu)
fv = f(gv)
assert GpuArrayType.values_eq(fv, gv)
def test_transfer_strided():
# This is just to ensure that it works in theano
    # libgpuarray has a much more comprehensive suite of tests to
# ensure correctness
a = T.fmatrix('a')
g = GpuArrayType(dtype='float32', broadcastable=(False, False))('g')
av = numpy.asarray(rng.rand(5, 8), dtype='float32')
gv = gpuarray.array(av, context=get_context(test_ctx_name))
av = av[:, ::2]
gv = gv[:, ::2]
f = theano.function([a], GpuFromHost(test_ctx_name)(a))
fv = f(av)
assert GpuArrayType.values_eq(fv, gv)
f = theano.function([g], host_from_gpu(g))
fv = f(gv)
assert numpy.all(fv == av)
def gpu_alloc_expected(x, *shp):
g = gpuarray.empty(shp, dtype=x.dtype, context=get_context(test_ctx_name))
g[:] = x
return g
GpuAllocTester = makeTester(
name="GpuAllocTester",
op=alloc,
gpu_op=GpuAlloc(test_ctx_name),
cases=dict(
correct01=(rand(), numpy.int32(7)),
# just gives a DeepCopyOp with possibly wrong results on the CPU
# correct01_bcast=(rand(1), numpy.int32(7)),
correct02=(rand(), numpy.int32(4), numpy.int32(7)),
correct12=(rand(7), numpy.int32(4), numpy.int32(7)),
correct13=(rand(7), numpy.int32(2), numpy.int32(4),
numpy.int32(7)),
correct23=(rand(4, 7), numpy.int32(2), numpy.int32(4),
numpy.int32(7)),
bad_shape12=(rand(7), numpy.int32(7), numpy.int32(5)),
)
)
class TestAlloc(test_basic.TestAlloc):
dtype = "float32"
mode = mode_with_gpu
shared = staticmethod(gpuarray_shared_constructor)
allocs = [GpuAlloc(test_ctx_name), GpuAlloc(test_ctx_name), T.Alloc()]
def test_alloc_empty():
for dt in ['float32', 'int8']:
f = theano.function([], GpuAllocEmpty(dt, context_name=test_ctx_name)(2, 3))
assert len(f.maker.fgraph.apply_nodes) == 1
out = f()
assert out.shape == (2, 3)
assert out.dtype == dt
f = theano.function([], [GpuAllocEmpty('uint64', test_ctx_name)(3, 2),
GpuAllocEmpty('uint64', test_ctx_name)(3, 2)])
out = f()
assert out[0].shape == (3, 2)
assert out[0].dtype == 'uint64'
assert out[1].shape == (3, 2)
assert out[1].dtype == 'uint64'
assert len([node for node in f.maker.fgraph.apply_nodes
if isinstance(node.op, GpuAllocEmpty)]) == 1
def test_shape():
x = GpuArrayType(dtype='float32', broadcastable=[False, False, False])()
v = gpuarray.zeros((3, 4, 5), dtype='float32', context=get_context(test_ctx_name))
f = theano.function([x], x.shape)
topo = f.maker.fgraph.toposort()
assert numpy.all(f(v) == (3, 4, 5))
if theano.config.mode != 'FAST_COMPILE':
assert len(topo) == 4
assert isinstance(topo[0].op, T.opt.Shape_i)
assert isinstance(topo[1].op, T.opt.Shape_i)
assert isinstance(topo[2].op, T.opt.Shape_i)
assert isinstance(topo[3].op, T.opt.MakeVector)
mode = mode_with_gpu.excluding("local_shape_to_shape_i")
f = theano.function([x], x.shape, mode=mode)
topo = f.maker.fgraph.toposort()
assert numpy.all(f(v) == (3, 4, 5))
assert len(topo) == 1
assert isinstance(topo[0].op, T.Shape)
def test_gpu_contiguous():
a = T.fmatrix('a')
i = T.iscalar('i')
a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
# The reshape is needed otherwise we make the subtensor on the CPU
# to transfer less data.
f = theano.function([a, i], gpu_contiguous(a.reshape((5, 4))[::i]),
mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert any([isinstance(node.op, GpuSubtensor) for node in topo])
assert any([isinstance(node.op, GpuContiguous) for node in topo])
assert f(a_val, 1).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
assert f(a_val, 2).flags.c_contiguous
class G_reshape(test_basic.T_reshape):
def shortDescription(self):
return None
def __init__(self, name):
test_basic.T_reshape.__init__(
self, name,
shared=gpuarray_shared_constructor,
op=GpuReshape,
mode=mode_with_gpu,
ignore_topo=(HostFromGpu, GpuFromHost,
theano.compile.DeepCopyOp,
theano.gpuarray.elemwise.GpuElemwise,
theano.tensor.opt.Shape_i,
theano.tensor.opt.MakeVector))
assert self.op == GpuReshape
class G_comparison(test_basic.test_comparison):
def setUp(self):
utt.seed_rng()
self.mode = mode_with_gpu
self.shared = gpuarray_shared_constructor
self.dtypes = ['float64', 'float32']
class G_Join_and_Split(test_basic.T_Join_and_Split):
def setUp(self):
super(G_Join_and_Split, self).setUp()
self.mode = mode_with_gpu.excluding('constant_folding')
self.join_op = GpuJoin()
self.split_op_class = GpuSplit
# Use join instead of MakeVector since there is no MakeVector on GPU
self.make_vector_op = GpuJoin()
# this is to avoid errors with limited devices
self.floatX = 'float32'
self.hide_error = theano.config.mode not in ['DebugMode', 'DEBUG_MODE']
self.shared = gpuarray_shared_constructor
def test_gpusplit_opt(self):
rng = numpy.random.RandomState(seed=utt.fetch_seed())
m = self.shared(rng.rand(4, 6).astype(self.floatX))
o = T.Split(2)(m, 0, [2, 2])
f = theano.function([], o, mode=self.mode)
assert any([isinstance(node.op, self.split_op_class)
for node in f.maker.fgraph.toposort()])
o1, o2 = f()
assert numpy.allclose(o1, m.get_value(borrow=True)[:2])
assert numpy.allclose(o2, m.get_value(borrow=True)[2:])
def test_gpujoin_gpualloc():
a = T.fmatrix('a')
a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
b = T.fmatrix('b')
b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')
f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
mode=mode_without_gpu)
f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
mode=mode_with_gpu)
f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a),
T.ones_like(b)) + 4,
mode=mode_with_gpu)
assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
assert sum([isinstance(node.op, GpuAlloc)
for node in f_gpu.maker.fgraph.toposort()]) == 2
assert sum([node.op == gpu_join
for node in f_gpu.maker.fgraph.toposort()]) == 1
assert sum([isinstance(node.op, GpuAlloc)
for node in f_gpu2.maker.fgraph.toposort()]) == 2
assert sum([node.op == gpu_join
for node in f_gpu2.maker.fgraph.toposort()]) == 1
assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
def test_gpueye():
def check(dtype, N, M_=None):
# Theano does not accept None as a tensor.
# So we must use a real value.
M = M_
# Currently DebugMode does not support None as inputs even if this is
# allowed.
if M is None:
M = N
N_symb = T.iscalar()
M_symb = T.iscalar()
k_symb = numpy.asarray(0)
out = T.eye(N_symb, M_symb, k_symb, dtype=dtype)
f = theano.function([N_symb, M_symb],
out,
mode=mode_with_gpu)
result = numpy.asarray(f(N, M))
assert numpy.allclose(result, numpy.eye(N, M_, dtype=dtype))
assert result.dtype == numpy.dtype(dtype)
assert any([isinstance(node.op, GpuEye)
for node in f.maker.fgraph.toposort()])
for dtype in ['float32', 'int32', 'float16']:
yield check, dtype, 3
# M != N, k = 0
yield check, dtype, 3, 5
yield check, dtype, 5, 3
def test_hostfromgpu_shape_i():
"""
Test that the shape is lifted over hostfromgpu
"""
m = mode_with_gpu.including('local_dot_to_dot22',
'local_dot22_to_dot22scalar',
'specialize')
a = T.fmatrix('a')
ca = theano.gpuarray.type.GpuArrayType('float32', (False, False))()
av = numpy.asarray(numpy.random.rand(5, 4), dtype='float32')
cv = gpuarray.asarray(numpy.random.rand(5, 4),
dtype='float32',
context=get_context(test_ctx_name))
f = theano.function([a], GpuFromHost(test_ctx_name)(a), mode=m)
assert any(isinstance(x.op, GpuFromHost)
for x in f.maker.fgraph.toposort())
f = theano.function([a], GpuFromHost(test_ctx_name)(a).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, T.opt.Shape_i)
assert isinstance(topo[1].op, T.opt.Shape_i)
assert isinstance(topo[2].op, T.opt.MakeVector)
assert tuple(f(av)) == (5, 4)
f = theano.function([ca], host_from_gpu(ca), mode=m)
assert host_from_gpu in [x.op
for x in f.maker.fgraph.toposort()]
f = theano.function([ca], host_from_gpu(ca).shape, mode=m)
topo = f.maker.fgraph.toposort()
assert isinstance(topo[0].op, theano.compile.Shape_i)
assert isinstance(topo[1].op, theano.compile.Shape_i)
assert isinstance(topo[2].op, theano.tensor.opt.MakeVector)
assert tuple(f(cv)) == (5, 4)
| bsd-3-clause | -3,566,938,448,693,773,300 | 37.243848 | 89 | 0.563849 | false |
draklaw/lair | src/sys_sdl2/sys_sdl2.py | 1 | 2854 | #!/usr/bin/env python3
##
## Copyright (C) 2015 Simon Boyé
##
## This file is part of lair.
##
## lair is free software: you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## lair is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with lair. If not, see <http://www.gnu.org/licenses/>.
##
from sys import path, argv, stderr
from os import getcwd
path.append(getcwd())
from autopy import *
from os.path import join, dirname
from importlib.machinery import SourceFileLoader
core = SourceFileLoader('core', join(dirname(__file__), '..', 'core', 'core.py')).load_module()
window_class = (
AutoClass('Window', None)
.add_getset('width', auto_int)
.add_getset('height', auto_int)
.add_getset('title', auto_string.const(), 'utf8Title', 'setUtf8Title')
.add_method('isValid', auto_bool)
.add_method('isFullscreen', auto_bool)
.add_method('isVisible', auto_bool)
.add_method('resize', None, [ (auto_int, 'width'), (auto_int, 'height') ])
.add_method('setFullscreen', None, [ (auto_bool, 'fullscreen') ])
.add_method('setVisible', None, [ (auto_bool, 'visible') ])
.add_method('destroy')
.add_method('swapBuffers')
)
sys_module_class = (
AutoClass('SysModule', [ OPTIONAL_PARAM, (core.master_logger_class, 'MasterLogger', 'NULL') ])
.add_method('initialize', auto_bool)
.add_method('shutdown')
.add_method('isScreensaverEnabled', auto_bool)
.add_method('setScreensaverEnabled', None, [ (auto_bool, 'enable') ])
.add_method('isVSyncEnabled', auto_bool)
.add_method('setVSyncEnabled', None, [ (auto_bool, 'enable') ])
.add_method('createWindow', window_class, [
(auto_string, 'title'), (auto_int, 'width'), (auto_int, 'height') ])
.add_method('destroyAllWindows')
.add_method('waitAndDispatchSystemEvents')
.add_method('dispatchPendingSystemEvents')
.add_method('getKeyState', auto_int, [ (auto_int, 'scancode') ])
.add_method('getTimeNs', auto_int64)
.add_method('waitNs', None, [ (auto_int64, 'ns') ])
)
sys_module_module = (
AutoModule('sys_sdl2')
.add_include('../core/core_py.h')
.add_include('lair/sys_sdl2/sys_module.h')
.add_include('lair/sys_sdl2/window.h')
.add_use_namespace('lair')
.add_class(window_class)
.add_class(sys_module_class)
)
if __name__ == '__main__':
if len(argv) == 2:
base_file = argv[1]
	elif len(argv) > 2:
stderr.write("Usage: {} BASE\n".format(argv[0]))
exit(1)
sys_module_module.write_module(base_file)
| lgpl-3.0 | 5,791,961,998,432,265,000 | 33.792683 | 95 | 0.676481 | false |
muatik/dahi | dahi/document.py | 1 | 1446 | from bson import ObjectId
from dahi.statement import Statement
class InvalidDocument(Exception):
pass
class Document(object):
def __init__(self, docID=None, botSay=None, humanSay=None, onMatch=None):
super(Document, self).__init__()
self.botSay = botSay
self.humanSay = humanSay
self.id = docID
self.onMatch = onMatch
@staticmethod
def generate(data):
botSay = None
humanSay = None
if data.get("botSay", None):
botSay = Statement.generate(data["botSay"])
if data.get("humanSay", None):
humanSay = Statement.generate(data["humanSay"])
return Document(
docID=str(data["_id"]),
botSay=botSay,
humanSay=humanSay,
onMatch=data["onMatch"])
def __repr__(self):
return "Document <{}>".format(self.id)
def toJson(self):
return {
"_id": str(self.id),
"botSay": self.botSay.toJson() if self.botSay else None,
"humanSay": self.humanSay.toJson() if self.humanSay else None,
"onMatch": self.onMatch
}
def toDB(self):
return {
"_id": ObjectId(self.id), # FIXME: I don't like ObjectId() here
"botSay": self.botSay.toDB() if self.botSay else None,
"humanSay": self.humanSay.toDB() if self.humanSay else None,
"onMatch": self.onMatch
}
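# --- usage sketch (not part of the original module) ---
# A hedged example of building a bare Document (no statements) and serialising
# it with toJson(); the id and onMatch values below are made up for illustration.
if __name__ == '__main__':
    doc = Document(docID='5f0c2b9e9d1c4a0012345678', onMatch='reply')
    print(doc.toJson())
    # -> {'_id': '5f0c2b9e9d1c4a0012345678', 'botSay': None, 'humanSay': None, 'onMatch': 'reply'}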
| mit | 1,258,744,973,872,936,000 | 27.352941 | 77 | 0.564315 | false |
saeidadli/Python-ArcGIS-Convertor | arcgdfconvertor/convertor.py | 1 | 3491 | import os
import sys
import tempfile
from pathlib import Path
import arcpy
import pandas as pd
import numpy as np
import geopandas as gpd
#constants
#WGS_1984 coordinate system
WGS_1984 = \
"GEOGCS['GCS_WGS_1984',DATUM['D_WGS_1984', "+\
"SPHEROID['WGS_1984',6378137.0,298.257223563]], "+\
"PRIMEM['Greenwich',0.0],UNIT['Degree',0.0174532925199433]]; "+\
"-400 -400 1000000000;-100000 10000;-100000 10000; "+\
"8.98315284119522E-09;0.001;0.001;IsHighPrecision"
#functions
def gdb_path(in_fc):
"""
Returns the properties of a input gis data
"""
if arcpy.Exists(in_fc):
desc = arcpy.Describe(in_fc)
in_fc = desc.catalogPath
fc_name = desc.name
else:
fc_name = os.path.basename(in_fc)
dirname = os.path.dirname(in_fc)
workspace = arcpy.Describe(dirname).dataType
if workspace == 'FeatureDataset':
GDB = os.path.dirname(dirname)
elif workspace == 'Workspace':
GDB = dirname
elif workspace == 'Folder':
GDB = ''
else:
GDB = ''
return GDB, workspace, dirname, fc_name
def get_fields(in_fc, output_type = 'list'):
#Gets list of fileds from a feature class
fields = arcpy.ListFields(in_fc)
if output_type == 'list':
output = [f.name for f in fields]
elif output_type == 'dict':
output = {f.name: f.type for f in fields}
else:
output = ''
return output
#pandas convertor for ArcGIS
def gdf_to_fc(gdf, fc):
"""
    converts a geopandas dataframe to a layer in an ESRI file geodatabase.
Notes:
- gdf have to have geometry field.
"""
if 'geometry' not in gdf.columns.values:
sys.exit()
GDB, workspace, dirname, fc_name = gdb_path(fc)
# convert fc to a gpkg in a temporary directory
tmp_dir = tempfile.TemporaryDirectory()
p = Path(tmp_dir.name)
n = fc_name + '.shp'
gdf.to_file(str(p/n))
fc_cols = get_fields(str(p/n))[2:]
#copy the file into a feature class
fc = arcpy.CopyFeatures_management(str(p/n), fc)
gdf_cols = gdf.columns.tolist()
gdf_cols.remove('geometry')
#fixing the columns
if gdf_cols:
col_dict = {col: gdf_cols[indx] for indx, col in enumerate(fc_cols) }
for col in col_dict:
if col_dict[col] != col:
arcpy.AlterField_management(fc, col, col_dict[col], clear_field_alias="true")
# Delete temporary directory
tmp_dir.cleanup()
return fc
def gdf_to_tbl(gdf, tbl):
gdf_cols = gdf.columns.values.tolist()
if 'geometry' in gdf_cols:
gdf_cols.remove('geometry')
gdf = gdf[gdf_cols].copy()
x = np.array(np.rec.fromrecords(gdf.values))
names = gdf.dtypes.index.tolist()
names = [str(arcpy.ValidateTableName(name)) for name in names]
x.dtype.names = tuple(names)
arcpy.da.NumPyArrayToTable(x, tbl)
return tbl
def fc_to_gdf(fc):
#use scratch work space for temporary files
GDB, workspace, dirname, fc_name = gdb_path(fc)
if GDB != '':
gdf = gpd.read_file(GDB, layer = fc_name)
else:
desc = arcpy.Describe(fc)
fc_path = desc.catalogPath
gdf = gpd.read_file(fc_path)
return gdf
def tbl_to_gdf(tbl, fieldnames = None):
gdf = fc_to_gdf(tbl)
    if fieldnames is not None:
        fieldnames = [f for f in fieldnames if f in gdf.columns]
else:
fieldnames = get_fields(tbl)[1:]
return gdf[fieldnames].copy()
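# --- usage sketch (not part of the original module) ---
# A hedged round-trip example; the geodatabase paths below are hypothetical
# and an ArcGIS (arcpy) environment is assumed to be available.
if __name__ == '__main__':
    gdf = fc_to_gdf(r'C:\data\demo.gdb\parcels')
    print(gdf.head())
    gdf_to_fc(gdf, r'C:\data\demo.gdb\parcels_copy')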
| mit | 6,099,987,639,100,289,000 | 25.648855 | 93 | 0.613005 | false |
AravindK95/ee106b | project3/src/lab3/src/extra/grasp_ctrl.py | 1 | 5226 | #!/usr/bin/env python
import sys
import rospkg
import rospy
import tf
import numpy as np
from std_msgs.msg import String, Bool
from geometry_msgs.msg import Transform, Pose, Vector3, Quaternion, Point
from lab3.msg import FrameCall
PROJECT_PATH = rospkg.RosPack().get_path('lab3')
sys.path.append(PROJECT_PATH+'/src/lab3')
sys.path.append(PROJECT_PATH+'/src/extra')
SPRAY_BOTTLE_MESH_FILENAME = PROJECT_PATH+'/data/spray.obj'
import obj_file
import transformations
from lab3_starter import contacts_to_baxter_hand_pose
BASE = 'base'
OBJ_BASE = 'graspable_object'
def publish_frame_group(trans, rot, name, base, to_add):
tf_pub.publish(Transform(Vector3(trans[0], trans[1], trans[2]),
Quaternion(rot[0], rot[1], rot[2], rot[3])),
name,
base,
to_add)
    #One of these is the correct direction to offset the grasp pos by.
pre_trans = Vector3(trans[0] - 0.2, trans[1], trans[2])
pre_rot = Quaternion(rot[0], rot[1], rot[2], rot[3])
#One of these is the correct direction to lift it straight up. Probably z.
post_trans = Vector3(trans[0], trans[1], trans[2] + 0.3)
post_rot = Quaternion(rot[0], rot[1], rot[2], rot[3])
    #We want the post orientation to be the same as the initial orientation during the grasp
    #so we do not need to change the orientation of the end effector.
#Publish the pre and post trans
tf_pub.publish(Transform(pre_trans, pre_rot), 'pre'+name, base, to_add)
tf_pub.publish(Transform(post_trans, post_rot), 'post'+name, base, to_add)
def addframe(trans, rot, name, base):
publish_frame_group(trans, rot, name, base, True)
def rmframe(name):
# trans and rot values irrelevant
publish_frame_group((0,0,0), (0,0,0,0), name, 'blah', False)
def moveto(name):
(trans,rot) = tf_listener.lookupTransform(BASE, name, rospy.Time(0))
moveit_pub.publish(Pose(Point(trans[0], trans[1], trans[2]),
Quaternion(rot[0], rot[1], rot[2], rot[3])))
def setclaw(state):
claw_pub.publish(state)
def makepose(name, idx1, idx2):
trans,rot = contacts_to_baxter_hand_pose(vertices[idx1], vertices[idx2])
trans = (trans[0], trans[1], trans[2])
#rot = (rot[0], rot[1], rot[2], rot[3])
rot = (0, np.sqrt(2)/2, 0, np.sqrt(2)/2)
publish_frame_group(trans, rot, name, OBJ_BASE, True)
if __name__ == '__main__':
of = obj_file.ObjFile(SPRAY_BOTTLE_MESH_FILENAME)
mesh = of.read()
vertices = mesh.vertices
triangles = mesh.triangles
normals = mesh.normals
rospy.init_node('grasp_ctrl')
tf_pub = rospy.Publisher('lab3/tf', FrameCall, queue_size=3)
moveit_pub = rospy.Publisher('new_position', Pose, queue_size=3)
claw_pub = rospy.Publisher('gripper_control', Bool, queue_size=3)
tf_listener = tf.TransformListener()
while not rospy.is_shutdown():
# parse input
inval = raw_input("cmd >> ")
cmd = None
try:
inval = inval.split(' ')
cmd = inval[0]
except:
print 'Bad input!'
continue
if cmd == 'addframe':
# publish grasp frame
"""Example input:
$ cmd >> addframe (1,2,3) (4,5,6,7) child base
"""
trans = eval(inval[1]) # XYZ vector
rot = eval(inval[2]) # quaternion
name = inval[3]
base = inval[4]
addframe(trans, rot, name, base)
elif cmd == 'rmframe':
# stop publishing grasp frame
"""Example input:
$ cmd >> rmframe child
"""
name = inval[1]
rmframe(name)
elif cmd == 'moveto':
# command moveit
"""Example input:
$ cmd >> moveto child
"""
name = inval[1]
moveto(name)
elif cmd == 'setclaw':
# command the end effector
"""Example input:
$ cmd >> setclaw True
"""
claw_bool = eval(inval[1])
setclaw(claw_bool)
elif cmd == 'makepose':
# turn two force closure vertices into a tf frame
"""Example input:
$ cmd >> makepose name 2473 2035
"""
name = inval[1]
idx1 = int(inval[2])
idx2 = int(inval[3])
makepose(name, idx1, idx2)
elif cmd == 'test':
# runs repeated tests of a single grasp
"""Example input:
$ cmd >> test name
"""
name = inval[1]
while not rospy.is_shutdown():
if raw_input("Test again? [y/n] >> ") == 'n':
break
moveto('pre'+name)
rospy.sleep(2)
moveto(name)
rospy.sleep(2)
setclaw(True)
rospy.sleep(2)
moveto('post'+name)
rospy.sleep(4)
moveto(name)
rospy.sleep(2)
setclaw(False)
rospy.sleep(2)
moveto('pre'+name)
else:
print 'Bad command: '+inval[0]
| mit | 895,466,151,847,824,100 | 31.259259 | 91 | 0.539801 | false |
googleads/google-ads-python | google/ads/googleads/v7/services/services/mobile_device_constant_service/transports/grpc.py | 1 | 10591 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import mobile_device_constant
from google.ads.googleads.v7.services.types import (
mobile_device_constant_service,
)
from .base import MobileDeviceConstantServiceTransport, DEFAULT_CLIENT_INFO
class MobileDeviceConstantServiceGrpcTransport(
MobileDeviceConstantServiceTransport
):
"""gRPC backend transport for MobileDeviceConstantService.
Service to fetch mobile device constants.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_mobile_device_constant(
self,
) -> Callable[
[mobile_device_constant_service.GetMobileDeviceConstantRequest],
mobile_device_constant.MobileDeviceConstant,
]:
r"""Return a callable for the
get mobile device constant
method over gRPC.
Returns the requested mobile device constant in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetMobileDeviceConstantRequest],
~.MobileDeviceConstant]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_mobile_device_constant" not in self._stubs:
self._stubs[
"get_mobile_device_constant"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v7.services.MobileDeviceConstantService/GetMobileDeviceConstant",
request_serializer=mobile_device_constant_service.GetMobileDeviceConstantRequest.serialize,
response_deserializer=mobile_device_constant.MobileDeviceConstant.deserialize,
)
return self._stubs["get_mobile_device_constant"]
__all__ = ("MobileDeviceConstantServiceGrpcTransport",)
| apache-2.0 | 1,221,377,875,632,550,100 | 40.69685 | 107 | 0.608441 | false |
dcrosta/nymwit | nymwit/game/management/commands/advancegamestate.py | 1 | 2917 | # Copyright (c) 2011, Daniel Crosta
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from pytz import utc
from logging import getLogger
from datetime import datetime, timedelta
from django.core.management.base import NoArgsCommand
from game.models import Game
log = getLogger('job.advancegamestate')
class Command(NoArgsCommand):
args = ''
help = 'Advances game state from "playing" to "voting" to "finished" as necessary for active games'
def handle_noargs(self, **options):
games = Game.objects(state__in=('playing', 'voting'), next_ending__lte=datetime.now(utc))
for game in games:
if game.state == 'playing':
if game.num_players < 2:
game.update(set__state='invalid')
log.debug('advanced game %s from playing to invalid, only %d players', game.pk, game.num_players)
else:
new_next_ending = game.next_ending + timedelta(minutes=game.minutes_per_round)
game.update(set__state='voting', set__next_ending=new_next_ending)
log.debug('advanced game %s from playing to voting, next ending %s', game.pk, new_next_ending)
elif game.state == 'voting':
total_votes = sum(len(play.upvotes) for play in game.plays)
if total_votes == 0:
game.update(set__state='invalid')
log.debug('advanced game %s from voting to invalid, 0 votes', game.pk)
else:
game.update(set__state='finished')
log.debug('advanced game %s from voting to finished', game.pk)
| bsd-2-clause | -979,874,457,618,385,700 | 47.616667 | 117 | 0.685293 | false |
pmleveque/cross-site-navigation | authentification.py | 1 | 1642 | from cefbase import *
from google.appengine.api import users
# Determines whether the right user is trying to access the data.
# If the user has no rights at all, they have access to nothing.
# If the contributor is a diocese admin, they only have access to their own bars, their own menus and the public menus.
# If the contributor has been designated admin, they have access to all the bars of all contributors and to all menus. They can
# additionally decide to make a menu public or private.
# If the contributor is an admin of the app itself (an App Engine administrator), they can add diocese contributors and
# choose whether a contributor can have access to the admin option (access to all bars and all menus).
class Authentification():
@staticmethod
def check_authentification(must_admin=False):
if not users.get_current_user():
return False
else:
list_admin = Administrator.all().filter(
"user =",
users.get_current_user()
).fetch(1)
if len(list_admin) == 0:
if users.is_current_user_admin():
admin = Administrator(
user=users.get_current_user(),
admin=True
)
admin.put()
else:
return False
else:
admin = list_admin[0]
admin.super_admin = users.is_current_user_admin()
if must_admin and not admin.admin:
return False
return admin
| mit | 1,967,716,344,745,664,500 | 42.378378 | 128 | 0.588916 | false |
landlab/drivers | scripts/ecohydrology_flat_surface/run_driver.py | 1 | 4929 | """
Created on Wed Jul 20 2016
This tutorial is on:
landlab/tutorials/ecohydrology/cellular_automaton_vegetation_flat_surface.ipynb
Creating a (.py) version of the same.
@author: Sai Nudurupati & Erkan Istanbulluoglu
"""
import os
import time
import numpy as np
from landlab import RasterModelGrid, load_params
from ecohyd_functions_flat import (initialize, empty_arrays,
create_pet_lookup, save, plot)
grid1 = RasterModelGrid((100, 100), spacing=(5., 5.))
grid = RasterModelGrid((5, 4), spacing=(5., 5.))
# Create dictionary that holds the inputs
data = load_params('inputs_vegetation_ca.yaml')
(precip_dry, precip_wet, radiation, pet_tree, pet_shrub,
pet_grass, soil_moisture, vegetation, vegca) = initialize(data, grid, grid1)
n_years = 2000 # Approx number of years for model to run
# Calculate approximate number of storms per year
fraction_wet = (data['doy__end_of_monsoon'] -
data['doy__start_of_monsoon']) / 365.
fraction_dry = 1 - fraction_wet
no_of_storms_wet = 8760 * fraction_wet / (data['mean_interstorm_wet'] +
data['mean_storm_wet'])
no_of_storms_dry = 8760 * fraction_dry / (data['mean_interstorm_dry'] +
data['mean_storm_dry'])
n = int(n_years * (no_of_storms_wet + no_of_storms_dry))
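# Illustrative check of the storm-count arithmetic (the numbers below are
# assumed for illustration, not taken from the yaml inputs): with a monsoon
# spanning DOY 182 to DOY 273, fraction_wet = (273 - 182) / 365. ~= 0.25, so
# roughly a quarter of the simulated storms are drawn from the wet-season
# distribution.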
(precip, inter_storm_dt, storm_dt, time_elapsed, veg_type, daily_pet,
rad_factor, EP30, pet_threshold) = empty_arrays(n, grid, grid1)
create_pet_lookup(radiation, pet_tree, pet_shrub, pet_grass, daily_pet,
rad_factor, EP30, grid)
# Represent current time in years
current_time = 0 # Start from first day of Jan
# Keep track of run time for simulation - optional
wallclock_start = time.clock() # Recording time taken for simulation
# declaring few variables that will be used in the storm loop
time_check = 0. # Buffer to store current_time at previous storm
yrs = 0 # Keep track of number of years passed
water_stress = 0. # Buffer for Water Stress
Tg = 270 # Growing season in days
# Run storm Loop
for i in range(n):
# Update objects
# Calculate Day of Year (DOY)
julian = np.int(np.floor((current_time - np.floor(current_time)) * 365.))
# Generate seasonal storms
# Wet Season - Jul to Sep - NA Monsoon
if data['doy__start_of_monsoon'] <= julian <= data['doy__end_of_monsoon']:
precip_wet.update()
precip[i] = precip_wet.storm_depth
storm_dt[i] = precip_wet.storm_duration
inter_storm_dt[i] = precip_wet.interstorm_duration
else: # for Dry season
precip_dry.update()
precip[i] = precip_dry.storm_depth
storm_dt[i] = precip_dry.storm_duration
inter_storm_dt[i] = precip_dry.interstorm_duration
# Spatially distribute PET and its 30-day-mean (analogous to degree day)
grid.at_cell['surface__potential_evapotranspiration_rate'] = daily_pet[julian]
grid.at_cell['surface__potential_evapotranspiration_30day_mean'] = EP30[julian]
# Assign spatial rainfall data
grid.at_cell['rainfall__daily_depth'] = np.full(grid.number_of_cells, precip[i])
# Update soil moisture component
current_time = soil_moisture.update(current_time, Tr=storm_dt[i],
Tb=inter_storm_dt[i])
    # Decide whether it's growing season or not
if julian != 364:
if EP30[julian + 1, 0] > EP30[julian, 0]:
pet_threshold = 1
# 1 corresponds to ETThresholdup (begin growing season)
else:
pet_threshold = 0
# 0 corresponds to ETThresholddown (end growing season)
# Update vegetation component
vegetation.update(PETThreshold_switch=pet_threshold, Tb=inter_storm_dt[i],
Tr=storm_dt[i])
# Update yearly cumulative water stress data
water_stress += (grid.at_cell['vegetation__water_stress'] *
inter_storm_dt[i] / 24.)
# Record time (optional)
time_elapsed[i] = current_time
# Update spatial PFTs with Cellular Automata rules
if (current_time - time_check) >= 1.:
if yrs % 100 == 0:
print 'Elapsed time = ', yrs, ' years'
veg_type[yrs] = grid1.at_cell['vegetation__plant_functional_type']
WS_ = np.choose(veg_type[yrs], water_stress)
grid1.at_cell['vegetation__cumulative_water_stress'] = WS_ / Tg
vegca.update()
time_check = current_time
water_stress = 0
yrs += 1
veg_type[yrs] = grid1.at_cell['vegetation__plant_functional_type']
wallclock_stop = time.clock()
walltime = (wallclock_stop - wallclock_start) / 60. # in minutes
print 'Time_consumed = ', walltime, ' minutes'
# Saving
try:
os.mkdir('output')
except OSError:
pass
finally:
os.chdir('output')
save('veg', inter_storm_dt, storm_dt, precip, veg_type, yrs,
walltime, time_elapsed)
plot('veg', grid1, veg_type, yrs, yr_step=100)
| mit | -338,219,367,900,883,300 | 33.957447 | 84 | 0.646379 | false |
danielnyga/dnutils | src/dnutils/tools.py | 1 | 10478 | '''
Created on May 22, 2017
@author: nyga
'''
import re
def ifnone(if_, else_, transform=None):
'''Returns the condition ``if_`` iff it is not ``None``, or if a transformation is
specified, ``transform(if_)``. Returns ``else_`` if the condition is ``None``.
``transform`` can be any callable, which will be passed ``if_`` in case ``if_`` is not ``None``.'''
if if_ is None:
return else_
else:
if transform is not None: return transform(if_)
else: return if_
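# Usage sketch (not part of the original module):
#   ifnone(None, 0)                -> 0
#   ifnone('5', 0, transform=int)  -> 5
#   ifnone(7, 0, lambda x: x * 2)  -> 14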
def ifnot(if_, else_, transform=None):
'''Returns the condition ``if_`` iff it evaluates to ``True``, or if a transformation is
specified, ``transform(if_)``. Returns ``else_`` if the condition is ``False``.
``transform`` can be any callable, which will be passed ``if_`` in case ``if_`` is not ``False``.'''
if not bool(if_):
return else_
else:
if transform is not None: return transform(if_)
else: return if_
def ifstr(arg, transform):
'''
Returns ``transform(arg)`` if ``arg`` is a string, or returns ``arg``, otherwise
:param arg:
:param transform:
:return:
'''
return transform(arg) if type(arg) is str else arg
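# Usage sketches for the helpers above (illustrative only):
#
#     >>> ifnone(None, 0)
#     0
#     >>> ifnone('5', 0, transform=int)
#     5
#     >>> ifnot('', 'fallback')
#     'fallback'
#     >>> ifstr('42', int)
#     42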
def allnone(it):
'''Returns True iff all elements in the iterable ``it`` are ``None``, and ``False`` otherwise.'''
return not ([1 for e in it if e is not None])
def allnot(it):
'''Returns True iff all elements in the iterable ``it`` evaluate to ``False``, and ``False`` otherwise.'''
return not ([1 for e in it if bool(e) is True])
def idxif(it, idx, transform=None):
    '''Returns the element at the specified index of the iterable ``it``. If a ``transform`` is specified,
    the result of applying ``transform`` to that element is returned.
If the iterable is ``None``, or ``it`` does not have enough elements, ``None`` is returned.'''
try:
it[idx]
except (IndexError, TypeError):
return None
el = it[idx]
if transform is not None:
return transform(el)
else:
return el
def first(it, transform=None, else_=None):
'''
Returns the first element of the iterable ``it``, if it has any.
    Returns ``else_`` if ``it`` is ``None`` or ``it`` does not contain any elements. If a transformation is
specified, the result of the transformation applied to the first element is returned.
:param transform:
:param it:
:return:
'''
if it is None:
return else_
try:
el = next(iter(it))
if transform is not None:
return transform(el)
else:
return el
except StopIteration:
pass
return else_
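# Illustrative examples for idxif/first (assumed usage, verified against the
# implementations above):
#
#     >>> first([], else_=-1)
#     -1
#     >>> first([3, 4, 5], transform=lambda x: x * 2)
#     6
#     >>> idxif([1, 2, 3], 5) is None
#     True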
def last(it, transform=None):
'''
Same as :func:`dnutils.tools.first`, but returns the last element.
:param it:
:param transform:
:return:
'''
return idxif(it, -1, transform=transform)
sqbrpattern = re.compile(r'\[(-?\d+)\]')
class edict(dict):
'''
Enhanced ``dict`` with some convenience methods such as dict addition and
subtraction.
    Warning: The constructor using keyword arguments, i.e. ``dict(one=1, two=2, ...)``, does not work
    with ``edict`` dictionaries. Instead, ``edict``s support default values corresponding to the
    ``defaultdict`` class from the ``collections`` package.
:Example:
>>> s = edict({'a':{'b': 1}, 'c': [1,2,3]})
>>> r = edict({'x': 'z', 'c': 5})
>>> print s
{'a': {'b': 1}, 'c': [1, 2, 3]}
>>> print r
{'x': 'z', 'c': 5}
>>> print s + r
{'a': {'b': 1}, 'x': 'z', 'c': 5}
>>> print s - r
{'a': {'b': 1}}
>>> print r
{'x': 'z', 'c': 5}
'''
def __init__(self, d=None, default=None, recursive=False):
if d is None:
dict.__init__(self)
else:
dict.__init__(self, dict(d))
self._default = default
if recursive:
self._recurse()
def __iadd__(self, d):
self.update(d)
return self
def __isub__(self, d):
for k in d:
if k in self: del self[k]
return self
def __add__(self, d):
        return type(self)({k: v for items in (self.items(), d.items()) for k, v in items})
def __sub__(self, d):
return type(self)({k: v for k, v in self.items() if k not in d})
def __getitem__(self, key):
if self._default is not None and key not in self:
self[key] = self._default()
return self[key]
else:
return dict.__getitem__(self, key)
def _recurse(self):
for key, value in self.items():
if type(value) is list:
                # strings also implement __getitem__, so only recurse into actual dicts
                self[key] = [edict(v) if isinstance(v, dict) else v for v in value]
            elif isinstance(value, dict):
                self[key] = edict(value, default=self._default, recursive=True)
@staticmethod
def _todict(d, recursive=True):
d = dict(d)
if recursive:
for key, value in d.items():
if type(value) is edict:
d[key] = edict._todict(value, recursive=True)
return d
@staticmethod
def _parse_xpath(selector):
keys = map(str.strip, selector.split('/'))
for key in keys:
m = sqbrpattern.match(key)
if m is not None:
yield int(m.group(1))
else:
yield key
def xpath(self, selector, insert=None, force=False):
'''
Allows a 'pseudo-xpath' query to a nested set of dictionaries.
At the moment, only nested dict-selections separated by slashes (``/``) are supported.
        Allows convenient access to hierarchical dictionary structures without the need
        to check every key for existence.
        :param selector: a slash-separated list of dict keys
        :param insert:   value to set at the selector path if the path cannot be resolved
        :param force:    if ``True``, intermediate values that are not dicts may be overwritten when inserting
:return:
'''
keys = edict._parse_xpath(selector)
d = self
for key in keys:
if type(key) is int:
d = None if key >= len(d) else d[key]
else:
d = d.get(key)
if d is None:
if insert is None:
return None
return self.set_xpath(selector, insert, force=force)
return d
def set_xpath(self, selector, data, force=False):
'''
Creates the xpath structure represented by the selector string, if necessary, to
set the data to the end point.
:param selector:
:param data:
:return:
'''
keys = list(edict._parse_xpath(selector))
d = self
for key in keys[:-1]:
if type(key) is int:
raise ValueError('indexing in set_xpath() is not yet supported')
else:
d_ = d.get(key)
if d_ is None or not isinstance(d_, dict) and force:
d[key] = edict()
d = d[key]
d[keys[-1]] = data
return data
def pprint(self):
from pprint import pprint
pprint(self)
def project(self, *keys):
'''
Returns a copy of this edict that contains only the pairs whose key is in ``keys``.
:param keys:
:return:
'''
return edict({k: v for k, v in self.items() if k in keys})
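# Sketch of the xpath helpers above (illustrative usage only):
#
#     >>> d = edict({'a': {'b': {'c': 1}}})
#     >>> d.xpath('a/b/c')
#     1
#     >>> d.xpath('a/x/y', insert=2)
#     2
#     >>> d.xpath('a/x/y')
#     2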
class RStorage(edict, object):
'''
Recursive extension of web.util.Storage that applies the Storage constructor
recursively to all value elements that are dicts.
'''
    __slots__ = ['_utf8', '_default']
    def __init__(self, d=None, utf8=False):
        self._utf8 = utf8
        self._default = None  # required by edict.__getitem__
        if d is not None:
            for k, v in d.items(): self[k] = v
def __setattr__(self, key, value):
if key in self.__slots__:
self.__dict__[key] = value
else:
self[key] = value
def __setitem__(self, key, value):
if self._utf8 and isinstance(key, str): key = key.encode('utf8')
dict.__setitem__(self, key, rstorify(value, utf8=self._utf8))
def __getattr__(self, key):
if key in type(self).__slots__:
return self.__dict__[key]
else:
try:
return self[key]
except KeyError as k:
                raise AttributeError(k)
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
            raise AttributeError(k)
def __repr__(self):
return ('<%s ' % type(self).__name__) + dict.__repr__(self) + '>'
def rstorify(e, utf8=False):
    if type(e) is dict:
        return RStorage(d=e, utf8=utf8)
    elif type(e) in (list, tuple):
        return [rstorify(i, utf8=utf8) for i in e]
else: return e
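# Minimal sketch of attribute-style access through rstorify/RStorage
# (illustrative only):
#
#     >>> s = rstorify({'a': {'b': 1}})
#     >>> s.a.b
#     1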
def jsonify(item, ignore_errors=False):
'''
Recursively construct a json representation of the argument ``item``.
:param item:
:return:
'''
if hasattr(item, 'json'):
return item.json
elif hasattr(item, 'tojson'):
return item.tojson()
elif isinstance(item, dict):
return {str(k): jsonify(v, ignore_errors=ignore_errors) for k, v in item.items()}
elif type(item) in (list, tuple):
return [jsonify(e, ignore_errors=ignore_errors) for e in item]
elif isinstance(item, (int, float, bool, str, type(None))):
return item
else:
if not ignore_errors:
raise TypeError('object of type "%s" is not jsonifiable: %s' % (type(item), repr(item)))
else: return '%s (NOT JSONIFIABLE)' % str(item)
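# Example of jsonify's output (illustrative; tuples become lists and keys are
# coerced to strings):
#
#     >>> jsonify({'a': (1, 2), 3: None})
#     {'a': [1, 2], '3': None}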
class LinearScale(object):
'''
Implementation of a linear mapping from one interval of real
numbers [a,b] into another one [c,d] by linearly interpolating.
Example:
>>> scale = LinearScale((1, 2), (-2, 278))
>>> scale(1.5)
138.0
'''
def __init__(self, fromint, toint, strict=True):
self._from = fromint
self._to = toint
self._fromrange = fromint[1] - fromint[0]
self._torange = toint[1] - toint[0]
self.strict = strict
def _apply(self, value):
if self.strict and not self._from[0] <= value <= self._from[1]:
raise ValueError('value out of range [%s, %s], got %s' % (self._from[0], self._from[1], value))
v = float((value-self._from[0])) / self._fromrange
return v * self._torange + self._to[0]
def __call__(self, value):
return self._apply(value)
if __name__ == '__main__':
d = edict({1:2,2:3})
print(d.project(2))
| mit | -7,107,193,630,464,736,000 | 29.637427 | 113 | 0.550678 | false |
intellisense/django-loginas | loginas/tests/tests.py | 1 | 11032 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function, absolute_import
try:
from urllib.parse import urlsplit
except ImportError:
from urlparse import urlsplit
from datetime import timedelta
from django.conf import settings as django_settings
from django.test import Client
from django.test import TestCase
from django.contrib.auth.models import User, update_last_login
from django.contrib.auth.signals import user_logged_in
from django.test.utils import override_settings as override_settings_orig
from django.core.urlresolvers import reverse
from django.core.exceptions import ImproperlyConfigured
from django.contrib.messages.storage.cookie import CookieStorage
from django.utils.six import text_type
from django.utils import timezone
from loginas import settings as la_settings
try:
import imp
reload = imp.reload # @ReservedAssignment
except ImportError:
pass
class override_settings(override_settings_orig):
"""
Reload application settings module every time we redefine a setting
"""
def enable(self):
super(override_settings, self).enable()
from loginas import settings as loginas_settings
reload(loginas_settings)
def disable(self):
super(override_settings, self).disable()
from loginas import settings as loginas_settings
reload(loginas_settings)
def create_user(username='', password='', **kwargs):
user = User(username=username, **kwargs)
if password:
user.set_password(password)
user.save()
return user
def login_as_nonstaff(request, user):
return request.user.is_superuser or (request.user.is_staff and
not user.is_staff)
class ViewTest(TestCase):
"""Tests for user_login view"""
def setUp(self):
self.client = Client(enforce_csrf_checks=True)
self.client.get('/') # To get the CSRF token for next request
assert django_settings.CSRF_COOKIE_NAME in self.client.cookies
self.target_user = User.objects.create(username='target')
# setup listener
user_logged_in.connect(update_last_login)
def tearDown(self):
"""Disconnect the listeners"""
user_logged_in.disconnect(update_last_login)
def get_csrf_token_payload(self):
return {
'csrfmiddlewaretoken':
self.client.cookies[django_settings.CSRF_COOKIE_NAME].value
}
def get_target_url(self, target_user=None):
if target_user is None:
target_user = self.target_user
response = self.client.post(
reverse("loginas-user-login", kwargs={'user_id': target_user.id}),
data=self.get_csrf_token_payload()
)
self.assertEqual(response.status_code, 302)
return response
def assertCurrentUserIs(self, user):
id_ = text_type(user.id if user is not None else None).encode('utf-8')
r = self.client.post(
reverse("current_user"),
data=self.get_csrf_token_payload()
)
self.assertEqual(r.content, id_)
def assertLoginError(self, resp):
self.assertEqual(urlsplit(resp['Location'])[2], "/")
messages = CookieStorage(resp)._decode(resp.cookies['messages'].value)
self.assertIn(
(40, "You do not have permission to do that."),
[(m.level, m.message) for m in messages]
)
def assertLoginSuccess(self, resp, user):
self.assertEqual(urlsplit(resp['Location'])[2],
django_settings.LOGIN_REDIRECT_URL)
msg = la_settings.MESSAGE_LOGIN_SWITCH.format(username=user.username)
messages = CookieStorage(resp)._decode(resp.cookies['messages'].value)
self.assertIn(msg, "".join([m.message for m in messages]))
def assertRaisesExact(self, exception, func, *args, **kwargs):
try:
func(*args, **kwargs)
            self.fail("{0} not raised".format(exception))
except exception.__class__ as caught:
self.assertEqual(caught.args, exception.args)
def clear_session_cookie(self):
del self.client.cookies[django_settings.SESSION_COOKIE_NAME]
@override_settings(CAN_LOGIN_AS=login_as_nonstaff)
def test_custom_permissions(self):
user = create_user(u"üser", "pass", is_superuser=False, is_staff=False)
staff1 = create_user("stäff", "pass", is_superuser=False, is_staff=True)
staff2 = create_user("super", "pass", is_superuser=True, is_staff=True)
# Regular user can't login as anyone
self.assertTrue(self.client.login(username=u"üser", password="pass"))
self.assertLoginError(self.get_target_url())
self.assertCurrentUserIs(user)
self.clear_session_cookie()
# Non-superuser staff user can login as regular user
self.assertTrue(self.client.login(username="stäff", password="pass"))
response = self.get_target_url(user)
self.assertLoginSuccess(response, user)
self.assertCurrentUserIs(user)
self.clear_session_cookie()
# Non-superuser staff user cannot login as other staff
self.assertTrue(self.client.login(username="stäff", password="pass"))
self.assertLoginError(self.get_target_url(staff2))
self.assertCurrentUserIs(staff1)
self.clear_session_cookie()
# Superuser staff user can login as other staff
self.assertTrue(self.client.login(username="super", password="pass"))
response = self.get_target_url(staff1)
self.assertLoginSuccess(response, staff1)
self.assertCurrentUserIs(staff1)
@override_settings(CAN_LOGIN_AS='loginas.tests.login_as_shorter_username')
def test_custom_permissions_as_string(self):
ray = create_user("ray", "pass")
lonnie = create_user("lonnie", "pass")
# Ray cannot login as Lonnie
self.assertTrue(self.client.login(username="ray", password="pass"))
self.assertLoginError(self.get_target_url(lonnie))
self.assertCurrentUserIs(ray)
self.clear_session_cookie()
# Lonnie can login as Ray
self.assertTrue(self.client.login(username="lonnie", password="pass"))
response = self.get_target_url(ray)
self.assertLoginSuccess(response, ray)
self.assertCurrentUserIs(ray)
def test_custom_permissions_invalid_path(self):
def assertMessage(message):
self.assertRaisesExact(
ImproperlyConfigured(message),
self.get_target_url
)
with override_settings(CAN_LOGIN_AS='loginas.tests.invalid_func'):
assertMessage(
"Module loginas.tests does not define a invalid_func function.")
with override_settings(CAN_LOGIN_AS='loginas.tests.invalid_path.func'):
assertMessage("Error importing CAN_LOGIN_AS function: loginas.tests.invalid_path")
def test_as_superuser(self):
create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.get_target_url()
self.assertLoginSuccess(response, self.target_user)
self.assertCurrentUserIs(self.target_user)
def test_as_non_superuser(self):
user = create_user("me", "pass", is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
self.assertLoginError(self.get_target_url())
self.assertCurrentUserIs(user)
def test_as_anonymous_user(self):
self.assertLoginError(self.get_target_url())
self.assertCurrentUserIs(None)
def test_get_405_method_not_allowed(self):
url = reverse("loginas-user-login", kwargs={'user_id': '0'})
r = self.client.get(url)
self.assertEqual(r.status_code, 405)
def test_missing_csrf_token_403_forbidden(self):
url = reverse("loginas-user-login", kwargs={'user_id': '0'})
r = self.client.post(url)
self.assertEqual(r.status_code, 403)
@override_settings(LOGINAS_REDIRECT_URL="/another-redirect")
def test_loginas_redirect_url(self):
create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.client.post(
reverse("loginas-user-login", kwargs={'user_id': self.target_user.id}),
data=self.get_csrf_token_payload()
)
self.assertEqual(response.status_code, 302)
self.assertEqual(urlsplit(response['Location'])[2], "/another-redirect")
def test_restore_original_user(self):
# Create a super user and login as this
original_user = create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.get_target_url()
self.assertLoginSuccess(response, self.target_user)
url = reverse("loginas-user-login", kwargs={'user_id': self.target_user.id})
self.client.get(url)
self.assertCurrentUserIs(self.target_user)
# Restore
url = reverse("loginas-logout")
self.client.get(url)
self.assertCurrentUserIs(original_user)
@override_settings(LOGINAS_LOGOUT_REDIRECT_URL="/another-redirect")
    def test_loginas_logout_redirect_url(self):
create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.client.get(reverse("loginas-logout"))
self.assertEqual(response.status_code, 302)
self.assertEqual(urlsplit(response['Location'])[2], "/another-redirect")
def test_last_login_not_updated(self):
last_login = timezone.now() - timedelta(hours=1)
self.target_user.last_login = last_login
self.target_user.save()
create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.get_target_url()
self.assertLoginSuccess(response, self.target_user)
self.assertCurrentUserIs(self.target_user)
target_user = User.objects.get(id=self.target_user.id) # refresh from db
self.assertEqual(target_user.last_login, last_login)
@override_settings(LOGINAS_UPDATE_LAST_LOGIN=True)
def test_last_login_updated(self):
last_login = timezone.now() - timedelta(hours=1)
self.target_user.last_login = last_login
self.target_user.save()
create_user("me", "pass", is_superuser=True, is_staff=True)
self.assertTrue(self.client.login(username="me", password="pass"))
response = self.get_target_url()
self.assertLoginSuccess(response, self.target_user)
self.assertCurrentUserIs(self.target_user)
target_user = User.objects.get(id=self.target_user.id) # refresh from db
self.assertGreater(target_user.last_login, last_login)
| bsd-3-clause | 6,606,978,489,041,057,000 | 39.098182 | 94 | 0.659926 | false |