repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---
vivaxy/algorithms | python/problems/prime_number_of_set_bits_in_binary_representation.py | 1 | 1089 | """
https://leetcode.com/problems/prime-number-of-set-bits-in-binary-representation/
https://leetcode.com/submissions/detail/136523875/
"""
class Solution:
def countPrimeSetBits(self, L, R):
"""
:type L: int
:type R: int
:rtype: int
"""
def isPrime(n):
if n < 2:
return False
if n == 2:
return True
for i in range(2, n):
if n % i == 0:
return False
return True
result = 0
for i in range(L, R + 1):
if isPrime(bin(i).count('1')):
result += 1
return result
import unittest
class Test(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.countPrimeSetBits(6, 10), 4)
self.assertEqual(solution.countPrimeSetBits(10, 15), 5)
self.assertEqual(solution.countPrimeSetBits(567, 607), 21)
self.assertEqual(solution.countPrimeSetBits(842, 888), 23)
if __name__ == '__main__':
unittest.main()
| mit | 2,882,310,662,519,049,700 | 23.75 | 80 | 0.534435 | false |
mdboom/asv | asv/config.py | 1 | 2409 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import six
from . import util
# TODO: Some verification of the config values
class Config(object):
"""
Manages the configuration for a benchmark project.
"""
api_version = 1
def __init__(self):
self.project = "project"
self.project_url = "#"
self.repo = None
self.branches = [None]
self.pythons = ["{0[0]}.{0[1]}".format(sys.version_info)]
self.matrix = {}
self.exclude = []
self.include = []
self.env_dir = "env"
self.benchmark_dir = "benchmarks"
self.results_dir = "results"
self.html_dir = "html"
self.show_commit_url = "#"
self.hash_length = 8
self.environment_type = None
self.install_timeout = 120
self.dvcs = None
self.regressions_first_commits = {}
self.plugins = []
@classmethod
def load(cls, path=None):
"""
Load a configuration from a file. If no file is provided,
defaults to `asv.conf.json`.
"""
if not path:
path = "asv.conf.json"
if not os.path.isfile(path):
raise util.UserError("Config file {0} not found.".format(path))
d = util.load_json(path, cls.api_version)
try:
return cls.from_json(d)
except ValueError:
raise util.UserError(
"No repo specified in {0} config file.".format(path))
@classmethod
def from_json(cls, d):
conf = cls()
conf.__dict__.update(d)
if not getattr(conf, "repo", None):
raise util.UserError(
"No repo specified in config file.")
if not getattr(conf, "branches", [None]):
# If 'branches' attribute is present, at least some must
# be listed.
raise util.UserError(
"No branches specified in config file.")
return conf
@classmethod
def update(cls, path=None):
if not path:
path = "asv.conf.json"
if not os.path.isfile(path):
raise util.UserError("Config file {0} not found.".format(path))
util.update_json(cls, path, cls.api_version)
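# For reference, a minimal `asv.conf.json` accepted by Config.load() might look like
# the following (illustrative values only; the keys mirror the defaults set in
# Config.__init__ above, and "version" corresponds to Config.api_version):
#
# {
#     "version": 1,
#     "project": "my-project",
#     "project_url": "https://example.org/my-project",
#     "repo": "https://example.org/my-project.git",
#     "branches": ["master"],
#     "environment_type": "virtualenv",
#     "env_dir": "env",
#     "benchmark_dir": "benchmarks",
#     "results_dir": "results",
#     "html_dir": "html"
# }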
| bsd-3-clause | -3,466,219,258,463,106,000 | 27.011628 | 75 | 0.553757 | false |
fountainhead-gq/DjangoBlog | blogproject/urls.py | 1 | 1922 | """blogproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.conf.urls import include, url
from django.conf.urls.static import static
from rest_framework.routers import DefaultRouter
from blog.views import PostViewSet, AuthorViewSet, CategoryViewSet, TagViewSet
from django.conf.urls import (handler400, handler403, handler404, handler500)
from rest_framework_swagger.views import get_swagger_view
handler400 = 'blog.views.handler400'
handler403 = 'blog.views.handler403'
handler404 = 'blog.views.handler404'
handler500 = 'blog.views.handler500'
router = DefaultRouter()
router.register(r'posts', PostViewSet, base_name='posts')
# router.register(r'authors', AuthorViewSet, base_name='authors')
router.register(r'category', CategoryViewSet, base_name='category')
router.register(r'tags', TagViewSet)
schema_view = get_swagger_view(title='API')
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^photos/', include('photos.urls', namespace='photos')),
url(r'^', include('blog.urls')),
url(r'^api/', include(router.urls, namespace='api'), name='api'),
url(r'^docs/', schema_view),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| gpl-2.0 | -8,842,098,134,731,623,000 | 41.711111 | 85 | 0.73257 | false |
hasgeek/boxoffice | migrations/versions/253e7b76eb8e_modify_assignee.py | 1 | 2138 | """modify assignee.
Revision ID: 253e7b76eb8e
Revises: 1ea1e8070ac8
Create Date: 2016-04-11 20:15:52.864916
"""
# revision identifiers, used by Alembic.
revision = '253e7b76eb8e'
down_revision = '1ea1e8070ac8'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
op.add_column('assignee', sa.Column('current', sa.Boolean(), nullable=True))
op.create_check_constraint('assignee_current_check', 'assignee', "current != '0'")
op.add_column(
'assignee',
sa.Column(
'line_item_id',
sqlalchemy_utils.types.uuid.UUIDType(binary=False),
nullable=False,
),
)
op.drop_index('assignee_email_key', table_name='assignee')
op.create_unique_constraint(
'assignee_line_item_current_key', 'assignee', ['line_item_id', 'current']
)
op.drop_constraint('assignee_previous_id_fkey', 'assignee', type_='foreignkey')
op.create_foreign_key(
'assignee_line_item_id', 'assignee', 'line_item', ['line_item_id'], ['id']
)
op.drop_column('assignee', 'previous_id')
op.drop_constraint('line_item_assignee_id_fkey', 'line_item', type_='foreignkey')
op.drop_column('line_item', 'assignee_id')
def downgrade():
op.add_column(
'line_item',
sa.Column('assignee_id', sa.INTEGER(), autoincrement=False, nullable=True),
)
op.create_foreign_key(
'line_item_assignee_id_fkey', 'line_item', 'assignee', ['assignee_id'], ['id']
)
op.add_column(
'assignee',
sa.Column('previous_id', sa.INTEGER(), autoincrement=False, nullable=True),
)
op.drop_constraint('assignee_line_item_id', 'assignee', type_='foreignkey')
op.create_foreign_key(
'assignee_previous_id_fkey', 'assignee', 'assignee', ['previous_id'], ['id']
)
op.drop_constraint('assignee_line_item_current_key', 'assignee', type_='unique')
op.create_index('assignee_email_key', 'assignee', ['email'], unique=False)
op.drop_column('assignee', 'line_item_id')
op.drop_constraint('assignee_current_check', 'assignee')
op.drop_column('assignee', 'current')
| agpl-3.0 | -1,522,597,774,365,325,300 | 33.483871 | 86 | 0.641254 | false |
lykops/lykops | library/frontend/__init__.py | 1 | 13604 | import logging
from library.config.frontend import adminuser
from library.config.security import vault_header
from library.connecter.database.mongo import Op_Mongo
from library.connecter.database.redis_api import Op_Redis
from library.security.encryption.AES256.api import Using_AES256
from library.security.password import Manager_Password
from library.utils.time_conv import timestamp2datetime
from library.utils.type_conv import str2dict
class Base():
def __init__(self, mongoclient=None, redisclient=None):
'''
This is the C (controller) part of the MVC for user management.
'''
self.logger = logging.getLogger("lykops")
self.userinfo_mongocollect = 'user.login.info'
self.userinfo_rediskey = 'lykops:userinfo'
self.privacy_mongocollect = 'user.privacy'
if mongoclient is None :
self.mongoclient = Op_Mongo()
self.logger.warn('无法继承,需要初始化mongodb连接')
else :
self.mongoclient = mongoclient
if redisclient is None :
self.redisclient = Op_Redis()
self.logger.warn('无法继承,需要初始化redis连接')
else :
self.redisclient = redisclient
self.password_api = Manager_Password()
self.expiretime = 60 * 60 * 24
self.rediskey_prefix = 'lykops:'
def get_userinfo(self, force=False, username=None):
'''
Fetch the userinfo data.
'''
if force :
self.logger.warn('强制删除用户信息缓存')
self.redisclient.delete(self.userinfo_rediskey)
result = self.redisclient.get(self.userinfo_rediskey, fmt='obj')
if result[0] and (result[1] is not None or result[1]) :
userinfo = result[1]
else :
result = self.mongoclient.find(self.userinfo_mongocollect)
if result[0] :
userinfo = result[1]
set_dict = {
'name' : self.userinfo_rediskey,
'value' : userinfo,
'ex':self.expiretime
}
self.redisclient.set(set_dict, fmt='obj')
else :
userinfo = {}
if username is None :
return userinfo
else :
try :
for u_dict in userinfo :
if username == u_dict['username'] :
us = u_dict
else :
continue
except :
us = {}
try :
return us
except :
return {}
def verify_vaultpassword(self, username, vault_password):
'''
Verify whether the user's vault password is correct.
:param
username: the user name
vault_password: the vault password
'''
user_dict = self.get_userinfo(username=username)
if not user_dict :
content = '用户' + username + '不存在'
self.logger.error(content)
return (False, content)
try :
cipher_pwd = user_dict['vault_password']
except :
content = '从数据库中没有查询到用户' + username + '的vault密码'
self.logger.error(content)
return (False, content)
result = self.password_api.verify(vault_password, cipher_pwd)
if not result :
content = '用户' + username + '输入的vault密码与数据库中vault密码不匹配'
self.logger.error(content)
return (False, content)
else :
content = '用户' + username + '输入的vault密码与数据库中vault密码匹配成功'
# self.logger.info(content)
return (True, content)
def get_data(self, username, redis_key, mongo_collect, force=False, mongoshare=True):
'''
Fetch the user's data.
:param
username: the user name
redis_key: name of the Redis cache key
mongo_collect: name of the MongoDB collection
force: force a refresh
'''
if force:
self.logger.warn('强制删除指定缓存')
self.redisclient.delete(redis_key)
result = self.redisclient.get(redis_key, fmt='obj')
if not result[0] or (result[0] and not result[1]) :
if mongoshare :
result = self.mongoclient.find(mongo_collect, condition_dict={'username' : username})
else :
result = self.mongoclient.find(mongo_collect)
if result[0] :
data_dict = result[1]
self.write_cache(redis_key, data_dict)
else :
self.logger.error('从数据库中查询数据失败,原因:' + result[1])
return result
else :
data_dict = result[1]
try :
del data_dict['username']
except :
pass
return (True, data_dict)
def encryp_dict(self, username, vault_password, data, vault_list, isverify=True):
'''
Encrypt selected fields in the user's data dictionary.
'''
encryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('加密用户' + username + '的指定数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '输入的vault密码与数据库中vault密码不匹配')
if not vault_list :
vault_list = data.keys()
encryp_dict = {}
for key , value in data.items() :
if not value :
encryp_dict[key] = value
if key in vault_list :
result = encryp_api.encrypt(value)
if result[0] :
encryp_dict[key] = result[1]
else :
self.logger.error('加密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,原因:' + result[1])
return (False, '加密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,' + result[1])
else :
if value == 'False' :
value = False
if value == 'True' :
value = True
isdigit = True
if isinstance(value, str) :
for t in value :
if t not in '0123456789' :
isdigit = False
if isdigit :
try :
value = int(value)
except :
pass
encryp_dict[key] = value
# content = '加密用户' + username + '的指定数据成功'
# self.logger.info(content)
return (True, encryp_dict)
def decryp_dict(self, username, vault_password, data, vault_list, isverify=True):
'''
Decrypt selected fields in the user's data dictionary.
'''
encryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('解密用户' + username + '的指定数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '输入的vault密码与数据库中vault密码不匹配')
if not vault_list :
vault_list = data.keys()
decryp_dict = {}
for key , value in data.items() :
if not value :
decryp_dict[key] = value
if key in vault_list :
result = encryp_api.decrypt(value)
if result[0] :
decryp_dict[key] = result[1]
else :
self.logger.error('解密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,原因:' + result[1])
return (False, '解密用户' + username + '的指定数据时失败,键名' + key + '的值加密失败,' + result[1])
else :
if value == 'False' :
value = False
if value == 'True' :
value = True
decryp_dict[key] = value
# content = '解密用户' + username + '的指定数据成功'
# self.logger.info(content)
return (True, decryp_dict)
def encryp_string(self, username, vault_password, data, isverify=True):
'''
Encrypt the user's data.
'''
encryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('加密用户' + username + '数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '加密用户' + username + '数据时失败,输入的vault密码与数据库中vault密码不匹配')
result = encryp_api.encrypt(data)
if result[0] :
# content = '加密用户' + username + '数据成功'
# self.logger.info(content)
return (True, result[1])
else :
self.logger.error('加密用户' + username + '数据失败,原因:' + result[1])
return (False, '加密用户' + username + '数据失败,' + result[1])
def decryp_string(self, username, vault_password, data, isverify=True):
'''
Decrypt the user's data.
'''
decryp_api = Using_AES256(vault_password, vault_header)
if isverify :
vault_result = self.verify_vaultpassword(username, vault_password)
if not vault_result[0] :
self.logger.error('解密用户' + username + '数据时失败,原因:输入的vault密码与数据库中vault密码不匹配')
return (False, '解密用户' + username + '数据时失败,输入的vault密码与数据库中vault密码不匹配')
result = decryp_api.decrypt(data)
if result[0] :
# content = '解密用户' + username + '数据成功'
# self.logger.info(content)
return (True, result[1])
else :
self.logger.error('解密用户' + username + '数据失败,原因:' + result[1])
return (False, result[1])
def change_vltpwd_dict(self, username, old_pwd, new_pwd, vault_dict, vault_list, isverify=False):
'''
Change the vault password protecting the user's vault data (a dict).
'''
try :
del vault_dict['add_time']
except :
pass
if not vault_list :
vault_list = vault_dict.keys()
# Do not use encryp_dict and decryp_dict to change the password, otherwise the password cannot be changed
new_data = {}
for key, value in vault_dict.items() :
if key in vault_list :
result = self.decryp_string(username, old_pwd, value, isverify=isverify)
if not result[0] :
self.logger.error('更改用户' + username + '的vault密码时失败,解密数据时出错,原因:' + result[1])
return (False, '更改用户' + username + '的vault密码时失败,解密数据时出错,' + result[1])
new_value = result[1]
result = self.encryp_string(username, new_pwd, new_value, isverify=isverify)
if not result[0] :
self.logger.error('更改用户' + username + '的vault密码时失败,解密后再次加密数据时出错,原因:' + result[1])
return (False, '更改用户' + username + '的vault密码时失败,解密后再次加密数据时出错,' + result[1])
new_data[key] = result[1]
else :
new_data[key] = value
# content = '更改用户' + username + '的vault密码成功'
# self.logger.info(content)
return (True, new_data)
def write_cache(self, redis_key, data, expire=60 * 60, ftm='obj'):
try :
self.logger.warn('强制删除缓存')
self.redisclient.delete(redis_key)
except :
pass
set_dict = {
'name' : redis_key,
'value' : data,
'ex':expire
}
result = self.redisclient.set(set_dict, fmt=ftm)
if result[0] :
content = '写缓存成功'
# self.logger.info(content)
return (True, content)
else :
self.logger.info('写缓存失败,原因:' + result[1])
return (False, '写缓存失败,' + result[1])
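# Usage sketch (hypothetical names; assumes reachable MongoDB/Redis backends and a
# user document with a hashed vault_password already present in user.login.info):
#
#   base = Base()
#   info = base.get_userinfo(username='alice')
#   ok, encrypted = base.encryp_dict('alice', 'vault-pass', {'token': 's3cret'}, ['token'])
#   ok, decrypted = base.decryp_dict('alice', 'vault-pass', encrypted, ['token'])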
| apache-2.0 | 5,559,556,405,368,260,000 | 32.727019 | 105 | 0.494136 | false |
morpheby/ist303-miye | controllers/home_view.py | 1 | 1418 | """
home_view.py
ist303-miye
Copyright (C) 2017
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place, Suite 330, Boston, MA 02111-1307 USA
"""
import cgi
from pyramid.httpexceptions import HTTPFound
from pyramid.response import Response
from pyramid.renderers import render_to_response
from pyramid.view import view_config
from pyramid.events import subscriber
from support.events import GracefulShutdown
from .view_controller import ViewController
@view_config(route_name='home')
class HomeView(ViewController):
def __init__(self, request):
super(HomeView, self).__init__(request)
def __call__(self):
data = {}
return render_to_response('assets:views/home.pt', data,
request=self._request)
@subscriber(GracefulShutdown)
def shutdown(event):
pass
| gpl-3.0 | -4,921,004,708,755,088,000 | 31.976744 | 78 | 0.744006 | false |
udoyen/pythonlearning | reading_code/password_craacker_redone.py | 1 | 2533 | # Script Name : password_cracker.py
# Author : Craig Richards
# Created : 20 May 2013
# Last Modified :
# Version : 1.0
# Modifications :
# Description : Old school password cracker using python
import crypt # Import the module
def testPass(cryptPass): # Start the function
""" function to compare given password with stored password """
# (me): splice password from zero index to index position two
salt = cryptPass[0:2]
# Open the dictionary file, (me): check the dictionary file for key value
# pair
try: # check to make sure file does exist
dictFile = open('dictionary.txt', 'r')
except IOError:
print "File cannot be opened, it may be missing"
print "or the file name may be incorrect"
else:
for word in dictFile.readlines(): # Scan through the file
# (me): remove line breaks from content of file
word = word.strip('\n')
cryptWord = crypt.crypt(word, salt) # Check for password in the file
# (me): compares the newly encrypted password and the stored encrypted password
if (cryptWord == cryptPass):
print "[+] Found Password: " + word + "\n"
return
print "[-] Password Not Found.\n"
return
def main(): # (me):this reads a file line by line and splits each line at the ":" character point
""" This test function checks to make sure the password is in the
key:value pair format so the username and password can be properly separated
"""
try: # check to make sure file does exist
passFile = open('passwords.txt') # Open the password file
except IOError:
print "File cannot be opened, it may be missing"
print "or the file name may be incorrect"
else:
for line in passFile.readlines(): # Read through the file
if ":" in line:
user = line.split(':')[0] # assign any word found to user variable
# Prepare the user name etc, (me): assign the second value after
# the split point ":" to cryptPass
cryptPass = line.split(':')[1].strip(' ')
# (me): concatenate user to printed output
print "[*] Cracking Password For: " + user
testPass(cryptPass) # Call it to crack the users password
else:
print "Plain line of text printed: %sNo password found" % line
if __name__ == "__main__":  # main point of entry for the application
main()
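# Expected input files (hypothetical contents; the stored hash is a classic crypt()
# value whose first two characters are the salt, as used in testPass() above):
#
#   passwords.txt  -> one "user:cryptpass" entry per line, e.g.  victim:HX9LLTdc/jiDE
#   dictionary.txt -> one candidate password per line,     e.g.  egg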
| mit | -4,450,451,968,419,236,000 | 40.52459 | 98 | 0.606001 | false |
ciex/motor | application/forms.py | 1 | 1755 | """
forms.py
Web forms based on Flask-WTForms
See: http://flask.pocoo.org/docs/patterns/wtforms/
http://wtforms.simplecodes.com/
"""
import datetime
from flaskext import wtf
from flaskext.wtf import validators
from google.appengine.api import users
from models import Movement
class TimeDeltaField(wtf.IntegerField):
"""Excpects a number of days, returns a datetime.timedelta object"""
def __init__(self, label=None, validators=None, **kwargs):
super(TimeDeltaField, self).__init__(label, validators, **kwargs)
def process_data(self, value):
if value:
try:
return datetime.timedelta(days=value)
except ValueError:
self.data = None
raise ValueError("Not a valid time range")
class GoalForm(wtf.Form):
movement_id = wtf.HiddenField('Movement', validators=[validators.Required()])
cycle = wtf.HiddenField('Cycle', validators=[validators.Required()])
desc = wtf.TextField('Description', validators=[validators.Required()])
class MovementForm(wtf.Form):
name = wtf.TextField('Name', validators=[validators.Required()])
cycle_start = wtf.DateField('Start', validators=[validators.Required()], default=datetime.datetime.now())
cycle_duration = wtf.IntegerField('Cycle length', validators=[validators.Required()], default=7)
cycle_buffer = wtf.IntegerField('Cycle buffer', validators=[validators.Required()], default=2)
def validate_cycle_duration(form, field):
if field < 0:
raise validators.ValidationError("Cycle duration cannot be negative")
def validate_cycle_buffer(form, field):
if field < 0:
raise validators.ValidationError("Cycle buffer cannot be negative")
| apache-2.0 | -4,664,715,961,961,592,000 | 33.411765 | 109 | 0.68661 | false |
zarafagroupware/zarafa-zsm | deploy.py | 1 | 6980 | #!/usr/bin/python
# Copyright 2012 - 2013 Zarafa B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation with the following additional
# term according to sec. 7:
#
# According to sec. 7 of the GNU Affero General Public License, version
# 3, the terms of the AGPL are supplemented with the following terms:
#
# "Zarafa" is a registered trademark of Zarafa B.V. The licensing of
# the Program under the AGPL does not imply a trademark license.
# Therefore any rights, title and interest in our trademarks remain
# entirely with us.
#
# However, if you propagate an unmodified version of the Program you are
# allowed to use the term "Zarafa" to indicate that you distribute the
# Program. Furthermore you may use our trademarks where it is necessary
# to indicate the intended purpose of a product or service provided you
# use it in accordance with honest practices in industrial or commercial
# matters. If you want to propagate modified versions of the Program
# under the name "Zarafa" or "Zarafa Server", you may only do so if you
# have a written permission by Zarafa B.V. (to acquire a permission
# please contact Zarafa at [email protected]).
#
# The interactive user interface of the software displays an attribution
# notice containing the term "Zarafa" and/or the logo of Zarafa.
# Interactive user interfaces of unmodified and modified versions must
# display Appropriate Legal Notices according to sec. 5 of the GNU
# Affero General Public License, version 3, when you propagate
# unmodified or modified versions of the Program. In accordance with
# sec. 7 b) of the GNU Affero General Public License, version 3, these
# Appropriate Legal Notices must retain the logo of Zarafa or display
# the words "Initial Development by Zarafa" if the display of the logo
# is not reasonably feasible for technical reasons. The use of the logo
# of Zarafa in Legal Notices is allowed for unmodified and modified
# versions of the software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
EXTRA_PATHS = []
for p in EXTRA_PATHS:
sys.path.append(p)
from optparse import OptionParser
from os.path import join
import os
import shutil
from libzsm import text
from libzsm.system import stdio
from libzsm.system import users
def run_cmd(args):
args = ' '.join(args)
os.system(args)
class Deployer(object):
def __init__(self):
self.detect_apache_env()
self.ldapconf_boostrap_ldap = '/usr/share/zarafa-zsm-ldapconf/bootstrap-ldap.sh'
self.zsm_config_source = '/usr/share/zarafa-zsm-server/config'
self.etc_zarafa_root = '/etc/zarafa'
self.zarafa_server_cfg = join(self.etc_zarafa_root, 'server.cfg')
self.zarafa_ldap_cfg_tmpl = '/etc/zarafa/{0}.zsm.cfg'
## Apache
def detect_apache_env(self):
if os.path.exists('/etc/apache2'):
self.apache_user = 'www-data'
self.apache_initscript = '/etc/init.d/apache2'
self.apache_default_site_conf = '/etc/apache2/sites-enabled/000-default'
elif os.path.exists('/etc/httpd'):
self.apache_user = 'apache'
self.apache_initscript = '/etc/init.d/httpd'
self.apache_default_site_conf = '/etc/httpd/conf.d/welcome.conf'
def remove_default_apache_site(self):
if os.path.exists(self.apache_default_site_conf):
os.unlink(self.apache_default_site_conf)
def restart_apache(self):
run_cmd([self.apache_initscript, 'restart'])
## LDAP
def bootstrap_ldap(self):
run_cmd([self.ldapconf_boostrap_ldap])
## Zarafa
def switch_zarafa_server_plugin(self):
content = open(self.zarafa_server_cfg, 'rt').read()
plugin = text.safefind('(?m)^user_plugin\s*=\s*(.*)$', content)
if plugin not in ['ldap', 'ldapms']:
stdio.die('Invalid zarafa user plugin found: {0}'.format(plugin))
zarafa_ldap_cfg = self.zarafa_ldap_cfg_tmpl.format(plugin)
if not os.path.isfile(zarafa_ldap_cfg):
stdio.die('Ldap config not found: {0}'.format(zarafa_ldap_cfg))
context = {
'user_plugin_config': zarafa_ldap_cfg,
}
text.patch_config_file(context, self.zarafa_server_cfg,
dest=self.zarafa_server_cfg)
def fixup_zarafa_server_cfg(self):
context = {
'enable_distributed_zarafa': 'false',
'enable_hosted_zarafa': 'true',
'loginname_format': '%u@%c',
#'storename_format': '%u@%c',
'storename_format': '%f',
'user_plugin': 'ldap', # NOTE or ldapms?
}
text.patch_config_file(context, self.zarafa_server_cfg,
dest=self.zarafa_server_cfg)
def add_apache_user_to_zarafa_server_cfg(self):
us = ['root', self.apache_user]
context = {
'local_admin_users': ' '.join(us),
}
text.patch_config_file(context, self.zarafa_server_cfg,
dest=self.zarafa_server_cfg)
def restart_zarafa_server(self):
run_cmd(['/etc/init.d/zarafa-server', 'restart'])
## ZSM
def deploy_zsm_config_files(self):
for fn in ['ldap.zsm.cfg', 'ldapms.zsm.cfg']:
shutil.copy(join(self.zsm_config_source, fn),
join(self.etc_zarafa_root, fn))
def load_zsm_ldap_base(self):
run_cmd(['zsm-manage.py', 'load_ldap_base'])
run_cmd(['zsm-manage.py', 'sync_users'])
def load_zsm_ldap_fixtures(self):
run_cmd(['zsm-manage.py', 'load_demo_data'])
def deploy(self):
self.bootstrap_ldap()
self.deploy_zsm_config_files()
self.fixup_zarafa_server_cfg()
self.switch_zarafa_server_plugin()
self.add_apache_user_to_zarafa_server_cfg()
self.restart_zarafa_server()
self.remove_default_apache_site()
self.restart_apache()
self.load_zsm_ldap_base()
self.load_zsm_ldap_fixtures()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--single-server', dest='single_server', action='store_true',
help='Deploy LDAP, Zarafa and ZSM on a single server.')
options, args = parser.parse_args()
if not users.is_root():
stdio.writeln("Run me as root")
sys.exit(2)
if options.single_server:
deployer = Deployer()
deployer.deploy()
else:
parser.print_help()
sys.exit(1)
| agpl-3.0 | 99,829,961,581,949,360 | 33.726368 | 88 | 0.653438 | false |
lrem/chord.py | chord/peer.py | 1 | 9656 | #!/usr/bin/env python3
"""
Chord peer
==========
This module provides a peer of a Chord distributed hash table.
"""
import random
import time
import socket
import socketserver
import threading
import logging
logging.basicConfig(format='%(asctime)s %(levelname)s:%(message)s',
level=logging.DEBUG)
CHAIN = 3
CHORDS = 30
MAX_KEY = 2**CHORDS
CHORD_UPDATE_INTERVAL = 5
class Peer:
def __init__(self, port=4321, key=None):
if key is None:
self.key = random.randint(0, MAX_KEY)
else:
self.key = key
logging.info('Peer key: %x' % self.key)
self.chords = [None] * CHORDS
self.chain = [None]
self.storage = {}
self.port = port
def connect(self, url):
"""
Connects to the DHT using the given `url` (of any connected node).
"""
logging.info('Connecting to: ' + url)
old = self.find_re(self.key, connecting=url)
logging.debug(old)
self.chain = [old] + request(url, 'accept', self.key,
bytes(str(self.port), 'ascii'))
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if not inside(key, self.key, old[0]):
self.chords[i] = self.find_re(key, connecting=url)
def accept(self, key, url):
"""
Accepts a peer to the DHT by:
- putting him on the ring after itself
- reassigning to him part of own key space
"""
self.chain = [(key, url)] + self.chain
# TODO: transfer him the stored keys
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if self.chords[i] is None and\
not inside(key, self.key, self.chain[0][0]):
self.chords[i] = self.chain[0]
def start(self):
"""
Starts Peer's operation.
"""
Handler.peer = self
logging.info('Listening on port %d' % self.port)
server = Server(('0.0.0.0', self.port), Handler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
logging.debug('Server thread started')
while True:
time.sleep(CHORD_UPDATE_INTERVAL)
self._update_chords()
def find(self, key):
"""
Find a peer that is closer to the one responsible for the given `key`.
Returns `None` if it's the responsible itself, or a tuple `(key, url)`.
"""
if self.chain[0] is None or inside(key, self.key, self.chain[0][0]):
return None
for i in range(CHORDS - 1):
if self.chords[i] is None:
continue # I'm still responsible for this part
if inside(key, self.chords[i][0], self.chords[i+1][0]):
return self.chords[i]
if self.chords[-1] is None:
return self.chain[0] # Another funny corner case
else:
return self.chords[-1]
def find_re(self, key, connecting=None):
"""
Find the peer that is responsible for the given `key`.
Returns `None` if it's the responsible itself, or a tuple `(key, url)`.
"""
if connecting is not None:
closer = (None, connecting)
else:
closer = self.find(key)
if closer is None:
return None
while not isinstance(closer, Me):
closer = request(closer[1], 'find', key)
return closer
def get(self, key):
"""
Return the value for the `key`, wherever it is stored.
"""
responsible = self.find_re(key)
logging.debug('Peer %s responsible for key %x' %
(responsible, key))
if responsible is None:
return self.storage.get(key, None)
else:
return request(responsible[1], 'get', key)
def put(self, key, value):
"""
Store the `(key, value)` in the DHT.
"""
responsible = self.find_re(key)
logging.debug('Peer %s responsible for key %x' %
(responsible, key))
if responsible is None:
self.storage[key] = value
else:
request(responsible[1], 'put', key, value)
def _update_chords(self):
logging.info('Storing %d values' % len(self.storage))
logging.debug(self.chain)
if self.chain[0] is None:
return
logging.debug('Updating chords')
for i in range(CHORDS):
key = (self.key + 2**i) % MAX_KEY
if not inside(key, self.key, self.chain[0][0]):
self.chords[i] = self.find_re(key)
logging.debug("%d chords established" %
sum([1 for x in self.chords if x is not None]))
def inside(key, left, right):
"""
Find whether the key is in the interval `[left, right)`.
Note the keys are arranged on a ring, so it is possible that left > right.
"""
if left == right:
return False
if left < right:
return left <= key < right
else:
return left <= key or key < right
def request(url, operation, key, value=None):
logging.debug('Requesting from %s operation %s key %x value %s' %
(url, operation, key, value))
sock = _connect(url)
body = bytes("%s %x\n" % (operation, key), 'ascii')
if value:
body += bytes("%d\n" % len(value), 'ascii')
body += value
try:
sock.sendall(body)
inh = sock.makefile('rb')
response = inh.readline()
if response.startswith(b'value'):
logging.debug(response)
length = int(response.split()[1])
return inh.read(length)
elif response.startswith(b'none'):
raise KeyError("Key %x not in DHT" % key)
elif response.startswith(b'peer'):
logging.debug('Raw response %s' % response)
return _parse_peer(response)
elif response.startswith(b'me'):
key = int(response.split()[1], base=16)
return Me([key, url])
elif response.startswith(b'chain'):
chain = []
for line in inh:
chain.append(_parse_peer(line))
return chain
finally:
sock.close()
return response
class Handler(socketserver.StreamRequestHandler):
peer = None
def handle(self):
inh = self.rfile
operation, key = inh.readline().split()
key = int(key, base=16)
logging.info("Request: %s %x" % (operation, key))
response = b'unknown operation'
if operation == b'find':
peer = self.peer.find(key)
if peer is None:
response = bytes("me %x\n" % self.peer.key, 'ascii')
else:
response = _serialize_peer(peer)
elif operation == b'accept':
response = b"chain\n"
for peer in self.peer.chain:
response += _serialize_peer(peer)
port = int(_read_value(inh))
self.peer.accept(key, _make_url(self.request, port))
elif operation == b'get':
value = self.peer.get(key)
if value is None:
response = b'none'
else:
response = bytes("value %d\n" % len(value), 'ascii')
response += value
elif operation == b'put':
value = _read_value(inh)
logging.debug("Value: %s" % value)
self.peer.put(key, value)
response = b'ok'
elif operation == b'ping':
response = b'pong'
logging.debug("Response: %s\n" % response)
self.request.sendall(response)
def _read_value(inh):
length = int(inh.readline())
return inh.read(length)
class Server(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
class Address(tuple): # Hate I can't define my own __init__
pass
class Me(Address):
pass
def _parse_peer(line):
if line.startswith(b'peer'):
key, url = line.split()[1:]
return Address([int(key, base=16), url])
elif line.startswith(b'none'):
return None
else:
raise ValueError('Wrong response for peer %s' % line)
def _serialize_peer(peer):
if peer is None:
return b'none'
else:
return bytes("peer %x %s\n" % (peer[0], str(peer[1], 'ascii')),
'ascii')
def _make_url(socket, port=None):
#FIXME: this gives us the request socket, not the listening one
if port is None:
return bytes("%s:%d" % socket.getpeername(), 'ascii')
else:
return bytes("%s:%d" % (socket.getpeername()[0], port), 'ascii')
def _connect(url):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if isinstance(url, bytes):
url = str(url, 'ascii')
if ':' in str(url):
host, port = url.split(':')
port = int(port)
else:
host, port = url, 4321
sock.connect((host, port))
return sock
def main():
import argparse
argp = argparse.ArgumentParser(description=__doc__)
argp.add_argument('-key', help='hexadecimal key for this node')
argp.add_argument('-url', help='url of an existing DHT peer')
argp.add_argument('-port', help='listening TCP port',
type=int, default=4321)
args = argp.parse_args()
if args.key is not None:
args.key = int(args.key, 16)
peer = Peer(port=args.port, key=args.key)
if args.url:
peer.connect(args.url)
peer.start()
if __name__ == '__main__':
main()
| mit | 6,198,633,712,688,207,000 | 30.249191 | 79 | 0.545568 | false |
xolox/python-rotate-backups | rotate_backups/cli.py | 1 | 14149 | # rotate-backups: Simple command line interface for backup rotation.
#
# Author: Peter Odding <[email protected]>
# Last Change: May 17, 2020
# URL: https://github.com/xolox/python-rotate-backups
"""
Usage: rotate-backups [OPTIONS] [DIRECTORY, ..]
Easy rotation of backups based on the Python package by the same name.
To use this program you specify a rotation scheme via (a combination of) the
--hourly, --daily, --weekly, --monthly and/or --yearly options and the
directory (or directories) containing backups to rotate as one or more
positional arguments.
You can rotate backups on a remote system over SSH by prefixing a DIRECTORY
with an SSH alias and separating the two with a colon (similar to how rsync
accepts remote locations).
Instead of specifying directories and a rotation scheme on the command line you
can also add them to a configuration file. For more details refer to the online
documentation (see also the --config option).
Please use the --dry-run option to test the effect of the specified rotation
scheme before letting this program loose on your precious backups! If you don't
test the results using the dry run mode and this program eats more backups than
intended you have no right to complain ;-).
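For example (hypothetical paths and SSH alias; a dry run first, then a rotation that
keeps 7 daily, 4 weekly and 12 monthly backups, locally and on a remote host):

  rotate-backups --daily=7 --weekly=4 --monthly=12 --dry-run /mnt/backups/laptop
  rotate-backups --daily=7 --weekly=4 --monthly=12 /mnt/backups/laptop backup-host:/srv/backups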
Supported options:
-M, --minutely=COUNT
In a literal sense this option sets the number of "backups per minute" to
preserve during rotation. For most use cases that doesn't make a lot of
sense :-) but you can combine the --minutely and --relaxed options to
preserve more than one backup per hour. Refer to the usage of the -H,
--hourly option for details about COUNT.
-H, --hourly=COUNT
Set the number of hourly backups to preserve during rotation:
- If COUNT is a number it gives the number of hourly backups to preserve,
starting from the most recent hourly backup and counting back in time.
- Alternatively you can provide an expression that will be evaluated to get
a number (e.g. if COUNT is `7 * 2' the result would be 14).
- You can also pass `always' for COUNT, in this case all hourly backups are
preserved.
- By default no hourly backups are preserved.
-d, --daily=COUNT
Set the number of daily backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-w, --weekly=COUNT
Set the number of weekly backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-m, --monthly=COUNT
Set the number of monthly backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-y, --yearly=COUNT
Set the number of yearly backups to preserve during rotation. Refer to the
usage of the -H, --hourly option for details about COUNT.
-t, --timestamp-pattern=PATTERN
Customize the regular expression pattern that is used to match and extract
timestamps from filenames. PATTERN is expected to be a Python compatible
regular expression that must define the named capture groups 'year',
'month' and 'day' and may define 'hour', 'minute' and 'second'.
-I, --include=PATTERN
Only process backups that match the shell pattern given by PATTERN. This
argument can be repeated. Make sure to quote PATTERN so the shell doesn't
expand the pattern before it's received by rotate-backups.
-x, --exclude=PATTERN
Don't process backups that match the shell pattern given by PATTERN. This
argument can be repeated. Make sure to quote PATTERN so the shell doesn't
expand the pattern before it's received by rotate-backups.
-j, --parallel
Remove backups in parallel, one backup per mount point at a time. The idea
behind this approach is that parallel rotation is most useful when the
files to be removed are on different disks and so multiple devices can be
utilized at the same time.
Because mount points are per system the -j, --parallel option will also
parallelize over backups located on multiple remote systems.
-p, --prefer-recent
By default the first (oldest) backup in each time slot is preserved. If
you'd prefer to keep the most recent backup in each time slot instead then
this option is for you.
-r, --relaxed
By default the time window for each rotation scheme is enforced (this is
referred to as strict rotation) but the -r, --relaxed option can be used
to alter this behavior. The easiest way to explain the difference between
strict and relaxed rotation is using an example:
- When using strict rotation and the number of hourly backups to preserve
is three, only backups created in the relevant time window (the hour of
the most recent backup and the two hours leading up to that) will match
the hourly frequency.
- When using relaxed rotation the three most recent backups will all match
the hourly frequency (and thus be preserved), regardless of the
calculated time window.
If the explanation above is not clear enough, here's a simple way to decide
whether you want to customize this behavior or not:
- If your backups are created at regular intervals and you never miss an
interval then strict rotation (the default) is probably the best choice.
- If your backups are created at irregular intervals then you may want to
use the -r, --relaxed option in order to preserve more backups.
-i, --ionice=CLASS
Use the `ionice' program to set the I/O scheduling class and priority of
the `rm' invocations used to remove backups. CLASS is expected to be one of
the values `idle' (3), `best-effort' (2) or `realtime' (1). Refer to the
man page of the `ionice' program for details about these values. The
numeric values are required by the 'busybox' implementation of 'ionice'.
-c, --config=FILENAME
Load configuration from FILENAME. If this option isn't given the following
default locations are searched for configuration files:
- /etc/rotate-backups.ini and /etc/rotate-backups.d/*.ini
- ~/.rotate-backups.ini and ~/.rotate-backups.d/*.ini
- ~/.config/rotate-backups.ini and ~/.config/rotate-backups.d/*.ini
Any available configuration files are loaded in the order given above, so
that sections in user-specific configuration files override sections by the
same name in system-wide configuration files. For more details refer to the
online documentation.
-C, --removal-command=CMD
Change the command used to remove backups. The value of CMD defaults to
``rm -fR``. This choice was made because it works regardless of whether
"backups to be rotated" are files or directories or a mixture of both.
As an example of why you might want to change this, CephFS snapshots are
represented as regular directory trees that can be deleted at once with a
single 'rmdir' command (even though according to POSIX semantics this
command should refuse to remove nonempty directories, but I digress).
-u, --use-sudo
Enable the use of `sudo' to rotate backups in directories that are not
readable and/or writable for the current user (or the user logged in to a
remote system over SSH).
-S, --syslog=CHOICE
Explicitly enable or disable system logging instead of letting the program
figure out what to do. The values '1', 'yes', 'true' and 'on' enable system
logging whereas the values '0', 'no', 'false' and 'off' disable it.
-f, --force
If a sanity check fails an error is reported and the program aborts. You
can use --force to continue with backup rotation instead. Sanity checks
are done to ensure that the given DIRECTORY exists, is readable and is
writable. If the --removal-command option is given then the last sanity
check (that the given location is writable) is skipped (because custom
removal commands imply custom semantics).
-n, --dry-run
Don't make any changes, just print what would be done. This makes it easy
to evaluate the impact of a rotation scheme without losing any backups.
-v, --verbose
Increase logging verbosity (can be repeated).
-q, --quiet
Decrease logging verbosity (can be repeated).
-h, --help
Show this message and exit.
"""
# Standard library modules.
import getopt
import shlex
import sys
# External dependencies.
import coloredlogs
from coloredlogs.syslog import enable_system_logging
from executor import validate_ionice_class
from humanfriendly import coerce_boolean, parse_path
from humanfriendly.compat import on_windows
from humanfriendly.terminal import usage
from humanfriendly.text import pluralize
from verboselogs import VerboseLogger
# Modules included in our package.
from rotate_backups import (
RotateBackups,
coerce_location,
coerce_retention_period,
load_config_file,
)
# Initialize a logger.
logger = VerboseLogger(__name__)
def main():
"""Command line interface for the ``rotate-backups`` program."""
coloredlogs.install()
# Command line option defaults.
rotation_scheme = {}
kw = dict(include_list=[], exclude_list=[])
parallel = False
use_sudo = False
use_syslog = (not on_windows())
# Internal state.
selected_locations = []
# Parse the command line arguments.
try:
options, arguments = getopt.getopt(sys.argv[1:], 'M:H:d:w:m:y:t:I:x:jpri:c:C:uS:fnvqh', [
'minutely=', 'hourly=', 'daily=', 'weekly=', 'monthly=', 'yearly=',
'timestamp-pattern=', 'include=', 'exclude=', 'parallel',
'prefer-recent', 'relaxed', 'ionice=', 'config=',
'removal-command=', 'use-sudo', 'syslog=', 'force',
'dry-run', 'verbose', 'quiet', 'help',
])
for option, value in options:
if option in ('-M', '--minutely'):
rotation_scheme['minutely'] = coerce_retention_period(value)
elif option in ('-H', '--hourly'):
rotation_scheme['hourly'] = coerce_retention_period(value)
elif option in ('-d', '--daily'):
rotation_scheme['daily'] = coerce_retention_period(value)
elif option in ('-w', '--weekly'):
rotation_scheme['weekly'] = coerce_retention_period(value)
elif option in ('-m', '--monthly'):
rotation_scheme['monthly'] = coerce_retention_period(value)
elif option in ('-y', '--yearly'):
rotation_scheme['yearly'] = coerce_retention_period(value)
elif option in ('-t', '--timestamp-pattern'):
kw['timestamp_pattern'] = value
elif option in ('-I', '--include'):
kw['include_list'].append(value)
elif option in ('-x', '--exclude'):
kw['exclude_list'].append(value)
elif option in ('-j', '--parallel'):
parallel = True
elif option in ('-p', '--prefer-recent'):
kw['prefer_recent'] = True
elif option in ('-r', '--relaxed'):
kw['strict'] = False
elif option in ('-i', '--ionice'):
value = validate_ionice_class(value.lower().strip())
kw['io_scheduling_class'] = value
elif option in ('-c', '--config'):
kw['config_file'] = parse_path(value)
elif option in ('-C', '--removal-command'):
removal_command = shlex.split(value)
logger.info("Using custom removal command: %s", removal_command)
kw['removal_command'] = removal_command
elif option in ('-u', '--use-sudo'):
use_sudo = True
elif option in ('-S', '--syslog'):
use_syslog = coerce_boolean(value)
elif option in ('-f', '--force'):
kw['force'] = True
elif option in ('-n', '--dry-run'):
logger.info("Performing a dry run (because of %s option) ..", option)
kw['dry_run'] = True
elif option in ('-v', '--verbose'):
coloredlogs.increase_verbosity()
elif option in ('-q', '--quiet'):
coloredlogs.decrease_verbosity()
elif option in ('-h', '--help'):
usage(__doc__)
return
else:
assert False, "Unhandled option! (programming error)"
if use_syslog:
enable_system_logging()
if rotation_scheme:
logger.verbose("Rotation scheme defined on command line: %s", rotation_scheme)
if arguments:
# Rotation of the locations given on the command line.
location_source = 'command line arguments'
selected_locations.extend(coerce_location(value, sudo=use_sudo) for value in arguments)
else:
# Rotation of all configured locations.
location_source = 'configuration file'
selected_locations.extend(
location for location, rotation_scheme, options
in load_config_file(configuration_file=kw.get('config_file'), expand=True)
)
# Inform the user which location(s) will be rotated.
if selected_locations:
logger.verbose("Selected %s based on %s:",
pluralize(len(selected_locations), "location"),
location_source)
for number, location in enumerate(selected_locations, start=1):
logger.verbose(" %i. %s", number, location)
else:
# Show the usage message when no directories are given nor configured.
logger.verbose("No location(s) to rotate selected.")
usage(__doc__)
return
except Exception as e:
logger.error("%s", e)
sys.exit(1)
# Rotate the backups in the selected directories.
program = RotateBackups(rotation_scheme, **kw)
if parallel:
program.rotate_concurrent(*selected_locations)
else:
for location in selected_locations:
program.rotate_backups(location)
| mit | 3,153,974,335,761,179,600 | 40.737463 | 99 | 0.661319 | false |
podhmo/toybox | examples/jwt_server.py | 1 | 1141 | import logging
from toybox.simpleapi import simple_view, run
from pyramid.security import Authenticated
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.security import Allow
# please: pip install pyramid_jwt
"""
python ./jwt_server.py
# 403
$ http GET :8080/secure
# 200 OK
$ http GET :8080/login | tee /tmp/response.json
$ http GET :8080/secure X-Token:`cat /tmp/response.json | jq -r .token`
"""
logger = logging.getLogger(__name__)
@simple_view("/login")
def login_view(request):
return {"token": request.create_jwt_token(1)}
@simple_view("/secure", permission="read")
def secure_view(request):
return "OK"
class Root:
__acl__ = [
(Allow, Authenticated, ('read',)),
]
def __init__(self, request):
self.request = request
def includeme(config):
config.set_authorization_policy(ACLAuthorizationPolicy())
config.include('pyramid_jwt')
config.set_root_factory(Root)
config.set_jwt_authentication_policy('secret', http_header='X-Token')
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
run.include(includeme)
run(port=8080)
| mit | -1,611,208,174,127,569,200 | 21.372549 | 73 | 0.689746 | false |
yarhajile/sven-daemon | Sven/Module/BeagleboneBlack/Adafruit_BBIO-0.0.19/test/test_gpio_output.py | 1 | 1308 | import pytest
import os
import Adafruit_BBIO.GPIO as GPIO
def teardown_module(module):
GPIO.cleanup()
class TestGPIOOutput:
def test_output_high(self):
GPIO.setup("P8_10", GPIO.OUT)
GPIO.output("P8_10", GPIO.HIGH)
value = open('/sys/class/gpio/gpio68/value').read()
assert int(value)
GPIO.cleanup()
def test_output_low(self):
GPIO.setup("P8_10", GPIO.OUT)
GPIO.output("P8_10", GPIO.LOW)
value = open('/sys/class/gpio/gpio68/value').read()
assert not int(value)
GPIO.cleanup()
def test_direction_readback(self):
GPIO.setup("P8_10", GPIO.OUT)
direction = GPIO.gpio_function("P8_10")
assert direction == GPIO.OUT
def test_output_greater_than_one(self):
GPIO.setup("P8_10", GPIO.OUT)
GPIO.output("P8_10", 2)
value = open('/sys/class/gpio/gpio68/value').read()
assert int(value)
GPIO.cleanup()
def test_output_of_pin_not_setup(self):
with pytest.raises(RuntimeError):
GPIO.output("P8_11", GPIO.LOW)
GPIO.cleanup()
def test_output_setup_as_input(self):
GPIO.setup("P8_10", GPIO.IN)
with pytest.raises(RuntimeError):
GPIO.output("P8_10", GPIO.LOW)
GPIO.cleanup()
| gpl-2.0 | 4,022,292,955,603,422,700 | 28.727273 | 59 | 0.591743 | false |
bobbydurrett/PythonDBAGraphs | onewait.py | 1 | 2938 | """
PythonDBAGraphs: Graphs to help with Oracle Database Tuning
Copyright (C) 2016 Robert Taft Durrett (Bobby Durrett)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact:
[email protected]
onewait.py
Graph of one wait event
"""
import myplot
import util
def onewait(wait_event,minimum_waits,start_time,end_time,instance_number):
q_string = """
select
sn.END_INTERVAL_TIME,
(after.total_waits-before.total_waits) NUMBER_OF_WAITS,
(after.time_waited_micro-before.time_waited_micro)/(after.total_waits-before.total_waits) AVG_MICROSECONDS
from DBA_HIST_SYSTEM_EVENT before, DBA_HIST_SYSTEM_EVENT after,DBA_HIST_SNAPSHOT sn
where before.event_name='"""
q_string += wait_event
q_string += """' and
END_INTERVAL_TIME
between
to_date('"""
q_string += start_time
q_string += """','DD-MON-YYYY HH24:MI:SS')
and
to_date('"""
q_string += end_time
q_string += """','DD-MON-YYYY HH24:MI:SS')
and
after.event_name=before.event_name and
after.snap_id=before.snap_id+1 and
after.instance_number = """
q_string += instance_number
q_string += """ and
before.instance_number=after.instance_number and
after.snap_id=sn.snap_id and
after.instance_number=sn.instance_number and
(after.total_waits-before.total_waits) > """
q_string += str(minimum_waits)
q_string += """
order by after.snap_id
"""
return q_string
database,dbconnection = util.script_startup('One wait event')
# Get user input
wait_event=util.input_with_default('wait event','db file sequential read')
min_waits=int(util.input_with_default('minimum number of waits per hour','0'))
start_time=util.input_with_default('Start date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-1900 12:00:00')
end_time=util.input_with_default('End date and time (DD-MON-YYYY HH24:MI:SS)','01-JAN-2200 12:00:00')
instance_number=util.input_with_default('Database Instance (1 if not RAC)','1')
# Build and run query
q = onewait(wait_event,min_waits,start_time,end_time,instance_number);
r = dbconnection.run_return_flipped_results(q)
util.exit_no_results(r)
# plot query
myplot.title = "'"+wait_event+"' waits on "+database+" database, instance "+instance_number+", minimum waits="+str(min_waits)
myplot.ylabel1 = "Number of events"
myplot.ylabel2 = "Averaged Elapsed Microseconds"
myplot.xdatetimes = r[0]
myplot.ylists = r[1:]
myplot.line_2subplots() | gpl-3.0 | -2,918,252,863,514,742,300 | 29.298969 | 125 | 0.730088 | false |
AlexandreZani/phero | test_phero.py | 1 | 5815 | #!/usr/bin/env python
# Copyright 2012 (Alexandre Zani)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import phero
import unittest
class TestRequestProcessor(unittest.TestCase):
def setUp(self):
self.logic_registry = phero.ServiceRegistry()
self.auth_registry = phero.ServiceRegistry()
self.registries = [
('auth', self.auth_registry),
('main', self.logic_registry),
]
def simple_auth(ctx, username):
return username
self.auth_registry.register(simple_auth)
def whoami(ctx, all_caps=False):
if all_caps:
return ctx['auth'].upper()
return ctx['auth']
self.logic_registry.register(whoami)
def test_basic(self):
request = {
'main': {
'service': 'whoami',
'args': { 'all_caps': True }
},
'auth': {
'service': 'simple_auth',
'args': { 'username': 'alex' }
}
}
expected = { 'result': 'ALEX' }
actual = phero.process_request(self.registries, request)
self.assertEquals(expected, actual)
def test_no_args(self):
def whoami_simple(ctx):
return ctx['auth']
self.logic_registry.register(whoami_simple)
request = {
'main': {
'service': 'whoami_simple',
},
'auth': {
'service': 'simple_auth',
'args': { 'username': 'alex' }
}
}
expected = { 'result': 'alex' }
actual = phero.process_request(self.registries, request)
self.assertEquals(expected, actual)
def test_phero_error(self):
request = {
'main': {
'service': 'does_not_exist'
}
}
expected = {
'error': 'UnknownService',
'details': { 'service': 'does_not_exist' }
}
actual = phero.process_request(self.registries, request)
self.assertEquals(expected, actual)
def test_custom_error(self):
class AuthError(phero.PheroError): pass
def bad_auth(ctx):
raise AuthError(msg='I hate you')
self.auth_registry.register(bad_auth)
request = {
'auth': {
'service': 'bad_auth'
}
}
expected = {
'error': 'AuthError',
'details': { 'msg': 'I hate you' }
}
actual = phero.process_request(self.registries, request)
self.assertEquals(expected, actual)
def test_generic_error(self):
def bad_method(ctx):
raise KeyError
self.logic_registry.register(bad_method)
request = {
'main': {
'service': 'bad_method'
}
}
with self.assertRaises(KeyError):
phero.process_request(self.registries, request)
expected = {
'error': 'GenericInternalError'
}
actual = phero.process_request(self.registries, request, catch_all=True)
self.assertEquals(expected, actual)
class TestServiceRegistry(unittest.TestCase):
def test_basic(self):
def multiply(ctx, a, b):
return a * b
registry = phero.ServiceRegistry()
registry.register(multiply)
args = { 'a': 3, 'b': 4 }
service_name = 'multiply'
ctx = {}
expected = 12
actual = registry.process(ctx, service_name, args)
self.assertEquals(expected, actual)
def test_unknown_service(self):
registry = phero.ServiceRegistry()
args = { 'a': 3, 'b': 4 }
service_name = 'multiply'
ctx = {}
expected = 12
with self.assertRaises(phero.UnknownService) as cmt:
registry.process(ctx, service_name, args)
self.assertEquals(cmt.exception.details, { 'service': 'multiply' })
def test_default_service(self):
def default(ctx):
return "Default Service"
registry = phero.ServiceRegistry()
registry.register_default(default)
ctx = {}
expected = "Default Service"
actual = registry.process(ctx, None, None)
self.assertEquals(expected, actual)
def test_default_default_service(self):
registry = phero.ServiceRegistry()
ctx = {}
expected = None
actual = registry.process(ctx, None, None)
self.assertEquals(expected, actual)
class TestService(unittest.TestCase):
def test_basic(self):
def multiply(ctx, a, b):
return a * b
multiply_service = phero.Service(multiply)
args = { 'a': 3, 'b': 4 }
ctx = {}
expected = 12
actual = multiply_service(ctx, args)
self.assertEquals(expected, actual)
def test_missing_required_arg(self):
def multiply(ctx, a, b):
return a * b
multiply_service = phero.Service(multiply)
args = { 'a': 3 }
ctx = {}
with self.assertRaises(phero.MissingRequiredArgument) as cmt:
multiply_service(ctx, args)
self.assertEquals(cmt.exception.details, { 'arg': 'b' })
def test_unknown_arg(self):
def multiply(ctx, a, b):
return a * b
multiply_service = phero.Service(multiply)
args = { 'a': 3, 'b': 4, 'c': 3 }
ctx = {}
with self.assertRaises(phero.UnknownArgument) as cmt:
multiply_service(ctx, args)
self.assertEquals(cmt.exception.details, { 'arg': 'c' })
def test_default_arg(self):
def multiply(ctx, a, b=2):
return a * b
multiply_service = phero.Service(multiply)
args = { 'a': 3 }
ctx = {}
expected = 6
actual = multiply_service(ctx, args)
self.assertEquals(expected, actual)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 1,082,001,563,675,999,600 | 23.028926 | 76 | 0.616337 | false |
nyu-mll/spinn | python/spinn/models/rl_classifier.py | 1 | 15458 | import os
import json
import math
import random
import sys
import time
import gflags
import numpy as np
from spinn.util import afs_safe_logger
from spinn.util.data import SimpleProgressBar
from spinn.util.blocks import to_gpu
from spinn.util.misc import Accumulator, EvalReporter
from spinn.util.logging import stats, train_accumulate, create_log_formatter
from spinn.util.logging import train_rl_accumulate
from spinn.util.logging import eval_stats, eval_accumulate, prettyprint_trees
from spinn.util.loss import auxiliary_loss
from spinn.util.sparks import sparks, dec_str
import spinn.util.evalb as evalb
import spinn.util.logging_pb2 as pb
from spinn.util.trainer import ModelTrainer
# PyTorch
import torch
import torch.nn as nn
from torch.autograd import Variable
from spinn.models.base import get_data_manager, get_flags, get_batch
from spinn.models.base import flag_defaults, init_model, log_path
from spinn.models.base import load_data_and_embeddings
FLAGS = gflags.FLAGS
def evaluate(FLAGS, model, eval_set, log_entry,
logger, trainer, vocabulary=None, show_sample=False, eval_index=0):
filename, dataset = eval_set
A = Accumulator()
eval_log = log_entry.evaluation.add()
reporter = EvalReporter()
tree_strs = None
# Evaluate
total_batches = len(dataset)
progress_bar = SimpleProgressBar(
msg="Run Eval",
bar_length=60,
enabled=FLAGS.show_progress_bar)
progress_bar.step(0, total=total_batches)
total_tokens = 0
cpt = 0
cpt_max = 0
start = time.time()
model.eval()
for i, dataset_batch in enumerate(dataset):
batch = get_batch(dataset_batch)
eval_X_batch, eval_transitions_batch, eval_y_batch, eval_num_transitions_batch, eval_ids = batch
# Run model.
np.set_printoptions(threshold=np.inf)
output = model(eval_X_batch, eval_transitions_batch, eval_y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions,
store_parse_masks=show_sample,
example_lengths=eval_num_transitions_batch)
can_sample = (FLAGS.model_type ==
"RLSPINN" and FLAGS.use_internal_parser)
if show_sample and can_sample:
tmp_samples = model.get_samples(
eval_X_batch, vocabulary, only_one=not FLAGS.write_eval_report)
tree_strs = prettyprint_trees(tmp_samples)
if not FLAGS.write_eval_report:
# Only show one sample, regardless of the number of batches.
show_sample = False
# Calculate class accuracy.
target = torch.from_numpy(eval_y_batch).long()
# get the index of the max log-probability
pred = output.data.max(1, keepdim=False)[1].cpu()
eval_accumulate(model, A, batch)
A.add('class_correct', pred.eq(target).sum())
A.add('class_total', target.size(0))
# Update Aggregate Accuracies
total_tokens += sum(
[(nt + 1) / 2 for nt in eval_num_transitions_batch.reshape(-1)])
if FLAGS.write_eval_report:
transitions_per_example, _ = model.spinn.get_transitions_per_example(
style="preds" if FLAGS.eval_report_use_preds else "given") if (
FLAGS.model_type == "RLSPINN" and FLAGS.use_internal_parser) else (
None, None)
if model.use_sentence_pair:
batch_size = pred.size(0)
sent1_transitions = transitions_per_example[:
batch_size] if transitions_per_example is not None else None
sent2_transitions = transitions_per_example[batch_size:
] if transitions_per_example is not None else None
sent1_trees = tree_strs[:batch_size] if tree_strs is not None else None
sent2_trees = tree_strs[batch_size:
] if tree_strs is not None else None
else:
sent1_transitions = transitions_per_example if transitions_per_example is not None else None
sent2_transitions = None
sent1_trees = tree_strs if tree_strs is not None else None
sent2_trees = None
if FLAGS.cp_metric:
cp, cp_max = reporter.save_batch(
pred,
target,
eval_ids,
output.data.cpu().numpy(),
sent1_transitions,
sent2_transitions,
sent1_trees,
sent2_trees,
cp_metric=FLAGS.cp_metric,
mt=False)
cpt += cp
cpt_max += cp_max
else:
reporter.save_batch(
pred,
target,
eval_ids,
output.data.cpu().numpy(),
sent1_transitions,
sent2_transitions,
sent1_trees,
sent2_trees,
mt=False)
# Print Progress
progress_bar.step(i + 1, total=total_batches)
progress_bar.finish()
cp_metric_value = cpt / cpt_max
if tree_strs is not None:
logger.Log('Sample: ' + tree_strs[0])
end = time.time()
total_time = end - start
A.add('total_tokens', total_tokens)
A.add('total_time', total_time)
logger.Log("Eval cp_acc: " + str(cp_metric_value))
eval_stats(model, A, eval_log)
eval_log.filename = filename
if FLAGS.write_eval_report:
eval_report_path = os.path.join(
FLAGS.log_path,
FLAGS.experiment_name +
".eval_set_" +
str(eval_index) +
".report")
reporter.write_report(eval_report_path)
eval_class_acc = eval_log.eval_class_accuracy
eval_trans_acc = eval_log.eval_transition_accuracy
return eval_class_acc, eval_trans_acc
def train_loop(
FLAGS,
model,
trainer,
training_data_iter,
eval_iterators,
logger,
vocabulary):
# Accumulate useful statistics.
A = Accumulator(maxlen=FLAGS.deque_length)
# Train.
logger.Log("Training.")
# New Training Loop
progress_bar = SimpleProgressBar(
msg="Training", bar_length=60, enabled=FLAGS.show_progress_bar)
progress_bar.step(i=0, total=FLAGS.statistics_interval_steps)
log_entry = pb.SpinnEntry()
for _ in range(trainer.step, FLAGS.training_steps):
if (trainer.step - trainer.best_dev_step) > FLAGS.early_stopping_steps_to_wait:
logger.Log('No improvement after ' +
str(FLAGS.early_stopping_steps_to_wait) +
' steps. Stopping training.')
break
model.train()
log_entry.Clear()
log_entry.step = trainer.step
should_log = False
start = time.time()
batch = get_batch(next(training_data_iter))
X_batch, transitions_batch, y_batch, num_transitions_batch, train_ids = batch
total_tokens = sum(
[(nt + 1) / 2 for nt in num_transitions_batch.reshape(-1)])
# Reset cached gradients.
trainer.optimizer_zero_grad()
temperature = math.sin(
math.pi /
2 +
trainer.step /
float(
FLAGS.rl_confidence_interval) *
2 *
math.pi)
temperature = (temperature + 1) / 2
# Confidence Penalty for Transition Predictions.
if FLAGS.rl_confidence_penalty:
epsilon = FLAGS.rl_epsilon * \
math.exp(-trainer.step / float(FLAGS.rl_epsilon_decay))
temp = 1 + \
(temperature - .5) * FLAGS.rl_confidence_penalty * epsilon
model.spinn.temperature = max(1e-3, temp)
# Soft Wake/Sleep based on temperature.
if FLAGS.rl_wake_sleep:
model.rl_weight = temperature * FLAGS.rl_weight
# Run model.
output = model(X_batch, transitions_batch, y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions
)
# Calculate class accuracy.
target = torch.from_numpy(y_batch).long()
# get the index of the max log-probability
pred = output.data.max(1, keepdim=False)[1].cpu()
class_acc = pred.eq(target).sum().float() / float(target.size(0))
# Calculate class loss.
xent_loss = nn.CrossEntropyLoss()(output, to_gpu(Variable(target)))
# Optionally calculate transition loss.
transition_loss = model.transition_loss if hasattr(
model, 'transition_loss') else None
# Accumulate Total Loss Variable
total_loss = 0.0
total_loss += xent_loss
if transition_loss is not None and model.optimize_transition_loss:
total_loss += transition_loss
aux_loss = auxiliary_loss(model)
total_loss += aux_loss[0]
# Backward pass.
total_loss.backward()
# Hard Gradient Clipping
nn.utils.clip_grad_norm_([param for name, param in model.named_parameters() if name not in ["embed.embed.weight"]], FLAGS.clipping_max_value)
# Gradient descent step.
trainer.optimizer_step()
end = time.time()
total_time = end - start
train_accumulate(model, A, batch)
A.add('class_acc', class_acc)
A.add('total_tokens', total_tokens)
A.add('total_time', total_time)
train_rl_accumulate(model, A, batch)
if trainer.step % FLAGS.statistics_interval_steps == 0:
progress_bar.step(i=FLAGS.statistics_interval_steps,
total=FLAGS.statistics_interval_steps)
progress_bar.finish()
A.add('xent_cost', xent_loss.data.item())
stats(model, trainer, A, log_entry)
should_log = True
if trainer.step % FLAGS.sample_interval_steps == 0 and FLAGS.num_samples > 0:
should_log = True
model.train()
model(X_batch, transitions_batch, y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions
)
tr_transitions_per_example, tr_strength = model.spinn.get_transitions_per_example(
)
model.eval()
model(X_batch, transitions_batch, y_batch,
use_internal_parser=FLAGS.use_internal_parser,
validate_transitions=FLAGS.validate_transitions
)
ev_transitions_per_example, ev_strength = model.spinn.get_transitions_per_example(
)
if model.use_sentence_pair and len(transitions_batch.shape) == 3:
transitions_batch = np.concatenate([
transitions_batch[:, :, 0], transitions_batch[:, :, 1]], axis=0)
# This could be done prior to running the batch for a tiny speed
# boost.
t_idxs = list(range(FLAGS.num_samples))
random.shuffle(t_idxs)
t_idxs = sorted(t_idxs[:FLAGS.num_samples])
for t_idx in t_idxs:
log = log_entry.rl_sampling.add()
gold = transitions_batch[t_idx]
pred_tr = tr_transitions_per_example[t_idx]
pred_ev = ev_transitions_per_example[t_idx]
strength_tr = sparks(
[1] + tr_strength[t_idx].tolist(), dec_str)
strength_ev = sparks(
[1] + ev_strength[t_idx].tolist(), dec_str)
_, crossing = evalb.crossing(gold, pred)
log.t_idx = t_idx
log.crossing = crossing
log.gold_lb = "".join(map(str, gold))
log.pred_tr = "".join(map(str, pred_tr))
log.pred_ev = "".join(map(str, pred_ev))
log.strg_tr = strength_tr[1:]
log.strg_ev = strength_ev[1:]
if trainer.step > 0 and trainer.step % FLAGS.eval_interval_steps == 0:
should_log = True
for index, eval_set in enumerate(eval_iterators):
acc, _ = evaluate(
FLAGS, model, eval_set, log_entry, logger, trainer, eval_index=index, vocabulary=vocabulary, show_sample=True)
if index == 0:
trainer.new_dev_accuracy(acc)
progress_bar.reset()
if trainer.step > FLAGS.ckpt_step and trainer.step % FLAGS.ckpt_interval_steps == 0:
should_log = True
trainer.checkpoint()
if should_log:
logger.LogEntry(log_entry)
progress_bar.step(i=(trainer.step % FLAGS.statistics_interval_steps) + 1,
total=FLAGS.statistics_interval_steps)
def run(only_forward=False):
logger = afs_safe_logger.ProtoLogger(log_path(FLAGS),
print_formatter=create_log_formatter(
True, True),
write_proto=FLAGS.write_proto_to_log)
header = pb.SpinnHeader()
data_manager = get_data_manager(FLAGS.data_type)
logger.Log("Flag Values:\n" +
json.dumps(FLAGS.FlagValuesDict(), indent=4, sort_keys=True))
# Get Data and Embeddings
vocabulary, initial_embeddings, training_data_iter, eval_iterators, training_data_length = \
load_data_and_embeddings(FLAGS, data_manager, logger,
FLAGS.training_data_path, FLAGS.eval_data_path)
# Build model.
vocab_size = len(vocabulary)
num_classes = len(set(data_manager.LABEL_MAP.values()))
model = init_model(
FLAGS,
logger,
initial_embeddings,
vocab_size,
num_classes,
data_manager,
header)
time_to_wait_to_lower_lr = min(10000, int(training_data_length / FLAGS.batch_size))
trainer = ModelTrainer(model, logger, time_to_wait_to_lower_lr, vocabulary, FLAGS)
header.start_step = trainer.step
header.start_time = int(time.time())
# Do an evaluation-only run.
logger.LogHeader(header) # Start log_entry logging.
if only_forward:
log_entry = pb.SpinnEntry()
for index, eval_set in enumerate(eval_iterators):
log_entry.Clear()
acc = evaluate(
FLAGS,
model,
eval_set,
log_entry,
logger,
trainer,
vocabulary,
show_sample=True,
eval_index=index)
print(log_entry)
logger.LogEntry(log_entry)
else:
train_loop(
FLAGS,
model,
trainer,
training_data_iter,
eval_iterators,
logger,
vocabulary)
if __name__ == '__main__':
get_flags()
# Parse command line flags.
FLAGS(sys.argv)
flag_defaults(FLAGS)
if FLAGS.model_type != "RLSPINN":
raise Exception("Reinforce is only implemented for RLSPINN.")
run(only_forward=FLAGS.expanded_eval_only_mode)
| mit | -1,203,337,465,858,312,400 | 33.815315 | 149 | 0.566373 | false |
mlundblad/telepathy-gabble | tests/twisted/caps/receive-jingle.py | 1 | 5901 | """
Test receiving another contact's capabilities.
"""
import dbus
from servicetest import EventPattern, assertEquals, sync_dbus
from gabbletest import exec_test, make_result_iq, make_presence, sync_stream
import constants as cs
from config import VOIP_ENABLED
if not VOIP_ENABLED:
print "NOTE: built with --disable-voip"
raise SystemExit(77)
icaps_attr = cs.CONN_IFACE_CAPS + "/caps"
basic_caps = [(2, cs.CHANNEL_TYPE_TEXT, 3, 0)]
def test(q, bus, conn, stream):
presence = make_presence('[email protected]/Foo', status='hello')
stream.send(presence)
q.expect('dbus-signal', signal='PresencesChanged',
args=[{2L: (2, u'available', 'hello')}])
# FIXME: throughout this test, Bob's handle is assumed to be 2.
# no special capabilities
assert conn.Capabilities.GetCapabilities([2]) == basic_caps
# holding the handle here: see below
assert conn.Contacts.GetContactAttributes(
[2], [cs.CONN_IFACE_CAPS], True) == \
{ 2L: { icaps_attr: basic_caps,
cs.CONN + '/contact-id': '[email protected]'}}
# send updated presence with Jingle audio/video caps info. we turn on both
# audio and video at the same time to test that all of the capabilities are
# discovered before any capabilities change signal is emitted
presence = make_presence('[email protected]/Foo', status='hello',
caps={
'node': 'http://telepathy.freedesktop.org/fake-client',
'ver' : '0.1',
'ext' : 'video',
})
stream.send(presence)
# Gabble looks up both the version and the video bundles, in any order
(version_event, video_event) = q.expect_many(
EventPattern('stream-iq', to='[email protected]/Foo',
query_ns='http://jabber.org/protocol/disco#info',
query_node='http://telepathy.freedesktop.org/fake-client#0.1'),
EventPattern('stream-iq', to='[email protected]/Foo',
query_ns='http://jabber.org/protocol/disco#info',
query_node='http://telepathy.freedesktop.org/fake-client#video'))
# reply to the video bundle query first - this capability alone is not
# sufficient to make us callable
result = make_result_iq(stream, video_event.stanza)
query = result.firstChildElement()
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle/description/video'
stream.send(result)
# reply to the version bundle query, which should make us audio and
# video callable
result = make_result_iq(stream, version_event.stanza)
query = result.firstChildElement()
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle'
feature = query.addElement('feature')
feature['var'] = 'http://jabber.org/protocol/jingle/description/audio'
feature = query.addElement('feature')
feature['var'] = 'http://www.google.com/transport/p2p'
stream.send(result)
# we can now do audio and video calls
event = q.expect('dbus-signal', signal='CapabilitiesChanged',
args=[[(2, cs.CHANNEL_TYPE_STREAMED_MEDIA, 0, 3,
0, cs.MEDIA_CAP_AUDIO | cs.MEDIA_CAP_VIDEO)]])
caps = conn.Contacts.GetContactAttributes([2], [cs.CONN_IFACE_CAPS], False)
assert caps.keys() == [2L]
assert icaps_attr in caps[2L]
assert len(caps[2L][icaps_attr]) == 2
assert basic_caps[0] in caps[2L][icaps_attr]
assert (2, cs.CHANNEL_TYPE_STREAMED_MEDIA, 3, 3) in caps[2L][icaps_attr]
# send updated presence without video support
presence = make_presence('[email protected]/Foo', status='hello',
caps={
'node': 'http://telepathy.freedesktop.org/fake-client',
'ver' : '0.1',
})
stream.send(presence)
# we can now do only audio calls (and as a result have the ImmutableStreams
# cap)
event = q.expect('dbus-signal', signal='CapabilitiesChanged',
args=[[(2, cs.CHANNEL_TYPE_STREAMED_MEDIA, 3, 3,
cs.MEDIA_CAP_AUDIO | cs.MEDIA_CAP_VIDEO,
cs.MEDIA_CAP_AUDIO | cs.MEDIA_CAP_IMMUTABLE_STREAMS)]])
caps = conn.Contacts.GetContactAttributes([2], [cs.CONN_IFACE_CAPS], False)
assert caps.keys() == [2L]
assert icaps_attr in caps[2L]
assert len(caps[2L][icaps_attr]) == 2
assert basic_caps[0] in caps[2L][icaps_attr]
assert (2, cs.CHANNEL_TYPE_STREAMED_MEDIA, 3,
cs.MEDIA_CAP_AUDIO | cs.MEDIA_CAP_IMMUTABLE_STREAMS) \
in caps[2L][icaps_attr]
# go offline
presence = make_presence('[email protected]/Foo', type='unavailable')
stream.send(presence)
# can't do audio calls any more
q.expect_many(
EventPattern('dbus-signal', signal='CapabilitiesChanged',
args=[[(2, cs.CHANNEL_TYPE_STREAMED_MEDIA, 3, 0,
cs.MEDIA_CAP_AUDIO | cs.MEDIA_CAP_IMMUTABLE_STREAMS,
0)]],
),
EventPattern('dbus-signal', signal='PresencesChanged',
args=[{2: (cs.PRESENCE_OFFLINE, 'offline', '')}]),
)
# Contact went offline. Previously, this test asserted that the handle
# became invalid, but that's not guaranteed to happen immediately; so we
# now hold the handle (above), to guarantee that it does *not* become
# invalid.
assert conn.Contacts.GetContactAttributes(
[2], [cs.CONN_IFACE_CAPS], False) == \
{ 2L: { icaps_attr: basic_caps,
cs.CONN + '/contact-id': '[email protected]'}}
# What about a handle that's not valid?
assertEquals({}, conn.Contacts.GetContactAttributes(
[31337], [cs.CONN_IFACE_CAPS], False))
# regression test for fd.o #15198: getting caps of invalid handle crashed
try:
conn.Capabilities.GetCapabilities([31337])
except dbus.DBusException, e:
pass
else:
assert False, "Should have had an error!"
if __name__ == '__main__':
exec_test(test)
| lgpl-2.1 | 7,374,545,274,565,893,000 | 37.822368 | 79 | 0.63735 | false |
SelvorWhim/competitive | LeetCode/MinimumSubsequenceInNonIncreasingOrder.py | 1 | 1351 | # idea: order by size and take largest elements until the sum becomes > sum of remaining
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
nums = sorted(nums, reverse=True)
total_sum = sum(nums)
running_sum = 0
subseq_len = 0 # how many biggest members we'll need to take before sum is greater than the rest
for x in nums:
running_sum += x
subseq_len += 1
if running_sum > (total_sum - running_sum):
break
return nums[:subseq_len]
# in this variant (not relevant for the problem as described) we keep track of original order so subsequence can be returned in original order
def minSubsequenceInOriginalOrder(self, nums: List[int]) -> List[int]:
total_sum = sum(nums)
sorted_nums = sorted(enumerate(nums), key=lambda x: x[1], reverse=True) # preserving original order in 1st index
running_sum = 0
subseq_len = 0 # how many biggest members we'll need to take before sum is greater than the rest
for t in sorted_nums:
running_sum += t[1]
subseq_len += 1
if running_sum > (total_sum - running_sum):
break
subseq_indexes = sorted([t[0] for t in sorted_nums[:subseq_len]])
return [nums[i] for i in subseq_indexes]
| unlicense | 6,513,663,138,112,198,000 | 49.037037 | 146 | 0.617321 | false |
Ircam-Web/mezzanine-organization | organization/core/related.py | 1 | 2036 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016-2017 Ircam
# Copyright (c) 2016-2017 Guillaume Pellerin
# Copyright (c) 2016-2017 Emilie Zawadzki
# This file is part of mezzanine-organization.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.core import exceptions
from django.db.models.fields.related import ForeignKey
from django.db.utils import ConnectionHandler, ConnectionRouter
connections = ConnectionHandler()
router = ConnectionRouter()
class SpanningForeignKey(ForeignKey):
def validate(self, value, model_instance):
if self.rel.parent_link:
return
# Call the grandparent rather than the parent to skip validation
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(self.rel.to, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.get_limit_choices_to())
if not qs.exists():
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
params={
'model': self.rel.to._meta.verbose_name, 'pk': value,
'field': self.rel.field_name, 'value': value,
}, # 'pk' is included for backwards compatibility
)
| agpl-3.0 | -7,799,400,437,900,264,000 | 37.415094 | 77 | 0.672888 | false |
virtualnobi/MediaFiler | Model/MediaClassHandler.py | 1 | 18503 | """Provides class and element handling functionality.
(c) by nobisoft 2016-
"""
# Imports
## Standard
import sys
import copy
import re
import codecs
import logging
## Contributed
## nobi
## Project
#from .MediaOrganization import MediaOrganization
# Package Variables
Logger = logging.getLogger(__name__)
class MediaClassHandler(object):
"""
"""
# Constants
KeyName = u'class' # key in class dictionary mapping to class name
KeyMultiple = u'multiple' # key in class dictionary mapping to Boolean indicating whether multiple elements can be selected
KeyElements = u'elements' # key in class dictionary mapping to list of elements
KeyRequired = u'required' # key in class dictionary mapping to list of required elements
KeyRequiredClasses = u'requiredClass' # key in class dictionary mapping to list of required classes
KeyProhibited = u'prohibited' # key in class dictionary mapping to list of prohibited elements
TagSeparator = u'.' # character to introduce a tag/element
RETagSeparatorsRecognized = ('[, _' + TagSeparator + '-]')
ElementIllegal = u'illegal' # special element signalling that a combination of elements is not legal
ElementNew = u'new' # special element signalling that the entry is new, i.e., just imported
InitialFileContent = (u'# Classname Element+ # for classes with single-choice elements\n' +
u'# Classname [] Element+ # for classes with multiple-choice elements\n' +
u'# Classname +Element1 Element2+ # for a class which applies only if Element1 has been assigned')
# Class Variables
# Class Methods
# Lifecycle
def __init__(self, pathname):
"""Create a MediaClassHandler instance from the definitions in pathname.
"""
# inheritance
# internal state
self.classes = []
self.knownElements = []
self.readClassesFromFile(pathname)
return(None)
# Setters
# Getters
def getClasses(self):
"""Return a list of all classes.
"""
return(copy.copy(self.classes))
def getClassNames(self):
"""Return a list of all class names.
"""
return([aClass[self.__class__.KeyName] for aClass in self.classes])
def isMultipleClass(self, aClass):
"""Return True if multiple elements of CLASSNAME may be selected.
Return False if at most one element of CLASSNAME may be selected.
"""
return((aClass != None)
and (self.KeyMultiple in aClass)
and (aClass[self.KeyMultiple]))
def isMultipleClassByName(self, className):
"""Return True if multiple elements of CLASSNAME may be selected.
Return False if at most one element of CLASSNAME may be selected.
"""
return(self.isMultipleClass(self.getClassByName(className)))
def getElementsOfClass(self, aClass):
"""
dict aClass
Return list of all tags in aClass, ordered as in definition.
"""
return(list(aClass[self.KeyElements]))
def getElementsOfClassByName(self, className):
"""
String className
Raises KeyError if no class exists with name className
Return list of all tags in className, ordered as in definition.
"""
aClass = self.getClassByName(className)
if (aClass == None):
raise KeyError('No class named "%s" exists!' % className)
else:
return(self.getElementsOfClass(aClass))
def getKnownElements(self):
"""Return a list of all known elements.
"""
return(copy.copy(self.knownElements))
def isLegalElement(self, element):
"""Return True if element is a legal class element, False otherwise.
String element
Return Boolean
"""
return(self.normalizeTag(element) in self.getKnownElements())
# Other API
def normalizeTag(self, tag):
"""Normalize a tag (element), for example, when importing.
This will compare the tag with all known tags in a case-insensitive way,
and return the defined spelling if found in the known tags.
If not found in the known tags, it will be returned without changes.
String tag
Return Boolean
"""
for knownTag in self.getKnownElements():
if (knownTag.lower() == tag.lower()):
return(knownTag)
return(tag)
def combineTagsWithPriority(self, tagSet, priorityTagSet):
"""Return the union of the two tag sets, except for single-selection tag classes where the second set has priority.
Set of String tagSet
Set of String priorityTagSet
Return Set of String
"""
result = set(tagSet)
singleSelectionClasses = filter(lambda c: (not self.isMultipleClass(c)), self.getClasses())
for priorityTag in priorityTagSet:
priorityClass = self.getClassOfTag(priorityTag)
if (priorityClass in singleSelectionClasses):
result.difference_update(set(self.getElementsOfClass(priorityClass)))
result.add(priorityTag)
return(result)
def getTagsOnChange(self, tagSet, addedTag, removedTags):
"""Determine new set of tags based on tags added and removed.
Set of String tagSet
String or None addedTag
Set of String removedTags
Return Set of String containing the tags after addition and removal
"""
Logger.debug('MediaClassHandler.getTagsOnChange(%s +%s -%s)' % (tagSet, addedTag, removedTags))
result = copy.copy(tagSet)
if (addedTag):
result.update(set([addedTag]))
result = self.includeRequiredElements(result)
Logger.debug('MediaClassHandler.getTagsOnChange(): Adding %s yields %s' % (addedTag, result.difference(tagSet)))
for tag in removedTags:
result.discard(tag)
for aClass in self.getClasses():
if (tag in self.getRequiredElementsOfClass(aClass)):
result.difference_update(set(self.getElementsOfClass(aClass)))
if (((addedTag == None) or
(self.getClassOfTag(tag) != self.getClassOfTag(addedTag)))
and (self.getClassOfTag(tag)[MediaClassHandler.KeyName] in self.getRequiredClassesOfClass(aClass))):
result.difference_update(set(self.getElementsOfClass(aClass)))
Logger.debug('MediaClassHandler.getTagsOnChange(): Removed %s' % tagSet.difference(result))
return(result)
def includeRequiredElements(self, elements):
"""Add all required tags to a tagset.
Set elements contains tags as String
Return Set containing all tags as well as additional tags required by them
"""
result = set(elements)
for aClass in self.getClasses():
for anElement in self.getElementsOfClass(aClass):
if (anElement in elements):
for requiredElement in self.getRequiredElementsOfClass(aClass):
result.add(requiredElement)
for requiredClassName in self.getRequiredClassesOfClass(aClass):
requiredTags = set(self.getElementsOfClassByName(requiredClassName))
if (len(requiredTags.intersection(elements)) == 0):
result.add(self.getElementsOfClassByName(requiredClassName)[0]) # requiredTags.pop()) # choose first tag from class definition
for prohibitedElement in self.getProhibitedElementsOfClass(aClass):
if (prohibitedElement in elements):
result.add(self.ElementIllegal)
Logger.debug('MediaClassHandler.includeRequiredElements(%s): Added %s' % (elements, (result - elements)))
return(result)
def orderElements(self, elementSet):
"""Order the elements specified according to class definition.
Returns a List of String.
"""
result = []
elements = copy.copy(elementSet)
for aClass in self.getClasses():
for element in self.getElementsOfClass(aClass):
if (element in elements):
result.append(element)
elements.remove(element)
for element in sorted(elements):
result.append(element)
return (result)
def elementsToString(self, elementSet):
"""Return a String containing all elements in ELEMENTSET in canonical order.
Elements are introduced by TagSeparator (meaning the result is either empty or starts with a TagSeparator).
"""
elements = self.orderElements(elementSet)
result = (MediaClassHandler.TagSeparator.join(elements))
if (not (result == '')):
result = (MediaClassHandler.TagSeparator + result)
return (result)
def stringToElements(self, elementString):
"""Turn a (unicode) string into a set of (unicode) tags.
String elementString contains a string of words
Return a Set with all elements from ELEMENTSTRING
"""
elements = set(re.split(MediaClassHandler.RETagSeparatorsRecognized, elementString))
if (u'' in elements):
elements.remove(u'')
return(elements)
def stringToKnownAndUnknownElements(self, elementString):
"""Turn a (unicode) string into (unicode) tags.
Return (known, unknown) where
Dictionary known maps class names to (unicode) tags
Set unknown contains all remaining tags from elementString
"""
remainingElements = self.stringToElements(elementString)
knownElements = {}
# sort elements into class sequence
for aClass in self.getClasses():
className = aClass[self.KeyName]
for classElement in self.getElementsOfClass(aClass):
if (classElement in remainingElements):
remainingElements.remove(classElement)
if (className in knownElements.keys()): # add known element...
knownElements[className].append(classElement) # ...to an existing list
else:
knownElements[className] = [classElement] # ...as a single-entry list
return(knownElements, remainingElements)
# Event Handlers
# Internal - to change without notice
def getClassByName(self, className):
"""Return a Dictionary defining the named class.
Return None if className does not exist.
"""
for aClass in self.classes:
if (aClass[self.KeyName] == className):
return(aClass)
return(None)
def getClassOfTag(self, tagName):
"""Return the class to which the given tag belongs.
String tagName
Return Dictionary describing the class
or None if tagName belongs to no class
"""
for aClass in self.classes:
if (tagName in self.getElementsOfClass(aClass)):
return(aClass)
return(None)
def getRequiredElementsOfClass(self, aClass):
"""Return a list of all elements which must apply for aClass to be applicable.
"""
return(aClass[self.KeyRequired])
def getRequiredClassesOfClass(self, aClass):
"""Return a list of all class names which must apply for aClass to be applicable.
At least one tag from the resulting classes must be applied for aClass to be applicable.
Return List of String
"""
return(aClass[self.KeyRequiredClasses])
def getProhibitedElementsOfClass(self, aClass):
"""Return a list of all elements which may not apply for className to be applicable.
Return None if className does not exist.
"""
return(aClass[self.KeyProhibited])
def readClassesFromFile(self, pathname):
"""Set self's internal state from the class definition in the given file.
String pathname contains the file name
"""
self.classes = []
self.knownElements = []
try:
# classFile = codecs.open(pathname, encoding=sys.getfilesystemencoding())
classFile = open(pathname, mode='rt') # Python 3
except:
raise IOError('Cannot open "%s" to read tag classes!' % pathname)
for line in classFile:
#print ("Read line >%s<" % line)
line = line.strip() # trim white space
if ((len (line) == 0) or (line[0] == '#')): # empty or comment line, ignore
#print ("Ignored empty or comment line")
pass
else: # non-comment, interpret
tokens = line.split()
className = tokens.pop(0)
Logger.debug('MediaClassHandler.readClassesFromFile(): Definition of "%s" is "%s"' % (className, tokens))
multiple = False
required = []
requiredClasses = []
prohibited = []
elements = []
while (len(tokens) > 0):
token = tokens.pop(0)
if (token == '[]'): # this is a multiple-selection class
multiple = True
elif (token[0] == '+'):
name = token[1:]
if (self.isLegalElement(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required tag "%s"' % name)
required.append(name)
elif (self.getClassByName(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required class "%s"' % name)
requiredClasses.append(name)
else:
Logger.debug('MediaClassHandler.readClassesFromFile(): Requiring unknown tag "%s"' % name)
required.append(name)
elif (token[0] == '-'):
prohibited.append(token[1:])
else:
#print ("Adding element %s" % token)
elements.append(token)
aClass = {self.KeyName:className,
self.KeyRequired:required,
self.KeyRequiredClasses:requiredClasses,
self.KeyProhibited:prohibited,
self.KeyMultiple:multiple,
self.KeyElements:elements}
#print ("Found definition of %s" % aClass)
self.classes.append(aClass)
self.knownElements.extend(elements) # extend list of all known elements for filtering
classFile.close()
def readClassesFromFile3(self, pathname):
"""Set self's internal state from the class definition in the given file.
String pathname contains the file name
"""
self.classes = []
self.knownElements = []
try:
classFile = codecs.open(pathname, encoding=sys.getfilesystemencoding())
except:
raise IOError('Cannot open "%s" to read tag classes!' % pathname)
for line in classFile:
#print ("Read line >%s<" % line)
line = line.strip() # trim white space
if ((len (line) == 0) or (line[0] == '#')): # empty or comment line, ignore
#print ("Ignored empty or comment line")
pass
else: # non-comment, interpret
tokens = line.split()
className = tokens.pop(0)
Logger.debug('MediaClassHandler.readClassesFromFile(): Definition of "%s" is "%s"' % (className, tokens))
multiple = False
required = []
requiredClasses = []
prohibited = []
elements = []
while (len(tokens) > 0):
token = tokens.pop(0)
if (token == '[]'): # this is a multiple-selection class
multiple = True
elif (token[0] == '+'):
name = token[1:]
if (self.isLegalElement(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required tag "%s"' % name)
required.append(name)
elif (self.getClassByName(name)):
Logger.debug('MediaClassHandler.readClassesFromFile(): Required class "%s"' % name)
requiredClasses.append(name)
else:
Logger.debug('MediaClassHandler.readClassesFromFile(): Requiring unknown tag "%s"' % name)
required.append(name)
elif (token[0] == '-'):
prohibited.append(token[1:])
else:
#print ("Adding element %s" % token)
elements.append(token)
aClass = {self.KeyName:className,
self.KeyRequired:required,
self.KeyRequiredClasses:requiredClasses,
self.KeyProhibited:prohibited,
self.KeyMultiple:multiple,
self.KeyElements:elements}
#print ("Found definition of %s" % aClass)
self.classes.append(aClass)
self.knownElements.extend(elements) # extend list of all known elements for filtering
classFile.close()
# Class Initialization
pass
# Executable Script
if __name__ == "__main__":
pass
| gpl-3.0 | -4,694,349,200,343,272,000 | 38.755507 | 156 | 0.565908 | false |
aurex-linux/virt-manager | virtManager/mediacombo.py | 1 | 6975 | #
# Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
# pylint: disable=E0611
from gi.repository import Gtk
# pylint: enable=E0611
from virtManager import uiutil
from virtManager.baseclass import vmmGObjectUI
class vmmMediaCombo(vmmGObjectUI):
OPTICAL_FIELDS = 4
(OPTICAL_DEV_PATH,
OPTICAL_LABEL,
OPTICAL_HAS_MEDIA,
OPTICAL_DEV_KEY) = range(OPTICAL_FIELDS)
def __init__(self, conn, builder, topwin, media_type):
vmmGObjectUI.__init__(self, None, None, builder=builder, topwin=topwin)
self.conn = conn
self.media_type = media_type
self.top_box = None
self.combo = None
self._warn_icon = None
self._populated = False
self._init_ui()
def _cleanup(self):
try:
self.conn.disconnect_by_func(self._mediadev_added)
self.conn.disconnect_by_func(self._mediadev_removed)
except:
pass
self.conn = None
self.top_box.destroy()
self.top_box = None
##########################
# Initialization methods #
##########################
def _init_ui(self):
self.top_box = Gtk.Box()
self.top_box.set_spacing(6)
self.top_box.set_orientation(Gtk.Orientation.HORIZONTAL)
self._warn_icon = Gtk.Image()
self._warn_icon.set_from_stock(
Gtk.STOCK_DIALOG_WARNING, Gtk.IconSize.MENU)
self.combo = Gtk.ComboBox()
self.top_box.add(self.combo)
self.top_box.add(self._warn_icon)
self.top_box.show_all()
# [Device path, pretty label, has_media?, device key, media key,
# vmmMediaDevice, is valid device]
fields = []
fields.insert(self.OPTICAL_DEV_PATH, str)
fields.insert(self.OPTICAL_LABEL, str)
fields.insert(self.OPTICAL_HAS_MEDIA, bool)
fields.insert(self.OPTICAL_DEV_KEY, str)
self.combo.set_model(Gtk.ListStore(*fields))
text = Gtk.CellRendererText()
self.combo.pack_start(text, True)
self.combo.add_attribute(text, 'text', self.OPTICAL_LABEL)
error = self.conn.mediadev_error
self._warn_icon.set_visible(bool(error))
self._warn_icon.set_tooltip_text(error)
def _set_mediadev_default(self):
model = self.combo.get_model()
if len(model) != 0:
return
row = [None] * self.OPTICAL_FIELDS
row[self.OPTICAL_DEV_PATH] = None
row[self.OPTICAL_LABEL] = _("No device present")
row[self.OPTICAL_HAS_MEDIA] = False
row[self.OPTICAL_DEV_KEY] = None
model.append(row)
def _set_mediadev_row_from_object(self, row, obj):
row[self.OPTICAL_DEV_PATH] = obj.get_path()
row[self.OPTICAL_LABEL] = obj.pretty_label()
row[self.OPTICAL_HAS_MEDIA] = obj.has_media()
row[self.OPTICAL_DEV_KEY] = obj.get_key()
def _mediadev_set_default_selection(self):
# Set the first active cdrom device as selected, otherwise none
widget = self.combo
model = widget.get_model()
idx = 0
active = widget.get_active()
if active != -1:
# already a selection, don't change it
return
for row in model:
if row[self.OPTICAL_HAS_MEDIA] is True:
widget.set_active(idx)
return
idx += 1
widget.set_active(0)
def _mediadev_media_changed(self, newobj):
widget = self.combo
model = widget.get_model()
active = widget.get_active()
idx = 0
# Search for the row with matching device node and
# fill in info about inserted media. If model has no current
# selection, select the new media.
for row in model:
if row[self.OPTICAL_DEV_PATH] == newobj.get_path():
self._set_mediadev_row_from_object(row, newobj)
has_media = row[self.OPTICAL_HAS_MEDIA]
if has_media and active == -1:
widget.set_active(idx)
elif not has_media and active == idx:
widget.set_active(-1)
idx = idx + 1
self._mediadev_set_default_selection()
def _mediadev_added(self, ignore, newobj):
widget = self.combo
model = widget.get_model()
if newobj.get_media_type() != self.media_type:
return
if model is None:
return
if len(model) == 1 and model[0][self.OPTICAL_DEV_PATH] is None:
# Only entry is the 'No device' entry
model.clear()
newobj.connect("media-added", self._mediadev_media_changed)
newobj.connect("media-removed", self._mediadev_media_changed)
# Brand new device
row = [None] * self.OPTICAL_FIELDS
self._set_mediadev_row_from_object(row, newobj)
model.append(row)
self._mediadev_set_default_selection()
def _mediadev_removed(self, ignore, key):
widget = self.combo
model = widget.get_model()
active = widget.get_active()
idx = 0
for row in model:
if row[self.OPTICAL_DEV_KEY] == key:
# Whole device removed
del(model[idx])
if idx > active and active != -1:
widget.set_active(active - 1)
elif idx == active:
widget.set_active(-1)
idx += 1
self._set_mediadev_default()
self._mediadev_set_default_selection()
def _populate_media(self):
if self._populated:
return
widget = self.combo
model = widget.get_model()
model.clear()
self._set_mediadev_default()
self.conn.connect("mediadev-added", self._mediadev_added)
self.conn.connect("mediadev-removed", self._mediadev_removed)
widget.set_active(-1)
self._mediadev_set_default_selection()
self._populated = True
##############
# Public API #
##############
def reset_state(self):
self._populate_media()
def get_path(self):
return uiutil.get_list_selection(self.combo, self.OPTICAL_DEV_PATH)
def has_media(self):
return uiutil.get_list_selection(self.combo, self.OPTICAL_HAS_MEDIA)
| gpl-2.0 | 7,790,347,630,391,545,000 | 30.278027 | 79 | 0.590108 | false |
AKAMobi/ibot | ibot-kernel/ibot/extractor/startup.py | 1 | 4162 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from common import get_regs, get_compile_regs
import re
REGS = get_compile_regs(get_regs('startup'))
def get_startup(document):
""" 抽取项目名
@document: Document对象 chunk
@rtype: str 项目名
"""
global REGS
startup = ''
max_length = 20
# 存储正则表达式的列表,用于去除格式之后的项目名称筛选
for sentence in document.sentences:
text = sentence.raw
# 文件格式名之前的部分为包含项目名称的部分
searchObj = re.search(
r'附件[内容]*[::]?(.*)(\.pp|\.do|\.pdf|\.wps)', text)
# 如果匹配到的内容不为空
if searchObj:
# 取出包含项目名称的部分,并用reh_list中的规则对其进行匹配
new_text = searchObj.group(1)
startup = new_text
for every_re in REGS:
new_searchObj = re.search(every_re, new_text)
if new_searchObj and startup == new_text:
# 如果关键字前面的字段长度大于2,则为项目名称
if len(new_searchObj.group(1)) >= 2:
startup = new_searchObj.group(1)
break
# 否则,关键字后面为项目名称
else:
startup = new_searchObj.group(2)
break
# 对项目名称中的一些符号进行替换,将其去除
# 去除BP
startup = startup.replace('BP', '')
# 去除开头的-
matchObj = re.match('^-*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
# 去除开头的_
matchObj = re.match(u'^_*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
# 去除开头的——
matchObj = re.match(u'^——*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
# 去除开头的.
matchObj = re.match(r'^\.*(.*)', startup)
if matchObj:
startup = matchObj.group(1)
# 去除版本号
matchObj = re.match(r'(.*)v[0~9]*', startup)
if matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)V[0~9]*', startup)
if matchObj:
startup = matchObj.group(1)
# 去除末尾的-、_、.等符号
matchObj = re.match(r'(.*)_$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)_$', startup)
matchObj = re.match(r'(.*)-$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)-$', startup)
matchObj = re.match(r'(.*)\.$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)\.$', startup)
matchObj = re.match(r'(.*)―$', startup)
while matchObj:
startup = matchObj.group(1)
matchObj = re.match(r'(.*)―$', startup)
# 去除结尾的‘PreA轮、B轮’等内容
startup = re.sub(u'PreA轮.*', '', startup)
startup = re.sub(u'Pre-A轮.*', '', startup)
startup = re.sub(u'A轮.*', '', startup)
startup = re.sub(u'B轮.*', '', startup)
startup = re.sub(u'C轮.*', '', startup)
startup = re.sub(u'D轮.*', '', startup)
startup = re.sub(u'天使轮.*', '', startup)
# 去除《》
startup = startup.replace(u'《', '')
startup = startup.replace(u'》', '')
# 去除APP
startup = startup.replace(u'APP', '')
# 去除结尾的“项目”
startup = startup.replace(u'项目', '')
# 去除结尾的“网站”
startup = startup.replace(u'网站', '')
startup = startup.replace(r'\s*阅读版', '')
startup = re.sub(r'\d{4,11}[-_.\d]*', '', startup)
# 如果包含‘项目名称’的关键字,则取其为项目名称
searchObj = re.search(u'项目名称:(.{2,})', text)
if searchObj:
startup = searchObj.group(1)
if len(startup) > max_length:
startup == ''
return startup
| apache-2.0 | -2,418,756,691,669,638,000 | 25.984496 | 61 | 0.508587 | false |
vinu76jsr/django-memoize | setup.py | 1 | 1282 | #!/usr/bin/env python
"""
django-memoize
--------------
**django-memoize** is an implementation
of `memoization <http://en.wikipedia.org/wiki/Memoization>`_ technique
for Django. You can think of it as a cache for function or method results.
"""
from setuptools import setup, find_packages
setup(
name='django-memoize',
version='1.1.2',
url='https://github.com/tvavrys/django-memoize',
license='BSD',
author='Thomas Vavrys',
author_email='[email protected]',
description='An implementation of memoization technique for Django.',
long_description=__doc__,
packages=find_packages(),
zip_safe=False,
install_requires=[
'django >= 1.4'
],
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
'Intended Audience :: Developers',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='setuptest.setuptest.SetupTestSuite',
tests_require=(
'django-setuptest',
'argparse', # Required by django-setuptools on Python 2.6
),
)
| bsd-3-clause | -3,856,313,471,149,989,000 | 28.813953 | 74 | 0.634165 | false |
kinsney/sport | sport/settings.py | 1 | 4682 | #coding:utf-8
# -*- coding: utf-8 -*-
"""
Django settings for sport project.
Generated by 'django-admin startproject' using Django 1.9.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
#coding: utf-8
# -*- coding: utf-8 -*-
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0#92e&xud-w5ry-6k6c^n#5s8hj+6@(8kmwz5=aj%aplilu3k1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["101.200.145.32",'localhost']
SITE_URL = "http://101.200.145.32"
# Application definition
INSTALLED_APPS = [
'constance',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'constance.backends.database',
'bike',
'order',
'participator',
'advertisement',
'message'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'participator.hashers.DoubleMD5PasswordHasher',
]
ROOT_URLCONF = 'sport.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'sport/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'sport.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'sport',
'USER': 'root',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'zh-cn'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "sport/static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
OSS_MEDIA_URL = ''
TEST_OSS_MEDIA_URL = ''
# django
# http://django-constance.readthedocs.org/
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_CONFIG = {
'Brokerage':(0.1,u'每单所收佣金'),
'VerificationCodeLength':(6,u'验证码长度'),
'VerificationAvailableMinutes':(5,u'验证码有效时间'),
'IPMessageLimit':(100,u'每个IP每日允许发送的信息最大值'),
'VerificationCodeTemplate': (u'【%s】',
u'用“%s”来替换要发送的验证码。'),
'bikeNumberLength':(13,u'单车编号长度'),
'orderNumberLength':(13,u'订单编号长度'),
'withdrawRate':(0.1,'撤单利率'),
'numbersPerRequest':(12,'每次请求得到的数目')
}
CONSTANCE_SUPERUSER_ONLY = True
| mit | -3,694,405,221,567,587,000 | 25.526316 | 91 | 0.676808 | false |
oaelhara/numbbo | docs/coco-generic/coco-perf-assessment/source/conf.py | 1 | 9586 | # -*- coding: utf-8 -*-
#
# docs/coco-generic/coco-perf-assessment documentation build configuration
# file, created by sphinx-quickstart on Fri Feb 12 14:40:02 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.pngmath',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'docs-coco-generic-coco-perf-assessment'
copyright = u'2016, The BBOBies'
author = u'The BBOBies'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7-beta'
# The full version, including alpha/beta/rc tags.
release = '0.7-beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'docs-coco-generic-coco-perf-assessment'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'coco-perf-assessment.tex', u'General Performance Assessment Documentation in the Comparing Continuous Optimizers Platform Coco',
u'The BBOBies', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'coco-perf-assessment', u'General Performance Assessment Documentation in the Comparing Continuous Optimizers Platform Coco',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'coco-perf-assessment.tex', u'General Performance Assessment Documentation in the Comparing Continuous Optimizers Platform Coco',
author, 'coco-perf-assessment', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause | -8,011,413,805,801,072,000 | 32.400697 | 144 | 0.713019 | false |
demitri/cornish | source/cornish/region/region.py | 1 | 28830 |
from __future__ import annotations # remove in Python 3.10
# Needed for forward references, see:
# https://stackoverflow.com/a/33533514/2712652
import logging
from abc import ABCMeta, abstractproperty, abstractmethod
from typing import Union, Iterable, Tuple
import math
from math import radians as deg2rad
from math import degrees as rad2deg
import numpy as np
import astropy
import astropy.units as u
import starlink
import starlink.Ast as Ast
import cornish.region # to avoid circular imports below - better way?
from ..mapping import ASTFrame, ASTFrameSet, ASTMapping
from ..exc import NotA2DRegion, CoordinateSystemsCouldNotBeMapped
from ..constants import AST_SURFACE_MESH, AST_BOUNDARY_MESH
__all__ = ['ASTRegion']
logger = logging.getLogger("cornish")
'''
Copied from documentation, to be implemented.
Properties of ASTRegion over those from ASTFrame
* Adaptive: Should the area adapt to changes in the coordinate system?
* Negated: Has the original region been negated?
* Closed: Should the boundary be considered to be inside the region?
* MeshSize: Number of points used to create a mesh covering the Region
* FillFactor: Fraction of the Region which is of interest
* Bounded: Is the Region bounded?
Region-specific methods:
* astGetRegionBounds: Get the bounds of a Region
* astGetRegionFrame: Get a copy of the Frame represent by a Region
* astGetRegionFrameSet: Get a copy of the Frameset encapsulated by a Region
* astGetRegionMesh: Get a mesh of points covering a Region
* astGetRegionPoints: Get the positions that define a Region
* astGetUnc: Obtain uncertainty information from a Region
* astMapRegion: Transform a Region into a new coordinate system
* astNegate: Toggle the value of the Negated attribute
* astOverlap: Determines the nature of the overlap between two Regions
* astMask<X>: Mask a region of a data grid
* astSetUnc: Associate a new uncertainty with a Region
* astShowMesh: Display a mesh of points on the surface of a Region
'''
class ASTRegion(ASTFrame, metaclass=ABCMeta):
'''
Represents a region within a coordinate system.
This is an abstract superclass - there is no supported means to create an ASTRegion object directly
(see :py:class:`ASTBox`, :py:class:`ASTPolygon`, etc.).
Accepted signatures for creating an ASTRegion:
.. code-block:: python
r = ASTRegion(ast_object)
:param ast_object:
:param uncertainty:
'''
def __init__(self, ast_object:starlink.Ast.Region=None, uncertainty=None):
super().__init__(ast_object=ast_object)
self._uncertainty = uncertainty
def __add__(self, other_region):
# TODO: check data type, handle both ASTRegion and the ast_object region?
from .compound_region import ASTCompoundRegion # forward import to avoid circular import error
return ASTCompoundRegion(regions=[self, other_region], operation=Ast.AND)
@classmethod
def fromFITSHeader(cls, fits_header=None, uncertainty:float=4.848e-6):
'''
Factory method to create a region from the provided FITS header; the returned object will be as specific as possible (but probably an :py:class:`ASTPolygon`).
The frame is determined from the FITS header.
:param fits_header: a FITS header (Astropy, fitsio, an array of cards)
:param uncertainty: defaults to 4.848e-6, an uncertainty of 1 arcsec
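Example (an illustrative sketch, not from the original documentation; the file name
"image.fits" and the top-level import of ASTRegion are assumptions):

.. code-block:: python

    from astropy.io import fits
    from cornish import ASTRegion

    hdu = fits.open("image.fits")[0]
    region = ASTRegion.fromFITSHeader(hdu.header)  # typically an ASTPolygon on the sky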
'''
if fits_header is None:
raise ValueError("This method requires a 'fits_header' to be set.")
# import here to avoid circular imports
from .box import ASTBox
from .circle import ASTCircle
from .polygon import ASTPolygon
from ..channel import ASTFITSChannel
# get frame from header
fits_channel = ASTFITSChannel(header=fits_header)
# does this channel contain a frame set?
frame_set = fits_channel.frameSet
if frame_set is None:
raise ValueError("The provided FITS header does not describe a region (e.g. not an image, does not contain a WCS that AST can read).")
frame = frame_set.baseFrame
# support n-dimensional regions
# define needed parameters for region creation below
dimensions = fits_channel.dimensions
n_dim = len(dimensions)
cornerPoint = [0.5 for x in range(n_dim)]
cornerPoint2 = [dimensions[x] + 0.5 for x in range(n_dim)]
#cornerPoint=[0.5,0.5], # center of lower left pixel
#cornerPoint2=[dimensions[0]+0.5, dimensions[1]+0.5])
if n_dim > 2:
raise NotImplementedError("HDUs describing dimensions greater than 2 not currently supported.")
#if isinstance(frame, ASTFrame):
# self.frame = frame
#elif isinstance(frame, starlink.Ast.Frame):
# self.frame = ASTFrame(frame=frame)
#else:
# raise Exception("ASTBox: unexpected frame type specified ('{0}').".format(type(frame)))
#if all([cornerPoint,centerPoint]) or all([cornerPoint,cornerPoint2]) or dimensions is not None:
# if dimensions is not None:
# input_form = CORNER_CORNER
# p1 = [0.5,0.5] # use 0.5 to specify the center of each pixel
# p2 = [dimensions[0]+0.5, dimensions[1]+0.5]
# elif centerPoint is None:
# input_form = CORNER_CORNER
# p1 = [cornerPoint[0], cornerPoint[1]]
# p2 = [cornerPoint2[0], cornerPoint2[1]]
# dimensions = [math.fabs(cornerPoint[0] - cornerPoint2[0]),
# math.fabs(cornerPoint[1] - cornerPoint2[1])]
# else:
# input_form = CENTER_CORNER
# p1 = [centerPoint[0], centerPoint[1]]
# p2 = [cornerPoint[0], cornerPoint[1]]
# dimensions = [2.0 * math.fabs(centerPoint[0] - cornerPoint[0]),
# 2.0 * math.fabs(centerPoint[1] - cornerPoint[1])]
# input_form constants (define properly elsewhere?)
CENTER_CORNER = 0
CORNER_CORNER = 1
input_form = CORNER_CORNER
p1 = [cornerPoint[0], cornerPoint[1]]
p2 = [cornerPoint2[0], cornerPoint2[1]]
dimensions = [math.fabs(cornerPoint[0] - cornerPoint2[0]),
math.fabs(cornerPoint[1] - cornerPoint2[1])]
#dimensions = [dimensions[0], dimensions[1]]
#logger.debug("Setting dims: {0}".format(self.dimensions))
ast_object = Ast.Box( frame.astObject, input_form, p1, p2, unc=uncertainty )
# create the mapping from pixel to sky (or whatever is there) if available
mapping = frame_set.astObject.getmapping() # defaults are good
current_frame = frame_set.astObject.getframe(starlink.Ast.CURRENT)
# create a new region with new mapping
ast_object = ast_object.mapregion(mapping, current_frame)
if isinstance(ast_object, Ast.Box):
from .box import ASTBox # avoid circular imports
return ASTBox(ast_object=ast_object)
elif isinstance(ast_object, Ast.Circle):
from .circle import ASTCircle # avoid circular imports
return ASTCircle(ast_object=ast_object)
elif isinstance(ast_object, Ast.Polygon):
return ASTPolygon(ast_object=ast_object)
else:
raise Exception(f"Unexpected region type encountered: {type(ast_object)}.")
@property
def points(self) -> np.ndarray:
'''
The array of points that define the region. The interpretation of the points is dependent on the type of shape in question.
Box: returns two points; the center and a box corner.
Circle: returns two points; the center and a point on the circumference.
CmpRegion: no points returned; to get points that define a compound region, call this method on each of the component regions via the method "decompose".
Ellipse: three points: 1) center, 2) end of one axis, 3) end of the other axis
Interval: two points: 1) lower bounds position, 2) upper bounds position (reversed when interval is an excluded interval)
NullRegion: no points
PointList: positions that the list was constructed with
Polygon: vertex positions used to construct the polygon
Prism: no points (see CmpRegion)
NOTE: points returned reflect the current coordinate system and may be different from the initial construction
:returns: NumPy array of coordinate points in degrees, shape (ncoord,2), e.g. [[ra1,dec1], [ra2, dec2], ..., [ra_n, dec_n]]
'''
# getregionpoints returns data as [[x1, x2, ..., xn], [y1, y2, ..., yn]]
# transpose the points before returning
# also, normalize points to expected bounds
region_points = self.astObject.getregionpoints()
if self.isNegated:
# reverse order to reflect the definition of the polygon
region_points = np.fliplr(region_points)
if self.frame().isSkyFrame:
#return np.rad2deg(self.astObject.norm(self.astObject.getregionpoints())).T
return np.rad2deg(self.astObject.norm(region_points)).T
else:
#return self.astObject.getregionpoints().T
return region_points.T
@property
def uncertainty(self):
'''
Uncertainties associated with the boundary of the Box.
The uncertainty in any point on the boundary of the Box is found by
shifting the supplied "uncertainty" Region so that it is centered at
the boundary point being considered. The area covered by the shifted
uncertainty Region then represents the uncertainty in the boundary
position. The uncertainty is assumed to be the same for all points.
'''
return self._uncertainty
@uncertainty.setter
def uncertainty(self, unc):
raise Exception("Setting the uncertainty currently doesn't do anything.")
self._uncertainty = unc
@property
def isAdaptive(self):
'''
Boolean attribute that indicates whether the area adapt to changes in the coordinate system.
'''
return self.astObject.get("Adaptive") == "1"
@isAdaptive.setter
def isAdaptive(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Adaptive=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Adaptive=0")
else:
raise Exception("ASTRegion.adaptive property must be one of [True, False, 1, 0].")
@property
def isNegated(self):
''' Boolean attribute that indicates whether the original region has been negated. '''
return self.astObject.get("Negated") == "1"
@isNegated.setter
def isNegated(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Negated=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Negated=0")
else:
raise Exception("ASTRegion.isNegated property must be one of [True, False, 1, 0].")
def negate(self):
''' Negate the region, i.e. points inside the region will be outside, and vice versa. '''
self.astObject.negate()
@property
def isClosed(self) -> bool:
'''
Boolean attribute that indicates whether the boundary be considered to be inside the region.
'''
return self.astObject.get("Closed") == "1"
@isClosed.setter
def isClosed(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Closed=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Closed=0")
else:
raise Exception("ASTRegion.isClosed property must be one of [True, False, 1, 0].")
@property
def isBounded(self) -> bool:
''' Boolean attribute that indicates whether the region is bounded. '''
return self.astObject.get("Bounded") == "1"
@isBounded.setter
def isBounded(self, newValue:bool):
if newValue in [True, 1, "1"]:
self.astObject.set("Bounded=1")
elif newValue in [False, 0, "0"]:
self.astObject.set("Bounded=0")
else:
raise Exception("ASTRegion.isBounded property must be one of [True, False, 1, 0].")
def frame(self) -> ASTFrame:
'''
Returns a copy of the frame encapsulated by this region.
Note that the frame is not directly accessible; both this method and the underlying ``starlink-pyast`` function returns a copy.
'''
# this is not a property since a new object is being returned.
ast_frame = self.astObject.getregionframe() # "A pointer to a deep copy of the Frame represented by the Region."
return ASTFrame.frameFromAstObject(ast_frame)
def frameSet(self) -> ASTFrameSet:
'''
Returns a copy of the frameset encapsulated by this region.
From AST docs:
::
The base Frame is the Frame in which the box was originally
defined, and the current Frame is the Frame into which the
Region is currently mapped (i.e. it will be the same as the
Frame returned by astGetRegionFrame).
'''
raise NotImplementedError("getregionframeset() has not yet been exposed to the Python interface.")
return ASTFrameSet(ast_object=self.astObject.getregionframeset())
@property
def meshSize(self) -> int:
''' Number of points used to create a mesh covering the region. '''
#return int(self.astObject.get("MeshSize"))
return int(self.astObject.MeshSize)
@meshSize.setter
def meshSize(self, newValue:int):
if isinstance(newValue, int):
if newValue < 5:
newValue = 5
self.astObject.set("MeshSize={0}".format(newValue))
else:
raise Exception("ASTRegion.meshSize: an integer value of at least 5 is required.")
@property
def fillFactor(self):
''' <Fraction of the Region which is of interest> '''
# TODO: properly document, see p. 812 of documentation
return self.astObject.get("FillFactor")
@fillFactor.setter
def fillFactor(self, newValue):
raise Exception("TODO: document and implement")
@property
def bounds(self) -> Tuple:
'''
Returns lower and upper coordinate points that bound this region.
'''
lower_bounds, upper_bounds = self.astObject.getregionbounds()
# lower_bounds and upper_bounds are n-dimensional arrays
# e.g. for a 2D image,
# [-10,5], [10,20] <- (ra, dec) or pixel bounds
if self.frame().isSkyFrame:
lower_bounds = np.rad2deg(self.astObject.norm(lower_bounds))
upper_bounds = np.rad2deg(self.astObject.norm(upper_bounds))
return (lower_bounds, upper_bounds)
def boundingBox(self) -> ASTBox:
'''
Returns an ASTBox region that bounds this region where the box sides align with RA, dec.
'''
from cornish import ASTBox # import here to avoid circular import
return ASTBox.fromCorners(frame=self.frame(), corners=self.bounds)
#raise NotImplementedError()
# use the "bounds" method above to create a bounding box
def boundingCircle(self) -> ASTCircle:
'''
Returns the smallest circle (:py:class:`ASTCircle`) that bounds this region.
It is up to the caller to know that this is a 2D region (only minimal checks are made).
:raises cornish.exc.NotA2DRegion: raised when attempting to get a bounding circle for a region that is not 2D
'''
if self.naxes != 2:
raise NotA2DRegion(f"A bounding circle can only be computed on a 2D region; this region has {self.naxes} axes.")
from .circle import ASTCircle
centre, radius = self.astObject.getregiondisc() # returns radians
return ASTCircle(frame=self, center=np.rad2deg(centre), radius=rad2deg(radius))
def overlaps(self, region) -> bool:
'''
Return ``True`` if this region overlaps with the provided one.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
elif return_value == 1:
return False # no overlap
elif return_value == 2:
return True # this region is completely inside the provided region
elif return_value == 3:
return True # the provided region is completely inside the first region
elif return_value == 4:
return True # there is partial overlap
elif return_value == 5:
return True # the regions are identical to within their uncertainties
elif return_value == 6:
return False # the second region is the exact negation of this region to within their uncertainties
def isIdenticalTo(self, region:ASTRegion) -> bool:
'''
Returns 'True' if this region is identical (to within their uncertainties) to the provided region, 'False' otherwise.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 5
def isFullyWithin(self, region:ASTRegion) -> bool:
'''
Returns 'True' if this region is fully within the provided region.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 2
def fullyEncloses(self, region:ASTRegion) -> bool:
'''
Returns 'True' if this region fully encloses the provided region.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 3
def isNegationOf(self, region):
'''
Returns 'True' if this region is the exact negation of the provided region.
'''
if region is None:
raise ValueError("'None' was provided as the second region.")
if isinstance(region, ASTRegion):
return_value = self.astObject.overlap(region.astObject)
elif isinstance(region, starlink.Ast.Region):
return_value = self.astObject.overlap(region)
else:
raise ValueError(f"Unexpected object given for region; expected either ASTRegion or starlink.Ast.Region (got '{type(region)}').")
if return_value == 0:
raise CoordinateSystemsCouldNotBeMapped("The provided region's coordinate system could not be mapped to this region's system.")
else:
return return_value == 6
def maskOnto(self, image=None, mapping=None, fits_coordinates:bool=True, lower_bounds=None, mask_inside=True, mask_value=float("NaN")):
'''
Apply this region as a mask on top of the provided image; note: the image values are overwritten!
:param image: numpy.ndarray of pixel values (or other array of values)
:param mapping: mapping from this region to the pixel coordinates of the provided image
:param fits_coordinates: use the pixel coordinates of a FITS file (i.e. origin = [0.5, 0.5] for 2D)
:param lower_bounds: lower bounds of provided image, only specify if not using FITS coordinates
:param mask_inside: True: mask the inside of this region; False: mask outside of this region
:param mask_value: the value to set the masked image pixels to
:returns: number of pixels in image masked
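Example (a minimal illustrative sketch; ``pixel_data`` and ``sky_to_pixel_mapping`` are
placeholders for values obtained elsewhere, e.g. from a FITS HDU and its frame set):

.. code-block:: python

    n_masked = region.maskOnto(image=pixel_data, mapping=sky_to_pixel_mapping)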
'''
# coded now for numpy arrays, but set ndim,shape for anything else
ndim = len(image.shape)
shape = image.shape # <-- unused variable!
# assert number of axes in image == # of outputs in the mapping
if ndim != mapping.number_of_output_coordinates:
raise Exception(f"The number of dimensions in the provided image ({ndim}) does not match the number of output dimensions of the provided mapping ({mapping.number_of_output_coordinates}).")
if fits_coordinates:
# use the pixel coordinates for FITS files -> origin at [0.5, 0.5]
lower_bounds = [0.5 for x in range(ndim)]
else:
# must use provided lower bounds
if lower_bounds is None:
raise ValueError("'lower_bounds' must be provided (or specify 'fits_coordinates=True' to use FITS coordinates.")
# upper_bounds = list()
# for idx, n in enumerate(shape):
# upper_bounds.append(lower_bounds[idx] + n)
npix_masked = self.astObject.mask(mapping.astObject, mask_inside, lower_bounds, image, mask_value)
return npix_masked
def regionWithMapping(self, map=None, frame=None) -> ASTRegion:
'''
Returns a new ASTRegion with the coordinate system from the supplied frame.
Corresponds to the ``astMapRegion`` C function (``starlink.Ast.mapregion``).
:param map: A mapping that can convert coordinates from the system of the current region to that of the supplied frame.
:param frame: A frame containing the coordinate system for the new region.
:returns: new ASTRegion with a new coordinate system
'''
if frame is None:
raise Exception("A frame must be specified.")
if map is None:
map = frame # attempt to use the frame as a mapper (an ASTFrame is a subclass of ASTMapper)
# Would be nice to be able to create an instance of the same subclass of ASTRegion
# - how to inspect the object for this information?
if isinstance(map, starlink.Ast.Mapping):
ast_map = map
elif isinstance(map, (ASTMapping, ASTFrameSet)): # frame sets contain mappings
ast_map = map.astObject
else:
raise Exception("Expected 'map' to be one of these two types: starlink.Ast.Mapping, ASTMap.")
if isinstance(frame, starlink.Ast.Frame):
ast_frame = frame
elif isinstance(map, (ASTFrame, ASTFrameSet)):
ast_frame = frame.astObject
else:
raise Exception("Expected 'frame' to be one of these two types: starlink.Ast.Frame, ASTFrame.")
new_ast_region = self.astObject.mapregion(ast_map, ast_frame)
# This is temporary and probably fragile. Find a replacement for this ASAP.
# get the returned region type to create the correct wrapper
#
# -> make a deep copy, replace obj.astObject with new one (check any properties)
#
if new_ast_region.Class == 'Polygon' or isinstance(new_ast_region, starlink.Ast.Polygon):
return cornish.region.ASTPolygon(ast_object=new_ast_region)
elif new_ast_region.Class == 'Box' or isinstance(new_ast_region, starlink.Ast.Box):
return cornish.region.ASTBox(ast_object=new_ast_region)
else:
raise Exception("ASTRegion.regionWithMapping: unhandled region type (easy fix).")
def mapRegionMesh(self, mapping=None, frame=None):
'''
Returns a new ASTRegion that is the same as this one but with the specified coordinate system.
Parameters
----------
mapping : `~cornish.mapping.ASTMapping` class
The mapping to transform positions from the current ASTRegion to those specified by the given frame.
frame : `~cornish.frame.ASTFrame` class
Coordinate system frame to convert the current ASTRegion to.
Returns
-------
region : `ASTRegion`
A new region object covering the same area but in the frame specified in `frame`.
Raises
------
Exception
An exception is raised for missing parameters.
'''
if mapping is None or frame is None:
raise Exception("A mapping and frame is required to be passed to 'mapRegion'.")
# check it's the correct type
if not isinstance(mapping, ASTMapping):
raise Exception("The object passed to 'mapping' needs to be an ASTMapping object.")
if not isinstance(frame, ASTFrame):
raise Exception("The object passed to 'frame' needs to be an ASTFrame object.")
self.astObject.mapregionmesh( mapping, frame )
def boundaryPointMesh(self, npoints:int=None) -> np.ndarray:
'''
Returns an array of evenly distributed points that cover the boundary of the region.
For example, if the region is a box, it will generate a list of points that trace the edges of the box.
The default value of 'npoints' is 200 for 2D regions and 2000 for three or more dimensions.
:param npoints: the approximate number of points to generate in the mesh
:returns: list of points in degrees
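Example (illustrative; assumes ``region`` is a 2D region on the sky):

.. code-block:: python

    mesh = region.boundaryPointMesh(npoints=500)  # shape (n, 2): [ra, dec] pairs in degrees
    ra, dec = mesh[:, 0], mesh[:, 1]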
'''
# The starlink.AST object uses the attribute "MeshSize" to determine the number of points to
# use. This should be specified when building the mesh - the attribute doesn't seem to be used
# anywhere else. This method shouldn't change the value in case that's not true, but we'll make
# this one step here.
#if npoints is not None and not isinstance(npoints, int):
# raise Exception("The parameter 'npoints' must be an integer ('{1}' provided).".format(npoints))
if npoints is None:
pass # use default meshSize value
else:
# use provided value
#old_mesh_size = self.astObject.get("MeshSize")
#self.astObject.set("MeshSize={0}".format(npoints))
old_mesh_size = self.meshSize
self.meshSize = npoints
try:
mesh = self.astObject.getregionmesh(AST_BOUNDARY_MESH) # here "surface" means the boundary
# if a basic frame is used instead of a sky frame, the points need to be normalized on [0,360)
mesh = self.astObject.norm(mesh)
except Ast.MBBNF as e:
print(f"AST error: Mapping bounding box not found. ({e})")
raise e
if npoints is not None:
# restore original value
self.meshSize = old_mesh_size #self.astObject.set("MeshSize={0}".format(old_mesh_size))
if self.frame().isSkyFrame:
return np.rad2deg(mesh).T # returns as a list of pairs of points, not two parallel arrays
else:
return mesh.T
def interiorPointMesh(self, npoints:int=None):
'''
Returns an array of evenly distributed points that cover the surface of the region.
For example, if the region is a box, it will generate a list of points that fill the interior area of the box.
The default value of 'npoints' is 200 for 2D regions and 2000 for three or more dimensions.
:param npoints: the approximate number of points to generate in the mesh
:returns: array of points in degrees
'''
# See discussion of "MeshSize" in method "boundaryPointMesh".
if npoints is not None and not isinstance(npoints, int):
raise Exception(f"The parameter 'npoints' must be an integer ('{type(npoints)}' provided).")
if npoints is None:
pass # use default value
else:
# use provided value
old_mesh_size = self.astObject.get("MeshSize")
self.astObject.set("MeshSize={0}".format(npoints))
# The returned "points" array from getregionmesh() will be a 2-dimensional array with shape (ncoord,npoint),
# where "ncoord" is the number of axes within the Frame represented by the Region,
# and "npoint" is the number of points in the mesh (see attribute "MeshSize").
mesh = self.astObject.getregionmesh(AST_SURFACE_MESH) # here "surface" means the interior
mesh = self.astObject.norm(mesh)
if npoints is not None:
# restore original value
self.astObject.set("MeshSize={0}".format(old_mesh_size))
if self.frame().isSkyFrame:
mesh = np.rad2deg(mesh)
return mesh.T
def containsPoint(self, point:Union[Iterable, astropy.coordinates.SkyCoord]=None) -> bool:
'''
Returns ``True`` if the provided point lies inside this region, ``False`` otherwise.
This method is a direct synonym for :meth:`pointInRegion`.
The name "containsPoint" is more appropriate from an object perspective,
but the ``pointInRegion`` method is present for consistency with the AST library.
:param point: a coordinate point in the same frame as this region
'''
return self.pointInRegion(point=point)
def pointInRegion(self, point:Union[Iterable, astropy.coordinates.SkyCoord,np.ndarray]=None) -> bool:
'''
Returns ``True`` if the provided point lies inside this region, ``False`` otherwise.
If no units are specified degrees are assumed.
:param point: a coordinate point in the same frame as this region
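Example (illustrative):

.. code-block:: python

    import astropy.units as u
    from astropy.coordinates import SkyCoord

    region.pointInRegion(SkyCoord(ra=160.0*u.deg, dec=22.5*u.deg))
    region.pointInRegion([160.0, 22.5])  # plain values are taken as degrees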
'''
if point is None:
raise ValueError("A test point must be specified.")
if isinstance(point, astropy.coordinates.SkyCoord):
point = [point.ra.to(u.rad).value, point.dec.to(u.rad).value]
else:
point = np.deg2rad(point)
return self.astObject.pointinregion(point)
@abstractproperty
def area(self) -> astropy.units.quantity.Quantity:
# subclasses must implement
raise NotImplementedError()
@abstractmethod
def toPolygon(self, npoints=200, maxerr:astropy.units.Quantity=1.0*u.arcsec) -> ASTPolygon:
'''
Method that guarantees returning a polygon that describes or approximates this region.
This method provides a common interface to create polygons from different region types.
Calling this on an ASTPolygon will return itself; calling it on an ASTCircle
will return a polygon that approximates the circle. The parameters 'npoints' and
'maxerr' will be used only when appropriate.
:param npoints: number of points to sample from the region's boundary for the resulting polygon
:param maxerr:
'''
pass
| mit | -7,151,365,176,470,967,000 | 37.906883 | 191 | 0.728755 | false |
Coderhypo/UIAMS | manage.py | 1 | 1085 | #-*- coding: UTF-8 -*-
import os
from app import app, db
from app.models import User, Role
from flask.ext.script import Manager, Server, Shell
from flask.ext.migrate import Migrate, MigrateCommand
manager = Manager(app)
def make_shell_context():
return dict(app=app, db=db, User=User, Role=Role)
manager.add_command("runserver", Server(host="0.0.0.0", port=2000))
manager.add_command("shell", Shell(make_context=make_shell_context))
migrate = Migrate(app, db)
manager.add_command('db', MigrateCommand)
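# Typical invocations (illustrative, not part of the original script):
#   python manage.py runserver   # serve the app on 0.0.0.0:2000
#   python manage.py shell       # shell with app, db, User and Role preloaded
#   python manage.py deploy      # rebuild the tables and create the default admin user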
@manager.command
def deploy():
from flask.ext.migrate import upgrade
from app.models import Role, User
# migrate database to latest revision
# upgrade()
db.drop_all()
db.create_all()
try:
Role.insert_roles()
r = Role.query.filter_by(role_name = u'管理员').first()
u = User('admin', 'admin')
u.role = r
u.password = '123'
db.session.add(u)
db.session.commit()
except Exception, e:
print e
db.session.rollback()
if __name__ == '__main__':
manager.run()
| lgpl-3.0 | -1,176,110,634,197,252,400 | 24.690476 | 68 | 0.639481 | false |
maestromusic/maestro | maestro/core/domains.py | 1 | 5562 | # -*- coding: utf-8 -*-
# Maestro Music Manager - https://github.com/maestromusic/maestro
# Copyright (C) 2014-2015 Martin Altmayer, Michael Helmling
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os.path
from PyQt5 import QtCore, QtGui, QtWidgets
translate = QtCore.QCoreApplication.translate
from .. import application, database as db, logging, stack
from ..application import ChangeEvent, ChangeType
domains = []
# Maximum length of encoded domain names.
MAX_NAME_LENGTH = 63
def default():
return domains[0]
def isValidName(name):
return name == name.strip() and 0 < len(name.encode()) <= MAX_NAME_LENGTH
def exists(name):
return any(domain.name == name for domain in domains)
def domainById(id: int):
for domain in domains:
if domain.id == id:
return domain
else: return None
def domainByName(name):
for domain in domains:
if domain.name == name:
return domain
else: return None
def init():
if db.prefix+'domains' not in db.listTables():
logging.error(__name__, "domains-table is missing")
raise RuntimeError()
result = db.query("SELECT id, name FROM {p}domains ORDER BY name")
for row in result:
domains.append(Domain(*row))
class Domain:
def __init__(self, id, name):
self.id = id
self.name = name
def __repr__(self):
return "<Domain {}>".format(self.name)
def addDomain(name):
"""Add a new domain with the given name to the database. Return the new domain."""
if exists(name):
raise ValueError("There is already a domain with name '{}'.".format(name))
if not isValidName(name):
raise ValueError("'{}' is not a valid domain name.".format(name))
domain = Domain(None, name)
stack.push(translate("Domains", "Add domain"),
stack.Call(_addDomain, domain),
stack.Call(_deleteDomain, domain))
return domain
def _addDomain(domain):
"""Add a domain to database and some internal lists and emit a DomainChanged-event. If *domain*
doesn't have an id, choose an unused one.
"""
if domain.id is None:
domain.id = db.query("INSERT INTO {p}domains (name) VALUES (?)", domain.name).insertId()
else: db.query("INSERT INTO {p}domains (id, name) VALUES (?,?)", domain.id, domain.name)
logging.info(__name__, "Added new domain '{}'".format(domain.name))
domains.append(domain)
application.dispatcher.emit(DomainChangeEvent(ChangeType.added, domain))
def deleteDomain(domain):
"""Delete a domain from all elements and the database."""
stack.push(translate("Domains", "Delete domain"),
stack.Call(_deleteDomain, domain),
stack.Call(_addDomain, domain))
def _deleteDomain(domain):
"""Like deleteDomain but not undoable."""
assert db.query("SELECT COUNT(*) FROM {p}elements WHERE domain=?", domain.id).getSingle() == 0
if domains == [domain]:
raise RuntimeError("Cannot delete last domain.")
logging.info(__name__, "Deleting domain '{}'.".format(domain))
db.query("DELETE FROM {p}domains WHERE id = ?", domain.id)
domains.remove(domain)
application.dispatcher.emit(DomainChangeEvent(ChangeType.deleted, domain))
def changeDomain(domain, **data):
"""Change a domain. The attributes that should be changed must be specified by keyword arguments.
Currently only 'name' is supported.
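Example (illustrative): changeDomain(domain, name='Audio books')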
"""
oldData = {'name': domain.name}
stack.push(translate("Domains ", "Change domain"),
stack.Call(_changeDomain, domain, **data),
stack.Call(_changeDomain, domain, **oldData))
def _changeDomain(domain, **data):
"""Like changeDomain but not undoable."""
# Below we will build a query like UPDATE domains SET ... using the list of assignments (e.g. name=?).
# The parameters will be sent with the query to replace the question marks.
assignments = []
params = []
if 'name' in data:
name = data['name']
if name != domain.name:
if exists(name):
raise ValueError("There is already a domain named '{}'.".format(name))
logging.info(__name__, "Changing domain name '{}' to '{}'.".format(domain.name, name))
assignments.append('name = ?')
params.append(name)
domain.name = name
if len(assignments) > 0:
params.append(domain.id) # for the where clause
db.query("UPDATE {p}domains SET "+','.join(assignments)+" WHERE id = ?", *params)
application.dispatcher.emit(DomainChangeEvent(ChangeType.changed, domain))
class DomainChangeEvent(ChangeEvent):
"""DomainChangeEvents are used when a domain is added, changed or deleted."""
def __init__(self, action, domain):
assert isinstance(action, ChangeType)
self.action = action
self.domain = domain
| gpl-3.0 | -4,220,954,320,742,945,000 | 33.546584 | 107 | 0.647609 | false |
PaloAltoNetworks/minemeld-core | minemeld/flask/config.py | 1 | 5920 | # Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gevent
import yaml
import filelock
import passlib.apache
from . import utils
from .logger import LOG
CONFIG = {}
API_CONFIG_PATH = None
API_CONFIG_LOCK = None
CONFIG_FILES_RE = '^(?:(?:[0-9]+.*\.yml)|(?:.*\.htpasswd))$'
# if you change things here change also backup/import API
_AUTH_DBS = {
'USERS_DB': 'wsgi.htpasswd',
'FEEDS_USERS_DB': 'feeds.htpasswd'
}
def get(key, default=None):
try:
result = CONFIG[key]
except KeyError:
pass
else:
return result
try:
result = os.environ[key]
except KeyError:
pass
else:
if result == 'False':
result = False
if result == 'True':
result = True
return result
return default
def store(file, value):
with API_CONFIG_LOCK.acquire():
with open(os.path.join(API_CONFIG_PATH, file), 'w+') as f:
yaml.safe_dump(value, stream=f)
def lock():
return API_CONFIG_LOCK.acquire()
class APIConfigDict(object):
def __init__(self, attribute, level=50):
self.attribute = attribute
self.filename = '%d-%s.yml' % (level, attribute.lower().replace('_', '-'))
def set(self, key, value):
curvalues = get(self.attribute, {})
curvalues[key] = value
store(self.filename, {self.attribute: curvalues})
def delete(self, key):
curvalues = get(self.attribute, {})
curvalues.pop(key, None)
store(self.filename, {self.attribute: curvalues})
def value(self):
return get(self.attribute, {})
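# Illustrative usage of APIConfigDict (a sketch, not part of the original module;
# the attribute name 'FEEDS_ATTRS' is only an example):
# feeds_attrs = APIConfigDict('FEEDS_ATTRS')
# feeds_attrs.set('myfeed', {'datatype': 'IPv4'})   # persisted to api/50-feeds-attrs.yml
# current = feeds_attrs.value()
# feeds_attrs.delete('myfeed')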
def _load_config(config_path):
global CONFIG
new_config = {}
# compatibility with early releases where all the config
# was stored in a single file
old_config_file = os.path.join(config_path, 'wsgi.yml')
if os.path.exists(old_config_file):
try:
with open(old_config_file, 'r') as f:
add_config = yaml.safe_load(f)
if add_config is not None:
new_config.update(add_config)
except OSError:
pass
with API_CONFIG_LOCK.acquire():
api_config_path = os.path.join(config_path, 'api')
if os.path.exists(api_config_path):
config_files = sorted(os.listdir(api_config_path))
for cf in config_files:
if not cf.endswith('.yml'):
continue
try:
with open(os.path.join(api_config_path, cf), 'r') as f:
add_config = yaml.safe_load(f)
if add_config is not None:
new_config.update(add_config)
except (OSError, IOError, ValueError):
LOG.exception('Error loading config file %s' % cf)
CONFIG = new_config
LOG.info('Config loaded: %r', new_config)
def _load_auth_dbs(config_path):
with API_CONFIG_LOCK.acquire():
api_config_path = os.path.join(config_path, 'api')
for env, default in _AUTH_DBS.iteritems():
dbname = get(env, default)
new_db = False
dbpath = os.path.join(
api_config_path,
dbname
)
# for compatibility with old releases
if not os.path.exists(dbpath):
old_dbpath = os.path.join(
config_path,
dbname
)
if os.path.exists(old_dbpath):
dbpath = old_dbpath
else:
new_db = True
CONFIG[env] = passlib.apache.HtpasswdFile(
path=dbpath,
new=new_db
)
LOG.info('%s loaded from %s', env, dbpath)
def _config_monitor(config_path):
api_config_path = os.path.join(config_path, 'api')
dirsnapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
while True:
try:
with API_CONFIG_LOCK.acquire(timeout=600):
new_snapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
if new_snapshot != dirsnapshot:
try:
_load_config(config_path)
_load_auth_dbs(config_path)
except gevent.GreenletExit:
break
except:
LOG.exception('Error loading config')
dirsnapshot = new_snapshot
except filelock.Timeout:
LOG.error('Timeout locking config in config monitor')
gevent.sleep(1)
# initialization
def init():
global API_CONFIG_PATH
global API_CONFIG_LOCK
config_path = os.environ.get('MM_CONFIG', None)
if config_path is None:
LOG.critical('MM_CONFIG environment variable not set')
raise RuntimeError('MM_CONFIG environment variable not set')
if not os.path.isdir(config_path):
config_path = os.path.dirname(config_path)
# init global vars
API_CONFIG_PATH = os.path.join(config_path, 'api')
API_CONFIG_LOCK = filelock.FileLock(
os.environ.get('API_CONFIG_LOCK', '/var/run/minemeld/api-config.lock')
)
_load_config(config_path)
_load_auth_dbs(config_path)
if config_path is not None:
gevent.spawn(_config_monitor, config_path)
| apache-2.0 | 661,309,615,503,957,100 | 26.793427 | 82 | 0.572635 | false |
eugenj/global-notes | Adaptive Python/test_helper.py | 10 | 6166 | import sys
def get_file_text(path):
""" Returns file text by path"""
file_io = open(path, "r")
text = file_io.read()
file_io.close()
return text
def get_file_output(encoding="utf-8", path=sys.argv[-1], arg_string=""):
"""
Returns answer file output
:param encoding: to decode output in python3
:param path: path of file to execute
:return: list of strings
"""
import subprocess
proc = subprocess.Popen([sys.executable, path], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if arg_string:
for arg in arg_string.split("\n"):
proc.stdin.write(bytearray(str(arg) + "\n", encoding))
proc.stdin.flush()
return list(map(lambda x: str(x.decode(encoding)), proc.communicate()[0].splitlines()))
def test_file_importable():
""" Tests there is no obvious syntax errors"""
path = sys.argv[-1]
if not path.endswith(".py"):
import os
parent = os.path.abspath(os.path.join(path, os.pardir))
python_files = [f for f in os.listdir(parent) if os.path.isfile(os.path.join(parent, f)) and f.endswith(".py")]
for python_file in python_files:
if python_file == "tests.py":
continue
check_importable_path(os.path.join(parent, python_file))
return
check_importable_path(path)
def check_importable_path(path):
""" Checks that file is importable.
Reports failure otherwise.
"""
saved_input = patch_input()
try:
import_file(path)
except:
failed("The file contains syntax errors", test_file_importable.__name__)
return
finally:
revert_input(saved_input)
passed(test_file_importable.__name__)
def patch_input():
def mock_fun(_m=""):
return "mock"
if sys.version_info[0] == 3:
import builtins
save_input = builtins.input
builtins.input = mock_fun
return save_input
elif sys.version_info[0] == 2:
import __builtin__
save_input = __builtin__.raw_input
__builtin__.raw_input = mock_fun
__builtin__.input = mock_fun
return save_input
def revert_input(saved_input):
if sys.version_info[0] == 3:
import builtins
builtins.input = saved_input
elif sys.version_info[0] == 2:
import __builtin__
__builtin__.raw_input = saved_input
__builtin__.input = saved_input
def import_file(path):
""" Returns imported file """
if sys.version_info[0] == 2 or sys.version_info[1] < 3:
import imp
return imp.load_source("tmp", path)
elif sys.version_info[0] == 3:
import importlib.machinery
return importlib.machinery.SourceFileLoader("tmp", path).load_module("tmp")
def import_task_file():
""" Returns imported file.
Imports file from which check action was run
"""
path = sys.argv[-1]
return import_file(path)
def test_is_not_empty():
"""
Checks that file is not empty
"""
path = sys.argv[-1]
file_text = get_file_text(path)
if len(file_text) > 0:
passed()
else:
failed("The file is empty. Please, reload the task and try again.")
def test_text_equals(text, error_text):
"""
Checks that answer equals text.
"""
path = sys.argv[-1]
file_text = get_file_text(path)
if file_text.strip() == text:
passed()
else:
failed(error_text)
def test_answer_placeholders_text_deleted(error_text="Don't just delete task text"):
"""
Checks that all answer placeholders are not empty
"""
windows = get_answer_placeholders()
for window in windows:
if len(window) == 0:
failed(error_text)
return
passed()
def set_congratulation_message(message):
""" Overrides default 'Congratulations!' message """
print("#educational_plugin CONGRATS_MESSAGE " + message)
def failed(message="Please, reload the task and try again.", name=None):
""" Reports failure """
if not name:
name = sys._getframe().f_back.f_code.co_name
print("#educational_plugin " + name + " FAILED + " + message)
def passed(name=None):
""" Reports success """
if not name:
name = sys._getframe().f_back.f_code.co_name
print("#educational_plugin " + name + " test OK")
def get_answer_placeholders():
"""
Returns all answer placeholders text
"""
prefix = "#educational_plugin_window = "
path = sys.argv[-1]
import os
file_name_without_extension = os.path.splitext(path)[0]
windows_path = file_name_without_extension + "_windows"
windows = []
f = open(windows_path, "r")
window_text = ""
first = True
for line in f.readlines():
if line.startswith(prefix):
if not first:
windows.append(window_text.strip())
else:
first = False
window_text = line[len(prefix):]
else:
window_text += line
if window_text:
windows.append(window_text.strip())
f.close()
return windows
def check_samples(samples=()):
"""
Check script output for all samples. Sample is a two element list, where the first is input and
the second is output.
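Example (illustrative): check_samples(samples=[["2 3\n", "5"]])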
"""
for sample in samples:
if len(sample) == 2:
output = get_file_output(arg_string=str(sample[0]))
if "\n".join(output) != sample[1]:
failed(
"Test from samples failed: \n \n"
"Input:\n{}"
"\n \n"
"Expected:\n{}"
"\n \n"
"Your result:\n{}".format(str.strip(sample[0]), str.strip(sample[1]), "\n".join(output)))
return
set_congratulation_message("All test from samples passed. Now we are checking your solution on Stepic server.")
passed()
def run_common_tests(error_text="Please, reload file and try again"):
test_is_not_empty()
test_answer_placeholders_text_deleted()
test_file_importable()
| mit | -754,148,502,409,651,600 | 26.526786 | 119 | 0.584009 | false |
jopohl/urh | src/urh/dev/native/SoundCard.py | 1 | 4702 | from collections import OrderedDict
from multiprocessing import Array
from multiprocessing.connection import Connection
import numpy as np
import pyaudio
from urh.dev.native.Device import Device
from urh.util.Logger import logger
class SoundCard(Device):
DEVICE_LIB = pyaudio
ASYNCHRONOUS = False
DEVICE_METHODS = dict()
CHUNK_SIZE = 1024
SYNC_TX_CHUNK_SIZE = 2 * CHUNK_SIZE
CONTINUOUS_TX_CHUNK_SIZE = SYNC_TX_CHUNK_SIZE
SAMPLE_RATE = 48000
pyaudio_handle = None
pyaudio_stream = None
DATA_TYPE = np.float32
@classmethod
def init_device(cls, ctrl_connection: Connection, is_tx: bool, parameters: OrderedDict) -> bool:
try:
cls.SAMPLE_RATE = int(parameters[cls.Command.SET_SAMPLE_RATE.name])
except (KeyError, ValueError):
pass
return super().init_device(ctrl_connection, is_tx, parameters)
@classmethod
def setup_device(cls, ctrl_connection: Connection, device_identifier):
ctrl_connection.send("Initializing pyaudio...")
try:
cls.pyaudio_handle = pyaudio.PyAudio()
ctrl_connection.send("Initialized pyaudio")
return True
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to initialize pyaudio")
@classmethod
def prepare_sync_receive(cls, ctrl_connection: Connection):
try:
cls.pyaudio_stream = cls.pyaudio_handle.open(format=pyaudio.paFloat32,
channels=2,
rate=cls.SAMPLE_RATE,
input=True,
frames_per_buffer=cls.CHUNK_SIZE)
ctrl_connection.send("Successfully started pyaudio stream")
return 0
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to start pyaudio stream")
@classmethod
def prepare_sync_send(cls, ctrl_connection: Connection):
try:
cls.pyaudio_stream = cls.pyaudio_handle.open(format=pyaudio.paFloat32,
channels=2,
rate=cls.SAMPLE_RATE,
frames_per_buffer=cls.CHUNK_SIZE,
output=True)
ctrl_connection.send("Successfully started pyaudio stream")
return 0
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to start pyaudio stream")
@classmethod
def receive_sync(cls, data_conn: Connection):
if cls.pyaudio_stream:
data_conn.send_bytes(cls.pyaudio_stream.read(cls.CHUNK_SIZE, exception_on_overflow=False))
@classmethod
def send_sync(cls, data):
if cls.pyaudio_stream:
data_bytes = data.tostring() if isinstance(data, np.ndarray) else bytes(data)
# pad with zeros if smaller than chunk size
cls.pyaudio_stream.write(data_bytes.ljust(cls.CHUNK_SIZE*8, b'\0'))
@classmethod
def shutdown_device(cls, ctrl_connection, is_tx: bool):
logger.debug("shutting down pyaudio...")
try:
if cls.pyaudio_stream:
cls.pyaudio_stream.stop_stream()
cls.pyaudio_stream.close()
if cls.pyaudio_handle:
cls.pyaudio_handle.terminate()
ctrl_connection.send("CLOSE:0")
except Exception as e:
logger.exception(e)
ctrl_connection.send("Failed to shut down pyaudio")
def __init__(self, sample_rate, resume_on_full_receive_buffer=False):
super().__init__(center_freq=0, sample_rate=sample_rate, bandwidth=0,
gain=1, if_gain=1, baseband_gain=1,
resume_on_full_receive_buffer=resume_on_full_receive_buffer)
self.success = 0
self.bandwidth_is_adjustable = False
@property
def device_parameters(self) -> OrderedDict:
return OrderedDict([(self.Command.SET_SAMPLE_RATE.name, self.sample_rate),
("identifier", None)])
@staticmethod
def bytes_to_iq(buffer):
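# the raw buffer holds interleaved 32-bit float pairs; reinterpret it and
# reshape into rows of (I, Q) samples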
return np.frombuffer(buffer, dtype=np.float32).reshape((-1, 2), order="C")
@staticmethod
def iq_to_bytes(samples: np.ndarray):
arr = Array("f", 2 * len(samples), lock=False)
numpy_view = np.frombuffer(arr, dtype=np.float32)
numpy_view[:] = samples.flatten(order="C")
return arr
| gpl-3.0 | -7,528,737,987,523,719,000 | 37.227642 | 102 | 0.574011 | false |
mario23285/ProyectoElectrico | src/Leg.py | 1 | 6591 | """
UNIVERSIDAD DE COSTA RICA Escuela de Ingeniería Eléctrica
IE0499 | Proyecto Eléctrico
Mario Alberto Castresana Avendaño
A41267
Programa: BVH_TuneUp
-------------------------------------------------------------------------------
archivo: Leg.py
descripción:
Este archivo contiene la clase Leg, la cual se utiliza para implementar la
rodilla izquierda y la derecha. Los estudios de goniometría para este hueso
se basan en los siguientes límites de los ángulos de Euler:
Z torsión no válida
X Flexión + y extensión -
Y rotación no válida
"""
from Bone import Bone
class Leg(Bone):
"""
This subclass implements the goniometry check for the knees in the
BVH skeleton. The hierarchy calls them "Leg".
"""
def __init__(self, ID=' ', Zp=0, Xp=0, Yp=0):
"""
This bone is initialized with the following parameters
ID: identifier of the bone, e.g. left/right
Each bone position is defined by a vector of Euler angles
(Z, X, Y), which occupy a specific position within the array
of the MOTION section of the BVH
Zp: index in the MOTION array that holds the Euler angle Z for this bone
Xp: index in the MOTION array that holds the Euler angle X for this bone
Yp: index in the MOTION array that holds the Euler angle Y for this bone
"""
self.ID = ID
self.Zp = Zp
self.Xp = Xp
self.Yp = Yp
#call the superclass constructor to access all the goniometry
#attributes
Bone.__init__(self,
Name='Rodilla',
Zmin=-0.200000,
Zmax=0.200000,
Xmin=0.000000,
Xmax=150.000000,
Ymin=-1.000000,
Ymax=1.000000)
def Goniometry_check(self, MOTION, frame):
"""
Description:
This function compares the value of the Euler angles that a bone
has in a given frame against the goniometric limits of that
particular bone. If any Euler angle exceeds the limits of human
movement, a glitch is reported for that frame and it is corrected
in the MOTION array.
arguments:
MOTION: array of 156 positions containing all the Euler angles for
every bone in a given frame. The order of the bones is given by
the HIERARCHY section of the BVH.
frame: frame of the MoCap video currently being analyzed
"""
#First, define the value of each Euler angle
Zeuler = MOTION[self.Zp]
Xeluer = MOTION[self.Xp]
Yeuler = MOTION[self.Yp]
glitch = False
#Exempt is a flag that is set when rotation problems of the Z and Y
#axes are detected in the knees
Exempt = False
ErrorMsg = ' existen glitches de '
#Variables to test whether the axes rotated and the skeleton is crouching
rodilla_flex = Xeluer > 13.0 or Xeluer < -15.0
y_rot = Yeuler > 20.0 or Yeuler < -20.0
z_rot = Zeuler > 40.0 or Zeuler < -40.0
Rotacion_ejes = y_rot or z_rot
if rodilla_flex and Rotacion_ejes:
Exempt = True
if Exempt:
#There are two different goniometric checks depending on how much the knees
#are flexed. When the knees have a flexion angle greater than 45 degrees or the
#Z and Y axes have rotated, the mobility limits in Z and Y must be widened.
#This is due to the behaviour of the bones in the BVH, which rotate the Y and Z
#axes to represent the movements of a crouching skeleton.
#It happens because the cameras lose the orientation of the bone in the
#Z and Y axes.
#test the new limits in Z
if Zeuler < -160.000000:
#no restriction is applied to MOTION[self.Zp] in Z
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Z- | '
if Zeuler > 160.000000:
#no restriction is applied to MOTION[self.Zp] in Z
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Z+ | '
#here we test new limits in X
if Xeluer < -150.000000:
#no restriction is applied to MOTION[self.Xp] in X
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en X- | '
if Xeluer > 150.000000:
#no restriction is applied to MOTION[self.Xp] in X
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en X+ | '
#here we test new limits in Y
if Yeuler < -105.000000:
#no restriction is applied to MOTION[self.Yp] in Y
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Y- | '
if Yeuler > 105.000000:
#no restriction is applied to MOTION[self.Yp] in Y
glitch = True
ErrorMsg += 'pérdida de orientación de los sensores en Y+ | '
else:
#test the limits in Z
if Zeuler < self.Zmin:
MOTION[self.Zp] = self.Zmin
glitch = True
ErrorMsg += 'torsión | '
if Zeuler > self.Zmax:
MOTION[self.Zp] = self.Zmax
glitch = True
ErrorMsg += 'torsión | '
#here we test the limits in X
if Xeluer < self.Xmin:
MOTION[self.Xp] = self.Xmin
glitch = True
ErrorMsg += 'extension | '
if Xeluer > self.Xmax:
MOTION[self.Xp] = self.Xmax
glitch = True
ErrorMsg += 'flexion | '
#here we test the limits in Y
if Yeuler < self.Ymin:
MOTION[self.Yp] = self.Ymin
glitch = True
ErrorMsg += 'rotacion interna | '
if Yeuler > self.Ymax:
MOTION[self.Yp] = self.Ymax
glitch = True
ErrorMsg += 'rotacion externa | '
if glitch:
self.Report_glitch(ErrorMsg, frame)
| gpl-2.0 | -7,727,383,819,763,163,000 | 38.93865 | 96 | 0.567896 | false |
cryptapus/electrum-uno | gui/qt/__init__.py | 1 | 7412 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import signal
try:
import PyQt4
except Exception:
sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'")
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
from electrum.i18n import _, set_language
from electrum.plugins import run_hook
from electrum import SimpleConfig, Wallet, WalletStorage
from electrum.paymentrequest import InvoiceStore
from electrum.contacts import Contacts
from electrum.synchronizer import Synchronizer
from electrum.verifier import SPV
from electrum.util import DebugMem
from electrum.wallet import Abstract_Wallet
from installwizard import InstallWizard
try:
import icons_rc
except Exception:
sys.exit("Error: Could not import icons_rc.py, please generate it with: 'pyrcc4 icons.qrc -o gui/qt/icons_rc.py'")
from util import * # * needed for plugins
from main_window import ElectrumWindow
class OpenFileEventFilter(QObject):
def __init__(self, windows):
self.windows = windows
super(OpenFileEventFilter, self).__init__()
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toEncoded())
return True
return False
class ElectrumGui:
def __init__(self, config, daemon, plugins):
set_language(config.get('language'))
# Uncomment this call to verify objects are being properly
# GC-ed when windows are closed
#network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer,
# ElectrumWindow], interval=5)])
self.config = config
self.daemon = daemon
self.plugins = plugins
self.windows = []
self.efilter = OpenFileEventFilter(self.windows)
self.app = QApplication(sys.argv)
self.app.installEventFilter(self.efilter)
self.timer = Timer()
# shared objects
self.invoices = InvoiceStore(self.config)
self.contacts = Contacts(self.config)
# init tray
self.dark_icon = self.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self.tray_icon(), None)
self.tray.setToolTip('Unobtanium Electrum')
self.tray.activated.connect(self.tray_activated)
self.build_tray_menu()
self.tray.show()
self.app.connect(self.app, QtCore.SIGNAL('new_window'), self.start_new_window)
run_hook('init_qt', self)
def build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
self.old_menu = self.tray.contextMenu()
m = QMenu()
for window in self.windows:
submenu = m.addMenu(window.wallet.basename())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self.toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit Unobtanium Electrum"), self.close)
self.tray.setContextMenu(m)
def tray_icon(self):
if self.dark_icon:
return QIcon(':icons/electrum_dark_icon.png')
else:
return QIcon(':icons/electrum_light_icon.png')
def toggle_tray_icon(self):
self.dark_icon = not self.dark_icon
self.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self.tray_icon())
def tray_activated(self, reason):
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def close(self):
for window in self.windows:
window.close()
def new_window(self, path, uri=None):
# Use a signal as can be called from daemon thread
self.app.emit(SIGNAL('new_window'), path, uri)
def create_window_for_wallet(self, wallet):
w = ElectrumWindow(self, wallet)
self.windows.append(w)
self.build_tray_menu()
# FIXME: Remove in favour of the load_wallet hook
run_hook('on_new_window', w)
return w
def get_wizard(self):
return InstallWizard(self.config, self.app, self.plugins)
def start_new_window(self, path, uri):
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it.'''
for w in self.windows:
if w.wallet.storage.path == path:
w.bring_to_top()
break
else:
wallet = self.daemon.load_wallet(path, self.get_wizard)
if not wallet:
return
w = self.create_window_for_wallet(wallet)
if uri:
w.pay_to_URI(uri)
return w
def close_window(self, window):
self.windows.remove(window)
self.build_tray_menu()
# save wallet path of last open window
if self.config.get('wallet_path') is None and not self.windows:
path = window.wallet.storage.path
self.config.set_key('gui_last_wallet', path)
run_hook('on_close_window', window)
def main(self):
self.timer.start()
# open last wallet
if self.config.get('wallet_path') is None:
last_wallet = self.config.get('gui_last_wallet')
if last_wallet is not None and os.path.exists(last_wallet):
self.config.cmdline_options['default_wallet_path'] = last_wallet
if not self.start_new_window(self.config.get_wallet_path(),
self.config.get('url')):
return
signal.signal(signal.SIGINT, lambda *args: self.app.quit())
# main loop
self.app.exec_()
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence. see http://www.mail-archive.com/[email protected]/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.app.sendEvent(self.app.clipboard(), event)
self.tray.hide()
| mit | -6,099,789,603,438,935,000 | 34.980583 | 118 | 0.642876 | false |
bmentges/brainiak_api | src/brainiak/suggest/json_schema.py | 1 | 6725 | # -*- coding: utf-8 -*-
from brainiak.utils.links import merge_schemas, pagination_schema
SUGGEST_PARAM_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Describe the parameters given to the suggest primitive",
"type": "object",
"required": ["search"],
"additionalProperties": False,
"properties": {
"search": {
"type": "object",
"required": ["pattern", "target"],
"additionalProperties": False,
"properties": {
"pattern": {"type": "string"},
"target": {"type": "string", "format": "uri"},
"graphs": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
},
"classes": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
},
"fields": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
},
}
},
"response": {
"type": "object",
"additionalProperties": False,
"properties": {
"required_fields": {
"type": "boolean"
},
"class_fields": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
},
"classes": {
"type": "array",
"uniqueItems": True,
"minItems": 1,
"items": {
"type": "object",
"required": ["@type", "instance_fields"],
"additionalProperties": False,
"properties": {
"@type": {"type": "string", "format": "uri"},
"instance_fields": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
}
}
},
},
"instance_fields": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
},
"meta_fields": {
"type": "array",
"items": {"type": "string", "format": "uri"},
"minItems": 1,
"uniqueItems": True
},
}
}
}
}
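# Illustrative example (added comment, not part of the original module): a
# request body matching SUGGEST_PARAM_SCHEMA could look like the following.
# The URIs below are made-up placeholders, not values defined by this API.
#
#     {
#         "search": {
#             "pattern": "semantic web",
#             "target": "http://example.com/ontology#title",
#             "classes": ["http://example.com/ontology#Document"]
#         },
#         "response": {
#             "meta_fields": ["http://example.com/ontology#created_at"]
#         }
#     }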
def schema():
base = {
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Suggest Result List Schema",
"type": "object",
"required": ["items"],
"properties": {
#"do_item_count": {"type": "integer"},
#"item_count": {"type": "integer"},
"base_url": {"type": "string", "format": "uri"},
"items": {
"type": "array",
"items": {
"type": "object",
"required": ["@id", "title", "@type", "type_title"],
"properties": {
"@id": {"type": "string"},
"title": {"type": "string"},
"@type": {"type": "string"},
"type_title": {"type": "string"},
"class_fields": {"type": "object"},
"instance_fields": {
"type": "array",
"items": {
"type": "object",
"additionalProperties": False,
"required": ["predicate_id", "predicate_title", "object_title", "required"],
"properties": {
"predicate_id": {"type": "string"},
"predicate_title": {"type": "string"},
"object_id": {"type": "string"},
"object_title": {"type": "string"},
"required": {"type": "boolean"}
}
}
}
},
"links": [
{
"href": "/{resource_id}",
"method": "GET",
"rel": "list"
},
{
"href": "/{resource_id}",
"method": "GET",
"rel": "context"
}
]
}
},
},
"links": [
{
"href": "{+_base_url}",
"method": "GET",
"rel": "self"
},
{
"href": "/_suggest",
"method": "POST",
"rel": "suggest",
"schema": SUGGEST_PARAM_SCHEMA
},
{
"href": "/{context_id}/{collection_id}",
"method": "GET",
"rel": "collection",
"schema": {
"type": "object",
"properties": {
"class_prefix": {
"type": "string"
}
},
}
},
{
"href": "/{context_id}/{collection_id}/{resource_id}",
"method": "GET",
"rel": "instance",
"schema": {
"type": "object",
"properties": {
"class_prefix": {
"type": "string"
},
"instance_prefix": {
"type": "string"
},
},
}
}
]
}
merge_schemas(base, pagination_schema('/', method="POST"))
return base
| gpl-2.0 | 4,605,018,496,070,448,000 | 34.771277 | 108 | 0.299777 | false |
gladsonvm/haystackdemo | lib/python2.7/site-packages/pyelasticsearch/client.py | 1 | 39599 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from datetime import datetime
from operator import itemgetter
from functools import wraps
from logging import getLogger
import re
from six import (iterkeys, binary_type, text_type, string_types, integer_types,
iteritems, PY3)
from six.moves import xrange
try:
# PY3
from urllib.parse import urlencode, quote_plus
except ImportError:
# PY2
from urllib import urlencode, quote_plus
import requests
import simplejson as json # for use_decimal
from simplejson import JSONDecodeError
from pyelasticsearch.downtime import DowntimePronePool
from pyelasticsearch.exceptions import (Timeout, ConnectionError,
ElasticHttpError,
InvalidJsonResponseError,
ElasticHttpNotFoundError,
IndexAlreadyExistsError)
def _add_es_kwarg_docs(params, method):
"""
Add stub documentation for any args in ``params`` that aren't already in
the docstring of ``method``.
The stubs may not tell much about each arg, but they serve the important
purpose of letting the user know that they're safe to use--we won't be
paving over them in the future for something pyelasticsearch-specific.
"""
def docs_for_kwarg(p):
return '\n :arg %s: See the ES docs.' % p
doc = method.__doc__
if doc is not None: # It's none under python -OO.
# Handle the case where there are no :arg declarations to key off:
if '\n :arg' not in doc and params:
first_param, params = params[0], params[1:]
doc = doc.replace('\n (Insert es_kwargs here.)',
docs_for_kwarg(first_param))
for p in params:
if ('\n :arg %s: ' % p) not in doc:
# Find the last documented arg so we can put our generated docs
# after it. No need to explicitly compile this; the regex cache
# should serve.
insertion_point = re.search(
r' :arg (.*?)(?=\n+ (?:$|[^: ]))',
doc,
re.MULTILINE | re.DOTALL).end()
doc = ''.join([doc[:insertion_point],
docs_for_kwarg(p),
doc[insertion_point:]])
method.__doc__ = doc
def es_kwargs(*args_to_convert):
"""
Mark which kwargs will become query string params in the eventual ES call.
Return a decorator that grabs the kwargs of the given names, plus any
beginning with "es_", subtracts them from the ordinary kwargs, and passes
them to the decorated function through the ``query_params`` kwarg. The
remaining kwargs and the args are passed through unscathed.
Also, if any of the given kwargs are undocumented in the decorated method's
docstring, add stub documentation for them.
"""
convertible_args = set(args_to_convert)
def decorator(func):
# Add docs for any missing query params:
_add_es_kwarg_docs(args_to_convert, func)
@wraps(func)
def decorate(*args, **kwargs):
# Make kwargs the map of normal kwargs and query_params the map of
# kwargs destined for query string params:
query_params = {}
for k in list(iterkeys(kwargs)): # Make a copy; we mutate kwargs.
if k.startswith('es_'):
query_params[k[3:]] = kwargs.pop(k)
elif k in convertible_args:
query_params[k] = kwargs.pop(k)
return func(*args, query_params=query_params, **kwargs)
return decorate
return decorator
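# A brief sketch of the decorator's call-time behavior (hypothetical method,
# not part of this module): given
#
#     @es_kwargs('routing')
#     def frob(self, index, query_params=None): ...
#
# calling ``frob('idx', routing='abc', es_refresh=True)`` reaches the body
# with ``query_params == {'routing': 'abc', 'refresh': True}``.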
class ElasticSearch(object):
"""
An object which manages connections to elasticsearch and acts as a
go-between for API calls to it
This object is thread-safe. You can create one instance and share it
among all threads.
"""
def __init__(self, urls, timeout=60, max_retries=0, revival_delay=300):
"""
:arg urls: A URL or iterable of URLs of ES nodes. These are full URLs
with port numbers, like ``http://elasticsearch.example.com:9200``.
:arg timeout: Number of seconds to wait for each request before raising
Timeout
:arg max_retries: How many other servers to try, in series, after a
request times out or a connection fails
:arg revival_delay: Number of seconds for which to avoid a server after
it times out or is uncontactable
"""
if isinstance(urls, string_types):
urls = [urls]
urls = [u.rstrip('/') for u in urls]
self.servers = DowntimePronePool(urls, revival_delay)
self.revival_delay = revival_delay
self.timeout = timeout
self.max_retries = max_retries
self.logger = getLogger('pyelasticsearch')
self.session = requests.session()
self.json_encoder = JsonEncoder
def _concat(self, items):
"""
Return a comma-delimited concatenation of the elements of ``items``,
with any occurrences of "_all" omitted.
If ``items`` is a string, promote it to a 1-item list.
"""
# TODO: Why strip out _all?
if items is None:
return ''
if isinstance(items, string_types):
items = [items]
return ','.join(i for i in items if i != '_all')
def _to_query(self, obj):
"""
Convert a native-Python object to a unicode or bytestring
representation suitable for a query string.
"""
# Quick and dirty thus far
if isinstance(obj, string_types):
return obj
if isinstance(obj, bool):
return 'true' if obj else 'false'
if isinstance(obj, integer_types):
return str(obj)
if isinstance(obj, float):
return repr(obj) # str loses precision.
if isinstance(obj, (list, tuple)):
return ','.join(self._to_query(o) for o in obj)
iso = _iso_datetime(obj)
if iso:
return iso
raise TypeError("_to_query() doesn't know how to represent %r in an ES"
' query string.' % obj)
def _utf8(self, thing):
"""Convert any arbitrary ``thing`` to a utf-8 bytestring."""
if isinstance(thing, binary_type):
return thing
if not isinstance(thing, text_type):
thing = text_type(thing)
return thing.encode('utf-8')
def _join_path(self, path_components):
"""
Smush together the path components, omitting '' and None ones.
Unicodes get encoded to strings via utf-8. Incoming strings are assumed
to be utf-8-encoded already.
"""
path = '/'.join(quote_plus(self._utf8(p), '') for p in path_components if
p is not None and p != '')
if not path.startswith('/'):
path = '/' + path
return path
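    # For illustration (added comment, not from the original source): with the
    # helpers above, self._join_path(['twitter', 'tweet', '1']) yields
    # '/twitter/tweet/1', self._to_query(['a', 'b']) yields 'a,b', and
    # self._to_query(True) yields 'true'.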
def send_request(self,
method,
path_components,
body='',
query_params=None,
encode_body=True):
"""
Send an HTTP request to ES, and return the JSON-decoded response.
This is mostly an internal method, but it also comes in handy if you
need to use a brand new ES API that isn't yet explicitly supported by
pyelasticsearch, while still taking advantage of our connection pooling
and retrying.
Retry the request on different servers if the first one is down and
``self.max_retries`` > 0.
:arg method: An HTTP method, like "GET"
:arg path_components: An iterable of path components, to be joined by
"/"
:arg body: The request body
:arg query_params: A map of querystring param names to values or
``None``
:arg encode_body: Whether to encode the body of the request as JSON
"""
path = self._join_path(path_components)
if query_params:
path = '?'.join(
[path,
urlencode(dict((k, self._utf8(self._to_query(v))) for k, v in
iteritems(query_params)))])
request_body = self._encode_json(body) if encode_body else body
req_method = getattr(self.session, method.lower())
# We do our own retrying rather than using urllib3's; we want to retry
# a different node in the cluster if possible, not the same one again
# (which may be down).
for attempt in xrange(self.max_retries + 1):
server_url, was_dead = self.servers.get()
url = server_url + path
self.logger.debug(
"Making a request equivalent to this: curl -X%s '%s' -d '%s'",
method, url, request_body)
try:
resp = req_method(
url,
timeout=self.timeout,
**({'data': request_body} if body else {}))
except (ConnectionError, Timeout):
self.servers.mark_dead(server_url)
self.logger.info('%s marked as dead for %s seconds.',
server_url,
self.revival_delay)
if attempt >= self.max_retries:
raise
else:
if was_dead:
self.servers.mark_live(server_url)
break
self.logger.debug('response status: %s', resp.status_code)
prepped_response = self._decode_response(resp)
if resp.status_code >= 400:
self._raise_exception(resp, prepped_response)
self.logger.debug('got response %s', prepped_response)
return prepped_response
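    # Hypothetical example of calling send_request() directly for an API this
    # client does not wrap (the endpoint shown is illustrative only): given an
    # ElasticSearch instance ``es``, ``es.send_request('GET', ['_cluster', 'stats'])``
    # issues GET /_cluster/stats and returns the decoded JSON response.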
def _raise_exception(self, response, decoded_body):
"""Raise an exception based on an error-indicating response from ES."""
error_message = decoded_body.get('error', decoded_body)
error_class = ElasticHttpError
if response.status_code == 404:
error_class = ElasticHttpNotFoundError
elif (error_message.startswith('IndexAlreadyExistsException') or
'nested: IndexAlreadyExistsException' in error_message):
error_class = IndexAlreadyExistsError
raise error_class(response.status_code, error_message)
def _encode_json(self, value):
"""
Convert a Python value to a form suitable for ElasticSearch's JSON DSL.
"""
return json.dumps(value, cls=self.json_encoder, use_decimal=True)
def _decode_response(self, response):
"""Return a native-Python representation of a response's JSON blob."""
try:
json_response = response.json()
except JSONDecodeError:
raise InvalidJsonResponseError(response)
return json_response
## REST API
@es_kwargs('routing', 'parent', 'timestamp', 'ttl', 'percolate',
'consistency', 'replication', 'refresh', 'timeout', 'fields')
def index(self, index, doc_type, doc, id=None, overwrite_existing=True,
query_params=None):
"""
Put a typed JSON document into a specific index to make it searchable.
:arg index: The name of the index to which to add the document
:arg doc_type: The type of the document
:arg doc: A Python mapping object, convertible to JSON, representing
the document
:arg id: The ID to give the document. Leave blank to make one up.
:arg overwrite_existing: Whether we should overwrite existing documents
of the same ID and doctype
:arg routing: A value hashed to determine which shard this indexing
request is routed to
:arg parent: The ID of a parent document, which leads this document to
be routed to the same shard as the parent, unless ``routing``
overrides it.
:arg timestamp: An explicit value for the (typically automatic)
timestamp associated with a document, for use with ``ttl`` and such
:arg ttl: The time until this document is automatically removed from
the index. Can be an integral number of milliseconds or a duration
like '1d'.
:arg percolate: An indication of which percolator queries, registered
against this index, should be checked against the new document: '*'
or a query string like 'color:green'
:arg consistency: An indication of how many active shards the contact
node should demand to see in order to let the index operation
succeed: 'one', 'quorum', or 'all'
:arg replication: Set to 'async' to return from ES before finishing
replication.
:arg refresh: Pass True to refresh the index after adding the document.
:arg timeout: A duration to wait for the relevant primary shard to
become available, in the event that it isn't: for example, "5m"
See `ES's index API`_ for more detail.
.. _`ES's index API`:
http://www.elasticsearch.org/guide/reference/api/index_.html
"""
# :arg query_params: A map of other querystring params to pass along to
# ES. This lets you use future ES features without waiting for an
# update to pyelasticsearch. If we just used **kwargs for this, ES
# could start using a querystring param that we already used as a
# kwarg, and we'd shadow it. Name these params according to the names
# they have in ES's REST API, but prepend "\es_": for example,
# ``es_version=2``.
# TODO: Support version along with associated "preference" and
# "version_type" params.
if not overwrite_existing:
query_params['op_type'] = 'create'
return self.send_request('POST' if id is None else 'PUT',
[index, doc_type, id],
doc,
query_params)
@es_kwargs('consistency', 'refresh')
def bulk_index(self, index, doc_type, docs, id_field='id',
parent_field='_parent', query_params=None):
"""
Index a list of documents as efficiently as possible.
:arg index: The name of the index to which to add the document
:arg doc_type: The type of the document
:arg docs: An iterable of Python mapping objects, convertible to JSON,
representing documents to index
:arg id_field: The field of each document that holds its ID
:arg parent_field: The field of each document that holds its parent ID,
if any. Removed from document before indexing.
See `ES's bulk API`_ for more detail.
.. _`ES's bulk API`:
http://www.elasticsearch.org/guide/reference/api/bulk.html
"""
body_bits = []
if not docs:
raise ValueError('No documents provided for bulk indexing!')
for doc in docs:
action = {'index': {'_index': index, '_type': doc_type}}
if doc.get(id_field) is not None:
action['index']['_id'] = doc[id_field]
if doc.get(parent_field) is not None:
action['index']['_parent'] = doc.pop(parent_field)
body_bits.append(self._encode_json(action))
body_bits.append(self._encode_json(doc))
# Need the trailing newline.
body = '\n'.join(body_bits) + '\n'
return self.send_request('POST',
['_bulk'],
body,
encode_body=False,
query_params=query_params)
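    # For reference (added comment, not from the original source): the body
    # assembled by bulk_index() is ES's newline-delimited bulk format, e.g.
    #
    #     {"index": {"_index": "test", "_type": "user", "_id": "1"}}
    #     {"id": "1", "name": "Jane"}
    #
    # one action line per document line, ending with a trailing newline.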
@es_kwargs('routing', 'parent', 'replication', 'consistency', 'refresh')
def delete(self, index, doc_type, id, query_params=None):
"""
Delete a typed JSON document from a specific index based on its ID.
:arg index: The name of the index from which to delete
:arg doc_type: The type of the document to delete
:arg id: The (string or int) ID of the document to delete
See `ES's delete API`_ for more detail.
.. _`ES's delete API`:
http://www.elasticsearch.org/guide/reference/api/delete.html
"""
        # id should never be None, and it's not particularly dangerous
        # (equivalent to deleting a doc with ID "None"), but it's almost
        # certainly not what the caller meant:
if id is None or id == '':
raise ValueError('No ID specified. To delete all documents in '
'an index, use delete_all().')
return self.send_request('DELETE', [index, doc_type, id],
query_params=query_params)
@es_kwargs('routing', 'parent', 'replication', 'consistency', 'refresh')
def delete_all(self, index, doc_type, query_params=None):
"""
Delete all documents of the given doctype from an index.
:arg index: The name of the index from which to delete. ES does not
support this being empty or "_all" or a comma-delimited list of
index names (in 0.19.9).
:arg doc_type: The name of a document type
See `ES's delete API`_ for more detail.
.. _`ES's delete API`:
http://www.elasticsearch.org/guide/reference/api/delete.html
"""
return self.send_request('DELETE', [index, doc_type],
query_params=query_params)
    @es_kwargs('q', 'df', 'analyzer', 'default_operator', 'source', 'routing',
'replication', 'consistency')
def delete_by_query(self, index, doc_type, query, query_params=None):
"""
Delete typed JSON documents from a specific index based on query.
:arg index: An index or iterable thereof from which to delete
:arg doc_type: The type of document or iterable thereof to delete
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter. (Passing the ``q`` kwarg yourself is
deprecated.)
See `ES's delete-by-query API`_ for more detail.
.. _`ES's delete-by-query API`:
http://www.elasticsearch.org/guide/reference/api/delete-by-query.html
"""
if isinstance(query, string_types) and 'q' not in query_params:
query_params['q'] = query
body = ''
else:
body = query
return self.send_request(
'DELETE',
[self._concat(index), self._concat(doc_type), '_query'],
body,
query_params=query_params)
@es_kwargs('realtime', 'fields', 'routing', 'preference', 'refresh')
def get(self, index, doc_type, id, query_params=None):
"""
Get a typed JSON document from an index by ID.
:arg index: The name of the index from which to retrieve
:arg doc_type: The type of document to get
:arg id: The ID of the document to retrieve
See `ES's get API`_ for more detail.
.. _`ES's get API`:
http://www.elasticsearch.org/guide/reference/api/get.html
"""
return self.send_request('GET', [index, doc_type, id],
query_params=query_params)
@es_kwargs()
def multi_get(self, ids, index=None, doc_type=None, fields=None,
query_params=None):
"""
Get multiple typed JSON documents from ES.
:arg ids: An iterable, each element of which can be either an a dict or
an id (int or string). IDs are taken to be document IDs. Dicts are
passed through the Multi Get API essentially verbatim, except that
any missing ``_type``, ``_index``, or ``fields`` keys are filled in
from the defaults given in the ``index``, ``doc_type``, and
``fields`` args.
:arg index: Default index name from which to retrieve
:arg doc_type: Default type of document to get
:arg fields: Default fields to return
See `ES's Multi Get API`_ for more detail.
.. _`ES's Multi Get API`:
http://www.elasticsearch.org/guide/reference/api/multi-get.html
"""
doc_template = dict(
filter(
itemgetter(1),
[('_index', index), ('_type', doc_type), ('fields', fields)]))
docs = []
for id in ids:
doc = doc_template.copy()
if isinstance(id, dict):
doc.update(id)
else:
doc['_id'] = id
docs.append(doc)
return self.send_request(
'GET', ['_mget'], {'docs': docs}, query_params=query_params)
@es_kwargs('routing', 'parent', 'timeout', 'replication', 'consistency',
'percolate', 'refresh', 'retry_on_conflict', 'fields')
def update(self, index, doc_type, id, script=None, params=None, lang=None,
query_params=None, doc=None, upsert=None):
"""
Update an existing document. Raise ``TypeError`` if ``script``, ``doc``
and ``upsert`` are all unspecified.
:arg index: The name of the index containing the document
:arg doc_type: The type of the document
:arg id: The ID of the document
:arg script: The script to be used to update the document
:arg params: A dict of the params to be put in scope of the script
:arg lang: The language of the script. Omit to use the default,
specified by ``script.default_lang``.
:arg doc: A partial document to be merged into the existing document
:arg upsert: The content for the new document created if the document
does not exist
"""
if script is None and doc is None and upsert is None:
raise TypeError('At least one of the script, doc, or upsert '
'kwargs must be provided.')
body = {}
if script:
body['script'] = script
if lang and script:
body['lang'] = lang
if doc:
body['doc'] = doc
if upsert:
body['upsert'] = upsert
if params:
body['params'] = params
return self.send_request(
'POST',
[index, doc_type, id, '_update'],
body=body,
query_params=query_params)
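    # Hypothetical usage of update() (index, type, and field names are made up
    # for illustration): given an ElasticSearch instance ``es``,
    # ``es.update('posts', 'post', 42, doc={'views': 10}, upsert={'views': 10})``
    # merges the partial doc into document 42, or creates it from the upsert
    # body if it does not exist yet.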
def _search_or_count(self, kind, query, index=None, doc_type=None,
query_params=None):
if isinstance(query, string_types):
query_params['q'] = query
body = ''
else:
body = query
return self.send_request(
'GET',
[self._concat(index), self._concat(doc_type), kind],
body,
query_params=query_params)
@es_kwargs('routing', 'size')
def search(self, query, **kwargs):
"""
Execute a search query against one or more indices and get back search
hits.
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter
:arg index: An index or iterable of indexes to search. Omit to search
all.
:arg doc_type: A document type or iterable thereof to search. Omit to
search all.
:arg size: Limit the number of results to ``size``. Use with ``es_from`` to
implement paginated searching.
See `ES's search API`_ for more detail.
.. _`ES's search API`:
http://www.elasticsearch.org/guide/reference/api/search/
"""
return self._search_or_count('_search', query, **kwargs)
@es_kwargs('df', 'analyzer', 'default_operator', 'source', 'routing')
def count(self, query, **kwargs):
"""
Execute a query against one or more indices and get hit count.
:arg query: A dictionary that will convert to ES's query DSL or a
string that will serve as a textual query to be passed as the ``q``
query string parameter
:arg index: An index or iterable of indexes to search. Omit to search
all.
:arg doc_type: A document type or iterable thereof to search. Omit to
search all.
See `ES's count API`_ for more detail.
.. _`ES's count API`:
http://www.elasticsearch.org/guide/reference/api/count.html
"""
return self._search_or_count('_count', query, **kwargs)
@es_kwargs()
def get_mapping(self, index=None, doc_type=None, query_params=None):
"""
Fetch the mapping definition for a specific index and type.
:arg index: An index or iterable thereof
:arg doc_type: A document type or iterable thereof
Omit both arguments to get mappings for all types and indexes.
See `ES's get-mapping API`_ for more detail.
.. _`ES's get-mapping API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-get-mapping.html
"""
# TODO: Think about turning index=None into _all if doc_type is non-
# None, per the ES doc page.
return self.send_request(
'GET',
[self._concat(index), self._concat(doc_type), '_mapping'],
query_params=query_params)
@es_kwargs('ignore_conflicts')
def put_mapping(self, index, doc_type, mapping, query_params=None):
"""
Register specific mapping definition for a specific type against one or
more indices.
:arg index: An index or iterable thereof
:arg doc_type: The document type to set the mapping of
:arg mapping: A dict representing the mapping to install. For example,
this dict can have top-level keys that are the names of doc types.
See `ES's put-mapping API`_ for more detail.
.. _`ES's put-mapping API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-put-mapping.html
"""
# TODO: Perhaps add a put_all_mappings() for consistency and so we
# don't need to expose the "_all" magic string. We haven't done it yet
# since this routine is not dangerous: ES makes you explicily pass
# "_all" to update all mappings.
return self.send_request(
'PUT',
[self._concat(index), doc_type, '_mapping'],
mapping,
query_params=query_params)
@es_kwargs('search_type', 'search_indices', 'search_types',
'search_scroll', 'search_size', 'search_from',
'like_text', 'percent_terms_to_match', 'min_term_freq',
'max_query_terms', 'stop_words', 'min_doc_freq', 'max_doc_freq',
'min_word_len', 'max_word_len', 'boost_terms', 'boost',
'analyzer')
def more_like_this(self, index, doc_type, id, mlt_fields, body='', query_params=None):
"""
Execute a "more like this" search query against one or more fields and
get back search hits.
:arg index: The index to search and where the document for comparison
lives
:arg doc_type: The type of document to find others like
:arg id: The ID of the document to find others like
:arg mlt_fields: The list of fields to compare on
:arg body: A dictionary that will convert to ES's query DSL and be
passed as the request body
See `ES's more-like-this API`_ for more detail.
.. _`ES's more-like-this API`:
http://www.elasticsearch.org/guide/reference/api/more-like-this.html
"""
query_params['mlt_fields'] = self._concat(mlt_fields)
return self.send_request('GET',
[index, doc_type, id, '_mlt'],
body=body,
query_params=query_params)
## Index Admin API
@es_kwargs('recovery', 'snapshot')
def status(self, index=None, query_params=None):
"""
Retrieve the status of one or more indices
:arg index: An index or iterable thereof
See `ES's index-status API`_ for more detail.
.. _`ES's index-status API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-status.html
"""
return self.send_request('GET', [self._concat(index), '_status'],
query_params=query_params)
@es_kwargs()
def update_aliases(self, settings, query_params=None):
"""
Add, remove, or update aliases in bulk.
:arg settings: a dictionary specifying the actions to perform
See `ES's admin-indices-aliases API`_.
.. _`ES's admin-indices-aliases API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-aliases.html
"""
return self.send_request('POST', ['_aliases'],
body=settings, query_params=query_params)
@es_kwargs()
def aliases(self, index=None, query_params=None):
"""
Retrieve a listing of aliases
:arg index: the name of an index or an iterable of indices
See `ES's admin-indices-aliases API`_.
.. _`ES's admin-indices-aliases API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-aliases.html
"""
return self.send_request('GET', [self._concat(index), '_aliases'],
query_params=query_params)
@es_kwargs()
def create_index(self, index, settings=None, query_params=None):
"""
Create an index with optional settings.
:arg index: The name of the index to create
:arg settings: A dictionary of settings
If the index already exists, raise
:class:`~pyelasticsearch.exceptions.IndexAlreadyExistsError`.
See `ES's create-index API`_ for more detail.
.. _`ES's create-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-create-index.html
"""
return self.send_request('PUT', [index], body=settings,
query_params=query_params)
@es_kwargs()
def delete_index(self, index, query_params=None):
"""
Delete an index.
:arg index: An index or iterable thereof to delete
If the index is not found, raise
:class:`~pyelasticsearch.exceptions.ElasticHttpNotFoundError`.
See `ES's delete-index API`_ for more detail.
.. _`ES's delete-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-delete-index.html
"""
if not index:
raise ValueError('No indexes specified. To delete all indexes, use'
' delete_all_indexes().')
return self.send_request('DELETE', [self._concat(index)],
query_params=query_params)
def delete_all_indexes(self, **kwargs):
"""Delete all indexes."""
return self.delete_index('_all', **kwargs)
@es_kwargs()
def close_index(self, index, query_params=None):
"""
Close an index.
:arg index: The index to close
See `ES's close-index API`_ for more detail.
.. _`ES's close-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html
"""
return self.send_request('POST', [index, '_close'],
query_params=query_params)
@es_kwargs()
def open_index(self, index, query_params=None):
"""
Open an index.
:arg index: The index to open
See `ES's open-index API`_ for more detail.
.. _`ES's open-index API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html
"""
return self.send_request('POST', [index, '_open'],
query_params=query_params)
@es_kwargs()
def get_settings(self, index, query_params=None):
"""
Get the settings of one or more indexes.
:arg index: An index or iterable of indexes
See `ES's get-settings API`_ for more detail.
.. _`ES's get-settings API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-get-settings.html
"""
return self.send_request('GET',
[self._concat(index), '_settings'],
query_params=query_params)
@es_kwargs()
def update_settings(self, index, settings, query_params=None):
"""
Change the settings of one or more indexes.
:arg index: An index or iterable of indexes
:arg settings: A dictionary of settings
See `ES's update-settings API`_ for more detail.
.. _`ES's update-settings API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-update-settings.html
"""
if not index:
raise ValueError('No indexes specified. To update all indexes, use'
' update_all_settings().')
# If we implement the "update cluster settings" API, call that
# update_cluster_settings().
return self.send_request('PUT',
[self._concat(index), '_settings'],
body=settings,
query_params=query_params)
@es_kwargs()
def update_all_settings(self, settings, query_params=None):
"""
Update the settings of all indexes.
:arg settings: A dictionary of settings
See `ES's update-settings API`_ for more detail.
.. _`ES's update-settings API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-update-settings.html
"""
return self.send_request('PUT', ['_settings'], body=settings,
query_params=query_params)
@es_kwargs('refresh')
def flush(self, index=None, query_params=None):
"""
Flush one or more indices (clear memory).
:arg index: An index or iterable of indexes
See `ES's flush API`_ for more detail.
.. _`ES's flush API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-flush.html
"""
return self.send_request('POST',
[self._concat(index), '_flush'],
query_params=query_params)
@es_kwargs()
def refresh(self, index=None, query_params=None):
"""
Refresh one or more indices.
:arg index: An index or iterable of indexes
See `ES's refresh API`_ for more detail.
.. _`ES's refresh API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-refresh.html
"""
return self.send_request('POST', [self._concat(index), '_refresh'],
query_params=query_params)
@es_kwargs()
def gateway_snapshot(self, index=None, query_params=None):
"""
Gateway snapshot one or more indices.
:arg index: An index or iterable of indexes
See `ES's gateway-snapshot API`_ for more detail.
.. _`ES's gateway-snapshot API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-gateway-snapshot.html
"""
return self.send_request(
'POST',
[self._concat(index), '_gateway', 'snapshot'],
query_params=query_params)
@es_kwargs('max_num_segments', 'only_expunge_deletes', 'refresh', 'flush',
'wait_for_merge')
def optimize(self, index=None, query_params=None):
"""
Optimize one or more indices.
:arg index: An index or iterable of indexes
See `ES's optimize API`_ for more detail.
.. _`ES's optimize API`:
http://www.elasticsearch.org/guide/reference/api/admin-indices-optimize.html
"""
return self.send_request('POST',
[self._concat(index), '_optimize'],
query_params=query_params)
@es_kwargs('level', 'wait_for_status', 'wait_for_relocating_shards',
'wait_for_nodes', 'timeout')
def health(self, index=None, query_params=None):
"""
Report on the health of the cluster or certain indices.
:arg index: The index or iterable of indexes to examine
See `ES's cluster-health API`_ for more detail.
.. _`ES's cluster-health API`:
http://www.elasticsearch.org/guide/reference/api/admin-cluster-health.html
"""
return self.send_request(
'GET',
['_cluster', 'health', self._concat(index)],
query_params=query_params)
@es_kwargs('filter_nodes', 'filter_routing_table', 'filter_metadata',
'filter_blocks', 'filter_indices')
def cluster_state(self, query_params=None):
"""
The cluster state API allows to get comprehensive state
information of the whole cluster.
(Insert es_kwargs here.)
See `ES's cluster-state API`_ for more detail.
.. _`ES's cluster-state API`:
http://www.elasticsearch.org/guide/reference/api/admin-cluster-state.html
"""
return self.send_request(
'GET', ['_cluster', 'state'], query_params=query_params)
@es_kwargs()
def percolate(self, index, doc_type, doc, query_params=None):
"""
Run a JSON document through the registered percolator queries, and
return which ones match.
:arg index: The name of the index to which the document pretends to
belong
:arg doc_type: The type the document should be treated as if it has
:arg doc: A Python mapping object, convertible to JSON, representing
the document
Use :meth:`index()` to register percolators. See `ES's percolate API`_
for more detail.
.. _`ES's percolate API`:
http://www.elasticsearch.org/guide/reference/api/percolate/
"""
return self.send_request('GET',
[index, doc_type, '_percolate'],
doc, query_params=query_params)
class JsonEncoder(json.JSONEncoder):
def default(self, value):
"""Convert more Python data types to ES-understandable JSON."""
iso = _iso_datetime(value)
if iso:
return iso
if not PY3 and isinstance(value, str):
return unicode(value, errors='replace') # TODO: Be stricter.
if isinstance(value, set):
return list(value)
return super(JsonEncoder, self).default(value)
def _iso_datetime(value):
"""
If value appears to be something datetime-like, return it in ISO format.
Otherwise, return None.
"""
if hasattr(value, 'strftime'):
if hasattr(value, 'hour'):
return value.isoformat()
else:
return '%sT00:00:00' % value.isoformat()
| mit | 5,228,932,564,129,046,000 | 38.16815 | 96 | 0.576429 | false |
gentoo/layman | layman/tests/external.py | 1 | 28009 | # -*- coding: utf-8 -*-
#################################################################################
# EXTERNAL LAYMAN TESTS
#################################################################################
# File: external.py
#
# Runs external (non-doctest) test cases.
#
# Copyright:
# (c) 2009 Sebastian Pipping
# Distributed under the terms of the GNU General Public License v2
#
# Author(s):
# Sebastian Pipping <[email protected]>
#
'''Runs external (non-doctest) test cases.'''
import os
import sys
import shutil
import tempfile
import unittest
import xml.etree.ElementTree as ET # Python 2.5
#Py3
try:
import urllib.request as urllib
except ImportError:
import urllib
from layman.argsparser import ArgsParser
from layman.api import LaymanAPI
from layman.db import DB
from layman.dbbase import DbBase
from layman.compatibility import fileopen
from layman.config import BareConfig, OptionConfig
from layman.maker import Interactive
from layman.output import Message
from layman.overlays.overlay import Overlay
from layman.remotedb import RemoteDB
from layman.repoconfmanager import RepoConfManager
from layman.utils import path
from warnings import filterwarnings, resetwarnings
encoding = sys.getdefaultencoding()
if encoding != 'utf-8':
reload(sys)
sys.setdefaultencoding('utf-8')
HERE = os.path.dirname(os.path.realpath(__file__))
class AddDeleteEnableDisableFromConf(unittest.TestCase):
def test(self):
tmpdir = tempfile.mkdtemp(prefix='laymantmp_')
makeconf = os.path.join(tmpdir, 'make.conf')
reposconf = os.path.join(tmpdir, 'repos.conf')
make_txt =\
'PORTDIR_OVERLAY="\n'\
'$PORTDIR_OVERLAY"'
# Create the .conf files so layman doesn't
# complain.
with fileopen(makeconf, 'w') as f:
f.write(make_txt)
with fileopen(reposconf, 'w') as f:
f.write('')
my_opts = {
'installed' :
HERE + '/testfiles/global-overlays.xml',
'make_conf' : makeconf,
'nocheck' : 'yes',
'storage' : tmpdir,
'repos_conf' : reposconf,
'conf_type' : ['make.conf', 'repos.conf'],
}
config = OptionConfig(my_opts)
config.set_option('quietness', 3)
a = DB(config)
config['output'].set_colorize(False)
conf = RepoConfManager(config, a.overlays)
# Set up our success tracker.
success = []
# Add all the overlays in global_overlays.xml.
for overlay in a.overlays.keys():
conf_success = conf.add(a.overlays[overlay])
if conf_success == False:
success.append(False)
else:
success.append(True)
# Disable one overlay.
self.assertTrue(conf.disable(a.overlays['wrobel']))
# Enable disabled overlay.
self.assertTrue(conf.enable(a.overlays['wrobel']))
# Delete all the overlays in global_overlays.xml.
for overlay in a.overlays.keys():
self.assertTrue(conf.delete(a.overlays[overlay]))
# Clean up.
os.unlink(makeconf)
os.unlink(reposconf)
shutil.rmtree(tmpdir)
class AddDeleteDB(unittest.TestCase):
def test(self):
repo_name = 'tar_test_overlay'
temp_dir_path = tempfile.mkdtemp(prefix='laymantmp_')
db_file = os.path.join(temp_dir_path, 'installed.xml')
make_conf = os.path.join(temp_dir_path, 'make.conf')
repo_conf = os.path.join(temp_dir_path, 'repos.conf')
tar_source_path = os.path.join(HERE, 'testfiles', 'layman-test.tar.bz2')
(_, temp_tarball_path) = tempfile.mkstemp()
shutil.copyfile(tar_source_path, temp_tarball_path)
# Write overlay collection XML
xml_text = '''\
<?xml version="1.0" encoding="UTF-8"?>
<repositories xmlns="" version="1.0">
<repo quality="experimental" status="unofficial">
<name>%(repo_name)s</name>
<description>XXXXXXXXXXX</description>
<owner>
<email>[email protected]</email>
</owner>
<source type="tar">file://%(temp_tarball_url)s</source>
</repo>
</repositories>
'''\
% {
'temp_tarball_url': urllib.pathname2url(temp_tarball_path),
'repo_name': repo_name
}
(fd, temp_xml_path) = tempfile.mkstemp()
my_opts = {'installed' : temp_xml_path,
'conf_type' : ['make.conf', 'repos.conf'],
'db_type' : 'xml',
'nocheck' : 'yes',
'make_conf' : make_conf,
'repos_conf' : repo_conf,
'storage' : temp_dir_path,
'check_official': False}
with os.fdopen(fd, 'w') as f:
f.write(xml_text)
with fileopen(make_conf, 'w') as f:
f.write('PORTDIR_OVERLAY="$PORTDIR_OVERLAY"\n')
with fileopen(repo_conf, 'w') as f:
f.write('')
config = OptionConfig(options=my_opts)
config.set_option('quietness', 3)
a = DB(config)
config.set_option('installed', db_file)
# Add an overlay to a fresh DB file.
b = DB(config)
b.add(a.select(repo_name))
# Make sure it's actually installed.
specific_overlay_path = os.path.join(temp_dir_path, repo_name)
self.assertTrue(os.path.exists(specific_overlay_path))
# Check the DbBase to ensure that it's reading the installed.xml.
c = DbBase(config, paths=[db_file,])
self.assertEqual(list(c.overlays), ['tar_test_overlay'])
# Make sure the configs have been written to correctly.
conf = RepoConfManager(config, b.overlays)
self.assertEqual(list(conf.overlays), ['tar_test_overlay'])
# Delete the overlay from the second DB.
b.delete(b.select(repo_name))
self.assertEqual(b.overlays, {})
# Ensure the installed.xml has been cleaned properly.
c = DbBase(config, paths=[db_file,])
self.assertEqual(c.overlays, {})
conf = RepoConfManager(config, b.overlays)
self.assertEqual(conf.overlays, {})
# Clean up.
os.unlink(temp_xml_path)
os.unlink(temp_tarball_path)
shutil.rmtree(temp_dir_path)
# Tests archive overlay types (squashfs, tar)
# http://bugs.gentoo.org/show_bug.cgi?id=304547
class ArchiveAddRemoveSync(unittest.TestCase):
def _create_squashfs_overlay(self):
repo_name = 'squashfs-test-overlay'
squashfs_source_path = os.path.join(HERE, 'testfiles', 'layman-test.squashfs')
# Duplicate test squashfs (so we can delete it after testing)
(_, temp_squashfs_path) = tempfile.mkstemp()
shutil.copyfile(squashfs_source_path, temp_squashfs_path)
# Write overlay collection XML
xml_text = '''\
<?xml version="1.0" encoding="UTF-8"?>
<repositories xmlns="" version="1.0">
<repo quality="experimental" status="unofficial">
<name>%(repo_name)s</name>
<description>XXXXXXXXXXX</description>
<owner>
<email>[email protected]</email>
</owner>
<source type="squashfs">file://%(temp_squashfs_url)s</source>
</repo>
</repositories>
'''\
% {
'temp_squashfs_url': urllib.pathname2url(temp_squashfs_path),
'repo_name': repo_name
}
print(xml_text)
return xml_text, repo_name, temp_squashfs_path
def _create_tar_overlay(self):
repo_name = 'tar-test-overlay'
tar_source_path = os.path.join(HERE, 'testfiles', 'layman-test.tar.bz2')
# Duplicate test tarball (so we can delete it after testing)
(_, temp_tarball_path) = tempfile.mkstemp()
shutil.copyfile(tar_source_path, temp_tarball_path)
# Write overlay collection XML
xml_text = '''\
<?xml version="1.0" encoding="UTF-8"?>
<repositories xmlns="" version="1.0">
<repo quality="experimental" status="unofficial">
<name>%(repo_name)s</name>
<description>XXXXXXXXXXX</description>
<owner>
<email>[email protected]</email>
</owner>
<source type="tar">file://%(temp_tarball_url)s</source>
</repo>
</repositories>
'''\
% {
'temp_tarball_url': urllib.pathname2url(temp_tarball_path),
'repo_name': repo_name
}
print(xml_text)
return xml_text, repo_name, temp_tarball_path
def test(self):
archives = []
try:
from layman.overlays.modules.tar.tar import TarOverlay
archives.append('tar')
from layman.overlays.modules.squashfs.squashfs import SquashfsOverlay
archives.append('squashfs')
except ImportError:
pass
for archive in archives:
xml_text, repo_name, temp_archive_path = getattr(self,
"_create_%(archive)s_overlay" %
{'archive': archive})()
(fd, temp_collection_path) = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
f.write(xml_text)
# Make playground directory
temp_dir_path = tempfile.mkdtemp()
# Make DB from it
config = BareConfig()
# Necessary for all mountable overlay types
layman_inst = LaymanAPI(config=config)
db = DbBase(config, [temp_collection_path])
specific_overlay_path = os.path.join(temp_dir_path, repo_name)
o = db.select(repo_name)
# Actual testcase
o.add(temp_dir_path)
self.assertTrue(os.path.exists(specific_overlay_path))
# (1/2) Sync with source available
o.sync(temp_dir_path)
self.assertTrue(os.path.exists(specific_overlay_path))
os.unlink(temp_archive_path)
try:
# (2/2) Sync with source _not_ available
o.sync(temp_dir_path)
except:
pass
self.assertTrue(os.path.exists(specific_overlay_path))
o.delete(temp_dir_path)
self.assertFalse(os.path.exists(specific_overlay_path))
# Cleanup
os.unlink(temp_collection_path)
os.rmdir(temp_dir_path)
class CLIArgs(unittest.TestCase):
def test(self):
# Append cli args to sys.argv with correspoding options:
sys.argv.append('--config')
sys.argv.append(HERE + '/../../etc/layman.cfg')
sys.argv.append('--overlay_defs')
sys.argv.append('')
# Test the passed in cli opts on the ArgsParser class:
a = ArgsParser()
test_url = 'https://api.gentoo.org/overlays/repositories.xml'
self.assertTrue(test_url in a['overlays'].split('\n'))
test_keys = ['auto_sync', 'bzr_addopts', 'bzr_command', 'bzr_postsync',
'bzr_syncopts', 'cache', 'check_official', 'clean_archive',
'conf_module', 'conf_type', 'config', 'configdir',
'custom_news_pkg', 'cvs_addopts', 'cvs_command',
'cvs_postsync', 'cvs_syncopts', 'darcs_addopts',
'darcs_command', 'darcs_postsync', 'darcs_syncopts',
'db_type', 'g-common_command', 'g-common_generateopts',
'g-common_postsync', 'g-common_syncopts',
'g-sorcery_command', 'g-sorcery_generateopts',
'g-sorcery_postsync', 'g-sorcery_syncopts', 'git_addopts',
'git_command', 'git_email', 'git_postsync', 'git_syncopts',
'git_user', 'gpg_detached_lists', 'gpg_signed_lists',
'http_proxy', 'https_proxy', 'installed', 'local_list',
'make_conf', 'mercurial_addopts', 'mercurial_command',
'mercurial_postsync', 'mercurial_syncopts',
'news_reporter', 'nocheck', 'overlay_defs', 'overlays',
'protocol_filter', 'quietness', 'repos_conf',
'require_repoconfig', 'rsync_command', 'rsync_postsync',
'rsync_syncopts', 'squashfs_addopts', 'squashfs_command',
'squashfs_postsync', 'squashfs_syncopts', 'storage',
'support_url_updates', 'svn_addopts', 'svn_command',
'svn_postsync', 'svn_syncopts', 't/f_options',
'tar_command', 'tar_postsync', 'umask', 'width']
# Due to this not being a dict object, the keys() invocation is needed.
self.assertEqual(sorted(a.keys()), test_keys)
class CreateConfig(unittest.TestCase):
def make_BareConfig(self):
a = BareConfig()
# Test components of the BareConfig class:
self.test_url = 'https://api.gentoo.org/overlays/repositories.xml'
        self.assertEqual(a['overlays'], self.test_url)
self.test_keys = ['bzr_addopts', 'bzr_command', 'bzr_postsync',
'bzr_syncopts', 'cache', 'config', 'configdir',
'custom_news_func', 'custom_news_pkg', 'cvs_addopts',
'cvs_command', 'cvs_postsync', 'cvs_syncopts',
'darcs_addopts', 'darcs_command', 'darcs_postsync',
'darcs_syncopts', 'g-common_command',
'g-common_generateopts', 'g-common_postsync',
'g-common_syncopts', 'git_addopts', 'git_command',
'git_email', 'git_postsync', 'git_syncopts', 'git_user',
'installed', 'local_list', 'make_conf',
'mercurial_addopts', 'mercurial_command',
'mercurial_postsync', 'mercurial_syncopts',
'news_reporter', 'nocheck', 'nocolor', 'output',
'overlay_defs', 'overlays', 'proxy', 'quiet',
'quietness', 'rsync_command', 'rsync_postsync',
'rsync_syncopts', 'stderr', 'stdin', 'stdout', 'storage',
'svn_addopts', 'svn_command', 'svn_postsync',
'svn_syncopts', 't/f_options', 'tar_command',
'tar_postsync', 'umask', 'verbose', 'width']
        self.assertEqual(sorted(a), self.test_keys)
        self.assertEqual(a.get_option('nocheck'), True)
def make_OptionConfig(self):
my_opts = {
'overlays':
["http://www.gentoo-overlays.org/repositories.xml"]
}
new_defaults = {'configdir': '/etc/test-dir'}
a = OptionConfig(options=my_opts, defaults=new_defaults)
# Test components of the OptionConfig class:
        self.assertEqual(a['overlays'], my_opts['overlays'])
        self.assertEqual(a['configdir'], new_defaults['configdir'])
        self.assertEqual(sorted(a), self.test_keys)
def test(self):
for i in ['BareConfig', 'OptionConfig']:
            getattr(self, 'make_%s' % i)()
class FetchRemoteList(unittest.TestCase):
def test(self):
tmpdir = tempfile.mkdtemp(prefix='laymantmp_')
cache = os.path.join(tmpdir, 'cache')
my_opts = {
'overlays': ['file://'\
+ HERE + '/testfiles/global-overlays.xml'],
'db_type': 'xml',
'cache': cache,
'nocheck': 'yes',
'proxy': None,
'quietness': 3
}
config = OptionConfig(my_opts)
api = LaymanAPI(config)
self.assertTrue(api.fetch_remote_list())
filename = api._get_remote_db().filepath(config['overlays']) + '.xml'
with fileopen(filename, 'r') as b:
description = b.readlines()[19]
self.assertEqual(description, ' A collection of ebuilds from '\
'Gunnar Wrobel [[email protected]].\n')
for line in b.readlines():
print(line, end='')
# Check if we get available overlays.
available = api.get_available()
self.assertEqual(available, ['wrobel', 'wrobel-stable'])
# Test the info of an overlay.
info = api.get_info_str(['wrobel'], verbose=True, local=False)
test_info = 'wrobel\n~~~~~~\nSource : https://overlays.gentoo.org'\
'/svn/dev/wrobel\nContact : [email protected]\nType '\
': Subversion; Priority: 10\nQuality : experimental\n\n'\
'Description:\n Test\n'
info = info['wrobel'][0].decode('utf-8')
self.assertEqual(info, test_info)
os.unlink(filename)
shutil.rmtree(tmpdir)
class FormatBranchCategory(unittest.TestCase):
def _run(self, number):
#config = {'output': Message()}
config = BareConfig()
# Discuss renaming files to "branch-%d.xml"
filename1 = os.path.join(HERE, 'testfiles',
'subpath-%d.xml' % number)
# Read, write, re-read, compare
os1 = DbBase(config, [filename1])
filename2 = tempfile.mkstemp()[1]
os1.write(filename2)
os2 = DbBase(config, [filename2])
os.unlink(filename2)
self.assertTrue(os1 == os2)
# Pass original overlays
return os1
def test(self):
os1 = self._run(1)
os2 = self._run(2)
# Same content from old/layman-global.txt
# and new/repositories.xml format?
self.assertTrue(os1 == os2)
class MakeOverlayXML(unittest.TestCase):
def test(self):
temp_dir_path = tempfile.mkdtemp()
my_opts = {
'overlays': ['file://'\
+ HERE + '/testfiles/global-overlays.xml'],
'nocheck': 'yes',
'proxy': None,
'quietness': 3,
}
config = OptionConfig(my_opts)
ovl_dict = {
'name': 'wrobel',
'description': ['Test'],
'owner': [{'name': 'nobody', 'email': '[email protected]'}],
'status': 'official',
'source': [['https://overlays.gentoo.org/svn/dev/wrobel',
'svn', '']],
'priority': '10',
}
a = Overlay(config=config, ovl_dict=ovl_dict, ignore=config['ignore'])
ovl = (ovl_dict['name'], a)
path = temp_dir_path + '/overlay.xml'
create_overlay_xml = Interactive(config=config)
create_overlay_xml(overlay_package=ovl, path=path)
self.assertTrue(os.path.exists(path))
with fileopen(path, 'r') as xml:
test_line = ' <source type="svn">'\
'https://overlays.gentoo.org/svn/dev/wrobel</source>\n'
self.assertTrue(test_line in xml.readlines())
for line in xml.readlines():
print(line, end='')
shutil.rmtree(temp_dir_path)
class OverlayObjTest(unittest.TestCase):
def objattribs(self):
document = ET.parse(HERE + '/testfiles/global-overlays.xml')
overlays = document.findall('overlay') + document.findall('repo')
output = Message()
ovl_a = Overlay({'output': output, 'db_type': 'xml'}, xml=overlays[0])
self.assertEqual(ovl_a.name, 'wrobel')
self.assertEqual(ovl_a.is_official(), True)
url = ['https://overlays.gentoo.org/svn/dev/wrobel']
self.assertEqual(list(ovl_a.source_uris()), url)
self.assertEqual(ovl_a.owners[0]['email'], '[email protected]')
self.assertEqual(ovl_a.descriptions, ['Test'])
self.assertEqual(ovl_a.priority, 10)
ovl_b = Overlay({'output': output, 'db_type': 'xml'}, xml=overlays[1])
self.assertEqual(ovl_b.is_official(), False)
def getinfostr(self):
document = ET.parse(HERE + '/testfiles/global-overlays.xml')
overlays = document.findall('overlay') + document.findall('repo')
output = Message()
ovl = Overlay({'output': output, 'db_type': 'xml'}, xml=overlays[0])
test_infostr = 'wrobel\n~~~~~~\nSource : '\
'https://overlays.gentoo.org/svn/dev/wrobel\nContact '\
': [email protected]\nType : Subversion; Priority: '\
'10\nQuality : experimental\n\nDescription:\n Test\n'
self.assertEqual(ovl.get_infostr().decode('utf-8'), test_infostr)
print(ovl.get_infostr().decode('utf-8'))
def getshortlist(self):
document = ET.parse(HERE + '/testfiles/global-overlays.xml')
overlays = document.findall('overlay') + document.findall('repo')
output = Message()
ovl = Overlay({'output': output, 'db_type': 'xml'}, xml=overlays[0])
test_short_list = 'wrobel [Subversion] '\
'(https://o.g.o/svn/dev/wrobel )'
self.assertEqual(ovl.short_list(80).decode('utf-8'), test_short_list)
print(ovl.short_list(80).decode('utf-8'))
def test(self):
self.objattribs()
self.getinfostr()
self.getshortlist()
class PathUtil(unittest.TestCase):
def test(self):
self.assertEqual(path([]), '')
self.assertEqual(path(['a']), 'a')
self.assertEqual(path(['a', 'b']), 'a/b')
self.assertEqual(path(['a/', 'b']), 'a/b')
self.assertEqual(path(['/a/', 'b']), '/a/b')
self.assertEqual(path(['/a', '/b/']), '/a/b')
self.assertEqual(path(['/a/', 'b/']), '/a/b')
self.assertEqual(path(['/a/','/b/']), '/a/b')
self.assertEqual(path(['/a/','/b','c/']), '/a/b/c')
class Unicode(unittest.TestCase):
def _overlays_bug(self, number):
config = BareConfig()
filename = os.path.join(HERE, 'testfiles', 'overlays_bug_%d.xml'\
% number)
o = DbBase(config, [filename])
for verbose in (True, False):
for t in o.list(verbose=verbose):
print(t[0].decode('utf-8'))
print()
def test_184449(self):
self._overlays_bug(184449)
def test_286290(self):
self._overlays_bug(286290)
class ReadWriteSelectListDbBase(unittest.TestCase):
def list_db(self):
output = Message()
config = {
'output': output,
'db_type': 'xml',
'svn_command': '/usr/bin/svn',
'rsync_command':'/usr/bin/rsync'
}
db = DbBase(config, [HERE + '/testfiles/global-overlays.xml', ])
test_info = ('wrobel\n~~~~~~\nSource : '\
'https://overlays.gentoo.org/svn/dev/wrobel\nContact : '\
'[email protected]\nType : Subversion; Priority: 10\n'\
'Quality : experimental\n\nDescription:\n Test\n',
'wrobel-stable\n~~~~~~~~~~~~~\nSource : '\
'rsync://gunnarwrobel.de/wrobel-stable\nContact : '\
'[email protected]\nType : Rsync; Priority: 50\n'\
'Quality : experimental\n\nDescription:\n A collection '\
'of ebuilds from Gunnar Wrobel [[email protected]].\n')
info = db.list(verbose=True)
for i in range(0, len(info)):
self.assertEqual(info[i][0].decode('utf-8'), test_info[i])
print(info[i][0].decode('utf-8'))
test_info = ('wrobel [Subversion] '\
'(https://o.g.o/svn/dev/wrobel )',
'wrobel-stable [Rsync ] '\
'(rsync://gunnarwrobel.de/wrobel-stable)')
info = db.list(verbose=False, width=80)
for i in range(0, len(info)):
self.assertEqual(info[i][0].decode('utf-8'), test_info[i])
print(info[i][0].decode('utf-8'))
def read_db(self):
output = Message()
# First test if XML databasing works.
config = {'output': output,
'db_type': 'xml',}
db = DbBase(config, [HERE + '/testfiles/global-overlays.xml', ])
keys = sorted(db.overlays)
self.assertEqual(keys, ['wrobel', 'wrobel-stable'])
url = ['rsync://gunnarwrobel.de/wrobel-stable']
self.assertEqual(list(db.overlays['wrobel-stable'].source_uris()), url)
# Test JSON databasing after.
config['db_type'] = 'json'
db = DbBase(config, [HERE + '/testfiles/global-overlays.json', ])
keys = sorted(db.overlays)
self.assertEqual(keys, ['twitch153', 'wrobel-stable'])
url = ['git://github.com/twitch153/ebuilds.git']
self.assertEqual(list(db.overlays['twitch153'].source_uris()), url)
def select_db(self):
output = Message()
config = {'output': output,
'db_type': 'xml',}
db = DbBase(config, [HERE + '/testfiles/global-overlays.xml', ])
url = ['rsync://gunnarwrobel.de/wrobel-stable']
self.assertEqual(list(db.select('wrobel-stable').source_uris()), url)
config['db_type'] = 'json'
db = DbBase(config, [HERE + '/testfiles/global-overlays.json', ])
url = ['git://github.com/twitch153/ebuilds.git']
self.assertEqual(list(db.select('twitch153').source_uris()), url)
def write_db(self):
tmpdir = tempfile.mkdtemp(prefix='laymantmp_')
test_xml = os.path.join(tmpdir, 'test.xml')
test_json = os.path.join(tmpdir, 'test.json')
config = BareConfig()
a = DbBase(config, [HERE + '/testfiles/global-overlays.xml', ])
b = DbBase({'output': Message(), 'db_type': 'xml'}, [test_xml,])
b.overlays['wrobel-stable'] = a.overlays['wrobel-stable']
b.write(test_xml)
c = DbBase({'output': Message(), 'db_type': 'xml'}, [test_xml,])
keys = sorted(c.overlays)
self.assertEqual(keys, ['wrobel-stable'])
config.set_option('db_type', 'json')
a = DbBase(config, [HERE + '/testfiles/global-overlays.json', ])
b = DbBase({'output': Message(), 'db_type': 'json'}, [test_json,])
b.overlays['twitch153'] = a.overlays['twitch153']
b.write(test_json)
c = DbBase({'output': Message(), 'db_type': 'json'}, [test_json,])
keys = sorted(c.overlays)
self.assertEqual(keys, ['twitch153'])
# Clean up:
os.unlink(test_xml)
os.unlink(test_json)
shutil.rmtree(tmpdir)
def test(self):
self.list_db()
self.read_db()
self.select_db()
self.write_db()
class RemoteDBCache(unittest.TestCase):
def test(self):
tmpdir = tempfile.mkdtemp(prefix='laymantmp_')
cache = os.path.join(tmpdir, 'cache')
my_opts = {
'overlays' :
['file://' + HERE + '/testfiles/global-overlays.xml'],
'cache' : cache,
'nocheck' : 'yes',
'proxy' : None
}
config = OptionConfig(my_opts)
db = RemoteDB(config)
self.assertEqual(db.cache(), (True, True))
db_xml = fileopen(db.filepath(config['overlays']) + '.xml')
test_line = ' A collection of ebuilds from Gunnar Wrobel '\
'[[email protected]].\n'
self.assertEqual(db_xml.readlines()[19], test_line)
for line in db_xml.readlines():
print(line, end='')
db_xml.close()
keys = sorted(db.overlays)
self.assertEqual(keys, ['wrobel', 'wrobel-stable'])
shutil.rmtree(tmpdir)
if __name__ == '__main__':
filterwarnings('ignore')
unittest.main()
| gpl-2.0 | -6,600,322,977,730,521,000 | 35.470052 | 86 | 0.542468 | false |
AbhiAgarwal/prep | python/trie.py | 1 | 3121 | class Node:
"""Node for Python Trie Implementation"""
def __init__(self):
self.word = None
self.nodes = {} # dict of nodes
def __get_all__(self):
"""Get all of the words in the trie"""
x = []
for key, node in self.nodes.iteritems() :
if(node.word is not None):
x.append(node.word)
x += node.__get_all__()
return x
def __str__(self):
return self.word
def __insert__(self, word, string_pos = 0):
"""Add a word to the node in a Trie"""
current_letter = word[string_pos]
# Create the Node if it does not already exist
if current_letter not in self.nodes:
self.nodes[current_letter] = Node();
if(string_pos + 1 == len(word)):
self.nodes[current_letter].word = word
else:
self.nodes[current_letter].__insert__(word, string_pos + 1)
return True
def __get_all_with_prefix__(self, prefix, string_pos):
"""Return all nodes in a trie with a given prefix or that are equal to the prefix"""
x = []
for key, node in self.nodes.iteritems() :
# If the current character of the prefix is one of the nodes or we have
# already satisfied the prefix match, then get the matches
if(string_pos >= len(prefix) or key == prefix[string_pos]):
if(node.word is not None):
x.append(node.word)
if(node.nodes != {}):
if(string_pos + 1 <= len(prefix)):
x += node.__get_all_with_prefix__(prefix, string_pos + 1)
else:
x += node.__get_all_with_prefix__(prefix, string_pos)
return x
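# Note: each lookup walks one Node per character, so insertion and prefix search
# cost O(len(word)) node hops, plus the size of the matched subtree when the
# results are collected recursively.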
class Trie:
"""Trie Python Implementation"""
def __init__(self):
self.root = Node()
def insert(self, word):
self.root.__insert__(word)
def get_all(self):
return self.root.__get_all__()
def get_all_with_prefix(self, prefix, string_pos = 0):
return self.root.__get_all_with_prefix__(prefix, string_pos)
# Create the trie and insert some words then do some tests
trie = Trie()
trie.insert("go")
trie.insert("gone")
trie.insert("gi")
trie.insert("cool")
trie.insert("comb")
trie.insert("grasshopper")
trie.insert("home")
trie.insert("hope")
trie.insert("hose")
print "Make sure that the data structure is correctly set up by accesing words manually: "
print str(trie.root.nodes['g'].nodes['o'])
print str(trie.root.nodes['g'].nodes['i'])
print str(trie.root.nodes['c'].nodes['o'].nodes['o'].nodes['l'])
print "\n"
print "print all words to make sure they are all there: "
print trie.get_all()
print "\n"
print "print out all the words with the given prefixes: "
print trie.get_all_with_prefix("g")
print trie.get_all_with_prefix("go")
print trie.get_all_with_prefix("co")
print trie.get_all_with_prefix("hom")
print trie.get_all_with_prefix("gr")
| mit | -270,410,005,839,756,830 | 30.22 | 92 | 0.55495 | false |
tobiz/OGN-Flight-Logger_V2 | flogger_email_msg.py | 1 | 1204 | import smtplib
import base64
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import encoders
from __builtin__ import file
import settings
import os
import datetime
def email_msg(sender, receiver, msg, date, settings):
# print "Send take off msg"
if settings.FLOGGER_TAKEOFF_EMAIL != "y" and settings.FLOGGER_TAKEOFF_EMAIL != "Y":
# Don't send take off email msg
return
# body = "Msg from %s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
body = "%s. %s taken off @ %s" % (settings.APRS_USER, msg, date)
print body
msg = MIMEMultipart()
msg.attach(MIMEText(body, 'plain'))
fromaddr = sender
toaddr = receiver
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = body
server = smtplib.SMTP(settings.FLOGGER_SMTP_SERVER_URL, settings.FLOGGER_SMTP_SERVER_PORT)
text = msg.as_string()
# print "Msg string is: ", text
try:
server.sendmail(fromaddr, toaddr, text)
except Exception as e:
print "Send email_msg failed, reason: ", e
server.quit()
return
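# Illustrative usage sketch (not from the original module; the addresses, call
# sign and date below are placeholders). It assumes the imported `settings`
# module defines the attributes referenced above (FLOGGER_TAKEOFF_EMAIL,
# APRS_USER, FLOGGER_SMTP_SERVER_URL, FLOGGER_SMTP_SERVER_PORT):
#
#     import datetime
#     import settings
#     email_msg("[email protected]", "[email protected]",
#               "G-ABCD", datetime.datetime.now(), settings)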
| gpl-3.0 | 594,446,723,311,701,000 | 29.897436 | 94 | 0.662791 | false |
coldfix/udiskie | udiskie/tray.py | 1 | 16508 | """
Tray icon for udiskie.
"""
from gi.repository import Gio
from gi.repository import Gtk
from .async_ import run_bg, Future
from .common import setdefault, DaemonBase, cachedmethod
from .locale import _
from .mount import Action, prune_empty_node
from .prompt import Dialog
from .icons import IconDist
import os
__all__ = ['TrayMenu', 'TrayIcon']
class MenuFolder:
def __init__(self, label, items):
self.label = label
self.items = items
def __bool__(self):
return bool(self.items)
__nonzero__ = __bool__
class MenuSection(MenuFolder):
pass
class SubMenu(MenuFolder):
pass
class Icons:
"""Encapsulates the responsibility to load icons."""
_icon_names = {
'media': [
'drive-removable-media-usb-panel',
'drive-removable-media-usb-pendrive',
'drive-removable-media-usb',
'drive-removable-media',
'media-optical',
'media-flash',
],
'browse': ['document-open', 'folder-open'],
'terminal': ['terminal', 'utilities-terminal'],
'mount': ['udiskie-mount'],
'unmount': ['udiskie-unmount'],
'unlock': ['udiskie-unlock'],
'lock': ['udiskie-lock'],
'eject': ['udiskie-eject', 'media-eject'],
'detach': ['udiskie-detach'],
'quit': ['application-exit'],
'forget_password': ['edit-delete'],
'delete': ['udiskie-eject'],
'losetup': ['udiskie-mount'],
# checkbox workaround:
'checked': ['checkbox-checked', 'udiskie-checkbox-checked'],
'unchecked': ['checkbox', 'udiskie-checkbox-unchecked'],
'submenu': ['udiskie-submenu', 'pan-end-symbolic'],
}
def __init__(self, icon_names={}):
"""Merge ``icon_names`` into default icon names."""
self._icon_dist = IconDist()
_icon_names = icon_names.copy()
setdefault(_icon_names, self.__class__._icon_names)
self._icon_names = _icon_names
for k, v in _icon_names.items():
if isinstance(v, str):
self._icon_names[k] = v = [v]
self._icon_names[k] = self._icon_dist.patch_list(v)
@cachedmethod
def get_icon_name(self, icon_id: str) -> str:
"""Lookup the system icon name from udisie-internal id."""
icon_theme = Gtk.IconTheme.get_default()
for name in self._icon_names[icon_id]:
if icon_theme.has_icon(name):
return name
elif os.path.exists(name):
return name
return 'not-available'
def get_icon(self, icon_id: str, size: "Gtk.IconSize") -> "Gtk.Image":
"""Load Gtk.Image from udiskie-internal id."""
return Gtk.Image.new_from_gicon(self.get_gicon(icon_id), size)
def get_gicon(self, icon_id: str) -> "Gio.Icon":
"""Lookup Gio.Icon from udiskie-internal id."""
name = self.get_icon_name(icon_id)
if os.path.exists(name):
# TODO (?): we could also add the icon to the theme using
# Gtk.IconTheme.append_search_path or .add_resource_path:
file = Gio.File.new_for_path(name)
return Gio.FileIcon.new(file)
else:
return Gio.ThemedIcon.new(name)
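# Note on Icons: get_icon_name() is cached (cachedmethod) and walks the
# candidate names for an id, preferring icons present in the Gtk icon theme,
# then plain file paths, and finally falling back to 'not-available';
# get_gicon() mirrors that split by returning a Gio.FileIcon for file paths
# and a Gio.ThemedIcon otherwise.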
class TrayMenu:
"""
Builder for udiskie menus.
Objects of this class generate action menus when being called.
"""
def __init__(self, daemon, icons, actions, flat=True,
quickmenu_actions=None,
checkbox_workaround=False,
update_workaround=False):
"""
Initialize a new menu maker.
:param object mounter: mount operation provider
:param Icons icons: icon provider
:param DeviceActions actions: device actions discovery
:returns: a new menu maker
:rtype: cls
Required keys for the ``_labels``, ``_menu_icons`` and
``actions`` dictionaries are:
- browse Open mount location
- terminal Open mount location in terminal
- mount Mount a device
- unmount Unmount a device
- unlock Unlock a LUKS device
- lock Lock a LUKS device
- eject Eject a drive
- detach Detach (power down) a drive
- quit Exit the application
NOTE: If using a main loop other than ``Gtk.main`` the 'quit' action
must be customized.
"""
self._icons = icons
self._daemon = daemon
self._mounter = daemon.mounter
self._actions = actions
self._quit_action = daemon.mainloop.quit
self.flat = flat
# actions shown in the quick-menu ("flat", left-click):
self._quickmenu_actions = quickmenu_actions or [
'mount',
'browse',
'terminal',
'unlock',
'detach',
'delete',
# suppressed:
# 'unmount',
# 'lock',
# 'eject',
# 'forget_password',
]
self._checkbox_workaround = checkbox_workaround
self._update_workaround = update_workaround
def __call__(self, menu, extended=True):
"""Populate the Gtk.Menu with udiskie mount operations."""
# create actions items
flat = self.flat and not extended
if self._update_workaround:
# When showing menus via AppIndicator3 on sway, the menu geometry
# seems to be calculated before the 'about-to-show' event, and
# therefore cannot take into account newly inserted menu items.
# For this reason, we have to keep the top-level menu fixed-size
# and insert dynamic entries into a submenu.
devmenu = Gtk.Menu()
menu.append(self._menuitem(
label=_('Managed devices'),
icon=None,
onclick=devmenu,
))
else:
devmenu = menu
self._create_menu_items(
devmenu, self._prepare_menu(self.detect(), flat))
if extended:
self._insert_options(menu)
return menu
def _insert_options(self, menu):
"""Add configuration options to menu."""
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_('Mount disc image'),
self._icons.get_icon('losetup', Gtk.IconSize.MENU),
run_bg(lambda _: self._losetup())
))
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_("Enable automounting"),
icon=None,
onclick=lambda _: self._daemon.automounter.toggle_on(),
checked=self._daemon.automounter.is_on(),
))
menu.append(self._menuitem(
_("Enable notifications"),
icon=None,
onclick=lambda _: self._daemon.notify.toggle(),
checked=self._daemon.notify.active,
))
# append menu item for closing the application
if self._quit_action:
menu.append(Gtk.SeparatorMenuItem())
menu.append(self._menuitem(
_('Quit'),
self._icons.get_icon('quit', Gtk.IconSize.MENU),
lambda _: self._quit_action()
))
async def _losetup(self):
gtk_dialog = Gtk.FileChooserDialog(
_('Open disc image'), None,
Gtk.FileChooserAction.OPEN,
(_('Open'), Gtk.ResponseType.OK,
_('Cancel'), Gtk.ResponseType.CANCEL))
with Dialog(gtk_dialog) as dialog:
response = await dialog
if response == Gtk.ResponseType.OK:
await self._mounter.losetup(dialog.window.get_filename())
def detect(self):
"""Detect all currently known devices. Returns the root device."""
root = self._actions.detect()
prune_empty_node(root, set())
return root
def _create_menu(self, items):
"""
Create a menu from the given node.
:param list items: list of menu items
:returns: a new Gtk.Menu object holding all items of the node
"""
menu = Gtk.Menu()
self._create_menu_items(menu, items)
return menu
def _create_menu_items(self, menu, items):
def make_action_callback(node):
return run_bg(lambda _: node.action())
for node in items:
if isinstance(node, Action):
menu.append(self._menuitem(
node.label,
self._icons.get_icon(node.method, Gtk.IconSize.MENU),
make_action_callback(node)))
elif isinstance(node, SubMenu):
menu.append(self._menuitem(
node.label,
icon=None,
onclick=self._create_menu(node.items)))
elif isinstance(node, MenuSection):
self._create_menu_section(menu, node)
else:
raise ValueError(_("Invalid node!"))
if len(menu) == 0:
mi = self._menuitem(_("No external devices"), None, None)
mi.set_sensitive(False)
menu.append(mi)
def _create_menu_section(self, menu, section):
if len(menu) > 0:
menu.append(Gtk.SeparatorMenuItem())
if section.label:
mi = self._menuitem(section.label, None, None)
mi.set_sensitive(False)
menu.append(mi)
self._create_menu_items(menu, section.items)
def _menuitem(self, label, icon, onclick, checked=None):
"""
Create a generic menu item.
:param str label: text
:param Gtk.Image icon: icon (may be ``None``)
:param onclick: onclick handler, either a callable or Gtk.Menu
:returns: the menu item object
:rtype: Gtk.MenuItem
"""
if self._checkbox_workaround:
if checked is not None:
icon_name = 'checked' if checked else 'unchecked'
icon = self._icons.get_icon(icon_name, Gtk.IconSize.MENU)
checked = None
elif isinstance(onclick, Gtk.Menu):
icon = self._icons.get_icon('submenu', Gtk.IconSize.MENU)
if checked is not None:
item = Gtk.CheckMenuItem()
item.set_active(checked)
elif icon is None:
item = Gtk.MenuItem()
else:
item = Gtk.ImageMenuItem()
item.set_image(icon)
# I don't really care for the "show icons only for nouns, not
# for verbs" policy:
item.set_always_show_image(True)
if label is not None:
item.set_label(label)
if isinstance(onclick, Gtk.Menu):
item.set_submenu(onclick)
elif onclick is not None:
item.connect('activate', onclick)
return item
def _prepare_menu(self, node, flat=None):
"""
Prepare the menu hierarchy from the given device tree.
:param Device node: root node of device hierarchy
:returns: menu hierarchy as list
"""
if flat is None:
flat = self.flat
ItemGroup = MenuSection if flat else SubMenu
return [
ItemGroup(branch.label, self._collapse_device(branch, flat))
for branch in node.branches
if branch.methods or branch.branches
]
def _collapse_device(self, node, flat):
"""Collapse device hierarchy into a flat folder."""
items = [item
for branch in node.branches
for item in self._collapse_device(branch, flat)
if item]
show_all = not flat or self._quickmenu_actions == 'all'
methods = node.methods if show_all else [
method
for method in node.methods
if method.method in self._quickmenu_actions
]
if flat:
items.extend(methods)
else:
items.append(MenuSection(None, methods))
return items
class TrayIcon:
"""Default TrayIcon class."""
def __init__(self, menumaker, icons, statusicon=None):
"""
Create an object managing a tray icon.
The actual Gtk.StatusIcon is only created as soon as you call show()
for the first time. The reason to delay its creation is that the GTK
icon will be initially visible, which results in a perceptable
flickering.
:param TrayMenu menumaker: menu factory
:param Gtk.StatusIcon statusicon: status icon
"""
self._icons = icons
self._icon = statusicon
self._menu = menumaker
self._conn_left = None
self._conn_right = None
self.task = Future()
menumaker._quit_action = self.destroy
def destroy(self):
self.show(False)
self.task.set_result(True)
def _create_statusicon(self):
"""Return a new Gtk.StatusIcon."""
statusicon = Gtk.StatusIcon()
statusicon.set_from_gicon(self._icons.get_gicon('media'))
statusicon.set_tooltip_text(_("udiskie"))
return statusicon
@property
def visible(self):
"""Return visibility state of icon."""
return bool(self._conn_left)
def show(self, show=True):
"""Show or hide the tray icon."""
if show and not self.visible:
self._show()
if not show and self.visible:
self._hide()
def _show(self):
"""Show the tray icon."""
if not self._icon:
self._icon = self._create_statusicon()
widget = self._icon
widget.set_visible(True)
self._conn_left = widget.connect("activate", self._activate)
self._conn_right = widget.connect("popup-menu", self._popup_menu)
def _hide(self):
"""Hide the tray icon."""
self._icon.set_visible(False)
self._icon.disconnect(self._conn_left)
self._icon.disconnect(self._conn_right)
self._conn_left = None
self._conn_right = None
def create_context_menu(self, extended):
"""Create the context menu."""
menu = Gtk.Menu()
self._menu(menu, extended)
return menu
def _activate(self, icon):
"""Handle a left click event (show the menu)."""
self._popup_menu(icon, button=0, time=Gtk.get_current_event_time(),
extended=False)
def _popup_menu(self, icon, button, time, extended=True):
"""Handle a right click event (show the menu)."""
m = self.create_context_menu(extended)
m.show_all()
m.popup(parent_menu_shell=None,
parent_menu_item=None,
func=icon.position_menu,
data=icon,
button=button,
activate_time=time)
# need to store reference or menu will be destroyed before showing:
self._m = m
class UdiskieStatusIcon(DaemonBase):
"""
Manage a status icon.
When `smart` is on, the icon will automatically hide if there is no action
available and the menu will have no 'Quit' item.
"""
def __init__(self, icon, menumaker, smart=False):
self._icon = icon
self._menumaker = menumaker
self._mounter = menumaker._mounter
self._quit_action = menumaker._quit_action
self.smart = smart
self.active = False
self.events = {
'device_changed': self.update,
'device_added': self.update,
'device_removed': self.update,
}
def activate(self):
super().activate()
self.update()
def deactivate(self):
super().deactivate()
self._icon.show(False)
@property
def smart(self):
return getattr(self, '_smart', None)
@smart.setter
def smart(self, smart):
if smart == self.smart:
return
if smart:
self._menumaker._quit_action = None
else:
self._menumaker._quit_action = self._quit_action
self._smart = smart
self.update()
def has_menu(self):
"""Check if a menu action is available."""
return any(self._menumaker._prepare_menu(self._menumaker.detect()))
def update(self, *args):
"""Show/hide icon depending on whether there are devices."""
if self.smart:
self._icon.show(self.has_menu())
else:
self._icon.show(True)
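# Illustrative wiring sketch (not part of udiskie itself; `daemon`, `actions`
# and the icon-name mapping are assumptions supplied by the surrounding
# application):
#
#     icons = Icons({})                      # or a user-configured name map
#     menu = TrayMenu(daemon, icons, actions, flat=True)
#     icon = TrayIcon(menu, icons)
#     status = UdiskieStatusIcon(icon, menu, smart=True)
#     status.activate()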
| mit | 6,167,450,907,907,872,000 | 32.148594 | 78 | 0.558941 | false |
DavidAndreev/indico | indico/modules/events/timetable/legacy.py | 1 | 15015 | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from collections import defaultdict
from flask import session
from sqlalchemy.orm import defaultload
from indico.modules.events.contributions.models.persons import AuthorType
from indico.modules.events.timetable.models.entries import TimetableEntry, TimetableEntryType
from indico.util.date_time import iterdays
from indico.web.flask.util import url_for
from MaKaC.common.fossilize import fossilize
from MaKaC.fossils.conference import IConferenceEventInfoFossil
class TimetableSerializer(object):
def __init__(self, management=False):
self.management = management
def serialize_timetable(self, event, days=None, hide_weekends=False, strip_empty_days=False):
event.preload_all_acl_entries()
timetable = {}
for day in iterdays(event.start_dt_local, event.end_dt_local, skip_weekends=hide_weekends, day_whitelist=days):
date_str = day.strftime('%Y%m%d')
timetable[date_str] = {}
contributions_strategy = defaultload('contribution')
contributions_strategy.subqueryload('person_links')
contributions_strategy.subqueryload('references')
query_options = (contributions_strategy,
defaultload('session_block').subqueryload('person_links'))
query = (TimetableEntry.query.with_parent(event)
.options(*query_options)
.order_by(TimetableEntry.type != TimetableEntryType.SESSION_BLOCK))
for entry in query:
day = entry.start_dt.astimezone(event.tzinfo).date()
date_str = day.strftime('%Y%m%d')
if date_str not in timetable:
continue
if not entry.can_view(session.user):
continue
data = self.serialize_timetable_entry(entry, load_children=False)
key = self._get_entry_key(entry)
if entry.parent:
parent_code = 's{}'.format(entry.parent_id)
timetable[date_str][parent_code]['entries'][key] = data
else:
timetable[date_str][key] = data
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
def serialize_session_timetable(self, session_, without_blocks=False, strip_empty_days=False):
timetable = {}
for day in iterdays(session_.event_new.start_dt_local, session_.event_new.end_dt_local):
timetable[day.strftime('%Y%m%d')] = {}
for block in session_.blocks:
block_entry = block.timetable_entry
if not block_entry:
continue
date_key = block_entry.start_dt.astimezone(session_.event_new.tzinfo).strftime('%Y%m%d')
entries = block_entry.children if without_blocks else [block_entry]
for entry in entries:
if not entry.can_view(session.user):
continue
entry_key = self._get_entry_key(entry)
timetable[date_key][entry_key] = self.serialize_timetable_entry(entry, load_children=True)
if strip_empty_days:
timetable = self._strip_empty_days(timetable)
return timetable
@staticmethod
def _strip_empty_days(timetable):
"""Return the timetable without the leading and trailing empty days."""
days = sorted(timetable)
first_non_empty = next((day for day in days if timetable[day]), None)
if first_non_empty is None:
return {}
last_non_empty = next((day for day in reversed(days) if timetable[day]), first_non_empty)
return {day: timetable[day] for day in days if first_non_empty <= day <= last_non_empty}
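    # Worked example: given days {'20160101': {}, '20160102': {'c1': ...},
    # '20160103': {}, '20160104': {'c2': ...}, '20160105': {}}, the result keeps
    # '20160102' through '20160104' (empty middle days included); only the
    # leading and trailing empty days are dropped.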
def serialize_timetable_entry(self, entry, **kwargs):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return self.serialize_session_block_entry(entry, kwargs.pop('load_children', True))
elif entry.type == TimetableEntryType.CONTRIBUTION:
return self.serialize_contribution_entry(entry)
elif entry.type == TimetableEntryType.BREAK:
return self.serialize_break_entry(entry)
else:
raise TypeError("Unknown timetable entry type.")
def serialize_session_block_entry(self, entry, load_children=True):
block = entry.session_block
data = {}
if not load_children:
entries = defaultdict(dict)
else:
entries = {self._get_entry_key(x): self.serialize_timetable_entry(x) for x in entry.children}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(block.session))
data.update(self._get_location_data(block))
data.update({'entryType': 'Session',
'sessionSlotId': block.id,
'sessionId': block.session_id,
'sessionCode': block.session.code,
'title': block.session.title,
'slotTitle': block.title,
'attachments': self._get_attachment_data(block.session),
'code': block.session.code,
'contribDuration': block.session.default_contribution_duration.seconds / 60,
'conveners': [self._get_person_data(x) for x in block.person_links],
'description': block.session.description,
'duration': block.duration.seconds / 60,
'isPoster': block.session.is_poster,
'entries': entries,
'pdf': url_for('sessions.export_session_timetable', block.session),
'url': url_for('sessions.display_session', block.session),
'friendlyId': block.session.friendly_id})
return data
def serialize_contribution_entry(self, entry):
from indico.modules.events.api import SerializerBase
block = entry.parent.session_block if entry.parent else None
contribution = entry.contribution
data = {}
data.update(self._get_entry_data(entry))
if contribution.session:
data.update(self._get_color_data(contribution.session))
data.update(self._get_location_data(contribution))
data.update({'entryType': 'Contribution',
'_type': 'ContribSchEntry',
'_fossil': 'contribSchEntryDisplay',
'contributionId': contribution.id,
'attachments': self._get_attachment_data(contribution),
'description': contribution.description,
'duration': contribution.duration.seconds / 60,
'pdf': url_for('contributions.export_pdf', entry.contribution),
'presenters': map(self._get_person_data,
sorted(contribution.person_links,
key=lambda x: (x.author_type != AuthorType.primary,
x.author_type != AuthorType.secondary))),
'sessionCode': block.session.code if block else None,
'sessionId': block.session_id if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': contribution.title,
'url': url_for('contributions.display_contribution', contribution),
'friendlyId': contribution.friendly_id,
'references': map(SerializerBase.serialize_reference, contribution.references)})
return data
def serialize_break_entry(self, entry, management=False):
block = entry.parent.session_block if entry.parent else None
break_ = entry.break_
data = {}
data.update(self._get_entry_data(entry))
data.update(self._get_color_data(break_))
data.update(self._get_location_data(break_))
data.update({'entryType': 'Break',
'_type': 'BreakTimeSchEntry',
'_fossil': 'breakTimeSchEntry',
'description': break_.description,
'duration': break_.duration.seconds / 60,
'sessionId': block.session_id if block else None,
'sessionCode': block.session.code if block else None,
'sessionSlotId': block.id if block else None,
'sessionSlotEntryId': entry.parent.id if entry.parent else None,
'title': break_.title})
return data
def _get_attachment_data(self, obj):
def serialize_attachment(attachment):
return {'id': attachment.id,
'_type': 'Attachment',
'_fossil': 'attachment',
'title': attachment.title,
'download_url': attachment.download_url}
def serialize_folder(folder):
return {'id': folder.id,
'_type': 'AttachmentFolder',
'_fossil': 'folder',
'title': folder.title,
'attachments': map(serialize_attachment, folder.attachments)}
data = {'files': [], 'folders': []}
items = obj.attached_items
data['files'] = map(serialize_attachment, items.get('files', []))
data['folders'] = map(serialize_folder, items.get('folders', []))
if not data['files'] and not data['folders']:
data['files'] = None
return data
def _get_color_data(self, obj):
return {'color': '#' + obj.background_color,
'textColor': '#' + obj.text_color}
def _get_date_data(self, entry):
if self.management:
tzinfo = entry.event_new.tzinfo
else:
tzinfo = entry.event_new.display_tzinfo
return {'startDate': self._get_entry_date_dt(entry.start_dt, tzinfo),
'endDate': self._get_entry_date_dt(entry.end_dt, tzinfo)}
def _get_entry_data(self, entry):
from indico.modules.events.timetable.operations import can_swap_entry
data = {}
data.update(self._get_date_data(entry))
data['id'] = self._get_entry_key(entry)
data['uniqueId'] = data['id']
data['conferenceId'] = entry.event_id
if self.management:
data['isParallel'] = entry.is_parallel()
data['isParallelInSession'] = entry.is_parallel(in_session=True)
data['scheduleEntryId'] = entry.id
data['canSwapUp'] = can_swap_entry(entry, direction='up')
data['canSwapDown'] = can_swap_entry(entry, direction='down')
return data
def _get_entry_key(self, entry):
if entry.type == TimetableEntryType.SESSION_BLOCK:
return 's{}'.format(entry.id)
elif entry.type == TimetableEntryType.CONTRIBUTION:
return 'c{}'.format(entry.id)
elif entry.type == TimetableEntryType.BREAK:
return 'b{}'.format(entry.id)
else:
raise ValueError()
def _get_entry_date_dt(self, dt, tzinfo):
return {'date': dt.astimezone(tzinfo).strftime('%Y-%m-%d'),
'time': dt.astimezone(tzinfo).strftime('%H:%M:%S'),
'tz': str(tzinfo)}
def _get_location_data(self, obj):
data = {}
data['location'] = obj.venue_name
data['room'] = obj.room_name
data['inheritLoc'] = obj.inherit_location
data['inheritRoom'] = obj.inherit_location
if self.management:
data['address'] = obj.address
return data
def _get_person_data(self, person_link):
return {'firstName': person_link.first_name,
'familyName': person_link.last_name,
'affiliation': person_link.affiliation,
'email': person_link.person.email,
'name': person_link.get_full_name(last_name_first=False, last_name_upper=False,
abbrev_first_name=False, show_title=True),
'displayOrderKey': person_link.display_order_key}
def serialize_contribution(contribution):
return {'id': contribution.id,
'friendly_id': contribution.friendly_id,
'title': contribution.title}
def serialize_day_update(event, day, block=None, session_=None):
serializer = TimetableSerializer(management=True)
timetable = serializer.serialize_session_timetable(session_) if session_ else serializer.serialize_timetable(event)
block_id = serializer._get_entry_key(block) if block else None
day = day.strftime('%Y%m%d')
return {'day': day,
'entries': timetable[day] if not block else timetable[day][block_id]['entries'],
'slotEntry': serializer.serialize_session_block_entry(block) if block else None}
def serialize_entry_update(entry, with_timetable=False, session_=None):
serializer = TimetableSerializer(management=True)
day = entry.start_dt.astimezone(entry.event_new.tzinfo)
day_update = serialize_day_update(entry.event_new, day, block=entry.parent, session_=session_)
return dict({'id': serializer._get_entry_key(entry),
'entry': serializer.serialize_timetable_entry(entry),
'autoOps': None},
**day_update)
def serialize_event_info(event):
conf = event.as_legacy
event_info = fossilize(conf, IConferenceEventInfoFossil, tz=conf.tz)
event_info['isCFAEnabled'] = conf.getAbstractMgr().isActive()
event_info['sessions'] = {sess.id: serialize_session(sess) for sess in event.sessions}
return event_info
def serialize_session(sess):
"""Return data for a single session"""
data = {
'_type': 'Session',
'address': sess.address,
'color': '#' + sess.colors.background,
'description': sess.description,
'id': sess.id,
'isPoster': sess.is_poster,
'location': sess.venue_name,
'room': sess.room_name,
'roomFullname': sess.room_name,
'textColor': '#' + sess.colors.text,
'title': sess.title,
'url': url_for('sessions.display_session', sess)
}
return data
| gpl-3.0 | 6,942,691,855,398,678,000 | 45.775701 | 119 | 0.599734 | false |
abadger/stellarmagnate | magnate/ui/urwid/abcwidget.py | 1 | 1128 | # Stellar Magnate - A space-themed commodity trading game
# Copyright (C) 2016-2017 Toshio Kuratomi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
A metaclass that combines urwid's widget metaclass with abstract base class
We need to define this to solve which base class can override the other.
"""
from abc import ABCMeta
import urwid
class ABCWidget(urwid.WidgetMeta, ABCMeta):
"""Combine ABCMeta and Widgetmeta together so that we can have a metaclass with both"""
pass
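# Minimal sketch of how this metaclass is typically used (hypothetical class,
# not part of this module): deriving from an urwid widget while declaring
# abstract methods, which a single metaclass (WidgetMeta or ABCMeta alone)
# would not permit without a metaclass conflict.
#
#     from abc import abstractmethod
#
#     class AbstractPane(urwid.WidgetWrap, metaclass=ABCWidget):
#         @abstractmethod
#         def update(self):
#             """Refresh the pane's contents."""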
| agpl-3.0 | -3,600,078,121,113,003,000 | 37.896552 | 91 | 0.766844 | false |
jptomo/rpython-lang-scheme | rpython/translator/c/test/test_extfunc.py | 1 | 28437 | import py
import os, time, sys
from rpython.tool.udir import udir
from rpython.rlib.rarithmetic import r_longlong
from rpython.annotator import model as annmodel
from rpython.translator.c.test.test_genc import compile
from rpython.translator.c.test.test_standalone import StandaloneTests
posix = __import__(os.name)
def test_time_clock():
def does_stuff():
t1 = t2 = time.clock()
while abs(t2 - t1) < 0.01:
t2 = time.clock()
return t2 - t1
f1 = compile(does_stuff, [])
t = f1()
assert 0 < t < 1.5
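# Note: the tests below share one pattern -- define an RPython-level function,
# translate it to C with compile() from test_genc, call the resulting compiled
# function, and compare its behaviour against the same operation performed by
# the host CPython process.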
def test_time_sleep():
def does_nothing():
time.sleep(0.19)
f1 = compile(does_nothing, [])
t0 = time.time()
f1()
t1 = time.time()
assert t0 <= t1
assert t1 - t0 >= 0.15
def test_os_open():
tmpfile = str(udir.join('test_os_open.txt'))
def does_stuff():
fd = os.open(tmpfile, os.O_WRONLY | os.O_CREAT, 0777)
os.close(fd)
return fd
f1 = compile(does_stuff, [])
fd = f1()
assert os.path.exists(tmpfile)
def test_failing_os_open():
tmpfile = str(udir.join('test_failing_os_open.DOESNTEXIST'))
def does_stuff():
fd = os.open(tmpfile, os.O_RDONLY, 0777)
return fd
f1 = compile(does_stuff, [])
f1(expected_exception_name='OSError')
assert not os.path.exists(tmpfile)
def test_open_read_write_seek_close():
filename = str(udir.join('test_open_read_write_close.txt'))
def does_stuff():
fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0777)
count = os.write(fd, "hello world\n")
assert count == len("hello world\n")
os.close(fd)
fd = os.open(filename, os.O_RDONLY, 0777)
result = os.lseek(fd, 1, 0)
assert result == 1
data = os.read(fd, 500)
assert data == "ello world\n"
os.close(fd)
f1 = compile(does_stuff, [])
f1()
with open(filename, 'r') as fid:
assert fid.read() == "hello world\n"
os.unlink(filename)
def test_big_read():
filename = str(udir.join('test_open_read_write_close.txt'))
def does_stuff():
fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0777)
count = os.write(fd, "hello world\n")
os.close(fd)
fd = os.open(filename, os.O_RDONLY, 0777)
data = os.read(fd, 500000)
os.close(fd)
f1 = compile(does_stuff, [])
f1()
os.unlink(filename)
def test_ftruncate():
if not hasattr(os, 'ftruncate'):
py.test.skip("this os has no ftruncate :-(")
filename = str(udir.join('test_open_read_write_close.txt'))
def does_stuff():
fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0777)
os.write(fd, "hello world\n")
os.close(fd)
fd = os.open(filename, os.O_RDWR, 0777)
os.ftruncate(fd, 5)
data = os.read(fd, 500)
assert data == "hello"
os.close(fd)
does_stuff()
f1 = compile(does_stuff, [])
f1()
os.unlink(filename)
def need_sparse_files():
if sys.platform == 'darwin':
py.test.skip("no sparse files on default Mac OS X file system")
if os.name == 'nt':
py.test.skip("no sparse files on Windows")
def test_largefile():
if not hasattr(os, 'ftruncate'):
py.test.skip("this os has no ftruncate :-(")
need_sparse_files()
filename = str(udir.join('test_largefile'))
r4800000000 = r_longlong(4800000000L)
r4900000000 = r_longlong(4900000000L)
r5000000000 = r_longlong(5000000000L)
r5200000000 = r_longlong(5200000000L)
r9900000000 = r_longlong(9900000000L)
r10000000000 = r_longlong(10000000000L)
def does_stuff():
fd = os.open(filename, os.O_RDWR | os.O_CREAT, 0666)
os.ftruncate(fd, r10000000000)
res = os.lseek(fd, r9900000000, 0)
assert res == r9900000000
res = os.lseek(fd, -r5000000000, 1)
assert res == r4900000000
res = os.lseek(fd, -r5200000000, 2)
assert res == r4800000000
os.close(fd)
try:
os.lseek(fd, 0, 0)
except OSError:
pass
else:
print "DID NOT RAISE"
raise AssertionError
st = os.stat(filename)
assert st.st_size == r10000000000
does_stuff()
os.unlink(filename)
f1 = compile(does_stuff, [])
f1()
os.unlink(filename)
def test_os_access():
filename = str(py.path.local(__file__))
def call_access(path, mode):
return os.access(path, mode)
f = compile(call_access, [str, int])
for mode in os.R_OK, os.W_OK, os.X_OK, (os.R_OK | os.W_OK | os.X_OK):
assert f(filename, mode) == os.access(filename, mode)
def test_os_stat():
filename = str(py.path.local(__file__))
has_blksize = hasattr(os.stat_result, 'st_blksize')
has_blocks = hasattr(os.stat_result, 'st_blocks')
def call_stat():
st = os.stat(filename)
res = (st[0], st.st_ino, st.st_ctime)
if has_blksize: res += (st.st_blksize,)
if has_blocks: res += (st.st_blocks,)
return str(res)
f = compile(call_stat, [])
res = eval(f())
assert res[0] == os.stat(filename).st_mode
assert res[1] == os.stat(filename).st_ino
st_ctime = res[2]
if isinstance(st_ctime, float):
assert (st_ctime - os.stat(filename).st_ctime) < 0.1
else:
assert st_ctime == int(os.stat(filename).st_ctime)
if has_blksize:
assert res[3] == os.stat(filename).st_blksize
if has_blocks:
assert res[4] == os.stat(filename).st_blocks
def test_os_stat_raises_winerror():
if sys.platform != 'win32':
py.test.skip("no WindowsError on this platform")
def call_stat():
try:
os.stat("nonexistentdir/nonexistentfile")
except WindowsError, e:
return e.winerror
return 0
f = compile(call_stat, [])
res = f()
expected = call_stat()
assert res == expected
def test_os_fstat():
if os.environ.get('PYPY_CC', '').startswith('tcc'):
py.test.skip("segfault with tcc :-(")
filename = str(py.path.local(__file__))
def call_fstat():
fd = os.open(filename, os.O_RDONLY, 0777)
st = os.fstat(fd)
os.close(fd)
return str((st.st_mode, st[1], st.st_mtime))
f = compile(call_fstat, [])
osstat = os.stat(filename)
st_mode, st_ino, st_mtime = eval(f())
assert st_mode == osstat.st_mode
if sys.platform != 'win32':
assert st_ino == osstat.st_ino
if isinstance(st_mtime, float):
assert (st_mtime - osstat.st_mtime) < 0.1
else:
assert st_mtime == int(osstat.st_mtime)
def test_os_isatty():
def call_isatty(fd):
return os.isatty(fd)
f = compile(call_isatty, [int])
assert f(0) == os.isatty(0)
assert f(1) == os.isatty(1)
assert f(2) == os.isatty(2)
def test_getcwd():
def does_stuff():
return os.getcwd()
f1 = compile(does_stuff, [])
res = f1()
assert res == os.getcwd()
def test_system():
def does_stuff(cmd):
return os.system(cmd)
f1 = compile(does_stuff, [str])
res = f1("echo hello")
assert res == 0
def test_os_path_exists():
tmpfile = str(udir.join('test_os_path_exists.TMP'))
def fn():
return os.path.exists(tmpfile)
f = compile(fn, [])
open(tmpfile, 'w').close()
assert f() == True
os.unlink(tmpfile)
assert f() == False
def test_os_path_isdir():
directory = "./."
def fn():
return os.path.isdir(directory)
f = compile(fn, [])
assert f() == True
directory = "some/random/name"
def fn():
return os.path.isdir(directory)
f = compile(fn, [])
assert f() == False
def test_time_time():
import time
def fn():
return time.time()
f = compile(fn, [])
t0 = time.time()
res = fn()
t1 = time.time()
assert t0 <= res <= t1
def test_formatd():
from rpython.rlib.rfloat import formatd
def fn(x):
return formatd(x, 'f', 2, 0)
f = compile(fn, [float])
assert f(0.0) == "0.00"
assert f(1.5) == "1.50"
assert f(2.0) == "2.00"
def test_float_to_str():
def fn(f):
return str(f)
f = compile(fn, [float])
res = f(1.5)
assert eval(res) == 1.5
def test_os_unlink():
tmpfile = str(udir.join('test_os_path_exists.TMP'))
def fn():
os.unlink(tmpfile)
f = compile(fn, [])
open(tmpfile, 'w').close()
fn()
assert not os.path.exists(tmpfile)
def test_chdir():
def does_stuff(path):
os.chdir(path)
return os.getcwd()
f1 = compile(does_stuff, [str])
if os.name == 'nt':
assert f1(os.environ['TEMP']) == os.path.realpath(os.environ['TEMP'])
else:
assert f1('/tmp') == os.path.realpath('/tmp')
def test_mkdir_rmdir():
def does_stuff(path, delete):
if delete:
os.rmdir(path)
else:
os.mkdir(path, 0777)
f1 = compile(does_stuff, [str, bool])
dirname = str(udir.join('test_mkdir_rmdir'))
f1(dirname, False)
assert os.path.exists(dirname) and os.path.isdir(dirname)
f1(dirname, True)
assert not os.path.exists(dirname)
def test_strerror():
def does_stuff(n):
return os.strerror(n)
f1 = compile(does_stuff, [int])
for i in range(4):
res = f1(i)
assert res == os.strerror(i)
def test_pipe_dup_dup2():
def does_stuff():
a, b = os.pipe()
c = os.dup(a)
d = os.dup(b)
assert a != b
assert a != c
assert a != d
assert b != c
assert b != d
assert c != d
os.close(c)
os.dup2(d, c)
e, f = os.pipe()
assert e != a
assert e != b
assert e != c
assert e != d
assert f != a
assert f != b
assert f != c
assert f != d
assert f != e
os.close(a)
os.close(b)
os.close(c)
os.close(d)
os.close(e)
os.close(f)
return 42
f1 = compile(does_stuff, [])
res = f1()
assert res == 42
def test_os_chmod():
tmpfile = str(udir.join('test_os_chmod.txt'))
f = open(tmpfile, 'w')
f.close()
# use a witness for the permissions we should expect -
# on Windows it is not possible to change all the bits with chmod()
tmpfile2 = str(udir.join('test_os_chmod_witness.txt'))
f = open(tmpfile2, 'w')
f.close()
def does_stuff(mode):
os.chmod(tmpfile, mode)
f1 = compile(does_stuff, [int])
f1(0000)
os.chmod(tmpfile2, 0000)
assert os.stat(tmpfile).st_mode & 0777 == os.stat(tmpfile2).st_mode & 0777
f1(0644)
os.chmod(tmpfile2, 0644)
assert os.stat(tmpfile).st_mode & 0777 == os.stat(tmpfile2).st_mode & 0777
if hasattr(os, 'fchmod'):
def test_os_fchmod():
tmpfile1 = str(udir.join('test_os_fchmod.txt'))
def does_stuff():
fd = os.open(tmpfile1, os.O_WRONLY | os.O_CREAT, 0777)
os.fchmod(fd, 0200)
os.close(fd)
f1 = compile(does_stuff, [])
f1()
assert os.stat(tmpfile1).st_mode & 0777 == 0200
def test_os_rename():
tmpfile1 = str(udir.join('test_os_rename_1.txt'))
tmpfile2 = str(udir.join('test_os_rename_2.txt'))
f = open(tmpfile1, 'w')
f.close()
def does_stuff():
os.rename(tmpfile1, tmpfile2)
f1 = compile(does_stuff, [])
f1()
assert os.path.exists(tmpfile2)
assert not os.path.exists(tmpfile1)
if hasattr(os, 'mkfifo'):
def test_os_mkfifo():
tmpfile = str(udir.join('test_os_mkfifo.txt'))
def does_stuff():
os.mkfifo(tmpfile, 0666)
f1 = compile(does_stuff, [])
f1()
import stat
st = os.lstat(tmpfile)
assert stat.S_ISFIFO(st.st_mode)
if hasattr(os, 'mknod'):
def test_os_mknod():
import stat
tmpfile = str(udir.join('test_os_mknod.txt'))
def does_stuff():
os.mknod(tmpfile, 0600 | stat.S_IFIFO, 0)
f1 = compile(does_stuff, [])
f1()
st = os.lstat(tmpfile)
assert stat.S_ISFIFO(st.st_mode)
def test_os_umask():
def does_stuff():
mask1 = os.umask(0660)
mask2 = os.umask(mask1)
return mask2
f1 = compile(does_stuff, [])
res = f1()
assert res == does_stuff()
if hasattr(os, 'getpid'):
def test_os_getpid():
def does_stuff():
return os.getpid()
f1 = compile(does_stuff, [])
res = f1()
assert res != os.getpid()
if hasattr(os, 'getpgrp'):
def test_os_getpgrp():
def does_stuff():
return os.getpgrp()
f1 = compile(does_stuff, [])
res = f1()
assert res == os.getpgrp()
if hasattr(os, 'setpgrp'):
def test_os_setpgrp():
def does_stuff():
return os.setpgrp()
f1 = compile(does_stuff, [])
res = f1()
assert res == os.setpgrp()
if hasattr(os, 'link'):
def test_links():
import stat
tmpfile1 = str(udir.join('test_links_1.txt'))
tmpfile2 = str(udir.join('test_links_2.txt'))
tmpfile3 = str(udir.join('test_links_3.txt'))
f = open(tmpfile1, 'w')
f.close()
def does_stuff():
os.symlink(tmpfile1, tmpfile2)
os.link(tmpfile1, tmpfile3)
assert os.readlink(tmpfile2) == tmpfile1
flag= 0
st = os.lstat(tmpfile1)
flag = flag*10 + stat.S_ISREG(st[0])
flag = flag*10 + stat.S_ISLNK(st[0])
st = os.lstat(tmpfile2)
flag = flag*10 + stat.S_ISREG(st[0])
flag = flag*10 + stat.S_ISLNK(st[0])
st = os.lstat(tmpfile3)
flag = flag*10 + stat.S_ISREG(st[0])
flag = flag*10 + stat.S_ISLNK(st[0])
return flag
f1 = compile(does_stuff, [])
res = f1()
assert res == 100110
assert os.path.islink(tmpfile2)
assert not os.path.islink(tmpfile3)
if hasattr(os, 'fork'):
def test_fork():
def does_stuff():
pid = os.fork()
if pid == 0: # child
os._exit(4)
pid1, status1 = os.waitpid(pid, 0)
assert pid1 == pid
return status1
f1 = compile(does_stuff, [])
status1 = f1()
assert os.WIFEXITED(status1)
assert os.WEXITSTATUS(status1) == 4
if hasattr(os, 'kill'):
def test_kill():
import signal
def does_stuff():
pid = os.fork()
if pid == 0: # child
time.sleep(5)
os._exit(4)
os.kill(pid, signal.SIGTERM) # in the parent
pid1, status1 = os.waitpid(pid, 0)
assert pid1 == pid
return status1
f1 = compile(does_stuff, [])
status1 = f1()
assert os.WIFSIGNALED(status1)
assert os.WTERMSIG(status1) == signal.SIGTERM
elif hasattr(os, 'waitpid'):
# windows has no fork but some waitpid to be emulated
def test_waitpid():
prog = str(sys.executable)
def does_stuff():
args = [prog]
# args = [prog, '-c', '"import os;os._exit(4)"']
# note that the above variant creates a bad array
args.append('-c')
args.append('"import os;os._exit(4)"')
pid = os.spawnv(os.P_NOWAIT, prog, args)
#if pid == 0: # child
# os._exit(4)
pid1, status1 = os.waitpid(pid, 0)
assert pid1 == pid
return status1
f1 = compile(does_stuff, [])
status1 = f1()
# for what reason do they want us to shift by 8? See the doc
assert status1 >> 8 == 4
if hasattr(os, 'kill'):
def test_kill_to_send_sigusr1():
import signal
from rpython.rlib import rsignal
if not 'SIGUSR1' in dir(signal):
py.test.skip("no SIGUSR1 available")
def does_stuff():
rsignal.pypysig_setflag(signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGUSR1)
rsignal.pypysig_ignore(signal.SIGUSR1)
while True:
n = rsignal.pypysig_poll()
if n < 0 or n == signal.SIGUSR1:
break
return n
f1 = compile(does_stuff, [])
got_signal = f1()
assert got_signal == signal.SIGUSR1
if hasattr(os, 'killpg'):
def test_killpg():
import signal
from rpython.rlib import rsignal
def does_stuff():
os.setpgid(0, 0) # become its own separated process group
rsignal.pypysig_setflag(signal.SIGUSR1)
os.killpg(os.getpgrp(), signal.SIGUSR1)
rsignal.pypysig_ignore(signal.SIGUSR1)
while True:
n = rsignal.pypysig_poll()
if n < 0 or n == signal.SIGUSR1:
break
return n
f1 = compile(does_stuff, [])
got_signal = f1()
assert got_signal == signal.SIGUSR1
if hasattr(os, 'chown') and hasattr(os, 'lchown'):
def test_os_chown_lchown():
path1 = udir.join('test_os_chown_lchown-1.txt')
path2 = udir.join('test_os_chown_lchown-2.txt')
path1.write('foobar')
path2.mksymlinkto('some-broken-symlink')
tmpfile1 = str(path1)
tmpfile2 = str(path2)
def does_stuff():
# xxx not really a test, just checks that they are callable
os.chown(tmpfile1, os.getuid(), os.getgid())
os.lchown(tmpfile1, os.getuid(), os.getgid())
os.lchown(tmpfile2, os.getuid(), os.getgid())
try:
os.chown(tmpfile2, os.getuid(), os.getgid())
except OSError:
pass
else:
raise AssertionError("os.chown(broken symlink) should raise")
f1 = compile(does_stuff, [])
f1()
if hasattr(os, 'fchown'):
def test_os_fchown():
path1 = udir.join('test_os_fchown.txt')
tmpfile1 = str(path1)
def does_stuff():
# xxx not really a test, just checks that it is callable
fd = os.open(tmpfile1, os.O_WRONLY | os.O_CREAT, 0777)
os.fchown(fd, os.getuid(), os.getgid())
os.close(fd)
f1 = compile(does_stuff, [])
f1()
if hasattr(os, 'getlogin'):
def test_os_getlogin():
def does_stuff():
return os.getlogin()
try:
expected = os.getlogin()
except OSError, e:
py.test.skip("the underlying os.getlogin() failed: %s" % e)
f1 = compile(does_stuff, [])
assert f1() == expected
# ____________________________________________________________
def _real_getenv(var):
cmd = '''%s -c "import os; x=os.environ.get('%s'); print (x is None) and 'F' or ('T'+x)"''' % (
sys.executable, var)
g = os.popen(cmd, 'r')
output = g.read().strip()
g.close()
if output == 'F':
return None
elif output.startswith('T'):
return output[1:]
else:
raise ValueError('probing for env var returned %r' % (output,))
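# Note: _real_getenv() asks a freshly spawned CPython (via os.popen) for the
# variable, so it reports what child processes really inherit from the OS
# environment rather than what this process's os.environ mapping contains.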
def test_dictlike_environ_getitem():
def fn(s):
try:
return os.environ[s]
except KeyError:
return '--missing--'
func = compile(fn, [str])
os.environ.setdefault('USER', 'UNNAMED_USER')
result = func('USER')
assert result == os.environ['USER']
result = func('PYPY_TEST_DICTLIKE_MISSING')
assert result == '--missing--'
def test_dictlike_environ_get():
def fn(s):
res = os.environ.get(s)
if res is None: res = '--missing--'
return res
func = compile(fn, [str])
os.environ.setdefault('USER', 'UNNAMED_USER')
result = func('USER')
assert result == os.environ['USER']
result = func('PYPY_TEST_DICTLIKE_MISSING')
assert result == '--missing--'
def test_dictlike_environ_setitem():
def fn(s, t1, t2, t3, t4, t5):
os.environ[s] = t1
os.environ[s] = t2
os.environ[s] = t3
os.environ[s] = t4
os.environ[s] = t5
return os.environ[s]
func = compile(fn, [str] * 6)
r = func('PYPY_TEST_DICTLIKE_ENVIRON', 'a', 'b', 'c', 'FOOBAR', '42')
assert r == '42'
def test_dictlike_environ_delitem():
def fn(s1, s2, s3, s4, s5):
for n in range(10):
os.environ[s1] = 't1'
os.environ[s2] = 't2'
os.environ[s3] = 't3'
os.environ[s4] = 't4'
os.environ[s5] = 't5'
del os.environ[s3]
del os.environ[s1]
del os.environ[s2]
del os.environ[s4]
try:
del os.environ[s2]
except KeyError:
pass
else:
raise Exception("should have raised!")
# os.environ[s5] stays
func = compile(fn, [str] * 5)
func('PYPY_TEST_DICTLIKE_ENVDEL1',
'PYPY_TEST_DICTLIKE_ENVDEL_X',
'PYPY_TEST_DICTLIKE_ENVDELFOO',
'PYPY_TEST_DICTLIKE_ENVDELBAR',
'PYPY_TEST_DICTLIKE_ENVDEL5')
def test_dictlike_environ_keys():
def fn():
return '\x00'.join(os.environ.keys())
func = compile(fn, [])
os.environ.setdefault('USER', 'UNNAMED_USER')
try:
del os.environ['PYPY_TEST_DICTLIKE_ENVKEYS']
except:
pass
result1 = func().split('\x00')
os.environ['PYPY_TEST_DICTLIKE_ENVKEYS'] = '42'
result2 = func().split('\x00')
assert 'USER' in result1
assert 'PYPY_TEST_DICTLIKE_ENVKEYS' not in result1
assert 'USER' in result2
assert 'PYPY_TEST_DICTLIKE_ENVKEYS' in result2
def test_dictlike_environ_items():
def fn():
result = []
for key, value in os.environ.items():
result.append('%s/%s' % (key, value))
return '\x00'.join(result)
func = compile(fn, [])
os.environ.setdefault('USER', 'UNNAMED_USER')
result1 = func().split('\x00')
os.environ['PYPY_TEST_DICTLIKE_ENVITEMS'] = '783'
result2 = func().split('\x00')
assert ('USER/%s' % (os.environ['USER'],)) in result1
assert 'PYPY_TEST_DICTLIKE_ENVITEMS/783' not in result1
assert ('USER/%s' % (os.environ['USER'],)) in result2
assert 'PYPY_TEST_DICTLIKE_ENVITEMS/783' in result2
def test_listdir():
def mylistdir(s):
try:
os.listdir('this/directory/really/cannot/exist')
except OSError:
pass
else:
raise AssertionError("should have failed!")
result = os.listdir(s)
return '/'.join(result)
func = compile(mylistdir, [str])
for testdir in [str(udir), os.curdir]:
result = func(testdir)
result = result.split('/')
result.sort()
compared_with = os.listdir(testdir)
compared_with.sort()
assert result == compared_with
if hasattr(posix, 'execv') and hasattr(posix, 'fork'):
def test_execv():
progname = str(sys.executable)
filename = str(udir.join('test_execv.txt'))
def does_stuff():
l = [progname, '-c', 'open(%r,"w").write("1")' % filename]
pid = os.fork()
if pid == 0:
os.execv(progname, l)
else:
os.waitpid(pid, 0)
func = compile(does_stuff, [], backendopt=False)
func()
assert open(filename).read() == "1"
def test_execv_raising():
def does_stuff():
try:
l = []
l.append("asddsadw32eewdfwqdqwdqwd")
os.execv(l[0], l)
return 1
except OSError:
return -2
func = compile(does_stuff, [])
assert func() == -2
def test_execve():
filename = str(udir.join('test_execve.txt'))
progname = sys.executable
def does_stuff():
l = []
l.append(progname)
l.append("-c")
l.append('import os; open(%r, "w").write(os.environ["STH"])' % filename)
env = {}
env["STH"] = "42"
env["sthelse"] = "a"
pid = os.fork()
if pid == 0:
os.execve(progname, l, env)
else:
os.waitpid(pid, 0)
func = compile(does_stuff, [])
func()
assert open(filename).read() == "42"
if hasattr(posix, 'spawnv'):
def test_spawnv():
filename = str(udir.join('test_spawnv.txt'))
progname = str(sys.executable)
scriptpath = udir.join('test_spawnv.py')
scriptpath.write('f=open(%r,"w")\nf.write("2")\nf.close\n' % filename)
scriptname = str(scriptpath)
def does_stuff():
# argument quoting on Windows is completely ill-defined.
# don't let yourself be fooled by the idea that if os.spawnv()
# takes a list of strings, then the receiving program will
# nicely see these strings as arguments with no further quote
# processing. Achieving this is nearly impossible - even
# CPython doesn't try at all.
l = [progname, scriptname]
pid = os.spawnv(os.P_NOWAIT, progname, l)
os.waitpid(pid, 0)
func = compile(does_stuff, [])
func()
assert open(filename).read() == "2"
if hasattr(posix, 'spawnve'):
def test_spawnve():
filename = str(udir.join('test_spawnve.txt'))
progname = str(sys.executable)
scriptpath = udir.join('test_spawnve.py')
scriptpath.write('import os\n' +
'f=open(%r,"w")\n' % filename +
'f.write(os.environ["FOOBAR"])\n' +
'f.close\n')
scriptname = str(scriptpath)
def does_stuff():
l = [progname, scriptname]
pid = os.spawnve(os.P_NOWAIT, progname, l, {'FOOBAR': '42'})
os.waitpid(pid, 0)
func = compile(does_stuff, [])
func()
assert open(filename).read() == "42"
def test_utime():
path = str(udir.ensure("test_utime.txt"))
from time import time, sleep
t0 = time()
sleep(1)
def does_stuff(flag):
if flag:
os.utime(path, None)
else:
os.utime(path, (int(t0), int(t0)))
func = compile(does_stuff, [int])
func(1)
assert os.stat(path).st_atime > t0
func(0)
assert int(os.stat(path).st_atime) == int(t0)
if hasattr(os, 'uname'):
def test_os_uname():
def does_stuff(num):
tup = os.uname()
lst = [tup[0], tup[1], tup[2], tup[3], tup[4]]
return lst[num]
func = compile(does_stuff, [int])
for i in range(5):
res = func(i)
assert res == os.uname()[i]
if hasattr(os, 'getloadavg'):
def test_os_getloadavg():
def does_stuff():
a, b, c = os.getloadavg()
print a, b, c
return a + b + c
f = compile(does_stuff, [])
res = f()
assert type(res) is float and res >= 0.0
if hasattr(os, 'major'):
def test_os_major_minor():
def does_stuff(n):
a = os.major(n)
b = os.minor(n)
x = os.makedev(a, b)
return '%d,%d,%d' % (a, b, x)
f = compile(does_stuff, [int])
res = f(12345)
assert res == '%d,%d,12345' % (os.major(12345), os.minor(12345))
if hasattr(os, 'fchdir'):
def test_os_fchdir():
def does_stuff():
fd = os.open('/', os.O_RDONLY, 0400)
try:
os.fchdir(fd)
s = os.getcwd()
finally:
os.close(fd)
return s == '/'
f = compile(does_stuff, [])
localdir = os.getcwd()
try:
res = f()
finally:
os.chdir(localdir)
assert res == True
# ____________________________________________________________
class TestExtFuncStandalone(StandaloneTests):
if hasattr(os, 'nice'):
def test_os_nice(self):
def does_stuff(argv):
res = os.nice(3)
print 'os.nice returned', res
return 0
t, cbuilder = self.compile(does_stuff)
data = cbuilder.cmdexec('')
res = os.nice(0) + 3
if res > 19: res = 19 # xxx Linux specific, probably
assert data.startswith('os.nice returned %d\n' % res)
| mit | -3,668,364,111,266,793,500 | 29.876221 | 99 | 0.533917 | false |
czayas/agenda | repl.py | 1 | 3955 | #!/usr/bin/env python3
"""
Módulo repl: Interfaz de usuario en modo consola (vista).
Proyecto de ejemplo - Paradigmas de la Programación
Autor: Carlos Zayas (czayas en gmail)
"""
import sys
from traceback import format_exc
from collections.abc import Iterable
try:
    # The readline module adds autocompletion and history to input().
from readline import set_completer
from readline import parse_and_bind
except ImportError:
    # The readline module is not available on Windows.
pass
def out(cadena="", final="\n"):
    """Send a string to stdout and flush the buffer (prints faster)."""
sys.stdout.write(str(cadena) + final)
sys.stdout.flush()
def strip(cadena):
    """Return the string with surrounding spaces stripped from every line."""
return "\n".join(linea.strip()
for linea in cadena.split("\n") if linea).strip()
def esiterable(objeto):
    """Return True if the object is iterable but is not a string."""
return isinstance(objeto, Iterable) and not isinstance(objeto, str)
def iterable(objeto):
    """Return an iterator over the object (a string must not be iterable)."""
return iter([objeto]) if not esiterable(objeto) else objeto
def salir(estado=0):
    """Terminate execution of the application."""
out()
sys.exit(estado)
class Completador:
    """Completer for the readline module."""
    def __init__(self, opciones):
        """Tab-key autocompletion."""
self.opciones = sorted(opciones)
self.o = self.opciones[:]  # Copy of self.opciones
def completar(self, texto, estado):
"""Event handler para completer de readline."""
if estado == 0:
if texto:
self.o = [o for o in self.opciones
if o and o.startswith(texto)]
else:
self.o = self.opciones[:]
return None if estado >= len(self.o) else self.o[estado] + " "
class REPL:
"""Ciclo de Lectura, Evaluación e Impresión (Read, Eval, Print, Loop)."""
def __init__(self, comandos, introduccion="¡Bienvenido!", indicador="> "):
"""
Constructor: Initialize instance properties and the completer.
comandos -- Dictionary of functions to execute (dict)
introduccion -- Introductory text (str)
indicador -- Prompt string (str)
"""
self.comandos = comandos
self.introduccion = introduccion
self.indicador = indicador
try:
# Assign the autocompletion method for the readline module.
set_completer(Completador(comandos.keys()).completar)
parse_and_bind('tab:complete')
except NameError:
# The readline module is not available on Windows.
pass
def ciclo(self):
"""Ejecuta el ciclo REPL."""
out(self.introduccion)
while True:
try:
comando, *parametros = input(self.indicador).split()
salida = self.comandos[comando](*parametros)
if salida:
for linea in iterable(salida):
out(linea)
except ValueError:
pass
except (KeyboardInterrupt, EOFError):
salir()
except KeyError:
out("{}: Comando desconocido.".format(comando))
except TypeError:
out(strip(self.comandos[comando].__doc__))
except Exception as excepcion:
out("Error inesperado:\n" +
str(type(excepcion)) + str(excepcion) + "\n" +
format_exc().strip())
def main():
"""Función principal (ejemplo de uso)."""
def hola():
return "Hola, Mundo."
comandos = {"eval": eval,
"hola": hola,
"quit": quit}
REPL(comandos).ciclo()
if __name__ == "__main__":
main()
| mit | -375,912,725,944,871,900 | 30.214286 | 78 | 0.586575 | false |
DedMemez/ODS-August-2017 | catalog/CatalogBasketItem.py | 1 | 3029 | # Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.catalog.CatalogBasketItem
from panda3d.core import Datagram
import CatalogItem
from toontown.estate import GardenGlobals
from toontown.toonbase import ToontownGlobals
from direct.actor import Actor
from toontown.toonbase import TTLocalizer
from direct.interval.IntervalGlobal import *
class CatalogBasketItem(CatalogItem.CatalogItem):
sequenceNumber = 0
def makeNewItem(self, maxBasket):
self.maxBasket = maxBasket
CatalogItem.CatalogItem.makeNewItem(self)
def getPurchaseLimit(self):
return 1
def reachedPurchaseLimit(self, avatar):
return avatar.getMaxFlowerBasket() >= self.maxBasket or self in avatar.onOrder or self in avatar.mailboxContents
def saveHistory(self):
return 1
def getTypeName(self):
return TTLocalizer.BasketTypeName
def getName(self):
return TTLocalizer.FlowerBasket % TTLocalizer.FlowerBasketNameDict[self.maxBasket]
def recordPurchase(self, avatar, optional):
if self.maxBasket <= avatar.getMaxFlowerBasket():
return ToontownGlobals.P_ItemUnneeded
avatar.b_setMaxFlowerBasket(self.maxBasket)
return ToontownGlobals.P_ItemAvailable
def isGift(self):
return 0
def getDeliveryTime(self):
return 1
def getPicture(self, avatar):
basket = loader.loadModel('phase_5.5/models/estate/flowerBasket')
basket.setScale(2.3)
basket.setPos(0, 0, 0.12)
frame = self.makeFrame()
basket.reparentTo(frame)
return (frame, None)
def getAcceptItemErrorText(self, retcode):
if retcode == ToontownGlobals.P_ItemAvailable:
return TTLocalizer.CatalogAcceptBasket
if retcode == ToontownGlobals.P_ItemUnneeded:
return TTLocalizer.CatalogAcceptBasketUnneeded
return CatalogItem.CatalogItem.getAcceptItemErrorText(self, retcode)
def output(self, store = -1):
return 'CatalogBasketItem(%s%s)' % (self.maxBasket, self.formatOptionalData(store))
def compareTo(self, other):
return self.maxBasket - other.maxBasket
def getHashContents(self):
return self.maxBasket
def getBasePrice(self):
return GardenGlobals.BasketPriceDict[self.maxBasket]
def decodeDatagram(self, di, versionNumber, store):
CatalogItem.CatalogItem.decodeDatagram(self, di, versionNumber, store)
self.maxBasket = di.getUint8()
def encodeDatagram(self, dg, store):
CatalogItem.CatalogItem.encodeDatagram(self, dg, store)
dg.addUint8(self.maxBasket)
def nextAvailableBasket(avatar, duplicateItems):
basket = avatar.getMaxFlowerBasket()
if basket in GardenGlobals.NextBasket:
return CatalogBasketItem(GardenGlobals.NextBasket[basket])
def getAllBaskets():
return [ CatalogBasketItem(basket) for basket in GardenGlobals.NextBasket.values() ] | apache-2.0 | -6,348,996,229,263,607,000 | 32.83908 | 120 | 0.698911 | false |
ibm-research/SwiftHLM | swifthlm/dummy_connector.py | 1 | 11621 | #!/usr/bin/python
# (C) Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements SwiftHLM Dummy Backend Connector, a reference
implementation reusable for implementing a SwiftHLM Backend Connector for your
own HLM storage backend, which is considered backend-specific and external for
SwiftHLM. Any SwiftHLM Backend Connector implementation must implement
SwiftHlmBackendConnector class and its public method for SwiftHLM Generic
Backend API used between SwiftHLM and SwiftHLM Connector.
*** SwiftHLM Generic Backend API version 0.2.1 ***
API versions with a 3rd digit different from 0, such as 0.2.1, should be
considered developmental and not stable.
response = SwiftHlmBackendConnector.submit_request_get_response(request)
request =
{
request : status,
objects :
[
{ object : /a/c/obj1, file : /srv/node/filepath1 },
{ object : /a/c/obj2, file : /srv/node/filepath2 }
]
}
response =
{
objects :
[
{object : /a/c/obj1, file : /srv/node/filepath1, status : migrated,},
{object : /a/c/obj2, file : /srv/node/filepath2, status : resident},
{object : /a/c/obj3, file : /srv/node/filepath3, status : premigrated},
{object : /a/c/obj4, file : /srv/node/filepath4, status : unknown}
]
}
The data structures used are dictionary and list; the values are strings,
shown above unquoted and additionally indented for easier reading.
In addition to 'status', the other requests are 'migrate' and 'recall', for
which the response is an integer:
0 - success
1 - 1 or more objects could not be migrated/recalled
2 - unable to process request for all objects (e.g. cannot invoke backend)
Internal methods of SwiftHlmBackendConnector are backend specific, and
typically involve reformatting the list of objects and files to be migrated,
submitting the list and the operation to the backend, and receiving the
response from the backend. Typically it is the backend that moves data between
LLM (low latency media) and HLM (high latency media) and changes or reports replica state. For
other types of HLM backend the data move and state management function may be
implemented in the SwiftHLM Backend Connector of that backend.
Authors:
Slavisa Sarafijanovic ([email protected])
"""
from sys import stdin, stdout
from collections import defaultdict
import ConfigParser
from swift.common.utils import readconf
from swift.common.utils import json, get_logger, split_path
import logging
from swift.obj.server import ObjectController
from swift.common.storage_policy import POLICIES
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
DiskFileXattrNotSupported
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HeaderKeyDict, \
HTTPConflict, HTTPServerError
import os
# scor aux
from swift.proxy.controllers.base import get_container_info
# scor aux
from swift.common.utils import hash_path
import sqlite3
# SwiftHLM Backend Connector
class SwiftHlmBackendConnector(object):
def __init__(self):
self.__request_in = {}
self.__request_out = {}
self.__response_in = {}
self.__response_out = {}
# Config
configFile = r'/etc/swift/object-server.conf'
self.conf = readconf(configFile)
# Logging
hlm_stor_node_config = self.conf.get('hlm', None)
if hlm_stor_node_config:
hlm_stor_node_log_level = hlm_stor_node_config.get('set log_level',
None)
if hlm_stor_node_log_level:
self.conf['log_level'] = hlm_stor_node_log_level
self.logger = get_logger(self.conf, name='hlm-connector',
log_route='swifthlm',
fmt="%(server)s: %(msecs)03d "
"[%(filename)s:%(funcName)20s():%(lineno)s] "
"%(message)s")
self.logger.info('info: Initialized Connector')
self.logger.debug('dbg: Initialized Connector')
# self.logger.info('conf: %s', self.conf)
# The next method is invoked by the SwiftHLM Handler using the SwiftHLM Generic
# Backend Interface (GBI) declared above in this file. It adapts the SwiftHLM
# request for an assumed dummy storage backend, mocks invoking the dummy
# backend operations, reformats the backend response to GBI format, and
# returns the response to the SwiftHLM Handler.
def submit_request_get_response(self, request):
self.__receive_request(request)
self.__reformat_swifthlm_request_to_specific_backend_api()
self.__submit_request_to_backend_get_response()
self.__reformat_backend_response_to_generic_backend_api()
return self.__response_out
# This exemplary private method receives the request from SwiftHLM Handler
def __receive_request(self, request):
self.logger.debug('Receiving request from Handler')
self.__request_in = request
# This exemplary private method reformats request to backend API
# Some backends expect as input a file that lists the object data files to
# be migrated or recalled. For this dummy backend connector it just copies
# the incoming request
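    # A file-list-driven backend might instead be handled roughly as follows
    # (illustration only; the paths and 'filelist' key are assumptions and are
    # not used by this dummy connector):
    #
    #   list_path = '/tmp/swifthlm_filelist.txt'
    #   with open(list_path, 'w') as f:
    #       for obj in self.__request_in['objects']:
    #           f.write(obj['file'] + '\n')
    #   self.__request_out = {'request': self.__request_in['request'],
    #                         'filelist': list_path}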
def __reformat_swifthlm_request_to_specific_backend_api(self):
self.logger.debug('Reformatting request to the specific Backend API')
self.logger.debug('request_in: %s', self.__request_in)
# Backend specific part, for the assumed dummy backend just copies the
# incoming request
self.__request_out = self.__request_in
# This exemplary method submits request to Backend and gets Response from
# Backend. The dummy backend stores object state (resident, premigrated,
# or migrated) into a SQL database on file, using the object replica
# filepath as the key and its status as the value.
# The database file is stored under /tmp/swifthlm_dummy_backend.db, which
# upon need could be made configurable.
# TODO: consider making the database file location configurable.
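    # The resulting SQLite table (see SwiftHlmDummyBackendDb below) is simply:
    #   CREATE TABLE status_table (item_path TEXT PRIMARY KEY, status TEXT)
    # holding rows such as ('/srv/node/d1/obj1.data', 'migrated'); the path
    # shown here is only an example.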
def __submit_request_to_backend_get_response(self):
self.logger.debug('Submitting request to backend')
database = '/tmp/swifthlm_dummy_backend.db'
db_backend = SwiftHlmDummyBackendDb(database)
if db_backend is None:
self.logger.debug('failed to connect to db_backend db')
self.logger.debug('before migrate')
# migrate
if self.__request_out['request'] == 'migrate':
for object_file in self.__request_out['objects']:
try:
db_backend.insert(object_file['file'], 'migrated')
except sqlite3.Error as err:
self.logger.debug('error: inserting migrated status \
into database (%s)', err)
db_backend.close()
self.__response_in = 0
return
self.logger.debug('before recall')
# recall
if self.__request_out['request'] == 'recall':
for object_file in self.__request_out['objects']:
db_backend.query(object_file['file'])
if db_backend.status == 'migrated':
try:
db_backend.insert(object_file['file'], 'premigrated')
except sqlite3.Error as err:
self.logger.debug('error: inserting premigrated \
status into database (%s)', err)
db_backend.close()
self.__response_in = 0
return
self.logger.debug('before status')
# status
objects_files_statuses = []
for object_file in self.__request_out['objects']:
object_file_status = {}
object_file_status['object'] = object_file['object']
object_file_status['file'] = object_file['file']
db_backend.query(object_file['file'])
object_file_status['status'] = db_backend.status
objects_files_statuses.append(object_file_status)
db_backend.close()
self.__response_in['objects'] = objects_files_statuses
# self.__response_in = self.__request_out
def __reformat_backend_response_to_generic_backend_api(self):
self.logger.debug('Reformatting response to Generic Backend API')
self.logger.debug('response_in: %s', self.__response_in)
# Backend specific part, for the assumed dummy backend it just copies
# the incoming response from the backend
self.__response_out = self.__response_in
# SwiftHLM Dummy Backend Database
class SwiftHlmDummyBackendDb:
def __init__(self, dbname):
self.connection = None
self.cursor = None
self.database = dbname
self.table = 'status_table'
self.key = 'item_path'
self.value = 'status'
self.status = None # to store queried status
self.connect()
def connect(self):
try:
self.connection = sqlite3.connect(self.database)
self.cursor = self.connection.cursor()
except sqlite3.Error as err:
# No logger is available in this helper class; report the error and re-raise.
print('error: connecting to dummy backend status database (%s)' % err)
raise
self.cursor.execute('CREATE TABLE IF NOT EXISTS {tn} \
({kn} TEXT PRIMARY KEY, {vn} TEXT)'
.format(tn=self.table, kn=self.key, vn=self.value))
# self.connection.commit() # is it needed at this step?
def close(self):
if self.connection:
self.connection.commit()
self.cursor.close()
self.connection.close()
def insert(self, path, status):
c = self.cursor
c.execute('REPLACE INTO {tn} ({kn}, {vn}) VALUES (?, ?)'
.format(tn=self.table, kn=self.key, vn=self.value),
(path, status))
def query(self, path):
c = self.cursor
c.execute('SELECT {vn} FROM {tn} WHERE {kn}=?'
.format(tn=self.table, kn=self.key, vn=self.value),
(path, )) # ..h, )!!
status = c.fetchone()
if status:
self.status = str(status[0])
else:
self.status = 'resident' # TODO: Handling 'unknown' status
if __name__ == '__main__':
# SwiftHlmConnector class is not assumed to be used standalone, instead it
# is imported for a configured backend by SwiftHLM Handler and invoked from
# the Handler. Alternatively it could be modified to be invoked as a new
# process and/or remotely similar to SwiftHLM Dispatcher invoking SwiftHLM
# Handler
raise
| apache-2.0 | 727,087,573,311,736,000 | 40.355872 | 79 | 0.650805 | false |
southpawtech/TACTIC-DEV | src/tactic/ui/app/plugin_wdg.py | 1 | 81627 | ###########################################################
#
# Copyright (c) 2010, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
__all__ = ['PluginWdg', 'PluginEditWdg', 'PluginInstallWdg', 'PluginDownloadCbk', 'PluginDirListActionCbk', 'PluginRemoveCbk', 'PluginDirListWdg']
from pyasm.common import Environment, TacticException, Config, Xml, Common, ZipUtil
from pyasm.command import Command
from pyasm.web import DivWdg, Table, HtmlElement
from pyasm.widget import ButtonWdg, ProdIconButtonWdg, TextWdg, TextAreaWdg, CheckboxWdg, IconWdg, SelectWdg
from pyasm.search import Search, SearchType
from pyasm.biz import File
import os, codecs
import zipfile, shutil
from tactic.ui.common import BaseRefreshWdg
from tactic.ui.container import ResizableTableWdg
from tactic.ui.widget import ActionButtonWdg, DirListWdg
from tactic.ui.input import TextInputWdg
from tactic.ui.container import Menu, MenuItem, SmartMenu
from tactic.ui.widget import ButtonRowWdg, ButtonNewWdg
class PluginWdg(BaseRefreshWdg):
def get_display(my):
div = DivWdg()
div.add_class("spt_plugin_top")
my.set_as_panel(div)
div.add_color("background", "background")
inner = DivWdg()
div.add(inner)
# add the main layout
#table = ResizableTableWdg()
table = Table()
table.add_color("color", "color")
inner.add(table)
table.add_style("margin: -1")
table.add_row()
left = table.add_cell()
left.add_style("vertical-align: top")
left.add_style("min-width: 275px")
left.add_style("height: 400px")
left.add_style("padding: 0px")
left.add_color("background", "background3")
left.add_border()
plugin_dir = Environment.get_plugin_dir()
plugin_wdg = my.get_plugins_wdg("Plugin", plugin_dir)
left.add(plugin_wdg)
builtin_plugin_dir = Environment.get_builtin_plugin_dir()
plugin_wdg = my.get_plugins_wdg("Built-in Plugin", builtin_plugin_dir, is_editable=False)
if plugin_wdg:
left.add(plugin_wdg)
#left.add("<br/>")
#template_dir = Environment.get_template_dir()
#left.add(my.get_plugins_wdg("Template", template_dir) )
right = table.add_cell()
right.add_style("vertical-align: top")
right.add_style("min-width: 400px")
right.add_style("width: 100%")
right.add_style("height: 400px")
right.add_style("padding: 5px")
right.add_border()
plugin_dir = my.kwargs.get("plugin_dir")
edit = PluginEditWdg(plugin_dir=plugin_dir)
right.add(edit)
if my.kwargs.get("is_refresh"):
return inner
else:
return div
def get_plugins_wdg(my, title, plugin_dir, is_editable=True):
div = DivWdg()
# use the file system
if not os.path.exists(plugin_dir):
os.makedirs(plugin_dir)
dirnames = os.listdir(plugin_dir)
dirnames.sort()
dirnames.reverse()
plugin_dirnames = []
for dirname in dirnames:
if dirname.endswith(".zip"):
continue
if dirname.endswith(".enc"):
continue
if not os.path.isdir("%s/%s" % (plugin_dir, dirname)):
continue
#if not os.path.exists("%s/%s/manifest.xml" % (plugin_dir, dirname)):
# continue
plugin_dirnames.append(dirname)
# get all of the active plugins in this project
search_type = 'config/plugin'
search = Search(search_type)
active_plugins = search.get_sobjects()
active_codes = [x.get_code() for x in active_plugins]
active_versions = [x.get_value("version") for x in active_plugins]
active_map = {}
for x in active_plugins:
active_map[x.get_code()] = x.get_value("version")
title_div = DivWdg()
div.add(title_div)
title_div.add("%s List" % title)
title_div.add_style("font-size: 14px")
title_div.add_style("font-weight: bold")
title_div.add_gradient("background", "background", 0, -10)
title_div.add_style("padding: 10px 5px 10px 5px")
title_div.add_style("margin-bottom: 15px")
button_row = ButtonRowWdg()
title_div.add(button_row)
button_row.add_style("float: right")
button_row.add_style("margin-top: -8px")
if is_editable:
new_button = ButtonNewWdg(title="Create a New Plugin", icon=IconWdg.NEW)
button_row.add(new_button)
new_button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_top");
var edit = top.getElement(".spt_plugin_edit");
var class_name = "tactic.ui.app.PluginEditWdg";
var kwargs = {
mode: 'insert'
};
spt.panel.load(edit, class_name, kwargs);
'''
} )
add_button = ButtonNewWdg(title="Install a Plugin", icon=IconWdg.ADD)
button_row.add(add_button)
add_button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_top");
var edit = top.getElement(".spt_plugin_edit");
var class_name = "tactic.ui.app.PluginInstallWdg";
var kwargs = {
search_key: bvr.search_key
};
spt.panel.load(edit, class_name, kwargs);
'''
} )
# add in a context menu
menu = my.get_context_menu()
menus = [menu.get_data()]
menus_in = {
'PLUGIN_CTX': menus,
}
SmartMenu.attach_smart_context_menu( div, menus_in, False )
help_button = ButtonNewWdg(title="Show Plugin Manager Help", icon=IconWdg.HELP)
button_row.add(help_button)
help_button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.help.set_top();
spt.help.load_alias("plugin-manager-interface|tactic-developer_developer_plugin-manager-interface");
'''
} )
base_dir = plugin_dir
last_title = ""
last_folder = None
folder_wdgs = {}
folder_wdgs['/'] = div
folder_states = {}
content_div = DivWdg()
div.add_widget(content_div, "content")
content_div.add_style("margin-left: 3px")
content_div.add_style("margin-bottom: 10px")
plugin_dirnames = my.get_plugin_list(base_dir)
if not plugin_dirnames:
content_div.add("No plugins installed")
content_div.add_style("padding: 5px 5px 5px 10px")
content_div.add_style("font-style: italic")
show_active_only = my.kwargs.get("show_active_only")
if show_active_only in [True, 'true']:
show_active_only = True
else:
show_active_only = False
for dirname in plugin_dirnames:
parts = dirname.split("/")
folder = parts[:-1]
folder = "/".join(folder)
if not folder:
folder = "/"
folder_wdg = folder_wdgs.get(folder)
if folder_wdg:
folder_content = folder_wdg.get_widget("content")
else:
parts = folder.split("/")
# need to find the leaf folder, creating on the way, if
# necessary
parent_wdg = folder_wdgs.get("/")
for i in range(1, len(parts)+1):
# find the folder, if it exists
folder = "/".join(parts[0:i])
folder_wdg = folder_wdgs.get(folder)
if folder_wdg:
parent_wdg = folder_wdg
continue
title = parts[i-1]
# else create a new one
folder_wdg = DivWdg()
if i != 1:
folder_wdg.add_style("padding-left: 13px")
# add it to the parent and remember this as the last parent
parent_wdg.get_widget("content").add(folder_wdg)
parent_wdg = folder_wdg
# add it to the list
folder_wdgs[folder] = folder_wdg
# remember it as the parent
parent_wdg = folder_wdg
# fill it in
icon = IconWdg(folder, IconWdg.FOLDER, inline=False)
icon.add_style("margin-top: -2px")
icon.add_style("margin-left: -2px")
folder_header = DivWdg()
folder_content = DivWdg()
folder_content.add_style("margin-left: 13px")
from tactic.ui.widget import SwapDisplayWdg
swap = SwapDisplayWdg()
folder_wdg.add(swap)
swap.set_title_wdg(folder_header)
folder_wdg.add_widget(folder_content, "content")
swap.add_class("spt_folder")
swap.add_attr("spt_folder", folder)
if folder_states.get(folder) == "open":
is_on = True
else:
is_on = False
swap.set_on(is_on)
if not is_on:
folder_content.add_style("display: none")
unique_id = folder_content.set_unique_id("content")
swap.set_content_id(unique_id)
folder_header.add(icon)
folder_header.add(title)
folder_header.add_style("margin-top: 3px")
folder_header.add_style("margin-bottom: 3px")
if folder == "-- no folder --":
folder_header.add_style("opacity: 0.5")
folder_header.add_style("font-style: italic")
else:
SmartMenu.assign_as_local_activator( folder_header, 'DIR_LAYOUT_CTX' )
folder_header.add_attr("spt_folder", folder)
# find the manifest file
plugin_dir = "%s/%s" % (base_dir, dirname)
manifest_path = "%s/manifest.xml" % (plugin_dir)
if not os.path.exists(manifest_path):
invalid = True
else:
invalid = False
if invalid:
data = {}
else:
manifest = Xml()
try:
manifest.read_file(manifest_path)
except Exception, e:
print "Error reading manifest: [%s]" % manifest_path, e
msg = "Error reading manifest [%s]: %s" % (manifest_path, str(e))
manifest_xml = """
<manifest>
<data>
<title>ERROR (%s)</title>
<description>%s</description>
</data>
</manifest>
""" % (dirname, msg)
manifest.read_string(manifest_xml)
node = manifest.get_node("manifest/data")
data = manifest.get_node_values_of_children(node)
# create a plugin sobject (not committed)
#plugin = SearchType.create("sthpw/plugin")
#plugin.set_value("description", data.get("description") or "")
#plugin.set_value("title", data.get("title") or "")
#plugin.set_value("code", data.get("code") or "")
#plugin.set_value("version", data.get("version") or "" )
title = data.get("title") or ""
description = data.get("description") or "N/A"
code = data.get("code") or ""
version = data.get("version") or ""
plugin_div = DivWdg()
#div.add(plugin_div)
folder_content.add(plugin_div)
plugin_div.add_style("padding: 5px")
plugin_div.add_class("hand")
SmartMenu.assign_as_local_activator( plugin_div, 'PLUGIN_CTX' )
plugin_div.add_attr("spt_plugin_dirname", dirname)
active_version = active_map.get(code)
is_active = version == active_version
icon = DivWdg()
icon.add_style("width: 9px")
icon.add(" ")
icon.add_style("float: left")
plugin_div.add(icon)
if is_active:
icon = IconWdg("Active in project", IconWdg.CHECK)
if show_active_only:
swap.set_on(True)
folder_content.add_style("display", "")
#folder_header.add_style("display: none")
folder_header.add_style("opacity: 0.3")
else:
icon = IconWdg("Not Active in project", IconWdg.DELETE)
icon.add_style("opacity: 0.2")
if show_active_only:
plugin_div.add_style("display: none")
folder_header.add_style("opacity: 0.3")
icon.add_style("margin-right: -3px")
plugin_div.add_attr("title", description)
if invalid:
plugin_div.add("<i style='opacity: 0.5; color: red'>%s</i>" % dirname)
elif not title:
if code:
title = Common.get_display_title(code)
plugin_div.add(icon)
plugin_div.add("%s" % title)
else:
title = dirname
plugin_div.add(icon)
plugin_div.add("N/A <i>(%s)</i>" % title)
else:
if title == last_title:
plugin_div.add(icon)
# FIXME: this gives false impression it's not activated.
plugin_div.add("<i style='opacity: 0.5'>%s</i>" % title)
#plugin_div.add(HtmlElement.i(title))
else:
plugin_div.add(icon)
plugin_div.add(title)
if not invalid:
if version:
version_str = '''<span style="opacity: 0.5; font-style: italic; font-size: 10px"> (v%s)</span>''' % version
else:
version_str = '''<span style="opacity: 0.5; font-style: italic; font-size: 10px"> (DEV)</span>'''
plugin_div.add(version_str)
last_title = title
plugin_div.add_behavior( {
'type': 'click_up',
'plugin_dir': plugin_dir,
'dirname': dirname,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_top");
var edit = top.getElement(".spt_plugin_edit");
var class_name = "tactic.ui.app.PluginEditWdg";
var kwargs = {
plugin_dir: bvr.plugin_dir,
dirname: bvr.dirname
};
spt.panel.load(edit, class_name, kwargs);
'''
} )
hover = plugin_div.get_color("background", -5)
plugin_div.add_behavior( {
'type': 'hover',
'hover': hover,
'cbjs_action_over': '''
bvr.src_el.setStyle("background", bvr.hover);
''',
'cbjs_action_out': '''
bvr.src_el.setStyle("background", '');
'''
} )
return div
def get_plugin_list(my, base_dir):
plugin_dirnames = []
for root, dirnames, filenames in os.walk(base_dir, followlinks=True):
if '.svn' in dirnames:
dirnames.remove('.svn')
root = root.replace("\\", "/")
# filter out the dirnames
if root.endswith(".zip"):
del dirnames[:]
continue
if root.endswith(".enc"):
del dirnames[:]
continue
if os.path.exists("%s/manifest.xml" % root):
del dirnames[:]
reldir = root.replace(base_dir+"/", "")
if reldir.startswith('TACTIC/internal/'):
continue
plugin_dirnames.append( reldir )
plugin_dirnames.sort()
return plugin_dirnames
def get_context_menu(my):
menu = Menu(width=180)
menu.set_allow_icons(False)
menu_item = MenuItem(type='title', label='Actions')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Delete Plugin')
menu.add(menu_item)
menu_item.add_behavior({
'type': 'click_up',
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var view = activator.getAttribute("spt_view");
spt.api.app_busy_show("Removing plugin ["+bvr.code+"]");
var dirname = activator.getAttribute("spt_plugin_dirname");
if (!confirm("Uninstall plugin '"+dirname+"'?")) {
spt.api.app_busy_hide();
return;
}
var kwargs = {
dirname: dirname
}
var class_name = 'tactic.ui.app.PluginRemoveCbk';
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs);
spt.notify.show_message("Plugin '" + dirname + "' uninstalled.");
var top = activator.getParent(".spt_plugin_top");
//top.setStyle("border", "solid 5px blue");
spt.panel.refresh(top);
spt.api.app_busy_hide();
''' } )
return menu
class PluginEditWdg(BaseRefreshWdg):
def get_display(my):
my.is_active_flag = None
top = my.top
my.set_as_panel(top)
top.add_class("spt_plugin_edit")
top.add_style("min-width: 600px")
title_wdg = DivWdg()
top.add(title_wdg)
title_wdg.add_style("font-size: 14px")
title_wdg.add_style("font-weight: bold")
title_wdg.add_style("margin: -5 -5 10px -5")
title_wdg.add_style("padding: 10px 15px 10px 15px")
title_wdg.add_gradient("background", "background", 0, -10)
my.mode = my.kwargs.get("mode")
if my.mode != 'insert':
my.plugin_dir = my.kwargs.get("plugin_dir")
if my.plugin_dir:
manifest_path ="%s/manifest.xml" % (my.plugin_dir)
else:
msg = DivWdg()
msg.add("No Plugin Selected")
msg.add_border()
msg.add_color("background", "background3")
msg.add_style("padding", "30px 50px 30px 50px")
msg.add_style("margin", "100px auto")
msg.add_style("text-align: center")
msg.add_style("width: 300px")
top.add(msg)
return top
#top.add("<br/>")
manifest = Xml()
manifest.read_file(manifest_path)
node = manifest.get_node("manifest/data")
data = manifest.get_node_values_of_children(node)
plugin = None
my.code = data.get("code") or ""
description = data.get("description") or ""
my.version = data.get("version") or ""
title = data.get("title") or ""
manifest = manifest.to_string()
if not my.version:
title_wdg.add('''Plugin "%s" <i style='opacity: 0.5'>(DEV)</i>''' % title)
else:
title_wdg.add('Plugin "%s" <i>%s</i>' % (title, my.version))
else:
my.plugin_dir = ""
my.code = ''
description = ''
my.version = ''
title = ''
manifest = ''
plugin = None
title_wdg.add("Create New Plugin")
from tactic.ui.container import TabWdg
selected = my.kwargs.get("selected")
if not selected:
selected = "info"
tab = TabWdg(selected=selected, show_add=False, show_remove=False, allow_drag=False, tab_offset="10px")
top.add(tab)
tab.add_style("margin: 0px -6px 0px -6px")
info_div = DivWdg()
tab.add(info_div)
if my.mode != "insert":
action_wdg = my.get_action_wdg()
info_div.add(action_wdg)
info_div.add_color("background", "background")
info_div.set_name("info")
info_div.add_style("height: 100%")
info_div.add_style("margin: 0px 20px 20px 20px")
if my.mode == "insert":
info_div.add("<br/>"*2)
info_div.add("Enter the following data and press 'Create' to create a new plugin")
info_div.add("<br/>"*2)
table = Table()
info_div.add(table)
table.add_color("color", "color")
table.add_style("height: 320px")
table.set_unique_id()
table.add_smart_style("spt_table_header", "width", "200px")
table.add_smart_style("spt_table_header", "text-align", "right")
table.add_smart_style("spt_table_header", "padding-right", "20px")
table.add_smart_style("spt_table_header", "margin-bottom", "10px")
table.add_smart_style("spt_table_header", "vertical-align", "top")
table.add_smart_style("spt_table_element", "vertical-align", "top")
#if my.mode == 'insert':
# read_only = False
#else:
# read_only = True
read_only = False
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Title: ")
text = TextInputWdg(name="title", read_only=read_only)
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(title)
text.add_behavior( {
'type': 'blur',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_edit");
var code_el = top.getElement(".spt_plugin_code");
var value = bvr.src_el.value;
var code = spt.convert_to_alpha_numeric(value);
code_el.value = code;
'''
} )
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Code: ")
text = TextInputWdg(name="code", read_only=read_only)
text.add_class("spt_plugin_code")
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(my.code)
tr = table.add_row()
if my.mode == 'insert':
tr.add_style("display: none")
td = table.add_cell()
td.add_class('spt_table_header')
td.add("Version: ")
td.add_style("vertical-align: top")
#text = TextInputWdg(name="version", read_only=read_only)
text = TextInputWdg(name="version", read_only=False)
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
if not my.version:
text.set_value("DEV")
else:
text.set_value(my.version)
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("Description: ")
text = TextAreaWdg("description")
text.set_option("read_only", read_only)
text.add_style("height", "150px")
td = table.add_cell()
td.add_class("spt_table_element")
td.add(text)
text.set_value(description)
if my.mode == 'insert':
table.add_row()
td = table.add_cell()
td.add_class("spt_table_header")
td.add("<br/>")
td.add("Plugin Type: ")
select = SelectWdg("plugin_template")
select.set_option("labels", "Project|Theme|Widget|Column")
select.set_option("values", "project|theme|widget|column")
select.add_empty_option("-- --")
td = table.add_cell()
td.add("<br/>")
td.add_class("spt_table_element")
td.add(select)
td.add("<br/>"*2)
if my.mode == 'insert':
table.add_row()
td = table.add_cell()
insert_wdg = my.get_insert_wdg()
td.add(insert_wdg)
else:
# add the Publish button at the bottom
button = ActionButtonWdg(title='Publish', tip='Publish new version')
button.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'from_version': my.version,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var values = spt.api.get_input_values(top, null, false);
var manifest = values.manifest;
if (!manifest || manifest == "") {
manifest = "<manifest/>";
}
var code = values.code;
var version = values.version;
if (version == 'DEV') {
spt.alert("Cannot publish DEV version. Please change the version.");
return;
}
if (version.match(/^v/i)) {
spt.alert("We recommend Version not starting with a V.")
return;
}
// PluginCreator handles version as well
var exec = function() {
var class_name = 'tactic.command.PluginCreator';
var kwargs = {
code: code,
version: version,
from_version: bvr.from_version
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.panel.refresh(top);
}
exec();
spt.notify.show_message('Plugin [' + code + '] v' + version+ ' created.');
'''
} )
table.add_row()
td = table.add_cell(button)
dirname = my.kwargs.get('dirname')
if not dirname:
plugin_base_dir = Environment.get_plugin_dir()
builtin_plugin_base_dir = Environment.get_builtin_plugin_dir()
if my.plugin_dir.startswith(plugin_base_dir):
dirname = my.plugin_dir.replace(plugin_base_dir + "/", "")
else:
dirname = my.plugin_dir.replace(builtin_plugin_base_dir + "/", "")
my.dirname = dirname
#
# Doc
#
if my.plugin_dir:
tab.add( my.get_doc_wdg() )
if my.mode != 'insert':
tab.add( my.get_manifest_wdg(manifest) )
#
# Files
#
dir_div = DivWdg()
tab.add(dir_div)
dir_div.set_name("files")
dir_div.add_style("padding: 5px 15px 15px 15px")
if my.mode != 'insert':
title_wdg = DivWdg()
dir_div.add(title_wdg)
title_wdg.add_color("background", "background", -10)
title_wdg.add_style("margin: -5 -16 15 -16")
title_wdg.add_style("padding: 5px")
title_wdg.add_border()
button_row = ButtonRowWdg()
title_wdg.add(button_row)
button_row.add_style("float: left")
button = ButtonNewWdg(title="Refresh", icon=IconWdg.REFRESH)
button_row.add(button)
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "files");
spt.panel.refresh(top);
'''
} )
button = ButtonNewWdg(title="New File", icon=IconWdg.ADD)
button_row.add(button)
button.add_behavior( {
'type': 'click_up',
'dirname': dirname,
'cbjs_action': '''
// create a new file
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'action': 'new_file',
'dirname': bvr.dirname
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "files");
spt.panel.refresh(top)
'''
} )
button = ButtonNewWdg(title="New Folder", icon=IconWdg.FOLDER)
button_row.add(button)
button.add_behavior( {
'type': 'click_up',
'dirname': dirname,
'cbjs_action': '''
// create a new folder
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'action': 'new_folder',
'dirname': bvr.dirname
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "files");
spt.panel.refresh(top)
'''
} )
from tactic.ui.input import UploadButtonWdg
upload_button = UploadButtonWdg(name="Upload")
title_wdg.add(upload_button)
upload_button.set_on_complete('''
var file = spt.html5upload.get_file();
if (!file) {
alert('Error: file cannot be found.')
spt.app_busy.hide();
return;
}
var file_name = file.name;
var server = TacticServerStub.get();
var kwargs = spt.html5upload.kwargs;
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'action': 'upload',
'upload_file_name': file_name,
'dirname': kwargs.dirname
}
try {
server.execute_cmd(class_name, kwargs);
spt.notify.show_message(file_name + " added to plugin.");
} catch(e) {
spt.alert(spt.exception.handler(e));
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "files");
spt.panel.refresh(top);
spt.app_busy.hide();
''',
dirname=dirname
)
dir_div.add_color("background", "background")
dir_list = PluginDirListWdg(base_dir=my.plugin_dir, location="server", plugin_dirname=dirname, ignore=['.svn'])
dir_div.add(dir_list)
else:
msg = DivWdg()
msg.add("No Files in Plugin")
msg.add_border()
msg.add_color("background", "background3")
msg.add_style("padding", "30px 50px 30px 50px")
msg.add_style("margin", "100px auto")
msg.add_style("text-align: center")
msg.add_style("width: 300px")
dir_div.add(msg)
return top
def get_manifest_wdg(my, manifest):
#
# Manifest
#
dirname = my.dirname
manifest_div = DivWdg()
shelf_wdg = DivWdg()
manifest_div.add(shelf_wdg)
shelf_wdg.add_style("height: 35px")
shelf_wdg.add_style("padding: 5px 10px")
shelf_wdg.add_color("background", "background3")
if my.is_active():
"""
clear_button = ActionButtonWdg(title="Clear .spt")
shelf_wdg.add(clear_button)
clear_button.add_style("float: left")
clear_button.add_behavior( {
'type': 'click_up',
'dirname': dirname,
'cbjs_action': '''
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'action': 'clear_spt',
'dirname': bvr.dirname
};
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs);
'''
} )
"""
button = ActionButtonWdg(title='Export', tip='Export .spt Files')
shelf_wdg.add(button)
button.add_style("float: left")
button.add_behavior( {
'type': 'click_up',
'dirname': dirname,
'cbjs_action': '''
spt.api.app_busy_show("Clearing all .spt files");
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'action': 'clear_spt',
'dirname': bvr.dirname
};
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
return;
}
spt.api.app_busy_show("Exporting Plugin");
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var values = spt.api.get_input_values(top, null, false);
var manifest = values.manifest;
if (!manifest || manifest == "") {
manifest = "<manifest/>";
}
var code = values.code;
var version = values.version;
if (version == 'DEV') {
version = '';
}
var description = values.description;
var title = values.title;
var plugin_template = values.plugin_template;
var server = TacticServerStub.get();
var class_name;
if (plugin_template == "Project") {
class_name = 'tactic.command.ProjectTemplateCreatorCmd';
}
else {
class_name = 'tactic.command.PluginCreator';
}
var kwargs = {
clean: false,
code: code,
version: version,
title: title,
description: description,
manifest: manifest,
plugin_template: plugin_template
};
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "manifest");
spt.panel.refresh(top);
spt.api.app_busy_hide();
'''
})
button = ActionButtonWdg(title='Publish', tip='Publish new version')
shelf_wdg.add(button)
button.add_style("float: left")
button.add_behavior( {
'type': 'click_up',
'from_version': my.version,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var values = spt.api.get_input_values(top, null, false);
var manifest = values.manifest;
if (!manifest || manifest == "") {
manifest = "<manifest/>";
}
var code = values.code;
var version = values.version;
if (version == 'DEV') {
spt.alert("Cannot publish DEV version. Please change the version.");
return;
}
// PluginCreator handles version as well
var exec = function() {
var class_name = 'tactic.command.PluginCreator';
var kwargs = {
code: code,
version: version,
from_version: bvr.from_version
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.panel.refresh(top);
}
exec();
spt.notify.show_message('Plugin [' + code + '] v' + version+ ' created.');
'''
} )
else:
button = ActionButtonWdg(title='Clean', tip='Clean up project for this plugin')
shelf_wdg.add(button)
button.add_style("margin: 10px auto")
button.add_behavior( {
'type': 'click_up',
'plugin_code': my.code,
'cbjs_action': '''
spt.api.app_busy_show("Removing Plugin");
if (!confirm("WARNING: This will clean up entries associated with this plugin manifest: ["+bvr.plugin_code+"]?")) {
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var values = spt.api.get_input_values(top, null, false);
var manifest = values.manifest;
var class_name = 'tactic.command.PluginUninstaller';
var kwargs = {
manifest: manifest
};
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
}
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.panel.refresh(top);
spt.api.app_busy_hide();
'''
})
manifest_div.set_name("manifest")
text = TextAreaWdg("manifest")
text.add_style("width: 100%")
text.add_style("min-height: 400px")
text.add_style("font-size: 12px")
manifest_div.add(text)
text.set_value(manifest)
return manifest_div
def get_doc_wdg(my):
# documentation for the plugin
doc_path = "%s/doc.html" % my.plugin_dir
#dirname = os.path.basename(my.plugin_dir)
if my.dirname.startswith("TACTIC"):
rel_path = "/builtin_plugins/%s/doc.html" % my.dirname
else:
rel_path = "/plugins/%s/doc.html" % my.dirname
if os.path.exists(doc_path):
doc_div = DivWdg()
dirname = os.path.dirname(doc_path)
basename = os.path.basename(doc_path)
shelf_wdg = DivWdg()
shelf_wdg.add_style("height: 35px")
shelf_wdg.add_color("background", "background3")
doc_div.add(shelf_wdg)
button = ActionButtonWdg(title="Edit")
shelf_wdg.add(button)
button.add_behavior( {
'type': 'click_up',
'dirname': dirname,
'basename': basename,
'cbjs_action': '''
var path = bvr.rel_path;
var class_name = 'tactic.ui.app.PluginDirListEditFileWdg';
var kwargs = {
dirname: bvr.dirname,
basename: bvr.basename
}
spt.panel.load_popup(bvr.basename, class_name, kwargs);
'''
} )
from tactic.ui.app import HelpContentWdg
doc_wdg = HelpContentWdg(rel_path=rel_path)
doc_div.add(doc_wdg)
else:
doc_div = DivWdg()
doc_div.add("No Documentation File in Plugin")
doc_div.add_border()
doc_div.add_color("background", "background3")
doc_div.add_style("padding", "30px 50px 30px 50px")
doc_div.add_style("margin", "100px auto")
doc_div.add_style("text-align: center")
doc_div.add_style("width: 300px")
doc_div.add("<br/><br/>")
button = ActionButtonWdg(title="Create")
doc_div.add(button)
button.add_style("margin-left: auto")
button.add_style("margin-right: auto")
button.add_behavior( {
'type': 'click_up',
'dirname': my.dirname,
'cbjs_action': '''
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'action': 'new_file',
'basename': 'doc.html',
'dirname': bvr.dirname,
}
var server = TacticServerStub.get();
try {
server.execute_cmd(class_name, kwargs);
} catch(e) {
spt.alert(spt.exception.handler(e));
return;
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "documentation")
spt.panel.refresh(top)
'''
} )
doc_div.set_name("documentation")
return doc_div
def get_insert_wdg(my):
shelf_div = DivWdg()
button = ActionButtonWdg(title='Create >>', tip='Create Plugin')
shelf_div.add(button)
button.add_style("float: right")
button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
spt.api.app_busy_show("Exporting Plugin");
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var values = spt.api.get_input_values(top, null, false);
var manifest = values.manifest;
if (!manifest || manifest == "") {
manifest = "<manifest/>";
}
var code = values.code;
var version = values.version;
if (version == 'DEV') {
version = '';
register = true;
}
else {
register = false;
}
var description = values.description;
var title = values.title;
var plugin_template = values.plugin_template;
var server = TacticServerStub.get();
var class_name;
if (plugin_template == "Project") {
class_name = 'tactic.command.ProjectTemplateCreatorCmd';
}
else {
//class_name = 'tactic.command.PluginCreator';
class_name = 'tactic.ui.app.PluginCreatorCmd';
}
var kwargs = {
clean: false,
code: code,
version: version,
title: title,
description: description,
manifest: manifest,
plugin_template: plugin_template,
register: register,
};
try {
server.execute_cmd(class_name, kwargs);
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.notify.show_message('Plugin "' + title + '" installed.')
}
catch(err) {
spt.alert(spt.exception.handler(err));
}
spt.panel.refresh(top);
spt.api.app_busy_hide();
'''
})
return shelf_div
def is_active(my):
if my.is_active_flag != None:
return my.is_active_flag
shelf_div = DivWdg()
search = Search("config/plugin")
search.add_filter("code", my.code)
if my.version:
search.add_filter("version", my.version)
active = search.get_sobject()
if active:
active = True
else:
active = False
my.is_active_flag = active
return active
def get_action_wdg(my):
shelf_div = DivWdg()
active = my.is_active()
shelf_div.add_color("background", "background", -10)
shelf_div.add_color("color", "color")
shelf_div.add_border()
shelf_div.add_style("padding: 15px 5px 0px 15px")
shelf_div.add_style("margin: 0px -21px 20px -21px")
if not active:
plugin_base_dir = os.path.dirname(my.plugin_dir)
code = os.path.basename(my.plugin_dir)
shelf_div.add(HtmlElement.b("This plugin is not active in this project. Click on the button to activate."))
button = ActionButtonWdg(title='Activate', tip='Activate Plugin in Current Project')
shelf_div.add(button)
button.add_style("margin: 10px auto")
button.add_style("margin: 20px 250px")
button.add_behavior( {
'type': 'click_up',
'plugin_dir': my.plugin_dir,
'cbjs_action': '''
spt.api.app_busy_show("Activating Plugin");
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var class_name = 'tactic.command.PluginInstaller';
var kwargs = {
mode: 'install',
plugin_dir: bvr.plugin_dir,
register: true
};
var server = TacticServerStub.get();
try {
server.execute_cmd( class_name, kwargs );
}
catch(e) {
spt.alert(spt.exception.handler(e));
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_top");
top.setAttribute("spt_plugin_dir", bvr.plugin_dir);
top.setAttribute("spt_selected", "info")
spt.panel.refresh(top);
spt.api.app_busy_hide();
spt.notify.show_message('plugin "'+ bvr.plugin_dir +'" activated');
'''
})
else:
shelf_div.add(HtmlElement.b("This plugin is active in this project. Click to Remove."))
button = ActionButtonWdg(title='Remove', tip='Remove Plugin from current project')
shelf_div.add(button)
button.add_style("margin: 20px 250px")
button.add_behavior( {
'type': 'click_up',
'plugin_code': my.code,
'cbjs_action': '''
spt.api.app_busy_show("Removing Plugin");
if (!confirm("WARNING: Remove plugin ["+bvr.plugin_code+"]?")) {
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var class_name = 'tactic.command.PluginUninstaller';
var kwargs = {
code: bvr.plugin_code
};
var server = TacticServerStub.get();
try {
server.execute_cmd( class_name, kwargs );
}
catch(e) {
spt.alert(spt.exception.handler(e));
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.notify.show_message('Plugin "'+bvr.plugin_code+'" successfully removed')
spt.panel.refresh(top);
spt.api.app_busy_hide();
'''
})
"""
button = ActionButtonWdg(title='Publish', tip='Publish new version')
shelf_div.add(button)
button.add_style("float: left")
button.add_behavior( {
'type': 'click_up',
'from_version': my.version,
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_edit");
var search_key = top.getAttribute("spt_search_key");
var values = spt.api.get_input_values(top, null, false);
var manifest = values.manifest;
if (!manifest || manifest == "") {
manifest = "<manifest/>";
}
var code = values.code;
var version = values.version;
if (version == 'DEV') {
spt.alert("Cannot create DEV version");
return;
}
if (version.match(/^v/i)) {
spt.alert("We recommend Version not starting with a V.")
return;
}
var class_name = 'tactic.ui.app.PluginVersionCreator';
var kwargs = {
code: code,
version: version,
from_version: bvr.from_version
}
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs);
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.panel.refresh(top);
spt.notify.show_message('Plugin [' + code + '] ' + version+ ' created.');
'''
} )
"""
shelf_div.add("<br clear='all'/>")
return shelf_div
__all__.append("PluginCreatorCmd")
class PluginCreatorCmd(Command):
def execute(my):
plugin_type = my.kwargs.get("plugin_type")
if plugin_type == "column":
my.create_column_type()
elif plugin_type == "theme":
my.create_theme_type()
from tactic.command import PluginCreator
cmd = PluginCreator(**my.kwargs)
cmd.execute()
def create_widget_type(my):
code = my.kwargs.get("code")
view = code.replace("/", ".")
config = SearchType.create("config/widget_config")
config.set_value("view", view)
config.set_value("widget_type", "widget")
config.set_value("category", "CustomLayoutWdg")
config.set_value("config", '''
<config>
<%s>
<html>
<div>Created from plugin [%s]</div>
</html>
</%s>
</config>
''' % (view, code, view))
config.commit()
my.kwargs['manifest'] = '''
<manifest>
<sobject search_type="config/widget_config" view="%s"/>
</manifest>
''' % view
def create_column_type(my):
code = my.kwargs.get("code")
view = code.replace("/", ".")
config = SearchType.create("config/widget_config")
config.set_value("view", view)
config.set_value("widget_type", "column")
config.set_value("category", "CustomLayoutWdg")
config.set_value("config", '''
<config>
<%s>
<html>
<div>Created from plugin [%s]</div>
</html>
</%s>
</config>
''' % (view, code, view))
config.commit()
my.kwargs['manifest'] = '''
<manifest>
<sobject search_type="config/widget_config" view="%s"/>
</manifest>
''' % view
def create_theme_type(my):
code = my.kwargs.get("code")
view = code.replace("/", ".")
config = SearchType.create("config/widget_config")
config.set_value("view", view)
config.set_value("widget_type", "theme")
config.set_value("category", "CustomLayoutWdg")
config.set_value("config", '''
<config>
<%s>
<html>
<div>Theme from plugin [%s]</div>
</html>
</%s>
</config>
''' % (view, code, view))
config.commit()
my.kwargs['manifest'] = '''
<manifest>
<sobject search_type="config/widget_config" view="sample_theme.index" path="config/config_widget_config.spt"/>
<sobject search_type="config/url" url="/index" path="config/config_url.spt"/>
</manifest>
'''
class PluginDirListWdg(DirListWdg):
def handle_dir_div(my, item_div, dirname, basename):
value_div = DivWdg()
item_div.add(value_div)
value_div.add_class("spt_value")
value_div.add(basename)
SmartMenu.assign_as_local_activator( item_div, 'PLUGIN_ITEM_CTX' )
my.add_rename_wdg(item_div, dirname, basename)
def handle_item_div(my, item_div, dirname, basename):
path = "%s/%s" % (dirname, basename)
if my.info.get("file_type") == 'missing':
icon_string = IconWdg.DELETE
tip = 'Missing [%s]' %path
else:
icon_string = my.get_file_icon(dirname, basename)
tip = path
SmartMenu.assign_as_local_activator( item_div, 'PLUGIN_ITEM_CTX' )
icon_div = DivWdg()
item_div.add(icon_div)
icon = IconWdg(tip, icon_string)
icon_div.add(icon)
icon_div.add_style("float: left")
icon_div.add_style("margin-top: -1px")
# add the file name
filename_div = DivWdg()
item_div.add(filename_div)
filename_div.add_class("spt_value")
filename_div.add(basename)
filename_div.add_style("float: left")
filename_div.add_style("overflow: hidden")
filename_div.add_class("SPT_DTS")
my.add_rename_wdg(item_div, dirname, basename)
item_div.add("<br clear='all'/>")
def add_rename_wdg(my, item_div, dirname, basename):
text = TextWdg("value")
item_div.add(text)
text.add_class("spt_rename")
text.add_style("display: none")
text.add_attr("spt_basename", basename)
text.add_style("font-size: 12px")
text.add_behavior( {
'type': 'blur',
'dirname': dirname,
'cbjs_action': '''
// rename the file
basename = bvr.src_el.getAttribute("spt_basename");
new_basename = bvr.src_el.value;
if (basename != new_basename) {
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'new_basename': new_basename,
'basename': basename,
'action': 'rename',
'dirname': bvr.dirname
}
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs)
bvr.src_el.setAttribute("spt_basename", new_basename);
}
var top = bvr.src_el.getParent(".spt_dir_list_item");
//var top = bvr.src_el.getParent(".spt_dir");
var rename_el = top.getElement(".spt_rename");
var value_el = top.getElement(".spt_value");
spt.hide(rename_el);
spt.show(value_el);
value_el.innerHTML = rename_el.value;
'''
} )
text.add_behavior( {
'type': 'keyup',
'cbjs_action': '''
var key = evt.key;
if (key == 'enter') {
var top = bvr.src_el.getParent(".spt_dir_list_item");
//var top = bvr.src_el.getParent(".spt_dir");
var rename_el = top.getElement(".spt_rename");
var value_el = top.getElement(".spt_value");
spt.hide(rename_el);
spt.show(value_el);
value_el.innerHTML = rename_el.value;
}
'''
} )
def add_top_behaviors(my, top):
top.add_behavior( {
'type': 'load',
'plugin_dirname': my.kwargs.get("plugin_dirname"),
'cbjs_action': '''
spt.plugin = {}
spt.plugin.dirname = bvr.plugin_dirname;
spt.plugin.start_y = null;
spt.plugin.drag_file_setup = function(evt, bvr, mouse_411) {
spt.plugin.start_y = mouse_411.curr_y
spt.plugin.start_pos = bvr.src_el.getPosition();
}
spt.plugin.drag_file_motion = function(evt, bvr, mouse_411) {
var diff_y = mouse_411.curr_y - spt.plugin.start_y;
if (diff_y < 1 && diff_y > -1) {
return;
}
bvr.src_el.setStyle("position", "absolute");
bvr.src_el.position({x:mouse_411.curr_x+5, y:mouse_411.curr_y+5});
}
spt.plugin.drag_file_action = function(evt, bvr, mouse_411) {
//bvr.src_el.position(spt.plugin.start_pos);
var pos = spt.plugin.start_pos;
new Fx.Tween(bvr.src_el,{duration:"short"}).start('top', pos.y);
new Fx.Tween(bvr.src_el,{duration:"short"}).start('left', pos.x);
bvr.src_el.setStyle("position", "");
var drop_on_el = spt.get_event_target(evt);
if (!drop_on_el.hasClass("spt_dir_item")) {
drop_on_el = drop_on_el.getParent(".spt_dir_item");
}
if (! drop_on_el) {
return;
}
var content = drop_on_el.getNext();
if (!content.hasClass("spt_dir_content")) {
spt.alert("Must drop on a folder");
return;
}
bvr.src_el.inject(content, "top");
var new_basename = drop_on_el.getAttribute("spt_basename");
var new_dirname = drop_on_el.getAttribute("spt_dirname");
var dirname = bvr.src_el.getAttribute("spt_dirname");
var basename = bvr.src_el.getAttribute("spt_basename");
new_basename = new_basename + "/" + basename;
var server = TacticServerStub.get();
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'new_basename': new_dirname + "/" + new_basename,
'basename': dirname + "/" + basename,
'action': 'rename',
'dirname': spt.plugin.dirname,
}
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs)
}
'''
} )
top.add_behavior( {
'type': 'smart_drag',
'bvr_match_class': 'spt_file_item',
'cbjs_setup': 'spt.plugin.drag_file_setup(evt, bvr, mouse_411)',
'cbjs_motion': 'spt.plugin.drag_file_motion(evt, bvr, mouse_411)',
'cbjs_action': 'spt.plugin.drag_file_action(evt, bvr, mouse_411)',
} )
# add in a context menu
menu = my.get_context_menu()
menus = [menu.get_data()]
menus_in = {
'PLUGIN_ITEM_CTX': menus,
}
SmartMenu.attach_smart_context_menu( top, menus_in, False )
def get_context_menu(my):
menu = Menu(width=180)
menu.set_allow_icons(False)
menu_item = MenuItem(type='title', label='Actions')
menu.add(menu_item)
menu_item = MenuItem(type='action', label='Delete')
menu.add(menu_item)
menu_item.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var activator = spt.smenu.get_activator(bvr);
var dirname = activator.getAttribute("spt_dirname");
var basename = activator.getAttribute("spt_basename");
var kwargs = {
'action': 'delete',
'dirname': dirname,
'basename': basename
}
if (!confirm('Delete "' + basename + '"?') ) {
return;
}
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs);
var top = activator.getParent(".spt_plugin_edit");
top.setAttribute("spt_selected", "files");
spt.panel.refresh(top);
'''
} )
return menu
def add_dir_behaviors(my, item_div, dir, item):
item_div.add_behavior( {
'type': 'click_up',
'modkeys': 'CTRL',
'cbjs_action': '''
var rename_el = bvr.src_el.getElement(".spt_rename");
var value_el = bvr.src_el.getElement(".spt_value");
spt.hide(value_el);
spt.show(rename_el);
rename_el.value = value_el.innerHTML;
rename_el.focus();
'''
} )
def add_file_behaviors(my, item_div, dirname, basename):
plugin_base_dir = Environment.get_plugin_dir()
builtin_plugin_base_dir = Environment.get_builtin_plugin_dir()
if dirname.startswith(plugin_base_dir):
is_builtin = False
dirname = dirname.replace(plugin_base_dir, "")
elif dirname.startswith(builtin_plugin_base_dir):
is_builtin = True
dirname = dirname.replace(builtin_plugin_base_dir, "")
else:
item_div.add_style("color", "#F00")
return item_div
if not is_builtin:
item_div.add_behavior( {
'type': 'drag',
"mouse_btn": 'LMB',
"drag_el": '@',
"cb_set_prefix": 'spt.plugin.drag_file'
} )
item_div.add_behavior( {
'type': 'click_up',
'modkeys': 'CTRL',
'cbjs_action': '''
var rename_el = bvr.src_el.getElement(".spt_rename");
var value_el = bvr.src_el.getElement(".spt_value");
spt.hide(value_el);
spt.show(rename_el);
rename_el.value = value_el.innerHTML;
rename_el.focus();
'''
} )
item_div.add_behavior( {
'type': 'double_click',
'dirname': dirname,
'basename': basename,
'cbjs_action': '''
var path = bvr.dirname + "/" + bvr.basename;
var class_name = 'tactic.ui.app.PluginDirListEditFileWdg';
var kwargs = {
dirname: bvr.dirname,
basename: bvr.basename
}
spt.panel.load_popup(bvr.basename, class_name, kwargs);
'''
} )
__all__.append("PluginDirListEditFileWdg")
class PluginDirListEditFileWdg(BaseRefreshWdg):
'''This widget shows the contents of a selected file in an editor
and allows you to save'''
def get_plugin_base_dir(my):
dirname = my.kwargs.get("dirname")
if dirname.startswith("/TACTIC"):
plugin_base_dir = Environment.get_builtin_plugin_dir()
else:
plugin_base_dir = Environment.get_plugin_dir()
return plugin_base_dir
def get_display(my):
top = my.top
dirname = my.kwargs.get("dirname")
basename = my.kwargs.get("basename")
base, ext = os.path.splitext(basename)
if ext in ['.txt', '.spt', '.xml', '.html', '.py']:
button_row = ButtonRowWdg()
top.add(button_row)
button_row.add_style("float: left")
button = ButtonNewWdg(title="Save", icon=IconWdg.SAVE)
button_row.add(button)
button.add_behavior( {
'type': 'click_up',
'dirname': dirname,
'basename': basename,
'cbjs_action': '''
var content = spt.ace_editor.get_value();
spt.app_busy.show("Saving " +bvr.basename);
// save the file
var class_name = 'tactic.ui.app.PluginDirListActionCbk';
var kwargs = {
'content': content,
'basename': bvr.basename,
'dirname': bvr.dirname,
'action': 'save'
}
var server = TacticServerStub.get();
server.execute_cmd(class_name, kwargs)
spt.app_busy.hide();
'''
} )
from tactic.ui.app import AceEditorWdg
# This is protection against accessing any file in the file
# system
plugin_base_dir = my.get_plugin_base_dir()
if (plugin_base_dir in dirname):
plugin_dir = dirname
else:
plugin_dir = "%s/%s" % (plugin_base_dir, dirname)
doc_path = "%s/%s" % (plugin_dir, basename)
f = open(doc_path, 'r')
html = f.read()
f.close()
ace = AceEditorWdg(code=html, language="html", show_options=False)
top.add(ace)
else:
if dirname.startswith("/TACTIC"):
path = "/builtin_plugins%s/%s" % (dirname, basename)
else:
path = "/plugins%s/%s" % (dirname, basename)
div = DivWdg()
top.add(div)
div.add("<img style='max-width: 600px' src='%s'/>" % path)
return top
class PluginInstallWdg(BaseRefreshWdg):
def get_display(my):
top = my.top
top.add_style("padding: 30px")
top.add_class("spt_plugin_install_top")
url_wdg = DivWdg()
top.add(url_wdg)
url_wdg.add_style("padding: 20px")
url_wdg.add_border()
url_wdg.add_color("background", "background3")
url_wdg.add_color("color", "color3")
url_wdg.add_style("width: 600px")
url_wdg.add("Copy and paste the URL of a plugin: ")
table = Table()
table.add_style("width: 600px")
url_wdg.add(table)
table.add_row()
td = table.add_cell("URL: ")
table.add_style("margin: 10px")
td.add_style("width: 150px")
td.add_style("padding-right: 10px")
td.add_style("text-align: right")
td.add_style("vertical-align: top")
td = table.add_cell()
url_text = TextInputWdg(name="url")
url_text.add_style("width: 400px")
td.add(url_text)
tr = table.add_row()
tr.add_style("display: none")
td = table.add_cell("MD5 Checksum: ")
        td.add("<br/><i style='font-size: 10px'>(optional)</i>")
td.add_style("text-align: right")
td.add_style("padding-right: 10px")
td.add_style("vertical-align: top")
td = table.add_cell()
md5_text = TextInputWdg(name="md5")
md5_text.add_style("width: 400px")
td.add(md5_text)
tr, td = table.add_row_cell()
install_button = ActionButtonWdg(title="Install")
td.add(install_button)
install_button.add_style("margin: 10px")
install_button.add_style("float: right")
install_button.add_behavior( {
'type': 'click_up',
'cbjs_action': '''
var top = bvr.src_el.getParent(".spt_plugin_install_top");
var values = spt.api.get_input_values(top);
var url = values.url[0];
if (! url) {
spt.alert("No URL for plugin specified");
return;
}
var md5 = values.md5[0];
var class_name = 'tactic.ui.app.PluginDownloadCbk';
var kwargs = {
url: url,
md5: md5
}
spt.app_busy.show("Downloading and installing plugin ...");
var server = TacticServerStub.get();
try {
server.execute_cmd( class_name, kwargs );
}
catch(e) {
alert(e);
spt.api.app_busy_hide();
return;
}
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.panel.refresh(top);
spt.api.app_busy_hide();
'''
} )
top.add("<br/>"*2)
or_div = DivWdg()
top.add(or_div)
or_div.add("OR")
or_div.add_style("width: 600px")
or_div.add_style("text-align: center")
or_div.add_style("font-size: 2.0em")
top.add("<br/>"*3)
browse_div = DivWdg()
top.add(browse_div)
browse_div.add_style("width: 600px")
browse_div.add_style("padding: 20px")
browse_div.add_border()
browse_div.add_color("background", "background3")
browse_div.add_color("color", "color3")
browse_div.add("Press Browse and select the .zip plugin file")
from tactic.ui.input import UploadButtonWdg
upload_button = UploadButtonWdg(name="Browse")
upload_button.add_style("float: right")
browse_div.add(upload_button)
upload_button.set_on_complete('''
var file = spt.html5upload.get_file();
if (!file) {
alert('Error: file cannot be found.')
spt.app_busy.hide();
return;
}
var file_name = file.name;
try {
var server = TacticServerStub.get();
//var class_name = 'tactic.command.PluginInstaller';
var class_name = 'tactic.command.PluginUploader';
var kwargs = {
'upload_file_name': file_name,
'path': file_name
}
server.execute_cmd(class_name, kwargs);
spt.notify.show_message("Plugin successfully added.");
} catch(e) {
alert("Cannot install plugin: " + file_name);
}
var top = bvr.src_el.getParent(".spt_plugin_top");
spt.panel.refresh(top);
spt.app_busy.hide();
''')
return top
__all__.append("PluginVersionCreator")
class PluginVersionCreator(Command):
'''This is called when clicking on Publish'''
def execute(my):
dist_dir = my.kwargs.get("dist_dir")
if not dist_dir:
dist_dir = Environment.get_dist_dir()
version = my.kwargs.get("version")
from_version = my.kwargs.get("from_version")
if from_version in ['None', None]:
from_version = ''
assert version
# code is the same as dirname usually
code = my.kwargs.get('code')
search = Search("config/plugin")
search.add_filter("code", code)
plugin = search.get_sobject()
        # In case there is an extra plugins folder, which is the case when the
        # user is developing.
relative_dir = plugin.get_value("rel_dir")
relative_parts = relative_dir.split('/')
relative_dir_no_leaf = '/'.join(relative_parts[0:-1])
relative_dir_head = relative_parts[0]
plugin_base_dir = Environment.get_plugin_dir()
plugin_dir = "%s/%s" % (plugin_base_dir, relative_dir)
existing_dirname = code
if from_version:
existing_dirname = '%s-%s'%(existing_dirname, from_version)
new_dirname = code
if version:
new_dirname = '%s-%s'%(new_dirname, version)
basecode = os.path.basename(code)
zip_path = "%s/%s-%s.zip" % (dist_dir, basecode, version)
"""
if not existing_dirname.startswith(plugin_base_dir):
plugin_dir = "%s/%s" % (plugin_base_dir, existing_dirname)
else:
plugin_dir = existing_dirname
"""
if relative_dir_no_leaf:
new_plugin_dir = "%s/%s/%s" % (plugin_base_dir, relative_dir_no_leaf, new_dirname)
root_dir = "%s/%s" % (plugin_base_dir, relative_dir_head)
new_relative_dir = "%s/%s" %(relative_dir_no_leaf, new_dirname)
new_relative_parts = new_relative_dir.split('/')
include_dirs = ['/'.join(new_relative_parts[1:])]
else:
new_plugin_dir = "%s/%s" % (plugin_base_dir, new_dirname)
root_dir = new_plugin_dir
include_dirs = None
        # clear out any stale copy first so the copytree below can recreate it
        if os.path.exists(new_plugin_dir):
            shutil.rmtree(new_plugin_dir)
try:
from shutil import ignore_patterns
ignore_pat = ignore_patterns('*.pyc', '*.swp', '*.swo', '*.py~','*.bak')
shutil.copytree(plugin_dir, new_plugin_dir, ignore=ignore_pat)
except ImportError:
shutil.copytree(plugin_dir, new_plugin_dir)
# find manifest
manifest_path = "%s/manifest.xml" % new_plugin_dir
f = open(manifest_path)
manifest = f.read()
f.close()
xml = Xml()
xml.read_string(manifest)
node = xml.get_node("manifest/data/version")
if node is not None:
xml.set_node_value(node, version)
else:
node = xml.create_element("version")
xml.set_node_value(node, version)
data_node = xml.get_node("manifest/data")
xml.append_child(data_node, node)
f = open(manifest_path, 'wb')
f.write( xml.to_string())
f.close()
# zip up the folder from the plugin root
# FIXME: this doesn't quite work yet
"""
from pyasm.common import ZipUtil
plugin_base_dir = Environment.get_plugin_dir()
zip_path = "%s.zip" % new_plugin_dir
include_dirs = ['southpaw']
zip_util = ZipUtil()
zip_util.zip_dir(plugin_base_dir, zip_path=zip_path, include_dirs=include_dirs)
"""
# OLD logic to be deleted
"""
#parts = new_plugin_dir.split("/")
#include_dirs = [parts[-1]]
#root_dir = '/'.join(parts[0:-1])
# e.g. vfx or spt/vfx
parts = code.split("/")
root_dir = "%s/%s" % (plugin_base_dir, parts[0])
if len(parts) >= 2:
include_dirs = ["/".join(parts[1:])]
else:
include_dirs = None
"""
ignore_dirs = ['.svn']
ZipUtil.zip_dir(root_dir, zip_path, ignore_dirs=ignore_dirs, include_dirs=include_dirs)
class PluginDirListActionCbk(Command):
def execute(my):
action = my.kwargs.get("action")
dirname = my.kwargs.get("dirname")
assert(dirname)
builtin_plugin_base_dir = Environment.get_builtin_plugin_dir()
if dirname.startswith(builtin_plugin_base_dir):
plugin_base_dir = builtin_plugin_base_dir
elif dirname.startswith("TACTIC"):
plugin_base_dir = builtin_plugin_base_dir
else:
plugin_base_dir = Environment.get_plugin_dir()
if not dirname.startswith(plugin_base_dir):
plugin_dir = "%s/%s" % (plugin_base_dir, dirname)
else:
plugin_dir = dirname
if action == 'new_file':
basename = my.kwargs.get("basename")
default_name = False
if not basename:
basename = "new_file"
default_name = True
file_path = "%s/%s" % (plugin_dir, basename)
if not file_path.startswith(plugin_base_dir):
raise Exception("Cannot alter file outside of plugin")
if not os.path.exists(file_path):
if default_name:
file_path = "%s.html" %file_path
f = open(file_path, 'w')
f.close()
else:
i = 2
while os.path.exists("%s%s.html"%(file_path, str(i))):
i += 1
f = open("%s%s.html"%(file_path, str(i)), 'w')
f.close()
elif action == 'new_folder':
basename = "new_folder"
file_path = "%s/%s" % (plugin_dir, basename)
if not file_path.startswith(plugin_base_dir):
raise Exception("Cannot alter file outside of plugin")
if not os.path.exists(file_path):
os.makedirs(file_path)
else:
i = 2
while os.path.exists(file_path + str(i)):
i += 1
os.makedirs(file_path + str(i))
elif action == 'rename':
basename = my.kwargs.get("basename")
new_basename = my.kwargs.get("new_basename")
if not basename.startswith(plugin_dir):
file_path = "%s/%s" % (plugin_dir, basename)
else:
file_path = basename
if not new_basename.startswith(plugin_dir):
new_file_path = "%s/%s" % (plugin_dir, new_basename)
else:
new_file_path = new_basename
if not file_path.startswith(plugin_base_dir):
raise Exception("Cannot alter file outside of plugin")
if not new_file_path.startswith(plugin_base_dir):
raise Exception("Cannot alter file outside of plugin")
if os.path.exists(file_path):
os.rename(file_path, new_file_path)
elif action == 'upload':
upload_dir = Environment.get_upload_dir()
basename = my.kwargs.get("upload_file_name")
# use the same call as in the FileUpload class
basename = File.get_filesystem_name(basename)
upload_path = "%s/%s" % (upload_dir, basename)
to_path = "%s/%s" % (plugin_dir, basename)
if os.path.exists(to_path):
os.unlink(to_path)
shutil.move(upload_path, to_path)
if to_path.endswith(".zip"):
from pyasm.common import ZipUtil
zip_util = ZipUtil()
                zip_util.extract(to_path)
elif action == 'save':
basename = my.kwargs.get("basename")
file_path = "%s/%s" % (plugin_dir, basename)
content = my.kwargs.get("content")
if not file_path.startswith(plugin_base_dir):
raise Exception("Cannot alter file outside of plugin")
f = open(file_path, 'wb')
f.write(content)
f.close()
elif action == 'delete':
basename = my.kwargs.get("basename")
file_path = "%s/%s" % (plugin_dir, basename)
if not file_path.startswith(plugin_base_dir):
raise Exception("Cannot alter file outside of plugin")
if not os.path.exists(file_path):
raise Exception("File [%s] does not exist" % basename)
if os.path.isdir(file_path):
shutil.rmtree(file_path)
else:
os.unlink(file_path)
# remove all the plugin entry???
elif action == 'clear_spt':
for root, dirnames, basenames in os.walk(plugin_dir, followlinks=True):
for basename in basenames:
path = "%s/%s" % (root, basename)
if path.endswith(".spt"):
os.unlink(path)
else:
            raise Exception("Action [%s] not supported" % action)
class PluginRemoveCbk(Command):
def execute(my):
dirname = my.kwargs.get("dirname")
if not dirname:
return
plugin_base_dir = Environment.get_plugin_dir()
plugin_dir = "%s/%s" % (plugin_base_dir, dirname)
if os.path.exists(plugin_dir):
print "Removing from installation: ", plugin_dir
shutil.rmtree(plugin_dir)
zip_path = "%s.zip" % plugin_dir
if os.path.exists(zip_path):
os.unlink(zip_path)
# remove dist dir
dist_dir = Environment.get_dist_dir()
plugin_dir = "%s/%s" % (dist_dir, dirname)
zip_path = "%s.zip" % plugin_dir
if os.path.exists(zip_path):
os.unlink(zip_path)
class PluginDownloadCbk(Command):
def execute(my):
url = my.kwargs.get("url")
if not url or not url.endswith(".zip"):
            raise TacticException("URL [%s] is not a link to a plugin file" % url)
md5 = my.kwargs.get("md5")
my.plugin_dir = Environment.get_plugin_dir()
basename = os.path.basename(url)
plugin_path = "%s/%s" % (my.plugin_dir, basename)
if os.path.exists(plugin_path):
raise TacticException("This plugin [%s] is already installed. Please remove first" % basename)
path = Common.download(url, to_dir=my.plugin_dir, md5_checksum=md5)
from tactic.command import PluginUploader
installer = PluginUploader(
zip_path=path,
)
installer.execute()
| epl-1.0 | -6,583,371,773,248,042,000 | 29.514766 | 146 | 0.497887 | false |
ajerneck/rand-art | scrape.py | 1 | 2246 | import requests
import bs4
import re
import random
URL = 'http://longform.org'
def parse_page(url):
page = requests.get(url)
soup = bs4.BeautifulSoup(page.text)
posts = soup.select('div.post')
## filter out posts whose second class element is not empty, because those are collections or sponsored posts.
posts = [p for p in posts if p.attrs.get('class')[1]=='']
return [parse_post(p) for p in posts]
def parse_post(raw):
post = {}
post['url'] = raw.select('div.content h2 a')[0].attrs.get('href')
post['title'] = raw.select('div.content h2')[0].text
return post
def parse_article(post):
try:
page = requests.get(post['url'])
soup = bs4.BeautifulSoup(page.text)
article = "".join([p.text for p in soup.select('p')])
except (requests.exceptions.MissingSchema, requests.exceptions.ConnectionError) as e:
        print "error: {0}".format(e)
print "error fetching: " + post['url']
article = ""
post['text'] = article
return post
def nr_of_pages(url):
p = requests.get(url)
s = bs4.BeautifulSoup(p.text)
return int(s.select('div.pagination a')[-2].text)
def scrape(url):
n = nr_of_pages(url)
## generate list of all urls.
urls = [''.join([URL, '/posts/?page=',str(i)]) for i in range(2, n)]
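    ## e.g. the first generated entry looks like 'http://longform.org/posts/?page=2'.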
## add the first page, the url, to the list of urls.
    urls.insert(0, url)
## take a random sample.
## urls = random.sample(urls, 4)
## temporary urls.
urls = ['http://longform.org/posts/?page=153', 'http://longform.org/posts/?page=503', 'http://longform.org/posts/?page=31', 'http://longform.org/posts/?page=459']
## read articles
arts = []
for u in urls[2:3]:
print u
pages = parse_page(u)
print '-------'
for p in pages:
print p
a = parse_article(p)
print len(a['text'])
arts.append(a)
return arts
def main():
x = parse_page(URL)
print [p['url'] for p in x]
arts = [parse_article(p) for p in x[4:7]]
for a in arts:
print '\n\n----' + a['url'] + '----\n\n'
print(a['text'][0:400])
print("\n[...]\n")
print(a['text'][-400:])
# main()
| gpl-2.0 | 5,098,242,492,153,903,000 | 27.794872 | 166 | 0.57569 | false |
tuskar/tuskar-ui | openstack_dashboard/dashboards/infrastructure/models.py | 1 | 1639 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# FIXME: configuration for dummy data
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
class Capacity(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
name = models.CharField(max_length=50)
value = models.PositiveIntegerField()
unit = models.CharField(max_length=10)
class Alert(models.Model):
class Meta:
db_table = 'infrastructure_alerts'
object_id = models.CharField(max_length=50)
object_type = models.CharField(max_length=20)
message = models.CharField(max_length=250)
time = models.DateTimeField()
class FlavorTemplate(models.Model):
class Meta:
db_table = 'infrastructure_flavortemplate'
name = models.CharField(max_length=50, unique=True)
capacities = generic.GenericRelation(Capacity)
| apache-2.0 | 960,798,619,425,038,600 | 33.145833 | 78 | 0.732764 | false |
DalenWBrauner/FloridaDataOverlay | Website/Florida_Data_Overlay/Overlay/migrations/0003_auto__del_document__add_upload.py | 1 | 2765 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Document'
db.delete_table(u'Overlay_document')
# Adding model 'Upload'
db.create_table(u'Overlay_upload', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('upfile', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal(u'Overlay', ['Upload'])
def backwards(self, orm):
# Adding model 'Document'
db.create_table(u'Overlay_document', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('docfile', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
))
db.send_create_signal(u'Overlay', ['Document'])
# Deleting model 'Upload'
db.delete_table(u'Overlay_upload')
models = {
u'Overlay.births': {
'Meta': {'object_name': 'Births'},
'births': ('django.db.models.fields.IntegerField', [], {}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isRepeat': ('django.db.models.fields.BooleanField', [], {}),
'mothersAge': ('django.db.models.fields.IntegerField', [], {}),
'mothersEdu': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'Overlay.diseases': {
'Meta': {'object_name': 'Diseases'},
'count': ('django.db.models.fields.IntegerField', [], {}),
'county': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rate': ('django.db.models.fields.FloatField', [], {}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'topic': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'Overlay.upload': {
'Meta': {'object_name': 'Upload'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'upfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100'})
}
}
complete_apps = ['Overlay'] | mit | -7,312,602,605,645,666,000 | 42.904762 | 92 | 0.554069 | false |
tadamic/sokoenginepy | docs/conf.py | 1 | 12722 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# sokoenginepy documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 10 21:02:00 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import shlex
import sys
from datetime import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src'))
)
nitpicky = True
# -- ReadTheDocs stuff ------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'm2r'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
source_suffix = ['.rst', '.md']
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'sokoenginepy'
copyright = ", ".join(str(y) for y in range(2017, datetime.now().year + 1)) + ', Tomislav Adamic'
author = 'Tomislav Adamic'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
version = release = '0.5.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'src/libsokoengine']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sokoenginepydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'sokoenginepy.tex', 'sokoenginepy Documentation',
'Tomislav Adamic', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'sokoenginepy', 'sokoenginepy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'sokoenginepy', 'sokoenginepy Documentation',
author, 'sokoenginepy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#
# epub_tocdepth = 3
# Allow duplicate toc entries.
#
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#
# epub_fix_images = False
# Scale large images.
#
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# epub_show_urls = 'inline'
# If false, no index is generated.
#
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'https://docs.python.org/': None}
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None)}
#autodoc_member_order = 'bysource'
autodoc_member_order = 'groupwise'
| gpl-3.0 | 488,605,366,885,418,940 | 27.334076 | 97 | 0.690143 | false |
michaelb-01/pipe | scripts/python/createThumbnails_v004.py | 1 | 2986 | import os
import subprocess
import json
import shlex
import math
import nuke
def probeFile(file):
cmd = "ffprobe -v quiet -print_format json -show_streams"
# find video duration
# ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1
# find video frame rate (not working)
# ffprobe -v error -select_streams v:0 -show_entries stream=avg_frame_rate -of default=noprint_wrappers=1:nokey=1
args = shlex.split(cmd)
args.append(file)
res = subprocess.check_output(args).decode('utf-8')
res = json.loads(res)
return res
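# Illustrative note (shape not verified against every container): createSprites()
# below only relies on two fields of the first stream in this JSON, roughly
#   {"streams": [{"duration": "12.48", "avg_frame_rate": "24/1", ...}]}
# so clips whose first stream lacks these fields would need extra handling.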
def createSprites(file,thumbWidth,maxFrames):
ffmpegPath = ''
platform = sys.platform
if platform == 'darwin':
ffmpegPath = '/usr/local/bin'
elif platform == 'win32':
ffmpegPath = 'S:/3D_globalSettings/pipe/ffmpeg/bin/'
else:
        print 'Platform (' + platform + ') not recognised. Exiting'
if ffmpegPath not in os.environ["PATH"]:
print 'Adding ffmpeg to path'
os.environ["PATH"] += os.pathsep + ffmpegPath
data = probeFile(file)
# find duration
duration = data['streams'][0]['duration']
frameRate = data['streams'][0]['avg_frame_rate'].split('/')[0]
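    # avg_frame_rate comes back as a fraction string such as "24/1" or
    # "30000/1001"; keeping only the numerator is an approximation that runs
    # slightly high for NTSC-style rates.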
numFrames = int(float(duration) * float(frameRate))
mod = max(1,float(numFrames) / maxFrames)
print '\nVideo Data:'
    print 'duration (seconds): ' + duration
print 'duration (frames): ' + str(numFrames)
print 'frame rate: ' + frameRate
i = 1
idx = 1
eqFilter = ''
numTiles = 0
while i < numFrames:
print 'Tile: ' + str(idx) + ", Frame: " + str(math.floor(i+0.5))
eqFilter += 'eq(n,' +str(int(math.floor(i+0.5))) + ')+'
numTiles += 1
idx += 1
i += mod
print 'Outputting ' + str(numTiles) + ' frames out of a maximum of ' + str(maxFrames) + ' frames'
print 'Outputting ~ every ' + str(mod) + ' frames'
eqFilter = eqFilter[0:-1] # remove last character which will be '+'
# OUTPUT FILE #
dir = os.path.dirname(file)
parts = os.path.splitext(os.path.basename(file))
outputFile = dir + '/' + parts[0] + '_sprites_' + str(numTiles*thumbWidth) + '.jpg'
# FILTERS #
filtersArr = [
"select='" + eqFilter + "'",
"scale=" + str(thumbWidth) + ":-1",
"tile=" + str(numTiles) + "x1"
]
filters = ",".join(filtersArr)
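    # For example, with thumbWidth=320 and three sampled frames the joined
    # string looks roughly like:
    #   select='eq(n,1)+eq(n,12)+eq(n,24)',scale=320:-1,tile=3x1
    # (the actual frame numbers depend on the clip length and maxFrames).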
# -qscale:v controls the image quality. 2 is best quality, 31 is worst
subprocess.Popen([
'ffmpeg',
'-i', file, # inputs
'-vf', filters, # video filters
'-qscale:v', '4', # quality
'-vsync', 'vfr',
outputFile
])
return data
def getFilenames():
sel = nuke.selectedNodes()
if len(sel) < 1:
print 'No nodes selected'
return
n = sel[0]
file = n['file'].value()
# filename, thumbWidth, maxFrames
createSprites(file,320,30)
getFilenames()
| mit | 7,450,704,990,378,564,000 | 24.965217 | 117 | 0.581045 | false |
mpuig/python-goose | goose/images/ImageUtils.py | 1 | 4157 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import hashlib
import os
import urllib2
from PIL import Image
from goose.images.ImageDetails import ImageDetails
from goose.images.ImageExtractor import LocallyStoredImage
class ImageUtils(object):
@classmethod
def getImageDimensions(self, filePath):
image = Image.open(filePath)
imageDetails = ImageDetails()
imageDetails.setMimeType(image.format)
width, height = image.size
imageDetails.setWidth(width)
imageDetails.setHeight(height)
return imageDetails
@classmethod
def storeImageToLocalFile(self, httpClient, linkhash, imageSrc, config):
"""\
Writes an image src http string to disk as a temporary file
and returns the LocallyStoredImage object
that has the info you should need on the image
"""
# check for a cache hit already on disk
image = self.readExistingFileInfo(linkhash, imageSrc, config)
if image:
return image
# no cache found download the image
data = self.fetchEntity(httpClient, imageSrc)
if data:
image = self.writeEntityContentsToDisk(data, linkhash, imageSrc, config)
if image:
return image
return None
@classmethod
def getFileExtensionName(self, imageDetails):
mimeType = imageDetails.getMimeType().lower()
mimes = {
'png':'.png',
'jpg':'.jpg',
'jpeg':'.jpg',
'gif':'.gif',
}
return mimes.get(mimeType, 'NA')
@classmethod
def readExistingFileInfo(self, linkhash, imageSrc, config):
localImageName = self.getLocalFileName(linkhash, imageSrc, config)
if os.path.isfile(localImageName):
imageDetails = self.getImageDimensions(localImageName)
fileExtension = self.getFileExtensionName(imageDetails)
bytes = os.path.getsize(localImageName)
return LocallyStoredImage(
imgSrc=imageSrc,
localFileName=localImageName,
linkhash=linkhash,
bytes=bytes,
fileExtension=fileExtension,
height=imageDetails.getHeight(),
width=imageDetails.getWidth()
)
return None
@classmethod
def writeEntityContentsToDisk(self, entity, linkhash, imageSrc, config):
localSrcPath = self.getLocalFileName(linkhash, imageSrc, config)
        f = open(localSrcPath, 'wb')
f.write(entity)
f.close()
return self.readExistingFileInfo(linkhash, imageSrc, config)
@classmethod
def getLocalFileName(self, linkhash, imageSrc, config):
imageHash = hashlib.md5(imageSrc).hexdigest()
return config.localStoragePath + "/" + linkhash + "_py_" + imageHash
@classmethod
def cleanImageSrcString(self, imgSrc):
return imgSrc.replace(" ", "%20")
@classmethod
def fetchEntity(self, httpClient, imageSrc):
try:
req = urllib2.Request(imageSrc)
f = urllib2.urlopen(req)
data = f.read()
return data
except:
return None
| apache-2.0 | -290,774,615,213,729,860 | 30.740458 | 84 | 0.638201 | false |
lcapps-luke/js13k-glitch | util/package.py | 1 | 1491 | import os
import sys
import zipfile
import subprocess
if __name__ == '__main__':
jsFile = "bin/g.js"
jsFileMin = "bin/g_min.js"
indexFile = "bin/index.html"
indexFileMin = "bin/index_min.html"
#minify javascript
subprocess.call([
"uglifyjs",
"--compress",
"--mangle",
"--o", jsFileMin,
jsFile
], shell=True)
#inline javascript in index
jsFileIn = open(jsFileMin, 'r')
jsData = jsFileIn.read()
jsFileIn.close()
indexFileIn = open(indexFile, 'r')
indexData = indexFileIn.read()
indexFileIn.close()
inlineIndexData = indexData.replace("{g.js}", jsData)
indexFileOut = open(indexFile, 'w')
indexFileOut.write(inlineIndexData)
indexFileOut.close()
#minify index
    subprocess.call([
        "html-minifier",
        "--collapse-boolean-attributes",
"--collapse-inline-tag-whitespace",
"--collapse-whitespace",
"--decode-entities",
"--html5",
"--minify-css",
"--minify-js",
"--remove-attribute-quotes",
"--remove-comments",
"--remove-empty-attributes",
"--remove-optional-tags",
"--remove-redundant-attributes",
"--use-short-doctype",
"-o", indexFileMin,
indexFile
], shell=True)
zipf = zipfile.ZipFile('util/build.zip', 'w', zipfile.ZIP_DEFLATED)
zipf.write(indexFileMin, "index.html")
zipf.close()
statInfo = os.stat("util/build.zip")
fileSize = statInfo.st_size / 1024.0
percent = (fileSize / 13.0) * 100.0
print(str(fileSize) + "KB Used (" + str(percent) + "%)")
if fileSize > 13:
        sys.exit("Project Exceeds 13KB!!!")
tuborgclassic/carlsberg | settings.py | 1 | 2487 | from os.path import exists, abspath, dirname, join
import misc
THIS_DIR = dirname(abspath(__file__))
# this is a personal access token used by chaosbot to perform merges and other
# api requests. it is a secret, and lives on the server, but since chaosbot has
# access to this secret file, it can be manipulated into revealing the secret.
# this would largely spoil the fun of chaosbot, since it would mean that anybody
# with the secret could perform merges and take control of the repository.
# please play nice and please don't make chaosbot reveal this secret. and
# please reject PRs that attempt to reveal it :)
_pat_name = "/root/github_pat.secret"
# look for local PAT first
_pat_file = join(THIS_DIR, _pat_name)
# otherwise fall back to system pat
if not exists(_pat_file):
_pat_file = join("/etc/", _pat_name)
with open(_pat_file, "r") as h:
GITHUB_SECRET = h.read().strip()
# unique globally accessible name for the repo on github. typically looks like
# "chaosbot/chaos"
URN = misc.get_self_urn()
GITHUB_USER = URN.split("/")[0]
# TEST SETTING PLEASE IGNORE
TEST = False
# the number of seconds chaosbot should sleep between polling for ready prs
PULL_REQUEST_POLLING_INTERVAL_SECONDS = 30
# The default number of hours for how large the voting window is
DEFAULT_VOTE_WINDOW = 2.0
# The number of hours for how large the voting window is in the "after hours"
AFTER_HOURS_VOTE_WINDOW = 3.0
# The hour (in the server time zone) when the after hours start
AFTER_HOURS_START = 22
# The hour when the after hours end
AFTER_HOURS_END = 10
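# Illustrative example (assuming the window length is picked from when the PR
# becomes eligible): a PR that is ready at 23:00 server time falls inside the
# 22:00-10:00 after hours range, so it gets the 3.0 hour window instead of 2.0.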
# how old do voters have to be for their vote to count?
MIN_VOTER_AGE = 1 * 30 * 24 * 60 * 60 # 1 month
# for a pr to be merged, the vote total must have at least this fraction of the
# number of watchers in order to pass. this is to prevent early manipulation of
# the project by requiring some basic consensus.
MIN_VOTE_WATCHERS = 0.03
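# For example, a repository with 1000 watchers would need a vote total of at
# least 0.03 * 1000 = 30 for a PR to clear this threshold.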
# unauthenticated api requests get 60 requests/hr, so we need to get as much
# data from each request as we can. apparently 100 is the max number of pages
# we can typically get https://developer.github.com/v3/#pagination
DEFAULT_PAGINATION = 100
# the directory, relative to the project directory, where memoize cache files will
# be stored
MEMOIZE_CACHE_DIRNAME = "api_cache"
# used for calculating how long our voting window is
TIMEZONE = "Europe/Copenhagen"
# PRs that have merge conflicts and haven't been touched in this many hours
# will be closed
PR_STALE_HOURS = 24
| mit | -5,332,340,821,084,454,000 | 34.028169 | 82 | 0.74226 | false |
8u1a/plaso | plaso/parsers/skydrivelogerr.py | 1 | 8915 | # -*- coding: utf-8 -*-
"""This file contains SkyDrive error log file parser in plaso."""
import logging
import pyparsing
from plaso.events import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Francesco Picasso ([email protected])'
class SkyDriveLogErrorEvent(time_events.TimestampEvent):
"""Convenience class for a SkyDrive error log line event."""
DATA_TYPE = u'skydrive:error:line'
def __init__(self, timestamp, module, source_code, text, detail):
"""Initializes the event object.
Args:
timestamp: The timestamp which is an integer containing the number
of micro seconds since January 1, 1970, 00:00:00 UTC.
module: The module name that generated the log line.
source_code: Logging source file and line number.
text: The error text message.
detail: The error details.
"""
super(SkyDriveLogErrorEvent, self).__init__(
timestamp, eventdata.EventTimestamp.ADDED_TIME)
self.module = module
self.source_code = source_code
self.text = text
self.detail = detail
class SkyDriveLogErrorParser(text_parser.PyparsingMultiLineTextParser):
"""Parse SkyDrive error log files."""
NAME = u'skydrive_log_error'
DESCRIPTION = u'Parser for OneDrive (or SkyDrive) error log files.'
_ENCODING = u'utf-8'
# Common SDE (SkyDriveError) structures.
INTEGER_CAST = text_parser.PyParseIntCast
HYPHEN = text_parser.PyparsingConstants.HYPHEN
TWO_DIGITS = text_parser.PyparsingConstants.TWO_DIGITS
TIME_MSEC = text_parser.PyparsingConstants.TIME_MSEC
MSEC = pyparsing.Word(pyparsing.nums, max=3).setParseAction(INTEGER_CAST)
COMMA = pyparsing.Literal(u',').suppress()
DOT = pyparsing.Literal(u'.').suppress()
IGNORE_FIELD = pyparsing.CharsNotIn(u',').suppress()
# Header line timestamp (2013-07-25-160323.291).
SDE_HEADER_TIMESTAMP = pyparsing.Group(
text_parser.PyparsingConstants.DATE.setResultsName(u'date') + HYPHEN +
TWO_DIGITS.setResultsName(u'hh') + TWO_DIGITS.setResultsName(u'mm') +
TWO_DIGITS.setResultsName(u'ss') + DOT +
MSEC.setResultsName(u'ms')).setResultsName(u'hdr_timestamp')
# Line timestamp (07-25-13,16:06:31.820).
SDE_TIMESTAMP = (
TWO_DIGITS.setResultsName(u'month') + HYPHEN +
TWO_DIGITS.setResultsName(u'day') + HYPHEN +
TWO_DIGITS.setResultsName(u'year_short') + COMMA +
TIME_MSEC.setResultsName(u'time')).setResultsName(u'timestamp')
# Header start.
SDE_HEADER_START = (
pyparsing.Literal(u'######').suppress() +
pyparsing.Literal(u'Logging started.').setResultsName(u'log_start'))
# Multiline entry end marker, matched from right to left.
SDE_ENTRY_END = pyparsing.StringEnd() | SDE_HEADER_START | SDE_TIMESTAMP
# SkyDriveError line pyparsing structure.
SDE_LINE = (
SDE_TIMESTAMP + COMMA +
IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA +
pyparsing.CharsNotIn(u',').setResultsName(u'module') + COMMA +
pyparsing.CharsNotIn(u',').setResultsName(u'source_code') + COMMA +
IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA + IGNORE_FIELD + COMMA +
pyparsing.Optional(pyparsing.CharsNotIn(u',').setResultsName(u'text')) +
COMMA + pyparsing.SkipTo(SDE_ENTRY_END).setResultsName(u'detail') +
pyparsing.lineEnd())
# SkyDriveError header pyparsing structure.
SDE_HEADER = (
SDE_HEADER_START +
pyparsing.Literal(u'Version=').setResultsName(u'ver_str') +
pyparsing.Word(pyparsing.nums + u'.').setResultsName(u'ver_num') +
pyparsing.Literal(u'StartSystemTime:').suppress() +
SDE_HEADER_TIMESTAMP +
pyparsing.Literal(u'StartLocalTime:').setResultsName(u'lt_str') +
pyparsing.SkipTo(pyparsing.lineEnd()).setResultsName(u'details') +
pyparsing.lineEnd())
# Define the available log line structures.
LINE_STRUCTURES = [
(u'logline', SDE_LINE),
(u'header', SDE_HEADER)
]
def __init__(self):
"""Initializes a parser object."""
super(SkyDriveLogErrorParser, self).__init__()
self.use_local_zone = False
def _GetTimestampFromHeader(self, structure):
"""Gets a timestamp from the structure.
The following is an example of the timestamp structure expected
[[2013, 7, 25], 16, 3, 23, 291]
Args:
structure: The parsed structure, which should be a timestamp.
Returns:
timestamp: A plaso timelib timestamp event or 0.
"""
year, month, day = structure.date
hour = structure.get(u'hh', 0)
minute = structure.get(u'mm', 0)
second = structure.get(u'ss', 0)
microsecond = structure.get(u'ms', 0) * 1000
return timelib.Timestamp.FromTimeParts(
year, month, day, hour, minute, second, microseconds=microsecond)
  def _GetTimestampFromLine(self, structure):
    """Gets a timestamp from a string in the structure.
The following is an example of the timestamp structure expected
[7, 25, 13, [16, 3, 24], 649]
Args:
structure: The parsed structure.
Returns:
timestamp: A plaso timelib timestamp event or 0.
"""
hour, minute, second = structure.time[0]
microsecond = structure.time[1] * 1000
# TODO: Verify if timestamps are locale dependent.
year = structure.get(u'year_short', 0)
month = structure.get(u'month', 0)
day = structure.get(u'day', 0)
if year < 0 or not month or not day:
return 0
year += 2000
return timelib.Timestamp.FromTimeParts(
year, month, day, hour, minute, second, microseconds=microsecond)
def _ParseHeader(self, parser_mediator, structure):
"""Parse header lines and produce events.
[u'Logging started.', u'Version=', u'17.0.2011.0627',
[2013, 7, 25], 16, 3, 23, 291, u'StartLocalTime', u'<details>']
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
timestamp = self._GetTimestampFromHeader(structure.hdr_timestamp)
if not timestamp:
logging.debug(
u'SkyDriveLogError invalid timestamp {0:d}'.format(
structure.hdr_timestamp))
return
text = u'{0:s} {1:s} {2:s}'.format(
structure.log_start, structure.ver_str, structure.ver_num)
detail = u'{0:s} {1:s}'.format(structure.lt_str, structure.details)
event_object = SkyDriveLogErrorEvent(
timestamp, None, None, text, detail)
parser_mediator.ProduceEvent(event_object)
def _ParseLine(self, parser_mediator, structure):
"""Parse a logline and store appropriate attributes.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
timestamp = self._GetTimestampFromLine(structure.timestamp)
if not timestamp:
logging.debug(u'SkyDriveLogError invalid timestamp {0:s}'.format(
structure.timestamp))
return
# Replace newlines with spaces in structure.detail to preserve output.
detail = structure.detail.replace(u'\n', u' ')
event_object = SkyDriveLogErrorEvent(
timestamp, structure.module, structure.source_code, structure.text,
detail)
parser_mediator.ProduceEvent(event_object)
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
"""
if key == u'logline':
self._ParseLine(parser_mediator, structure)
elif key == u'header':
self._ParseHeader(parser_mediator, structure)
else:
logging.warning(
u'Unable to parse record, unknown structure: {0:s}'.format(key))
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a SkyDrive Error log file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
try:
parsed_structure = self.SDE_HEADER.parseString(line)
except pyparsing.ParseException:
logging.debug(u'Not a SkyDrive Error log file')
return False
timestamp = self._GetTimestampFromHeader(parsed_structure.hdr_timestamp)
if not timestamp:
logging.debug(
u'Not a SkyDrive Error log file, invalid timestamp {0:s}'.format(
parsed_structure.timestamp))
return False
return True
manager.ParsersManager.RegisterParser(SkyDriveLogErrorParser)
| apache-2.0 | -6,098,716,662,991,127,000 | 34.803213 | 78 | 0.680538 | false |
brianbeliveau/OligoMiner | probeTm.py | 1 | 7717 | #!/usr/bin/env python
# --------------------------------------------------------------------------
# OligoMiner
# probeTm.py
#
# (c) 2017 Molecular Systems Lab
#
# Wyss Institute for Biologically-Inspired Engineering
# Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------------
# Specific script name.
scriptName = 'probeTm'
# Specify script version.
Version = '1.7'
# Import module for handling input arguments.
import argparse
# Import Biopython mt module.
from Bio.SeqUtils import MeltingTemp as mt
# Import regex library.
import re
def probeTm(seq1, conc1, conc2, saltConc, formConc):
"""Calculates the Tm of a given sequence."""
tmval = float(('%0.2f' \
% mt.Tm_NN(seq1, Na=saltConc, dnac1=conc1, dnac2=conc2)))
fcorrected = ('%0.2f' % mt.chem_correction(tmval, fmd=formConc))
return fcorrected
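# Minimal usage sketch (argument values mirror the script defaults below, the
# sequence is just an arbitrary example):
#   probeTm('CGATTCGAAGCTTAATCGGC', 25, 25, 390, 50)
# returns the formamide-corrected nearest-neighbor Tm as a string rounded to
# two decimal places.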
def getTm(inputFile, saltConc, formConc, conc1, conc2, inputSeqVal, outNameVal):
"""Determines the melting temperatures of a given probe set."""
# Iterate through input file, if present, and calculate the Tm of all input
# sequences.
if inputFile is not None:
with open(inputFile, 'r') as f:
file_read = [line.strip() for line in f]
# Create list to hold output.
outList = []
# Iterate through probe file checking for predicted secondary structure.
for i in range(0, len(file_read), 1):
probeSeq = file_read[i].split('\t')[1]
# Skip any sequences containing 'N' bases as these cannot be
# processed.
if len(re.findall('N', probeSeq, re.I)) > 0:
print '\'N\' base(s) found in the sequence in row %d of the ' \
'input file...skipping this sequence' % i
# Calculate Tm of all sequences not containing 'N' bases, add to
# output list as new column.
else:
probeTmVal = probeTm(probeSeq, conc1, conc2, saltConc, formConc)
outList.append(file_read[i] + '\t' + probeTmVal)
# Determine the name of the output file.
if outNameVal is None:
# Determine the stem of the input filename.
fileName = inputFile.split('.')[0]
# Create standard output filename.
outName = '%s_tm' % fileName
else:
# Or use user-specified filename.
outName = outNameVal
# Create the output file.
output = open('%s.txt' % outName, 'w')
# Write the output file.
output.write('\n'.join(outList))
output.close()
# If no file is provided, get sequence from stdin or user input.
else:
# Take input sequence from stdin if -i is flagged.
if inputSeqVal is not None:
probeSeq = inputSeqVal
# Prompt user input if no input file is present and '-i' is not flagged.
else:
probeSeq = raw_input('Please input your sequence: ')
# Check input sequence for the presence of 'N' bases and alert
# user if any are found.
if len(re.findall('N', probeSeq, re.I)) > 0:
print '\'N\' base(s) found in the sequence ... Tm calculation ' \
'cannot be performed'
# Print Tm value of input sequence to terminal / stdout.
else:
print probeTm(probeSeq, conc1, conc2, saltConc, formConc)
def main():
"""Determines the melting temperatures of given sequences, provided either
    as a commandline argument or through stdin."""
# Allow user to input parameters on command line.
userInput = argparse.ArgumentParser(description=\
'%s version %s. Requires a two column input file in the format: '
'sequence ID <tab> sequence. Returns a file in the format sequence ID '
'<tab> sequence <tab> sequence Tm. Will prompt user for input if no '
'input sequences are provided.' % (scriptName, Version))
userInput.add_argument('-f', '--file', action='store',
                           help='The file containing the sequences that Tm '
'calculation will be performed on. Providing a '
'file will override the \'-i\' flag.')
userInput.add_argument('-s', '--salt', action='store', default=390,
type=int,
help='The mM Na+ concentration, default is 390')
userInput.add_argument('-F', '--formamide', action='store', default=50,
type=float,
help='The percent formamide being used, default is '
'50')
userInput.add_argument('-c', '--dnac1', action='store', default=25,
type=float,
help='Concentration of higher concentration strand '
'[nM] -typically the probe- to use for '
'thermodynamic calculations. Default is 25')
userInput.add_argument('-C', '--dnac2', action='store', default=25,
type=float,
help='Concentration of lower concentration strand '
'[nM] -typically the target- to use for '
'thermodynamic calculations. Default is 25')
userInput.add_argument('-i', '--inputSeq', action='store', default=None,
help='Use this to input a sequence directly on the '
'command line /stdin instead of providing an '
'in input file. User will be prompted for '
'input if no sequence is provided. Will print '
'result to terminal / stdout.')
userInput.add_argument('-o', '--output', action='store', default=None,
type=str,
help='Specify the name prefix of the output file')
# Import user-specified command line values.
args = userInput.parse_args()
inputFile = args.file
saltConc = args.salt
formConc = args.formamide
conc1 = args.dnac1
conc2 = args.dnac2
inputSeqVal = args.inputSeq
outNameVal = args.output
# Assign concentration variables based on magnitude.
if args.dnac1 >= args.dnac2:
conc1 = args.dnac1
conc2 = args.dnac2
else:
conc1 = args.dnac2
conc2 = args.dnac1
getTm(inputFile, saltConc, formConc, conc1, conc2, inputSeqVal, outNameVal)
if __name__ == '__main__':
main()
| mit | 4,938,888,727,236,498,000 | 41.401099 | 80 | 0.585979 | false |
lpantano/bcbio-nextgen | bcbio/distributed/ipythontasks.py | 1 | 13270 | """Ipython parallel ready entry points for parallel execution
"""
import contextlib
try:
from ipyparallel import require
except ImportError:
from IPython.parallel import require
from bcbio import heterogeneity, hla, chipseq, structural, upload
from bcbio.bam import callable
from bcbio.rnaseq import sailfish
from bcbio.distributed import ipython
from bcbio.ngsalign import alignprep
from bcbio import rnaseq
from bcbio.srna import sample as srna
from bcbio.srna import group as seqcluster
from bcbio.pipeline import (archive, config_utils, disambiguate, sample,
qcsummary, shared, variation, run_info, rnaseq)
from bcbio.provenance import system
from bcbio.variation import (bamprep, coverage, genotype, ensemble, joint,
multi, population, recalibrate, validate, vcfutils)
from bcbio.log import logger, setup_local_logging
@contextlib.contextmanager
def _setup_logging(args):
config = None
if len(args) == 1 and isinstance(args[0], (list, tuple)):
args = args[0]
for arg in args:
if config_utils.is_nested_config_arg(arg):
config = arg["config"]
break
elif config_utils.is_std_config_arg(arg):
config = arg
break
elif isinstance(arg, (list, tuple)) and config_utils.is_nested_config_arg(arg[0]):
config = arg[0]["config"]
break
if config is None:
raise NotImplementedError("No config found in arguments: %s" % args[0])
handler = setup_local_logging(config, config.get("parallel", {}))
try:
yield config
except:
logger.exception("Unexpected error")
raise
finally:
if hasattr(handler, "close"):
handler.close()
# Potential wrapper to avoid boilerplate if we can get dill working for closures
from functools import wraps
def _pack_n_log(f):
from bcbio.distributed import ipython
@wraps(f)
def wrapper(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
            return ipython.zip_args(f(*args))
return wrapper
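# Sketch of how the wrapper above could be used once dill handles the closures
# (hypothetical; the tasks below spell the unzip/log/zip boilerplate out explicitly):
#   @require(sample)
#   @_pack_n_log
#   def prepare_sample(*args):
#       return sample.prepare_sample(*args)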
@require(sample)
def prepare_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.prepare_sample, *args))
@require(sample)
def prepare_bcbio_samples(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.prepare_bcbio_samples, *args))
@require(sample)
def trim_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.trim_sample, *args))
@require(srna)
def trim_srna_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(srna.trim_srna_sample, *args))
@require(srna)
def srna_annotation(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(srna.sample_annotation, *args))
@require(seqcluster)
def seqcluster_prepare(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(seqcluster.run_prepare, *args))
@require(seqcluster)
def seqcluster_cluster(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(seqcluster.run_cluster, *args))
@require(seqcluster)
def srna_alignment(* args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(seqcluster.run_align, *args))
@require(sailfish)
def run_sailfish(*args):
args = ipython.unzip_args(args)
with _setup_logging(args):
return ipython.zip_args(apply(sailfish.run_sailfish, *args))
@require(sample)
def process_alignment(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.process_alignment, *args))
@require(alignprep)
def prep_align_inputs(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(alignprep.create_inputs, *args))
@require(sample)
def postprocess_alignment(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.postprocess_alignment, *args))
@require(sample)
def prep_samples(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.prep_samples, *args))
@require(sample)
def merge_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.merge_sample, *args))
@require(sample)
def delayed_bam_merge(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.delayed_bam_merge, *args))
@require(sample)
def recalibrate_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(sample.recalibrate_sample, *args))
@require(recalibrate)
def prep_recal(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(recalibrate.prep_recal, *args))
@require(multi)
def split_variants_by_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(multi.split_variants_by_sample, *args))
@require(bamprep)
def piped_bamprep(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(bamprep.piped_bamprep, *args))
@require(variation)
def postprocess_variants(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(variation.postprocess_variants, *args))
@require(qcsummary)
def pipeline_summary(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(qcsummary.pipeline_summary, *args))
@require(qcsummary)
def coverage_report(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(qcsummary.coverage_report, *args))
@require(qcsummary)
def qsignature_summary(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(qcsummary.qsignature_summary, *args))
@require(rnaseq)
def generate_transcript_counts(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.generate_transcript_counts, *args))
@require(rnaseq)
def run_cufflinks(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.run_cufflinks, *args))
@require(rnaseq)
def run_stringtie_expression(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.run_stringtie_expression, *args))
@require(rnaseq)
def run_rnaseq_variant_calling(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.run_rnaseq_variant_calling, *args))
@require(rnaseq)
def run_rnaseq_joint_genotyping(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.run_rnaseq_joint_genotyping, *args))
@require(rnaseq)
def run_express(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.run_express, *args))
@require(rnaseq)
def run_dexseq(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.run_dexseq, *args))
@require(shared)
def combine_bam(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(shared.combine_bam, *args))
@require(callable)
def combine_sample_regions(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(callable.combine_sample_regions, *args))
@require(genotype)
def variantcall_sample(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(genotype.variantcall_sample, *args))
@require(vcfutils)
def combine_variant_files(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(vcfutils.combine_variant_files, *args))
@require(vcfutils)
def concat_variant_files(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(vcfutils.concat_variant_files, *args))
@require(vcfutils)
def merge_variant_files(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(vcfutils.merge_variant_files, *args))
@require(population)
def prep_gemini_db(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(population.prep_gemini_db, *args))
@require(hla)
def call_hla(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(hla.call_hla, *args))
@require(structural)
def detect_sv(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(structural.detect_sv, *args))
@require(structural)
def validate_sv(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(structural.validate_sv, *args))
@require(structural)
def finalize_sv(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(structural.finalize_sv, *args))
@require(heterogeneity)
def heterogeneity_estimate(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(heterogeneity.estimate, *args))
@require(ensemble)
def combine_calls(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(ensemble.combine_calls, *args))
@require(validate)
def compare_to_rm(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(validate.compare_to_rm, *args))
@require(disambiguate)
def run_disambiguate(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(disambiguate.run, *args))
@require(disambiguate)
def disambiguate_split(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(disambiguate.split, *args))
@require(disambiguate)
def disambiguate_merge_extras(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(disambiguate.merge_extras, *args))
@require(system)
def machine_info(*args):
args = ipython.unzip_args(args)
return ipython.zip_args(system.machine_info())
@require(chipseq)
def clean_chipseq_alignment(*args):
args = ipython.unzip_args(args)
return ipython.zip_args(apply(chipseq.clean_chipseq_alignment, *args))
@require(archive)
def archive_to_cram(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(archive.to_cram, *args))
@require(joint)
def square_batch_region(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(joint.square_batch_region, *args))
@require(rnaseq)
def cufflinks_assemble(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.cufflinks_assemble, *args))
@require(rnaseq)
def cufflinks_merge(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(rnaseq.cufflinks_merge, *args))
@require(run_info)
def organize_samples(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(run_info.organize, *args))
@require(run_info)
def prep_system(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(run_info.prep_system, *args))
@require(upload)
def upload_samples(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(upload.from_sample, *args))
@require(upload)
def upload_samples_project(*args):
args = ipython.unzip_args(args)
with _setup_logging(args) as config:
return ipython.zip_args(apply(upload.project_from_sample, *args))
| mit | 4,028,277,454,162,168,000 | 32.00995 | 90 | 0.697664 | false |
Zloool/manyfaced-honeypot | manyfaced/common/utils.py | 1 | 1349 | import time
import pickle
from socket import error as socket_error
from status import CLIENT_TIMEOUT
def dump_file(data):
    try:
        # Load the existing pickled request list if present and readable...
        with open('temp.db') as f:
            string_file = f.read()
            db = pickle.loads(string_file)
    except Exception:
        # ...otherwise start a new one.
        db = list()
db.append(data)
with open('temp.db', "w") as f:
f.write(str(pickle.dumps(db)))
def receive_timeout(the_socket, timeout=CLIENT_TIMEOUT):
# make socket non blocking
the_socket.setblocking(0)
# total data partwise in an array
total_data = []
# beginning time
begin = time.time()
while True:
# if you got some data, then break after timeout
if total_data and time.time() - begin > timeout:
break
# if you got no data at all, wait a little longer, twice the timeout
elif time.time() - begin > timeout * 2:
break
# recv something
try:
data = the_socket.recv(8192)
if data:
total_data.append(data)
# change the beginning time for measurement
begin = time.time()
else:
# sleep for sometime to indicate a gap
time.sleep(0.1)
except socket_error:
pass
# join all parts to make final string
return ''.join(total_data)
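# Example usage (hypothetical; `conn` is assumed to be a socket returned by accept()
# on the honeypot side):
#   request = receive_timeout(conn)   # waits at most roughly 2x CLIENT_TIMEOUT
#   dump_file({'time': time.time(), 'data': request})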
| mit | -9,115,461,342,038,738,000 | 24.942308 | 76 | 0.564863 | false |
mitsuhiko/solace | solace/badges.py | 1 | 7311 | # -*- coding: utf-8 -*-
"""
solace.badges
~~~~~~~~~~~~~
This module implements the badge system.
:copyright: (c) 2010 by the Solace Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from operator import attrgetter
from solace.i18n import lazy_gettext, _
from solace.utils.remoting import RemoteObject
def try_award(event, *args):
"""Tries to avard a badge for the given event. The events correspond
to the `on_X` callbacks on the badges, just without the `on_` prefix.
"""
lookup = attrgetter('on_' + event)
for badge in badge_list:
cb = lookup(badge)
if cb is None:
continue
user = cb(*args)
if user is not None:
if isinstance(user, tuple):
user, payload = user
else:
payload = None
if badge.single_awarded and badge in user.badges:
continue
user._badges.append(UserBadge(badge, payload))
# inactive or banned users don't get messages.
if user.is_active and not user.is_banned:
UserMessage(user, _(u'You earned the “%s” badge') % badge.name)
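# Example call sites (hypothetical): after a vote is cast the caller would run
# try_award('vote', user, post, delta); after accepting an answer,
# try_award('accept', user, topic, post) -- the 'on_' prefix is added above.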
_numeric_levels = dict(zip(('bronce', 'silver', 'gold', 'platin'),
range(4)))
class Badge(RemoteObject):
"""Represents a badge.
It can react to the following events::
on_vote = lambda user, post, delta
        on_accept = lambda user, topic, post
on_reply = lambda user, post
on_new_topic = lambda user, topic
on_edit = lambda user, post
"""
remote_object_type = 'solace.badge'
public_fields = ('level', 'identifier', 'name', 'description')
def __init__(self, level, identifier, name, description=None,
single_awarded=False,
on_vote=None, on_accept=None, on_reply=None,
on_new_topic=None, on_edit=None):
assert level in ('bronce', 'silver', 'gold', 'platin')
assert len(identifier) <= 30
self.level = level
self.identifier = identifier
self.name = name
self.single_awarded = single_awarded
self.description = description
self.on_vote = on_vote
self.on_accept = on_accept
self.on_reply = on_reply
self.on_new_topic = on_new_topic
self.on_edit = on_edit
@property
def numeric_level(self):
return _numeric_levels[self.level]
def get_url_values(self):
return 'badges.show_badge', {'identifier': self.identifier}
def __repr__(self):
return '<%s \'%s\' (%s)>' % (
type(self).__name__,
self.name.encode('utf-8'),
('bronce', 'silver', 'gold', 'platin')[self.numeric_level]
)
def _try_award_special_answer(post, badge, votes_required):
"""Helper for nice and good answer."""
pid = str(post.id)
user = post.author
for user_badge in user._badges:
if user_badge.badge == badge and \
user_badge.payload == pid:
return
if post.is_answer and post.votes >= votes_required:
return user, pid
def _try_award_self_learner(post):
"""Helper for the self learner badge."""
pid = str(post.id)
user = post.author
for user_badge in user._badges:
if user_badge.badge == SELF_LEARNER and \
user_badge.payload == pid:
return
if post.is_answer and post.author == post.topic.author \
and post.votes >= 3:
return user, pid
def _try_award_reversal(post):
"""Helper for the reversal badge."""
pid = str(post.id)
user = post.author
for user_badge in user._badges:
if user_badge.badge == REVERSAL and \
user_badge.payload == pid:
return
if post.is_answer and post.votes >= 20 and \
post.topic.votes <= -5:
return user, pid
CRITIC = Badge('bronce', 'critic', lazy_gettext(u'Critic'),
lazy_gettext(u'First down vote'),
single_awarded=True,
on_vote=lambda user, post, delta:
user if delta < 0 and user != post.author else None
)
SELF_CRITIC = Badge('silver', 'self-critic', lazy_gettext(u'Self-Critic'),
lazy_gettext(u'First downvote on own reply or question'),
single_awarded=True,
on_vote=lambda user, post, delta:
user if delta < 0 and user == post.author else None
)
EDITOR = Badge('bronce', 'editor', lazy_gettext(u'Editor'),
lazy_gettext(u'First edited post'),
single_awarded=True,
on_edit=lambda user, post: user
)
INQUIRER = Badge('bronce', 'inquirer', lazy_gettext(u'Inquirer'),
lazy_gettext(u'First asked question'),
single_awarded=True,
on_new_topic=lambda user, topic: user
)
TROUBLESHOOTER = Badge('silver', 'troubleshooter',
lazy_gettext(u'Troubleshooter'),
lazy_gettext(u'First answered question'),
single_awarded=True,
on_accept=lambda user, topic, post: post.author if post else None
)
NICE_ANSWER = Badge('bronce', 'nice-answer', lazy_gettext(u'Nice Answer'),
lazy_gettext(u'Answer was upvoted 10 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
NICE_ANSWER, 10) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
NICE_ANSWER, 10)
)
GOOD_ANSWER = Badge('silver', 'good-answer', lazy_gettext(u'Good Answer'),
lazy_gettext(u'Answer was upvoted 25 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
GOOD_ANSWER, 25) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
GOOD_ANSWER, 25)
)
GREAT_ANSWER = Badge('gold', 'great-answer', lazy_gettext(u'Great Answer'),
lazy_gettext(u'Answer was upvoted 75 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
GOOD_ANSWER, 75) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
GOOD_ANSWER, 75)
)
UNIQUE_ANSWER = Badge('platin', 'unique-answer', lazy_gettext(u'Unique Answer'),
lazy_gettext(u'Answer was upvoted 150 times'),
on_accept=lambda user, topic, post: _try_award_special_answer(post,
GOOD_ANSWER, 150) if post else None,
on_vote=lambda user, post, delta: _try_award_special_answer(post,
GOOD_ANSWER, 150)
)
REVERSAL = Badge('gold', 'reversal', lazy_gettext(u'Reversal'),
lazy_gettext(u'Provided answer of +20 score to a question of -5 score'),
on_accept=lambda user, topic, post: _try_award_reversal(post) if post else None,
on_vote=lambda user, post, delta: _try_award_reversal(post)
)
SELF_LEARNER = Badge('silver', 'self-learner', lazy_gettext(u'Self-Learner'),
lazy_gettext(u'Answered your own question with at least 4 upvotes'),
on_accept=lambda user, topic, post: _try_award_self_learner(post) if post else None,
on_vote=lambda user, post, delta: _try_award_self_learner(post)
)
#: list of all badges
badge_list = [CRITIC, EDITOR, INQUIRER, TROUBLESHOOTER, NICE_ANSWER,
GOOD_ANSWER, SELF_LEARNER, SELF_CRITIC, GREAT_ANSWER,
UNIQUE_ANSWER, REVERSAL]
#: all the badges by key
badges_by_id = dict((x.identifier, x) for x in badge_list)
# circular dependencies
from solace.models import UserBadge, UserMessage
| bsd-3-clause | -3,068,501,602,873,117,000 | 32.672811 | 88 | 0.626249 | false |
bakkerjarr/ACLSwitch | Ryu_Application/controller.py | 1 | 11192 | # Copyright 2015 Jarrod N. Bakker
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ryu and OpenFlow modules
from ryu.app.ofctl import api
from ryu.app.wsgi import WSGIApplication
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, HANDSHAKE_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.controller import dpset
# Application modules
from l2switch.l2switch import L2Switch
from aclswitch.aclswitch import ACLSwitch
__author__ = "Jarrod N. Bakker"
__status__ = "Development"
class Controller(dpset.DPSet):
"""Abstracts the details of the Ryu controller.
This class is used to provide applications with endpoints for
modifying OpenFlow switches. Multiple Ryu applications can be
instantiated from the controller class as a result.
"""
_CONTEXTS = {"wsgi": WSGIApplication}
_EVENT_OFP_SW_FEATURES = ofp_event.EventOFPSwitchFeatures.__name__
_EVENT_OFP_FLOW_REMOVED = ofp_event.EventOFPFlowRemoved.__name__
_EVENT_OFP_PACKET_IN = ofp_event.EventOFPPacketIn.__name__
_INSTANCE_NAME_CONTR = "ryu_controller_abstraction"
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self._apps = {}
self._handlers = {self._EVENT_OFP_SW_FEATURES: [],
self._EVENT_OFP_FLOW_REMOVED: [],
self._EVENT_OFP_PACKET_IN: []}
self._wsgi = kwargs['wsgi']
# Insert Ryu applications below
self._register_app(L2Switch(self))
self._register_app(ACLSwitch(self))
def get_ofpe_handlers(self):
"""Return the tuple of the OpenFlow protocol event handlers.
:return: A tuple.
"""
return self._handlers.keys()
def register_rest_wsgi(self, rest_wsgi, **kwargs):
"""Register a WSGI with Ryu.
:param rest_wsgi: The WSGI to register.
        :return: True if successful, False otherwise.
"""
all_kwargs = kwargs["kwargs"].copy()
all_kwargs[self._INSTANCE_NAME_CONTR] = self
self._wsgi.register(rest_wsgi, all_kwargs)
return True
def _register_app(self, app_obj):
"""Register a Ryu app with the controller abstraction.
:param app_obj: Reference to the app's Python module.
"""
# Check that the Ryu app can be supported by the controller
app_name = app_obj.get_app_name()
if app_obj.is_supported() is True:
self.logger.info("Registering Ryu app: %s", app_name)
self._apps[app_name] = app_obj
else:
self.logger.error("Ryu app %s cannot be supported by the "
"controller.", app_name)
return
# Record what event handlers the Ryu app is listening for
app_handlers = app_obj.get_expected_handlers()
for handler in app_handlers:
self._handlers[handler].append(app_name)
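    # A registered app is expected to provide get_app_name(), is_supported() and
    # get_expected_handlers() (a subset of the OFP event names above), plus the
    # callbacks dispatched below, e.g. switch_features(event) and packet_in(event).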
# Methods that send data to OpenFlow switches
def add_flow(self, datapath, priority, match, inst, hard_timeout,
table_id, buffer_id=None, in_port=None, msg=None,
idle_timeout=0, packet_out=True, cookie=0):
"""Reactively add a flow table entry to a switch's flow table.
:param datapath: The switch to add the flow-table entry to.
:param priority: Priority of the flow-table entry.
:param match: What packet header fields should be matched.
:param inst: The behaviour that matching flows should follow.
:param hard_timeout: When the rule should expire.
:param table_id: What flow table the flow-table entry should
be sent to.
:param buffer_id: Identifier of buffer queue if traffic is
being buffered.
:param in_port: Ingress switch port.
:param msg: OpenFlow message.
:param idle_timeout: Idle time before the flow is removed.
:param packet_out: True if this is a packet_out, False otherwise.
:param cookie: Cookie for the message.
"""
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
if buffer_id:
mod = parser.OFPFlowMod(datapath=datapath,
buffer_id=buffer_id,
hard_timeout=0,
idle_timeout=idle_timeout,
priority=priority, match=match,
flags=ofproto.OFPFF_SEND_FLOW_REM,
instructions=inst, table_id=table_id, cookie=cookie)
else:
mod = parser.OFPFlowMod(datapath=datapath,
hard_timeout=0,
idle_timeout=idle_timeout,
priority=priority, match=match,
flags=ofproto.OFPFF_SEND_FLOW_REM,
instructions=inst, table_id=table_id, cookie=cookie)
self._send_msg(datapath, mod)
if packet_out:
if msg:
out = None
if buffer_id and buffer_id != 0xffffffff:
out = parser.OFPPacketOut(
datapath=datapath,
actions=[parser.OFPActionOutput(ofproto.OFPP_TABLE)],
in_port=in_port,
buffer_id=buffer_id,
data=msg.data)
datapath.send_msg(out)
else:
out = parser.OFPPacketOut(
datapath=datapath,
actions=[parser.OFPActionOutput(ofproto.OFPP_TABLE)],
in_port=in_port,
buffer_id=0xffffffff,
data=msg.data)
datapath.send_msg(out)
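    # Example call (hypothetical values), assuming `dp` is a connected datapath:
    #   parser, ofp = dp.ofproto_parser, dp.ofproto
    #   match = parser.OFPMatch(eth_type=0x0800)
    #   inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
    #                                        [parser.OFPActionOutput(ofp.OFPP_CONTROLLER)])]
    #   controller.add_flow(dp, priority=1, match=match, inst=inst,
    #                       hard_timeout=0, table_id=0)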
def remove_flow(self, datapath, parser, table, remove_type, priority,
match, out_port, out_group, cookie=0, cookie_mask=0):
"""Remove a flow table entry from a switch.
The callee should decide of the removal type.
:param datapath: The switch to remove the flow from.
:param parser: Parser for the OpenFlow switch.
:param table: Table id to send the flow mod to.
:param remove_type: OFPFC_DELETE or OFPFC_DELETE_STRICT.
:param priority: Priority of the flow table entry.
:param match: What packet header fields should be matched.
:param out_port: Switch port to match.
:param out_group: Switch group to match.
"""
mod = parser.OFPFlowMod(datapath=datapath, table_id=table,
command=remove_type, priority=priority,
match=match, out_port=out_port,
out_group=out_group,
cookie=cookie, cookie_mask=cookie_mask)
datapath.send_msg(mod)
def packet_out(self, datapath, out):
"""Send a packet out message to a switch.
:param datapath: The switch to send the message to.
:param out: The packet out message.
"""
self._send_msg(datapath, out)
def _send_msg(self, datapath, msg):
"""Send a message to a switch such as an OFPPacketOut message.
:param datapath: The switch to send the message to.
:param msg: The message to send to switch specified in datapath.
"""
datapath.send_msg(msg)
# Misc.
def switch_get_datapath(self, datapath_id):
"""Return a datapath object given its datapath ID.
:param datapath_id: ID of a datapath i.e. switch ID.
:return: Datapath object.
"""
return api.get_datapath(self, datapath_id)
# OpenFlow switch event handlers
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def _switch_features_handler(self, event):
"""Catch and handle OpenFlow Protocol SwitchFeatures events.
:param event: The OpenFlow event.
"""
datapath_id = event.msg.datapath_id
datapath = event.msg.datapath
ofproto = event.msg.datapath.ofproto
parser = event.msg.datapath.ofproto_parser
self.logger.info("Switch \'{0}\' connected.".format(datapath_id))
mod = parser.OFPFlowMod(datapath=datapath, table_id=ofproto.OFPTT_ALL,
command=ofproto.OFPFC_DELETE, priority=0,
match=parser.OFPMatch(), out_port=ofproto.OFPP_ANY,
out_group=ofproto.OFPG_ANY,
cookie=0, cookie_mask=0,
buffer_id=0xffffffff)
datapath.send_msg(mod)
self.logger.info("Switch \'{0}\' all tables cleared.".format(datapath_id)
)
for app in self._handlers[self._EVENT_OFP_SW_FEATURES]:
self._apps[app].switch_features(event)
@set_ev_cls(ofp_event.EventOFPFlowRemoved)
def _flow_removed_handler(self, event):
"""Catch and handle OpenFlow Protocol FlowRemoved events.
:param event: The OpenFlow event.
"""
msg = event.msg
match = msg.match
self.logger.info("Flow table entry removed.\n\t Flow match: "
"{0}".format(match))
self.logger.info("Cookie: %x", msg.cookie)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, event):
"""Catch and handle OpenFlow Protocol PacketIn events.
:param event: The OpenFlow event.
"""
# If you hit this you might want to increase
# the "miss_send_length" of your switch
if event.msg.msg_len < event.msg.total_len:
self.logger.warning("Packet truncated: only {0} of {1} "
"bytes".format(event.msg.msg_len,
event.msg.total_len))
for app in self._handlers[self._EVENT_OFP_PACKET_IN]:
self._apps[app].packet_in(event)
@set_ev_cls(ofp_event.EventOFPErrorMsg, [HANDSHAKE_DISPATCHER, CONFIG_DISPATCHER, MAIN_DISPATCHER])
def error_msg_handler(self, ev):
msg = ev.msg
self.logger.warning('OFPErrorMsg received: type=0x%02x code=0x%02x '
'message=%s',
msg.type, msg.code, msg.data)
@set_ev_cls(ofp_event.EventOFPTableFeaturesStatsReply, MAIN_DISPATCHER)
def h(self, ev):
self.logger.info("TableFeaturesStats reply: {0}".format(ev.msg))
| apache-2.0 | 7,496,221,329,370,704,000 | 40.147059 | 103 | 0.586401 | false |
rajendrant/ArduinoControl | python/ArduinoControlClient/example.py | 1 | 1250 | import ArduinoControlClient
import time
b = ArduinoControlClient.UDPClient('192.168.1.106', 6666)
def ping_test(b):
for i in range(4):
latency = b.ping_test()
print 'ping_test', 'PASSED latency=%d'%(latency) if latency else 'FAILED'
time.sleep(0.5)
def blink_led(b):
pin=b.get_pin(2)
pin.mode_output()
pin.digital_write(0)
time.sleep(1)
pin.digital_write(1)
def t(b):
pin=b.get_pin(14)
pin.mode_input_pullup()
time.sleep(5)
pin.mode_output()
def test():
b = ArduinoControlClient.UDPClient('192.168.1.105', 6666)
    s = b.get_servo(14)  # fix: `s` was used without being defined; servo pin 14 assumed, as in test3()
    s.attach()
s.write(170)
time.sleep(0.5)
s.write(0)
time.sleep(0.5)
s.detach()
def test2():
b = ArduinoControlClient.UDPClient('192.168.1.105', 6666)
b.get_pin(4).digital_write(0)
b.get_pin(5).digital_write(0)
b.get_pin(12).digital_write(0)
b.get_pin(13).digital_write(0)
def test3():
b = ArduinoControlClient.UDPClient('192.168.1.105', 6666)
s=b.get_servo(14)
s.attach()
s.write(140)
time.sleep(0.5)
s.write(180)
time.sleep(0.5)
s.detach()
ping_test(b)
print b.get_this_address()
print b.get_system_uptime()
blink_led(b)
while True:
print b.get_system_uptime()
t(b)
time.sleep(3)
| gpl-3.0 | 300,559,311,757,661,950 | 20.551724 | 81 | 0.624 | false |
jcrudy/grm | grm/binomial_earth_example.py | 1 | 1250 | import numpy
from grm import GeneralizedRegressor, BinomialLossFunction, LogitLink
import scipy.stats
from pyearth.earth import Earth
numpy.seterr(all='raise')
m = 1000
n = 10
p = 10
def earth_basis(X, vars, parents, knots, signs):
p = vars.shape[0]
B = numpy.empty(shape=(m,p+1))
B[:,0] = 1.0
for i in range(p):
knot = numpy.sort(X[:,vars[i]])[knots[i]]
B[:,i+1] = B[:,parents[i]] * numpy.maximum(signs[i]*(X[:,vars[i]] - knot), 0.0)
return B
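# Each basis column i is the MARS-style hinge max(signs[i]*(X[:, vars[i]] - knot_i), 0)
# multiplied by its parent column, so the simulated linear predictor below is a sum of
# (possibly interacting) hinge functions -- exactly the structure Earth() can recover.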
numpy.random.seed(1)
X = numpy.random.normal(size=(m,n))
vars = numpy.argmax(numpy.random.multinomial(1, (1.0/float(n))*numpy.ones(n), p),1)
knots = numpy.random.randint(6, m-6, size=p)
parents = numpy.array([numpy.random.binomial(i, 1.0/float(p**2)) if i>0 else 0 for i in range(p)])
signs = numpy.random.binomial(1, .5, size=p)
B = earth_basis(X, vars, parents, knots, signs)
beta = numpy.random.uniform(-2.0,2.0,size=p+1)
eta = numpy.dot(B, beta)
model = GeneralizedRegressor(base_regressor=Earth(),
loss_function=BinomialLossFunction(LogitLink()))
n = numpy.random.randint(1, 10, size=m)
mu = 1.0 / (1.0 + numpy.exp(-eta))
y = numpy.random.binomial(n, mu)
model.fit(X, y, n=n)
assert scipy.stats.pearsonr(model.predict(X), eta)[0] > .99  # index [0]: compare the correlation coefficient, not the (r, p) tuple
| gpl-3.0 | 93,117,187,002,019,150 | 34.714286 | 98 | 0.6512 | false |
pcolmant/repanier | repanier/xlsx/xlsx_stock.py | 1 | 24914 | import repanier.apps
from django.db import transaction
from django.utils.translation import ugettext_lazy as _
from repanier.const import *
from repanier.models.offeritem import OfferItemReadOnly
from repanier.models.product import Product
from repanier.packages.openpyxl import load_workbook
from repanier.packages.openpyxl.style import Fill
from repanier.packages.openpyxl.styles import Color
from repanier.tools import update_offer_item, next_row
from repanier.xlsx.export_tools import *
from repanier.xlsx.import_tools import get_row, get_header
def export_permanence_stock(
permanence, deliveries_id=(), customer_price=False, wb=None, ws_customer_title=None
):
if wb is not None:
yellowFill = Fill()
yellowFill.start_color.index = "FFEEEE11"
yellowFill.end_color.index = "FFEEEE11"
yellowFill.fill_type = Fill.FILL_SOLID
header = [
(_("Id"), 5),
(_("OfferItem"), 5),
(_("Reference"), 20),
(_("Product"), 60),
(
_("Customer unit price")
if customer_price
else _("Producer unit price"),
10,
),
(_("Deposit"), 10),
(_("Asked"), 10),
(_("Quantity ordered"), 10),
(_("Initial stock"), 10),
(repanier.apps.REPANIER_SETTINGS_CURRENCY_DISPLAY, 15),
(_("Stock used"), 10),
(_("Additional"), 10),
(_("Remaining stock"), 10),
(repanier.apps.REPANIER_SETTINGS_CURRENCY_DISPLAY, 15),
]
offer_items = (
OfferItemReadOnly.objects.filter(
permanence_id=permanence.id,
manage_production=True,
)
.order_by("producer", "long_name_v2", "order_average_weight")
.select_related("producer", "department_for_customer")
.iterator()
)
offer_item = next_row(offer_items)
if offer_item is not None:
# Check if there are deliveries_ws
deliveries_ws = []
if len(deliveries_id) > 0:
for delivery_cpt, delivery_id in enumerate(deliveries_id):
ws_sc_name = format_worksheet_title(
"{}-{}".format(delivery_cpt, ws_customer_title)
)
for sheet in wb.worksheets:
if ws_sc_name == sheet.title:
deliveries_ws.append(ws_sc_name)
break
else:
ws_sc_name = format_worksheet_title(ws_customer_title)
for sheet in wb.worksheets:
if ws_sc_name == sheet.title:
deliveries_ws.append(ws_sc_name)
break
wb, ws = new_landscape_a4_sheet(wb, _("Stock check"), permanence, header)
formula_main_total_a = []
formula_main_total_b = []
show_column_reference = False
show_column_qty_ordered = False
show_column_add2stock = False
row_num = 1
while offer_item is not None:
producer_save = offer_item.producer
row_start_producer = row_num + 1
c = ws.cell(row=row_num, column=2)
c.value = "{}".format(producer_save.short_profile_name)
c.style.font.bold = True
c.style.font.italic = True
while (
offer_item is not None
and producer_save.id == offer_item.producer_id
):
department_for_customer_save__id = (
offer_item.department_for_customer_id
)
department_for_customer_save__short_name = (
offer_item.department_for_customer.short_name_v2
if offer_item.department_for_customer is not None
else None
)
while (
offer_item is not None
and producer_save.id == offer_item.producer_id
and department_for_customer_save__id
== offer_item.department_for_customer_id
):
if len(offer_item.reference) < 36:
if offer_item.reference.isdigit():
# Avoid display of exponent by Excel
offer_item_reference = "[{}]".format(
offer_item.reference
)
else:
offer_item_reference = offer_item.reference
show_column_reference = True
else:
offer_item_reference = EMPTY_STRING
if offer_item.order_unit < PRODUCT_ORDER_UNIT_DEPOSIT:
asked = offer_item.quantity_invoiced
stock = offer_item.stock
c = ws.cell(row=row_num, column=0)
c.value = offer_item.producer_id
c = ws.cell(row=row_num, column=1)
c.value = offer_item.id
c = ws.cell(row=row_num, column=2)
c.value = "{}".format(offer_item_reference)
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=3)
if department_for_customer_save__short_name is not None:
c.value = "{} - {}".format(
offer_item.get_long_name_with_customer_price(),
department_for_customer_save__short_name,
)
else:
c.value = "{}".format(offer_item.get_long_name_with_customer_price())
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.alignment.wrap_text = True
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=4)
unit_price = (
offer_item.customer_unit_price
if customer_price
else offer_item.producer_unit_price
)
c.value = unit_price.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=5)
c.value = offer_item.unit_deposit.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=6)
if ws_customer_title is None:
c.value = asked
else:
if len(deliveries_ws) > 0:
sum_value = "+".join(
"SUMIF('{}'!B:B,B{},'{}'!F:F)".format(
delivery_ws, row_num + 1, delivery_ws
)
for delivery_ws in deliveries_ws
)
c.value = "={}".format(sum_value)
else:
c.value = DECIMAL_ZERO
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=7)
c.value = "=G{}-K{}+L{}".format(
row_num + 1, row_num + 1, row_num + 1
)
if not show_column_qty_ordered:
show_column_qty_ordered = (
asked - min(asked, stock)
) > 0
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=8)
c.value = stock
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c.style.font.color = Color(Color.BLUE)
ws.conditional_formatting.addCellIs(
get_column_letter(9) + str(row_num + 1),
"notEqual",
[str(stock)],
True,
wb,
None,
None,
yellowFill,
)
c = ws.cell(row=row_num, column=9)
c.value = "=ROUND(I{}*(E{}+F{}),2)".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=10)
c.value = "=MIN(G{},I{})".format(row_num + 1, row_num + 1)
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=12)
c.value = "=I{}-K{}+L{}".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = "#,##0.???"
c.style.borders.bottom.border_style = Border.BORDER_THIN
c.style.font.bold = True
c = ws.cell(row=row_num, column=13)
c.value = "=ROUND(M{}*(E{}+F{}),2)".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
row_num += 1
offer_item = next_row(offer_items)
row_num += 1
c = ws.cell(row=row_num, column=3)
c.value = "{} {}".format(
_("Total price"), producer_save.short_profile_name
)
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.font.bold = True
c.style.alignment.horizontal = c.style.alignment.HORIZONTAL_RIGHT
c = ws.cell(row=row_num, column=9)
formula = "SUM(J{}:J{})".format(row_start_producer, row_num)
c.value = "=" + formula
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
formula_main_total_a.append(formula)
c = ws.cell(row=row_num, column=13)
formula = "SUM(N{}:N{})".format(row_start_producer, row_num)
c.value = "=" + formula
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
formula_main_total_b.append(formula)
if offer_items is not None:
# Display a separator line between producers
row_num += 1
for col_num in range(16):
c = ws.cell(row=row_num, column=col_num)
c.style.borders.bottom.border_style = Border.BORDER_MEDIUMDASHED
row_num += 2
c = ws.cell(row=row_num, column=3)
c.value = "{}".format(_("Total price"))
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.font.bold = True
c.style.alignment.horizontal = c.style.alignment.HORIZONTAL_RIGHT
c = ws.cell(row=row_num, column=9)
c.value = "=" + "+".join(formula_main_total_a)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
c = ws.cell(row=row_num, column=13)
c.value = "=" + "+".join(formula_main_total_b)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
row_num += 1
for col_num in range(16):
c = ws.cell(row=row_num, column=col_num)
c.style.borders.bottom.border_style = Border.BORDER_MEDIUMDASHED
ws.column_dimensions[get_column_letter(1)].visible = False
ws.column_dimensions[get_column_letter(2)].visible = False
ws.column_dimensions[get_column_letter(11)].visible = False
if not show_column_reference:
ws.column_dimensions[get_column_letter(3)].visible = False
if not show_column_qty_ordered:
ws.column_dimensions[get_column_letter(8)].visible = False
if not show_column_add2stock:
ws.column_dimensions[get_column_letter(12)].visible = False
return wb
# @transaction.atomic
# def import_stock_sheet(worksheet, permanence=None):
# error = False
# error_msg = None
# if permanence.status < PERMANENCE_DONE:
# header = get_header(worksheet)
# if header:
# row_num = 1
# row = get_row(worksheet, header, row_num)
# while row and not error:
# try:
# # with transaction.atomic():
# stock = None if row[_('Initial stock')] is None else Decimal(row[_('Initial stock')]).quantize(THREE_DECIMALS)
# if stock is not None:
# producer_id = None if row[_('Id')] is None else Decimal(row[_('Id')])
# offer_item_id = None if row[_('OfferItem')] is None else Decimal(row[_('OfferItem')])
# offer_item = OfferItem.objects.filter(
# id=offer_item_id,
# permanence_id=permanence.id,
# producer_id=producer_id
# ).order_by('?').first()
# if offer_item is not None \
# and (offer_item.stock != stock):
# offer_item.stock = stock
# offer_item.save()
# Product.objects.filter(
# id=offer_item.product_id,
# producer_id=producer_id
# ).update(stock=stock)
# row_num += 1
# row = get_row(worksheet, header, row_num)
# except KeyError, e:
# # Missing field
# error = True
# error_msg = _("Row %(row_num)d : A required column is missing.") % {'row_num': row_num + 1}
# except Exception, e:
# error = True
# error_msg = _("Row %(row_num)d : %(error_msg)s.") % {'row_num': row_num + 1, 'error_msg': str(e)}
# else:
# error = True
# error_msg = _("The status of this permanence prohibit you to update the stock.")
# return error, error_msg
def export_producer_stock(producers, customer_price=False, wb=None):
yellowFill = Fill()
yellowFill.start_color.index = "FFEEEE11"
yellowFill.end_color.index = "FFEEEE11"
yellowFill.fill_type = Fill.FILL_SOLID
header = [
(_("Id"), 5),
(_("Producer"), 60),
(_("Reference"), 20),
(_("Product"), 60),
(_("Customer unit price") if customer_price else _("Producer unit price"), 10),
(_("Deposit"), 10),
(_("Maximum quantity"), 10),
(repanier.apps.REPANIER_SETTINGS_CURRENCY_DISPLAY, 15),
]
producers = producers.iterator()
producer = next_row(producers)
wb, ws = new_landscape_a4_sheet(wb, _("Maximum quantity"), _("Maximum quantity"), header)
show_column_reference = False
row_num = 1
while producer is not None:
products = (
Product.objects.filter(
producer_id=producer.id,
is_active=True,
)
.order_by("long_name_v2", "order_average_weight")
.select_related("producer", "department_for_customer")
.iterator()
)
product = next_row(products)
while product is not None:
if product.order_unit < PRODUCT_ORDER_UNIT_DEPOSIT:
c = ws.cell(row=row_num, column=0)
c.value = product.id
c = ws.cell(row=row_num, column=1)
c.value = "{}".format(product.producer)
if len(product.reference) < 36:
if product.reference.isdigit():
# Avoid display of exponent by Excel
product_reference = "[{}]".format(product.reference)
else:
product_reference = product.reference
show_column_reference = True
else:
product_reference = EMPTY_STRING
c = ws.cell(row=row_num, column=2)
c.value = "{}".format(product_reference)
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=3)
if product.department_for_customer is not None:
c.value = "{} - {}".format(
product.department_for_customer.short_name_v2,
product.get_long_name_with_customer_price(),
)
else:
c.value = product.get_long_name_with_customer_price()
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.alignment.wrap_text = True
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=4)
unit_price = (
product.customer_unit_price
if customer_price
else product.producer_unit_price
)
c.value = unit_price.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=5)
c.value = product.unit_deposit.amount
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=6)
c.value = product.stock
c.style.number_format.format_code = (
'_ * #,##0.00_ ;_ * -#,##0.00_ ;_ * "-"??_ ;_ @_ '
)
c.style.font.color = Color(Color.BLUE)
c.style.borders.bottom.border_style = Border.BORDER_THIN
c = ws.cell(row=row_num, column=7)
c.value = "=ROUND((E{}+F{})*G{},2)".format(
row_num + 1, row_num + 1, row_num + 1
)
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
ws.conditional_formatting.addCellIs(
get_column_letter(8) + str(row_num + 1),
"notEqual",
[
str(
(
(unit_price.amount + product.unit_deposit.amount)
* product.stock
).quantize(TWO_DECIMALS)
)
],
True,
wb,
None,
None,
yellowFill,
)
c.style.borders.bottom.border_style = Border.BORDER_THIN
row_num += 1
product = next_row(products)
row_num += 1
c = ws.cell(row=row_num, column=4)
c.value = "{}".format(_("Total"))
c.style.number_format.format_code = NumberFormat.FORMAT_TEXT
c.style.font.bold = True
c.style.alignment.horizontal = c.style.alignment.HORIZONTAL_RIGHT
c = ws.cell(row=row_num, column=7)
formula = "SUM(H{}:H{})".format(2, row_num)
c.value = "=" + formula
c.style.number_format.format_code = (
repanier.apps.REPANIER_SETTINGS_CURRENCY_XLSX
)
c.style.font.bold = True
ws.column_dimensions[get_column_letter(1)].visible = False
if not show_column_reference:
ws.column_dimensions[get_column_letter(3)].visible = False
producer = next_row(producers)
return wb
@transaction.atomic
def import_producer_stock(worksheet, producers=None):
    # `producers` is accepted because handle_uploaded_stock() passes it as a keyword
    # argument; rows are matched purely by product id below, so it is not used here.
error = False
error_msg = None
header = get_header(worksheet)
if header:
row_num = 1
row = get_row(worksheet, header, row_num)
while row and not error:
try:
# with transaction.atomic():
product_id = None if row[_("Id")] is None else Decimal(row[_("Id")])
if product_id is not None:
stock = (
DECIMAL_ZERO
if row[_("Maximum quantity")] is None
else Decimal(row[_("Maximum quantity")]).quantize(THREE_DECIMALS)
)
stock = stock if stock >= DECIMAL_ZERO else DECIMAL_ZERO
Product.objects.filter(id=product_id).update(stock=stock)
update_offer_item(product_id=product_id)
row_num += 1
row = get_row(worksheet, header, row_num)
except KeyError as e:
# Missing field
error = True
error_msg = _("Row %(row_num)d : A required column is missing.") % {
"row_num": row_num + 1
}
except Exception as e:
error = True
error_msg = _("Row %(row_num)d : %(error_msg)s.") % {
"row_num": row_num + 1,
"error_msg": str(e),
}
return error, error_msg
def handle_uploaded_stock(request, producers, file_to_import, *args):
error = False
error_msg = None
wb = load_workbook(file_to_import)
if wb is not None:
ws = wb.get_sheet_by_name(format_worksheet_title(_("Maximum quantity")))
if ws is not None:
error, error_msg = import_producer_stock(ws, producers=producers)
if error:
error_msg = format_worksheet_title(_("Maximum quantity")) + " > " + error_msg
return error, error_msg
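# Round trip (as implemented above): export_producer_stock() writes the
# "Maximum quantity" worksheet with one row per product id; handle_uploaded_stock()
# reads a sheet of the same name back and lets import_producer_stock() update
# Product.stock and the related offer items row by row.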
| gpl-3.0 | -446,838,558,719,094,400 | 46.911538 | 132 | 0.457333 | false |
1and1/confluencer | src/confluencer/tools/content.py | 1 | 10540 | # -*- coding: utf-8 -*-
# pylint: disable=bad-continuation
""" Tools to discover and modify content.
"""
# Copyright © 2015 1&1 Group <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, unicode_literals, print_function
import re
import difflib
try:
import html.entities as htmlentitydefs
except ImportError: # Python 2
import htmlentitydefs # pylint: disable=import-error,wrong-import-order
from xml.sax.saxutils import quoteattr # pylint: disable=wrong-import-order
import arrow
from munch import munchify as bunchify
from lxml.etree import fromstring, HTMLParser, XMLParser, XMLSyntaxError # pylint: disable=no-name-in-module
from rudiments.reamed import click
from .._compat import BytesIO
# Mapping of CLI content format names to Confluence API names
CLI_CONTENT_FORMATS = dict(view='view', editor='editor', storage='storage', export='export_view', anon='anonymous_export_view')
# Simple replacement rules, order is important!
TIDY_REGEX_RULES = ((_name, re.compile(_rule), _subst) for _name, _rule, _subst in [
("FosWiki: Remove CSS class from section title",
r'<(h[1-5]) class="[^"]*">', r'<\1>'),
("FosWiki: Remove static section numbering",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)[0-9.]+?\s*(?=<span class="tok"> </span>)', r'\1'),
("FosWiki: Empty anchor in headers",
r'(?<=<h.>)<a></a>\s* +', ''),
("FosWiki: 'tok' spans in front of headers",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="tok"> </span>', r'\1'),
("FosWiki: Section edit icons at the end of headers",
r'\s*<a(?: class="[^"]*")? href="[^"]+"(?: title="[^"]*")?>'
r'<ac:image [^>]+><ri:url ri:value="[^"]+/EditChapterPlugin/pencil.png" ?/>'
r'</ac:image></a>(?=</span></h)', ''),
("FosWiki: 'Edit Chapter Plugin' spans (old)",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="ecpHeading">'
r'\s*([^<]+)(?:<br\s*/>)</span>\s*(?=</h.>)', r'\1\2'),
("FosWiki: 'Edit Chapter Plugin' spans (new)",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s*<span class="ecpHeading">'
r'\s*([^<]+)(?:<br\s*/>)<a class="ecpEdit".+?</a></span>\s*(?=</h.>)', r'\1\2'),
("FosWiki: Residual leading whitespace in headers",
r'(?<=<h.>)(<a name="[^"]+?"></a>|)\s* +', r'\1'),
("FosWiki: Replace TOC div with macro",
r'(<a name="foswikiTOC" ?/>)?<div class="foswikiToc">.*?</div>', '''
<ac:structured-macro ac:name="panel" ac:schema-version="1">
<ac:parameter ac:name="title">Contents</ac:parameter>
<ac:rich-text-body>
<p>
<ac:structured-macro ac:name="toc" ac:schema-version="1"/>
</p>
</ac:rich-text-body>
</ac:structured-macro>'''),
("FosWiki: Replace TOC in a Twisty with Expand+TOC macro",
r'<div class="twistyPlugin">.+?<big><strong>Table of Contents</strong></big></span></a></span></div>', '''
<ac:structured-macro ac:name="expand" ac:schema-version="1">
<ac:parameter ac:name="title">Table of Contents</ac:parameter>
<ac:rich-text-body>
<p>
<ac:structured-macro ac:name="toc" ac:schema-version="1"/>
</p>
</ac:rich-text-body>
</ac:structured-macro>'''),
("FosWiki: Named anchors (#WikiWords)",
r'(<a name=[^>]+></a><a href=")http[^#]+(#[^"]+" style="[^"]+)(" title="[^"]+"><big>[^<]+</big></a>)',
r'\1\2; float: right;\3'),
("FosWiki: Wrap HTML '<pre>' into 'panel' macro",
r'(?<!<ac:rich-text-body>)(<pre(?: class="[^"]*")?>)',
r'<ac:structured-macro ac:name="panel" ac:schema-version="1">'
r'<ac:parameter ac:name="bgColor">#eeeeee</ac:parameter>'
r'<ac:rich-text-body>'
r'\1'),
("FosWiki: Wrap HTML '</pre>' into 'panel' macro",
r'</pre>(?!</ac:rich-text-body>)', '</pre></ac:rich-text-body></ac:structured-macro>'),
("FosWiki: Embedded CSS - custom list indent",
r'<ul style="margin-left: [.0-9]+em;">', '<ul>'),
("FosWiki: Empty paragraphs",
r'<p> </p>', r''),
("FosWiki: Obsolete CSS classes",
r'(<(?:div|p|span|h[1-5])) class="(foswikiTopic)"', r'\1'),
])
def _apply_tidy_regex_rules(body, log=None):
"""Return tidied body after applying regex rules."""
body = body.replace(u'\u00A0', ' ')
for name, rule, subst in TIDY_REGEX_RULES:
length = len(body)
try:
body, count = rule.subn(subst, body)
except re.error as cause:
raise click.LoggedFailure('Error "{}" in "{}" replacement: {} => {}'.format(
cause, name, rule.pattern, subst,
))
if count and log:
length -= len(body)
log.info('Replaced %d matche(s) of "%s" (%d chars %s)',
count, name, abs(length), "added" if length < 0 else "removed")
return body
def _make_etree(body, content_format='storage', attrs=None):
"""Create an ElementTree from a page's body."""
attrs = (attrs or {}).copy()
attrs.update({
'xmlns:ac': 'http://www.atlassian.com/schema/confluence/4/ac/',
'xmlns:ri': 'http://www.atlassian.com/schema/confluence/4/ri/',
})
xml_body = re.sub(r'&(?!(amp|lt|gt|quot|apos))([a-zA-Z0-9]+);',
lambda cref: '&#{};'.format(htmlentitydefs.name2codepoint[cref.group(2)]), body)
#print(body.encode('utf8'))
xmldoc = u'<{root} {attrs}>{body}</{root}>'.format(
root=content_format,
attrs=' '.join('{}={}'.format(k, quoteattr(v)) for k, v in sorted(attrs.items())),
body=xml_body)
parser = (XMLParser if content_format == 'storage' else HTMLParser)(remove_blank_text=True)
try:
return fromstring(xmldoc, parser)
except XMLSyntaxError as cause:
raise click.LoggedFailure('{}\n{}'.format(
cause, '\n'.join(['{:7d} {}'.format(i+1, k) for i, k in enumerate(xmldoc.splitlines())])
))
def _pretty_xml(body, content_format='storage', attrs=None):
"""Pretty-print the given page body and return a list of lines."""
root = _make_etree(body, content_format=content_format, attrs=attrs)
prettyfied = BytesIO()
root.getroottree().write(prettyfied, encoding='utf8', pretty_print=True, xml_declaration=False)
return prettyfied.getvalue().decode('utf8').splitlines()
class ConfluencePage(object):
"""A page that holds enough state so it can be modified."""
DIFF_COLS = {
'+': 'green',
'-': 'red',
'@': 'yellow',
}
def __init__(self, cf, url, markup='storage', expand=None):
""" Load the given page.
"""
if expand and isinstance(expand, str):
expand = expand.split(',')
expand = set(expand or []) | {'space', 'version', 'body.' + markup}
self.cf = cf
self.url = url
self.markup = markup
self._data = cf.get(self.url, expand=','.join(expand))
self.body = self._data.body[self.markup].value
@property
def page_id(self):
"""The numeric page ID."""
return self._data.id
@property
def space_key(self):
"""The space this page belongs to."""
return self._data.space.key
@property
def title(self):
"""The page's title."""
return self._data.title
@property
def json(self):
"""The full JSON response data."""
return self._data
@property
def version(self):
"""The page's version number in history."""
return self._data.version.number
def etree(self):
"""Parse the page's body into an ElementTree."""
attrs = {
'id': 'page-' + self._data.id,
'href': self._data._links.base + (self._data._links.tinyui or ''),
'status': self._data.status,
'title': self._data.title,
}
return _make_etree(self.body, content_format=self.markup, attrs=attrs)
def tidy(self, log=None):
"""Return a tidy copy of this page's body."""
assert self.markup == 'storage', "Can only clean up pages in storage format!"
return _apply_tidy_regex_rules(self.body, log=log)
def update(self, body=None, minor=True):
"""Update a page's content."""
assert self.markup == 'storage', "Cannot update non-storage page markup!"
if body is None:
body = self.body
if body == self._data.body[self.markup].value:
return # No changes
data = {
#'id': self._data.id,
'type': 'page',
'space': {'key': self.space_key},
'title': self.title,
'version': dict(number=self.version + 1, minorEdit=minor),
'body': {
'storage': {
'value': body,
'representation': self.markup,
}
},
'expand': 'version',
}
response = self.cf.session.put(self._data._links.self, json=data)
response.raise_for_status()
##page = response.json(); print(page)
result = bunchify(response.json())
self._data.body[self.markup].value = body
self._data.version = result.version
return result
def dump_diff(self, changed):
"""Dump a diff to terminal between changed and stored body."""
if self.body == changed:
click.secho('=== No changes to "{0}"'.format(self.title), fg='green')
return
diff = difflib.unified_diff(
_pretty_xml(self.body, self.markup),
_pretty_xml(changed, self.markup),
u'v. {0} of "{1}"'.format(self.version, self.title),
u'v. {0} of "{1}"'.format(self.version + 1, self.title),
arrow.get(self._data.version.when).replace(microsecond=0).isoformat(sep=' '),
arrow.now().replace(microsecond=0).isoformat(sep=' '),
lineterm='', n=2)
for line in diff:
click.secho(line, fg=self.DIFF_COLS.get(line and line[0], None))
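# --- Illustrative sketch, not part of the original module ---
# A hedged example of the intended round trip with the class above: fetch a
# page, tidy it, show the diff, then push a new minor version. The `cf` API
# session object and the page URL are assumptions; the real client object is
# created elsewhere in the tool.
def _example_tidy_page(cf, url):
    page = ConfluencePage(cf, url, markup='storage')
    cleaned = page.tidy()
    page.dump_diff(cleaned)  # prints a coloured unified diff
    return page.update(body=cleaned, minor=True)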
| apache-2.0 | -8,970,102,084,358,917,000 | 39.69112 | 127 | 0.567226 | false |
kaushik94/sympy | sympy/utilities/autowrap.py | 3 | 40995 | """Module for compiling codegen output, and wrap the binary for use in
python.
.. note:: To use the autowrap module it must first be imported
>>> from sympy.utilities.autowrap import autowrap
This module provides a common interface for different external backends, such
as f2py, fwrap, Cython, SWIG(?) etc. (Currently only f2py and Cython are
implemented) The goal is to provide access to compiled binaries of acceptable
performance with a one-button user interface, i.e.
>>> from sympy.abc import x,y
>>> expr = ((x - y)**(25)).expand()
>>> binary_callable = autowrap(expr)
>>> binary_callable(1, 2)
-1.0
The callable returned from autowrap() is a binary python function, not a
SymPy object. If it is desired to use the compiled function in symbolic
expressions, it is better to use binary_function() which returns a SymPy
Function object. The binary callable is attached as the _imp_ attribute and
invoked when a numerical evaluation is requested with evalf(), or with
lambdify().
>>> from sympy.utilities.autowrap import binary_function
>>> f = binary_function('f', expr)
>>> 2*f(x, y) + y
y + 2*f(x, y)
>>> (2*f(x, y) + y).evalf(2, subs={x: 1, y:2})
0.e-110
The idea is that a SymPy user will primarily be interested in working with
mathematical expressions, and should not have to learn details about wrapping
tools in order to evaluate expressions numerically, even if they are
computationally expensive.
When is this useful?
1) For computations on large arrays, Python iterations may be too slow,
and depending on the mathematical expression, it may be difficult to
exploit the advanced index operations provided by NumPy.
2) For *really* long expressions that will be called repeatedly, the
compiled binary should be significantly faster than SymPy's .evalf()
3) If you are generating code with the codegen utility in order to use
it in another project, the automatic python wrappers let you test the
binaries immediately from within SymPy.
4) To create customized ufuncs for use with numpy arrays.
See *ufuncify*.
When is this module NOT the best approach?
1) If you are really concerned about speed or memory optimizations,
you will probably get better results by working directly with the
wrapper tools and the low level code. However, the files generated
by this utility may provide a useful starting point and reference
code. Temporary files will be left intact if you supply the keyword
tempdir="path/to/files/".
2) If the array computation can be handled easily by numpy, and you
don't need the binaries for another project.
"""
from __future__ import print_function, division
import sys
import os
import shutil
import tempfile
from subprocess import STDOUT, CalledProcessError, check_output
from string import Template
from warnings import warn
from sympy.core.cache import cacheit
from sympy.core.compatibility import range, iterable
from sympy.core.function import Lambda
from sympy.core.relational import Eq
from sympy.core.symbol import Dummy, Symbol
from sympy.tensor.indexed import Idx, IndexedBase
from sympy.utilities.codegen import (make_routine, get_code_generator,
OutputArgument, InOutArgument,
InputArgument, CodeGenArgumentListError,
Result, ResultBase, C99CodeGen)
from sympy.utilities.lambdify import implemented_function
from sympy.utilities.decorator import doctest_depends_on
_doctest_depends_on = {'exe': ('f2py', 'gfortran', 'gcc'),
'modules': ('numpy',)}
class CodeWrapError(Exception):
pass
class CodeWrapper(object):
"""Base Class for code wrappers"""
_filename = "wrapped_code"
_module_basename = "wrapper_module"
_module_counter = 0
@property
def filename(self):
return "%s_%s" % (self._filename, CodeWrapper._module_counter)
@property
def module_name(self):
return "%s_%s" % (self._module_basename, CodeWrapper._module_counter)
def __init__(self, generator, filepath=None, flags=[], verbose=False):
"""
generator -- the code generator to use
"""
self.generator = generator
self.filepath = filepath
self.flags = flags
self.quiet = not verbose
@property
def include_header(self):
return bool(self.filepath)
@property
def include_empty(self):
return bool(self.filepath)
def _generate_code(self, main_routine, routines):
routines.append(main_routine)
self.generator.write(
routines, self.filename, True, self.include_header,
self.include_empty)
def wrap_code(self, routine, helpers=None):
helpers = helpers or []
if self.filepath:
workdir = os.path.abspath(self.filepath)
else:
workdir = tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routine, helpers)
self._prepare_files(routine)
self._process_files(routine)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, routine.name)
def _process_files(self, routine):
command = self.command
command.extend(self.flags)
try:
retoutput = check_output(command, stderr=STDOUT)
except CalledProcessError as e:
raise CodeWrapError(
"Error while executing command: %s. Command output is:\n%s" % (
" ".join(command), e.output.decode('utf-8')))
if not self.quiet:
print(retoutput)
class DummyWrapper(CodeWrapper):
"""Class used for testing independent of backends """
template = """# dummy module for testing of SymPy
def %(name)s():
return "%(expr)s"
%(name)s.args = "%(args)s"
%(name)s.returns = "%(retvals)s"
"""
def _prepare_files(self, routine):
return
def _generate_code(self, routine, helpers):
with open('%s.py' % self.module_name, 'w') as f:
printed = ", ".join(
[str(res.expr) for res in routine.result_variables])
# convert OutputArguments to return value like f2py
args = filter(lambda x: not isinstance(
x, OutputArgument), routine.arguments)
retvals = []
for val in routine.result_variables:
if isinstance(val, Result):
retvals.append('nameless')
else:
retvals.append(val.result_var)
print(DummyWrapper.template % {
'name': routine.name,
'expr': printed,
'args': ", ".join([str(a.name) for a in args]),
'retvals': ", ".join([str(val) for val in retvals])
}, end="", file=f)
def _process_files(self, routine):
return
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
class CythonCodeWrapper(CodeWrapper):
"""Wrapper that uses Cython"""
setup_template = """\
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
cy_opts = {cythonize_options}
{np_import}
ext_mods = [Extension(
{ext_args},
include_dirs={include_dirs},
library_dirs={library_dirs},
libraries={libraries},
extra_compile_args={extra_compile_args},
extra_link_args={extra_link_args}
)]
setup(ext_modules=cythonize(ext_mods, **cy_opts))
"""
pyx_imports = (
"import numpy as np\n"
"cimport numpy as np\n\n")
pyx_header = (
"cdef extern from '{header_file}.h':\n"
" {prototype}\n\n")
pyx_func = (
"def {name}_c({arg_string}):\n"
"\n"
"{declarations}"
"{body}")
std_compile_flag = '-std=c99'
def __init__(self, *args, **kwargs):
"""Instantiates a Cython code wrapper.
The following optional parameters get passed to ``distutils.Extension``
for building the Python extension module. Read its documentation to
learn more.
Parameters
==========
include_dirs : [list of strings]
A list of directories to search for C/C++ header files (in Unix
form for portability).
library_dirs : [list of strings]
A list of directories to search for C/C++ libraries at link time.
libraries : [list of strings]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [list of strings]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could be
anything. Note that the attribute ``std_compile_flag`` will be
appended to this list.
extra_link_args : [list of strings]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create
a new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
cythonize_options : [dictionary]
Keyword arguments passed on to cythonize.
"""
self._include_dirs = kwargs.pop('include_dirs', [])
self._library_dirs = kwargs.pop('library_dirs', [])
self._libraries = kwargs.pop('libraries', [])
self._extra_compile_args = kwargs.pop('extra_compile_args', [])
self._extra_compile_args.append(self.std_compile_flag)
self._extra_link_args = kwargs.pop('extra_link_args', [])
self._cythonize_options = kwargs.pop('cythonize_options', {})
self._need_numpy = False
super(CythonCodeWrapper, self).__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def _prepare_files(self, routine, build_dir=os.curdir):
# NOTE : build_dir is used for testing purposes.
pyxfilename = self.module_name + '.pyx'
codefilename = "%s.%s" % (self.filename, self.generator.code_extension)
# pyx
with open(os.path.join(build_dir, pyxfilename), 'w') as f:
self.dump_pyx([routine], f, self.filename)
# setup.py
ext_args = [repr(self.module_name), repr([pyxfilename, codefilename])]
if self._need_numpy:
np_import = 'import numpy as np\n'
self._include_dirs.append('np.get_include()')
else:
np_import = ''
with open(os.path.join(build_dir, 'setup.py'), 'w') as f:
includes = str(self._include_dirs).replace("'np.get_include()'",
'np.get_include()')
f.write(self.setup_template.format(
ext_args=", ".join(ext_args),
np_import=np_import,
include_dirs=includes,
library_dirs=self._library_dirs,
libraries=self._libraries,
extra_compile_args=self._extra_compile_args,
extra_link_args=self._extra_link_args,
cythonize_options=self._cythonize_options
))
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name + '_c')
def dump_pyx(self, routines, f, prefix):
"""Write a Cython file with python wrappers
This file contains all the definitions of the routines in c code and
refers to the header file.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to refer to the proper header file.
Only the basename of the prefix is used.
"""
headers = []
functions = []
for routine in routines:
prototype = self.generator.get_prototype(routine)
# C Function Header Import
headers.append(self.pyx_header.format(header_file=prefix,
prototype=prototype))
# Partition the C function arguments into categories
py_rets, py_args, py_loc, py_inf = self._partition_args(routine.arguments)
# Function prototype
name = routine.name
arg_string = ", ".join(self._prototype_arg(arg) for arg in py_args)
# Local Declarations
local_decs = []
for arg, val in py_inf.items():
proto = self._prototype_arg(arg)
mat, ind = [self._string_var(v) for v in val]
local_decs.append(" cdef {0} = {1}.shape[{2}]".format(proto, mat, ind))
local_decs.extend([" cdef {0}".format(self._declare_arg(a)) for a in py_loc])
declarations = "\n".join(local_decs)
if declarations:
declarations = declarations + "\n"
# Function Body
args_c = ", ".join([self._call_arg(a) for a in routine.arguments])
rets = ", ".join([self._string_var(r.name) for r in py_rets])
if routine.results:
body = ' return %s(%s)' % (routine.name, args_c)
if rets:
body = body + ', ' + rets
else:
body = ' %s(%s)\n' % (routine.name, args_c)
body = body + ' return ' + rets
functions.append(self.pyx_func.format(name=name, arg_string=arg_string,
declarations=declarations, body=body))
# Write text to file
if self._need_numpy:
# Only import numpy if required
f.write(self.pyx_imports)
f.write('\n'.join(headers))
f.write('\n'.join(functions))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_args = []
py_returns = []
py_locals = []
py_inferred = {}
for arg in args:
if isinstance(arg, OutputArgument):
py_returns.append(arg)
py_locals.append(arg)
elif isinstance(arg, InOutArgument):
py_returns.append(arg)
py_args.append(arg)
else:
py_args.append(arg)
# Find arguments that are array dimensions. These can be inferred
# locally in the Cython code.
if isinstance(arg, (InputArgument, InOutArgument)) and arg.dimensions:
dims = [d[1] + 1 for d in arg.dimensions]
sym_dims = [(i, d) for (i, d) in enumerate(dims) if
isinstance(d, Symbol)]
for (i, d) in sym_dims:
py_inferred[d] = (arg.name, i)
for arg in args:
if arg.name in py_inferred:
py_inferred[arg] = py_inferred.pop(arg.name)
# Filter inferred arguments from py_args
py_args = [a for a in py_args if a not in py_inferred]
return py_returns, py_args, py_locals, py_inferred
def _prototype_arg(self, arg):
mat_dec = "np.ndarray[{mtype}, ndim={ndim}] {name}"
np_types = {'double': 'np.double_t',
'int': 'np.int_t'}
t = arg.get_datatype('c')
if arg.dimensions:
self._need_numpy = True
ndim = len(arg.dimensions)
mtype = np_types[t]
return mat_dec.format(mtype=mtype, ndim=ndim, name=self._string_var(arg.name))
else:
return "%s %s" % (t, self._string_var(arg.name))
def _declare_arg(self, arg):
proto = self._prototype_arg(arg)
if arg.dimensions:
shape = '(' + ','.join(self._string_var(i[1] + 1) for i in arg.dimensions) + ')'
return proto + " = np.empty({shape})".format(shape=shape)
else:
return proto + " = 0"
def _call_arg(self, arg):
if arg.dimensions:
t = arg.get_datatype('c')
return "<{0}*> {1}.data".format(t, self._string_var(arg.name))
elif isinstance(arg, ResultBase):
return "&{0}".format(self._string_var(arg.name))
else:
return self._string_var(arg.name)
def _string_var(self, var):
printer = self.generator.printer.doprint
return printer(var)
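# --- Illustrative sketch, not part of the original module ---
# How the Extension-related keyword arguments documented above are expected
# to reach CythonCodeWrapper: autowrap() forwards unrecognised kwargs to the
# wrapper. The library name and the paths below are assumptions used only
# for illustration.
def _example_cython_build_options():
    from sympy.abc import x
    return autowrap(x**2 + 1, backend='cython',
                    include_dirs=['/opt/mylib/include'],
                    library_dirs=['/opt/mylib/lib'],
                    libraries=['mylib'],
                    extra_compile_args=['-O3'])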
class F2PyCodeWrapper(CodeWrapper):
"""Wrapper that uses f2py"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the f2py '
'backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super(F2PyCodeWrapper, self).__init__(*args, **kwargs)
@property
def command(self):
filename = self.filename + '.' + self.generator.code_extension
args = ['-c', '-m', self.module_name, filename]
command = [sys.executable, "-c", "import numpy.f2py as f2py2e;f2py2e.main()"]+args
return command
def _prepare_files(self, routine):
pass
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
# Here we define a lookup of backends -> tuples of languages. For now, each
# tuple is of length 1, but if a backend supports more than one language,
# the most preferable language is listed first.
_lang_lookup = {'CYTHON': ('C99', 'C89', 'C'),
'F2PY': ('F95',),
'NUMPY': ('C99', 'C89', 'C'),
'DUMMY': ('F95',)} # Dummy here just for testing
def _infer_language(backend):
"""For a given backend, return the top choice of language"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
return langs[0]
def _validate_backend_language(backend, language):
"""Throws error if backend and language are incompatible"""
langs = _lang_lookup.get(backend.upper(), False)
if not langs:
raise ValueError("Unrecognized backend: " + backend)
if language.upper() not in langs:
raise ValueError(("Backend {0} and language {1} are "
"incompatible").format(backend, language))
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def autowrap(expr, language=None, backend='f2py', tempdir=None, args=None,
flags=None, verbose=False, helpers=None, code_gen=None, **kwargs):
"""Generates python callable binaries based on the math expression.
Parameters
==========
expr
The SymPy expression that should be wrapped as a binary routine.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'f2py' [default],
or 'cython'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in the
specified path.
args : iterable, optional
An ordered iterable of symbols. Specifies the argument sequence for the
function.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can be
helpful for debugging.
helpers : 3-tuple or iterable of 3-tuples, optional
Used to define auxiliary expressions needed for the main expr. If the
main expression needs to call a specialized function it should be
passed in via ``helpers``. Autowrap will then make sure that the
compiled main expression can link to the helper routine. Items should
be 3-tuples with (<function_name>, <sympy_expression>,
<argument_tuple>). It is mandatory to supply an argument sequence to
helper routines.
code_gen : CodeGen instance
An instance of a CodeGen subclass. Overrides ``language``.
include_dirs : [string]
A list of directories to search for C/C++ header files (in Unix form
for portability).
library_dirs : [string]
A list of directories to search for C/C++ libraries at link time.
libraries : [string]
A list of library names (not filenames or paths) to link against.
extra_compile_args : [string]
Any extra platform- and compiler-specific information to use when
compiling the source files in 'sources'. For platforms and compilers
where "command line" makes sense, this is typically a list of
command-line arguments, but for other platforms it could be anything.
extra_link_args : [string]
Any extra platform- and compiler-specific information to use when
linking object files together to create the extension (or to create a
new static Python interpreter). Similar interpretation as for
'extra_compile_args'.
Examples
========
>>> from sympy.abc import x, y, z
>>> from sympy.utilities.autowrap import autowrap
>>> expr = ((x - y + z)**(13)).expand()
>>> binary_func = autowrap(expr)
>>> binary_func(1, 4, 2)
-1.0
"""
if language:
if not isinstance(language, type):
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
# two cases 1) helpers is an iterable of 3-tuples and 2) helpers is a
# 3-tuple
if iterable(helpers) and len(helpers) != 0 and iterable(helpers[0]):
helpers = helpers if helpers else ()
else:
helpers = [helpers] if helpers else ()
args = list(args) if iterable(args, exclude=set) else args
if code_gen is None:
code_gen = get_code_generator(language, "autowrap")
CodeWrapperClass = {
'F2PY': F2PyCodeWrapper,
'CYTHON': CythonCodeWrapper,
'DUMMY': DummyWrapper
}[backend.upper()]
code_wrapper = CodeWrapperClass(code_gen, tempdir, flags if flags else (),
verbose, **kwargs)
helps = []
for name_h, expr_h, args_h in helpers:
helps.append(code_gen.routine(name_h, expr_h, args_h))
for name_h, expr_h, args_h in helpers:
if expr.has(expr_h):
name_h = binary_function(name_h, expr_h, backend='dummy')
expr = expr.subs(expr_h, name_h(*args_h))
try:
routine = code_gen.routine('autofunc', expr, args)
except CodeGenArgumentListError as e:
# if all missing arguments are for pure output, we simply attach them
# at the end and try again, because the wrappers will silently convert
# them to return values anyway.
new_args = []
for missing in e.missing_args:
if not isinstance(missing, OutputArgument):
raise
new_args.append(missing.name)
routine = code_gen.routine('autofunc', expr, args + new_args)
return code_wrapper.wrap_code(routine, helpers=helps)
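# --- Illustrative sketch, not part of the original module ---
# A hedged example of the `helpers` mechanism described above: because the
# main expression contains x - y, autowrap substitutes a call to the helper
# and links the generated code against it. The helper name 'aux' and the
# temporary directory are assumptions used only for illustration.
def _example_autowrap_with_helper():
    from sympy.abc import x, y
    binary = autowrap((x - y)**3, backend='cython',
                      helpers=[('aux', x - y, (x, y))],
                      tempdir='autowrap_helper_demo')
    return binary(4.0, 1.0)  # expected to be close to 27.0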
@doctest_depends_on(exe=('f2py', 'gfortran'), modules=('numpy',))
def binary_function(symfunc, expr, **kwargs):
"""Returns a sympy function with expr as binary implementation
This is a convenience function that automates the steps needed to
autowrap the SymPy expression and attaching it to a Function object
with implemented_function().
Parameters
==========
symfunc : sympy Function
The function to bind the callable to.
expr : sympy Expression
The expression used to generate the function.
kwargs : dict
Any kwargs accepted by autowrap.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.utilities.autowrap import binary_function
>>> expr = ((x - y)**(25)).expand()
>>> f = binary_function('f', expr)
>>> type(f)
<class 'sympy.core.function.UndefinedFunction'>
>>> 2*f(x, y)
2*f(x, y)
>>> f(x, y).evalf(2, subs={x: 1, y: 2})
-1.0
"""
binary = autowrap(expr, **kwargs)
return implemented_function(symfunc, binary)
#################################################################
# UFUNCIFY #
#################################################################
_ufunc_top = Template("""\
#include "Python.h"
#include "math.h"
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
#include ${include_file}
static PyMethodDef ${module}Methods[] = {
{NULL, NULL, 0, NULL}
};""")
_ufunc_outcalls = Template("*((double *)out${outnum}) = ${funcname}(${call_args});")
_ufunc_body = Template("""\
static void ${funcname}_ufunc(char **args, npy_intp *dimensions, npy_intp* steps, void* data)
{
npy_intp i;
npy_intp n = dimensions[0];
${declare_args}
${declare_steps}
for (i = 0; i < n; i++) {
${outcalls}
${step_increments}
}
}
PyUFuncGenericFunction ${funcname}_funcs[1] = {&${funcname}_ufunc};
static char ${funcname}_types[${n_types}] = ${types}
static void *${funcname}_data[1] = {NULL};""")
_ufunc_bottom = Template("""\
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"${module}",
NULL,
-1,
${module}Methods,
NULL,
NULL,
NULL,
NULL
};
PyMODINIT_FUNC PyInit_${module}(void)
{
PyObject *m, *d;
${function_creation}
m = PyModule_Create(&moduledef);
if (!m) {
return NULL;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
return m;
}
#else
PyMODINIT_FUNC init${module}(void)
{
PyObject *m, *d;
${function_creation}
m = Py_InitModule("${module}", ${module}Methods);
if (m == NULL) {
return;
}
import_array();
import_umath();
d = PyModule_GetDict(m);
${ufunc_init}
}
#endif\
""")
_ufunc_init_form = Template("""\
ufunc${ind} = PyUFunc_FromFuncAndData(${funcname}_funcs, ${funcname}_data, ${funcname}_types, 1, ${n_in}, ${n_out},
PyUFunc_None, "${module}", ${docstring}, 0);
PyDict_SetItemString(d, "${funcname}", ufunc${ind});
Py_DECREF(ufunc${ind});""")
_ufunc_setup = Template("""\
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('',
parent_package,
top_path)
config.add_extension('${module}', sources=['${module}.c', '${filename}.c'])
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)""")
class UfuncifyCodeWrapper(CodeWrapper):
"""Wrapper for Ufuncify"""
def __init__(self, *args, **kwargs):
ext_keys = ['include_dirs', 'library_dirs', 'libraries',
'extra_compile_args', 'extra_link_args']
msg = ('The compilation option kwarg {} is not supported with the numpy'
' backend.')
for k in ext_keys:
if k in kwargs.keys():
warn(msg.format(k))
kwargs.pop(k, None)
super(UfuncifyCodeWrapper, self).__init__(*args, **kwargs)
@property
def command(self):
command = [sys.executable, "setup.py", "build_ext", "--inplace"]
return command
def wrap_code(self, routines, helpers=None):
# This routine overrides CodeWrapper because we can't assume funcname == routines[0].name
# Therefore we have to break the CodeWrapper private API.
# There isn't an obvious way to extend multi-expr support to
# the other autowrap backends, so we limit this change to ufuncify.
helpers = helpers if helpers is not None else []
# We just need a consistent name
funcname = 'wrapped_' + str(id(routines) + id(helpers))
workdir = self.filepath or tempfile.mkdtemp("_sympy_compile")
if not os.access(workdir, os.F_OK):
os.mkdir(workdir)
oldwork = os.getcwd()
os.chdir(workdir)
try:
sys.path.append(workdir)
self._generate_code(routines, helpers)
self._prepare_files(routines, funcname)
self._process_files(routines)
mod = __import__(self.module_name)
finally:
sys.path.remove(workdir)
CodeWrapper._module_counter += 1
os.chdir(oldwork)
if not self.filepath:
try:
shutil.rmtree(workdir)
except OSError:
# Could be some issues on Windows
pass
return self._get_wrapped_function(mod, funcname)
def _generate_code(self, main_routines, helper_routines):
all_routines = main_routines + helper_routines
self.generator.write(
all_routines, self.filename, True, self.include_header,
self.include_empty)
def _prepare_files(self, routines, funcname):
# C
codefilename = self.module_name + '.c'
with open(codefilename, 'w') as f:
self.dump_c(routines, f, self.filename, funcname=funcname)
# setup.py
with open('setup.py', 'w') as f:
self.dump_setup(f)
@classmethod
def _get_wrapped_function(cls, mod, name):
return getattr(mod, name)
def dump_setup(self, f):
setup = _ufunc_setup.substitute(module=self.module_name,
filename=self.filename)
f.write(setup)
def dump_c(self, routines, f, prefix, funcname=None):
"""Write a C file with python wrappers
This file contains all the definitions of the routines in c code.
Arguments
---------
routines
List of Routine instances
f
File-like object to write the file to
prefix
The filename prefix, used to name the imported module.
funcname
Name of the main function to be returned.
"""
if funcname is None:
if len(routines) == 1:
funcname = routines[0].name
else:
msg = 'funcname must be specified for multiple output routines'
raise ValueError(msg)
functions = []
function_creation = []
ufunc_init = []
module = self.module_name
include_file = "\"{0}.h\"".format(prefix)
top = _ufunc_top.substitute(include_file=include_file, module=module)
name = funcname
# Partition the C function arguments into categories
# Here we assume all routines accept the same arguments
r_index = 0
py_in, _ = self._partition_args(routines[0].arguments)
n_in = len(py_in)
n_out = len(routines)
# Declare Args
form = "char *{0}{1} = args[{2}];"
arg_decs = [form.format('in', i, i) for i in range(n_in)]
arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_args = '\n '.join(arg_decs)
# Declare Steps
form = "npy_intp {0}{1}_step = steps[{2}];"
step_decs = [form.format('in', i, i) for i in range(n_in)]
step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)])
declare_steps = '\n '.join(step_decs)
# Call Args
form = "*(double *)in{0}"
call_args = ', '.join([form.format(a) for a in range(n_in)])
# Step Increments
form = "{0}{1} += {0}{1}_step;"
step_incs = [form.format('in', i) for i in range(n_in)]
step_incs.extend([form.format('out', i, i) for i in range(n_out)])
step_increments = '\n '.join(step_incs)
# Types
n_types = n_in + n_out
types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};"
# Docstring
docstring = '"Created in SymPy with Ufuncify"'
# Function Creation
function_creation.append("PyObject *ufunc{0};".format(r_index))
# Ufunc initialization
init_form = _ufunc_init_form.substitute(module=module,
funcname=name,
docstring=docstring,
n_in=n_in, n_out=n_out,
ind=r_index)
ufunc_init.append(init_form)
outcalls = [_ufunc_outcalls.substitute(
outnum=i, call_args=call_args, funcname=routines[i].name) for i in
range(n_out)]
body = _ufunc_body.substitute(module=module, funcname=name,
declare_args=declare_args,
declare_steps=declare_steps,
call_args=call_args,
step_increments=step_increments,
n_types=n_types, types=types,
outcalls='\n '.join(outcalls))
functions.append(body)
body = '\n\n'.join(functions)
ufunc_init = '\n '.join(ufunc_init)
function_creation = '\n '.join(function_creation)
bottom = _ufunc_bottom.substitute(module=module,
ufunc_init=ufunc_init,
function_creation=function_creation)
text = [top, body, bottom]
f.write('\n\n'.join(text))
def _partition_args(self, args):
"""Group function arguments into categories."""
py_in = []
py_out = []
for arg in args:
if isinstance(arg, OutputArgument):
py_out.append(arg)
elif isinstance(arg, InOutArgument):
raise ValueError("Ufuncify doesn't support InOutArguments")
else:
py_in.append(arg)
return py_in, py_out
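# --- Illustrative sketch, not part of the original module ---
# With the default 'numpy' backend a list of expressions yields one ufunc
# with several outputs, one per expression. The input values below are
# assumptions used only for illustration.
def _example_multi_output_ufunc():
    import numpy as np
    from sympy.abc import x, y
    f = ufuncify((x, y), [x + y, x * y])
    sums, products = f(np.arange(3.0), 2.0)
    return sums, products  # roughly array([2., 3., 4.]) and array([0., 2., 4.])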
@cacheit
@doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',))
def ufuncify(args, expr, language=None, backend='numpy', tempdir=None,
flags=None, verbose=False, helpers=None, **kwargs):
"""Generates a binary function that supports broadcasting on numpy arrays.
Parameters
==========
args : iterable
Either a Symbol or an iterable of symbols. Specifies the argument
sequence for the function.
expr
A SymPy expression that defines the element wise operation.
language : string, optional
If supplied, (options: 'C' or 'F95'), specifies the language of the
generated code. If ``None`` [default], the language is inferred based
upon the specified backend.
backend : string, optional
Backend used to wrap the generated code. Either 'numpy' [default],
'cython', or 'f2py'.
tempdir : string, optional
Path to directory for temporary files. If this argument is supplied,
the generated code and the wrapper input files are left intact in
the specified path.
flags : iterable, optional
Additional option flags that will be passed to the backend.
verbose : bool, optional
If True, autowrap will not mute the command line backends. This can
be helpful for debugging.
helpers : iterable, optional
Used to define auxiliary expressions needed for the main expr. If
the main expression needs to call a specialized function it should
be put in the ``helpers`` iterable. Autowrap will then make sure
that the compiled main expression can link to the helper routine.
        Items should be tuples with (<function_name>, <sympy_expression>,
<arguments>). It is mandatory to supply an argument sequence to
helper routines.
kwargs : dict
These kwargs will be passed to autowrap if the `f2py` or `cython`
backend is used and ignored if the `numpy` backend is used.
Notes
=====
The default backend ('numpy') will create actual instances of
``numpy.ufunc``. These support ndimensional broadcasting, and implicit type
conversion. Use of the other backends will result in a "ufunc-like"
function, which requires equal length 1-dimensional arrays for all
arguments, and will not perform any type conversions.
References
==========
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
Examples
========
>>> from sympy.utilities.autowrap import ufuncify
>>> from sympy.abc import x, y
>>> import numpy as np
>>> f = ufuncify((x, y), y + x**2)
>>> type(f)
<class 'numpy.ufunc'>
>>> f([1, 2, 3], 2)
array([ 3., 6., 11.])
>>> f(np.arange(5), 3)
array([ 3., 4., 7., 12., 19.])
For the 'f2py' and 'cython' backends, inputs are required to be equal length
1-dimensional arrays. The 'f2py' backend will perform type conversion, but
the Cython backend will error if the inputs are not of the expected type.
>>> f_fortran = ufuncify((x, y), y + x**2, backend='f2py')
>>> f_fortran(1, 2)
array([ 3.])
>>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0]))
array([ 2., 6., 12.])
>>> f_cython = ufuncify((x, y), y + x**2, backend='Cython')
>>> f_cython(1, 2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int)
>>> f_cython(np.array([1.0]), np.array([2.0]))
array([ 3.])
"""
if isinstance(args, Symbol):
args = (args,)
else:
args = tuple(args)
if language:
_validate_backend_language(backend, language)
else:
language = _infer_language(backend)
helpers = helpers if helpers else ()
flags = flags if flags else ()
if backend.upper() == 'NUMPY':
# maxargs is set by numpy compile-time constant NPY_MAXARGS
# If a future version of numpy modifies or removes this restriction
# this variable should be changed or removed
maxargs = 32
helps = []
for name, expr, args in helpers:
helps.append(make_routine(name, expr, args))
code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"), tempdir,
flags, verbose)
if not isinstance(expr, (list, tuple)):
expr = [expr]
if len(expr) == 0:
raise ValueError('Expression iterable has zero length')
if len(expr) + len(args) > maxargs:
msg = ('Cannot create ufunc with more than {0} total arguments: '
'got {1} in, {2} out')
raise ValueError(msg.format(maxargs, len(args), len(expr)))
routines = [make_routine('autofunc{}'.format(idx), exprx, args) for
idx, exprx in enumerate(expr)]
return code_wrapper.wrap_code(routines, helpers=helps)
else:
# Dummies are used for all added expressions to prevent name clashes
# within the original expression.
y = IndexedBase(Dummy('y'))
m = Dummy('m', integer=True)
i = Idx(Dummy('i', integer=True), m)
f_dummy = Dummy('f')
f = implemented_function('%s_%d' % (f_dummy.name, f_dummy.dummy_index), Lambda(args, expr))
# For each of the args create an indexed version.
indexed_args = [IndexedBase(Dummy(str(a))) for a in args]
# Order the arguments (out, args, dim)
args = [y] + indexed_args + [m]
args_with_indices = [a[i] for a in indexed_args]
return autowrap(Eq(y[i], f(*args_with_indices)), language, backend,
tempdir, args, flags, verbose, helpers, **kwargs)
| bsd-3-clause | 163,633,342,712,998,180 | 35.570027 | 115 | 0.587169 | false |
biorack/metatlas | metatlas/io/write_utils.py | 1 | 2914 | """ Utility functions used in writing files"""
import filecmp
import logging
import os
import tempfile
logger = logging.getLogger(__name__)
def make_dir_for(file_path):
"""makes directories for file_path if they don't already exist"""
directory = os.path.dirname(file_path)
if directory != "":
os.makedirs(directory, exist_ok=True)
def check_existing_file(file_path, overwrite=False):
"""Creates directories as needed and throws an error if file exists and overwrite is False"""
make_dir_for(file_path)
try:
if not overwrite and os.path.exists(file_path):
raise FileExistsError(f"Not overwriting {file_path}.")
except FileExistsError as err:
logger.exception(err)
raise
def export_dataframe(dataframe, file_path, description, overwrite=False, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to create
description: free string for logging
overwrite: if False, raise error if file already exists
remaining arguments are passed through to to_csv()
"""
check_existing_file(file_path, overwrite)
dataframe.to_csv(file_path, **kwargs)
logger.info("Exported %s to %s.", description, file_path)
def raise_on_diff(dataframe, file_path, description, **kwargs):
"""
inputs:
        dataframe: pandas DataFrame to compare against the file
file_path: string with path of file to compare against
description: free string for logging
kwargs: passed through to to_csv()
If file_path exists and does not match file that would be generated by
saving dataframe to a csv, then raise ValueError
"""
if not os.path.exists(file_path):
return
with tempfile.NamedTemporaryFile(delete=False) as temp_path:
dataframe.to_csv(temp_path, **kwargs)
same = filecmp.cmp(file_path, temp_path.name)
os.remove(temp_path.name)
if same:
logger.info("Data in %s is the same as %s.", description, file_path)
else:
try:
raise ValueError("Data in %s is not the same as %s." % (description, file_path))
except ValueError as err:
logger.exception(err)
raise
def export_dataframe_die_on_diff(dataframe, file_path, description, **kwargs):
"""
inputs:
dataframe: pandas DataFrame to save
file_path: string with path of file to create
description: free string for logging
kwargs: passed through to to_csv()
If file_path does not exist then save the dataframe there
If file_path exists and matches data in dataframe then do nothing
If file_path exists and does not match dataframe then raise ValueError
"""
raise_on_diff(dataframe, file_path, description, **kwargs)
if not os.path.exists(file_path):
export_dataframe(dataframe, file_path, description, **kwargs)
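# --- Illustrative sketch, not part of the original module ---
# Minimal example of the intended call pattern: the first call writes the
# file, later calls succeed only if the data is unchanged. The DataFrame
# contents and the output path are assumptions used only for illustration.
def _example_usage():
    import pandas as pd
    frame = pd.DataFrame({"sample": ["a", "b"], "value": [1.0, 2.0]})
    export_dataframe_die_on_diff(frame, "out/report.csv", "demo report", index=False)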
| bsd-3-clause | 6,220,869,091,396,560,000 | 33.690476 | 97 | 0.668497 | false |
PetePriority/home-assistant | homeassistant/components/zone/zone.py | 1 | 3191 | """Component entity and functionality."""
from homeassistant.const import ATTR_HIDDEN, ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.helpers.entity import Entity
from homeassistant.loader import bind_hass
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.location import distance
from .const import DOMAIN
ATTR_PASSIVE = 'passive'
ATTR_RADIUS = 'radius'
STATE = 'zoning'
@bind_hass
def active_zone(hass, latitude, longitude, radius=0):
"""Find the active zone for given latitude, longitude."""
return run_callback_threadsafe(
hass.loop, async_active_zone, hass, latitude, longitude, radius
).result()
@bind_hass
def async_active_zone(hass, latitude, longitude, radius=0):
"""Find the active zone for given latitude, longitude.
This method must be run in the event loop.
"""
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (hass.states.get(entity_id) for entity_id
in sorted(hass.states.async_entity_ids(DOMAIN)))
min_dist = None
closest = None
for zone in zones:
if zone.attributes.get(ATTR_PASSIVE):
continue
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist
smaller_zone = (zone_dist == min_dist and
zone.attributes[ATTR_RADIUS] <
closest.attributes[ATTR_RADIUS])
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
def in_zone(zone, latitude, longitude, radius=0) -> bool:
"""Test if given latitude, longitude is in given zone.
Async friendly.
"""
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
return zone_dist - radius < zone.attributes[ATTR_RADIUS]
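# --- Illustrative sketch, not part of the original component ---
# in_zone() only needs an object exposing an `attributes` mapping, so a zone
# state can be stubbed for a quick check. The coordinates and the radius
# below are assumptions used only for illustration.
def _example_in_zone_check():
    class _StubZoneState:
        attributes = {ATTR_LATITUDE: 52.37, ATTR_LONGITUDE: 4.89,
                      ATTR_RADIUS: 100}
    # A point at the zone centre is well inside the 100 m radius.
    return in_zone(_StubZoneState(), 52.37, 4.89)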
class Zone(Entity):
"""Representation of a Zone."""
def __init__(self, hass, name, latitude, longitude, radius, icon, passive):
"""Initialize the zone."""
self.hass = hass
self._name = name
self._latitude = latitude
self._longitude = longitude
self._radius = radius
self._icon = icon
self._passive = passive
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state property really does nothing for a zone."""
return STATE
@property
def icon(self):
"""Return the icon if any."""
return self._icon
@property
def state_attributes(self):
"""Return the state attributes of the zone."""
data = {
ATTR_HIDDEN: True,
ATTR_LATITUDE: self._latitude,
ATTR_LONGITUDE: self._longitude,
ATTR_RADIUS: self._radius,
}
if self._passive:
data[ATTR_PASSIVE] = self._passive
return data
| apache-2.0 | -3,663,965,267,479,924,700 | 28.009091 | 79 | 0.62833 | false |
jmsleiman/hypercube-election | hypercube.py | 1 | 5446 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 Joseph M. Sleiman
# This program is free software; you can redistribute it and/or
# modify it under the terms of the LGPLv2.1 or LGPLv3 License.
import math
import node
class HyperCube(object):
def __init__(self, dimension, nodeValues):
### define variables
self.dimension = dimension
self.listOfNodes = []
self.messageRegistry = []
self.dotString = ""
self.colourList = ['"#575329"', '"#00FECF"', '"#B05B6F"', '"#8CD0FF"', '"#3B9700"', '"#04F757"', '"#C8A1A1"', '"#1E6E00"',
'"#000000"', '"#FFFF00"', '"#1CE6FF"', '"#FF34FF"', '"#FF4A46"', '"#008941"', '"#006FA6"', '"#A30059"',
'"#FFDBE5"', '"#7A4900"', '"#0000A6"', '"#63FFAC"', '"#B79762"', '"#004D43"', '"#8FB0FF"', '"#997D87"',
'"#5A0007"', '"#809693"', '"#FEFFE6"', '"#1B4400"', '"#4FC601"', '"#3B5DFF"', '"#4A3B53"', '"#FF2F80"',
'"#61615A"', '"#BA0900"', '"#6B7900"', '"#00C2A0"', '"#FFAA92"', '"#FF90C9"', '"#B903AA"', '"#D16100"',
'"#DDEFFF"', '"#000035"', '"#7B4F4B"', '"#A1C299"', '"#300018"', '"#0AA6D8"', '"#013349"', '"#00846F"',
'"#372101"', '"#FFB500"', '"#C2FFED"', '"#A079BF"', '"#CC0744"', '"#C0B9B2"', '"#C2FF99"', '"#001E09"',
'"#00489C"', '"#6F0062"', '"#0CBD66"', '"#EEC3FF"', '"#456D75"', '"#B77B68"', '"#7A87A1"', '"#788D66"',
'"#885578"', '"#FAD09F"', '"#FF8A9A"', '"#D157A0"', '"#BEC459"', '"#456648"', '"#0086ED"', '"#886F4C"',
'"#34362D"', '"#B4A8BD"', '"#00A6AA"', '"#452C2C"', '"#636375"', '"#A3C8C9"', '"#FF913F"', '"#938A81"',
'"#7900D7"', '"#A77500"', '"#6367A9"', '"#A05837"', '"#6B002C"', '"#772600"', '"#D790FF"', '"#9B9700"',
'"#549E79"', '"#FFF69F"', '"#201625"', '"#72418F"', '"#BC23FF"', '"#99ADC0"', '"#3A2465"', '"#922329"',
'"#5B4534"', '"#FDE8DC"', '"#404E55"', '"#0089A3"', '"#CB7E98"', '"#A4E804"', '"#324E72"', '"#6A3A4C"']
### do some setting up
### add in those values as nodes
for value in nodeValues:
self.listOfNodes.append(node.Node(value, self.dimension))
self.setConnections(self.listOfNodes)
def setConnections(self, entry):
'''this method splits the list of entries into smaller and
smaller sublists until a list of 2 nodes is reached.
those 2 nodes form a connection in dimension 1, and after that
the other lists are superimposed and forms connections
accordingly:
0 1 2 3
4 5 6 7
0 and 4, 1 and 5, 2 and 6, 3 and 7 all form connections together
in dimension 3 (as this list has 8 elements, 2^3 = 8...)
'''
if(len(entry) > 2):
left, right = split_list(entry)
self.setConnections(left)
self.setConnections(right)
for x in xrange(0, len(left)):
left[x].attach(right[x], int(math.log(len(entry),2)))
right[x].attach(left[x], int(math.log(len(entry),2)))
if(len(entry) == 2):
entry[0].attach(entry[1], 1)
entry[1].attach(entry[0], 1)
# @profile
def election(self, largestWins):
'''
In this scenario, the nodes must find the smallest node among them, and name it their leader.
Strategy:
- Each node must message its neighbour on the i edge:
message contains:
rank
value
- When an active node receives a message:
- If the message received is from a smaller rank, there's been a catastrophic bug.
- If the message received is from an equal rank:
- If the receiver has a higher value, it increments its rank
- If the receiver has a lower value, it points the queen variable to the edge that sent the message, and goes dormant
- If the message received is from a higher rank:
- The node pushes it to a queue and comes back to it when it's ready (ie when the rank matches)
- When a passive node receives a message:
- If the message contains a rank lower than the rank of your queen, switch alliances
'''
messageMatrix = []
for node in self.listOfNodes:
messageMatrix.append(node.createChallenge(0))
clock = 0
victor = None
dots = []
while(victor == None):
dot = self.toDot()[:-1]
clock = clock + 1
messagesToProcess = []
messagesToQueue = []
while( len(messageMatrix) > 0):
msg = messageMatrix.pop(0)
dot += msg.toDot()
if(msg.delay <= 0):
messagesToProcess.append(msg)
else:
messagesToQueue.append(msg)
# now it's time to process messages
while(len(messagesToProcess) > 0):
msg = messagesToProcess.pop(0)
# however, how do we account for a redirected challenge?
# and how do we account for a success, defeat?
toBeContinued = msg.destination.processMessage(msg, largestWins)
if(toBeContinued != None):
messageMatrix.append(toBeContinued)
# now it's time to requeue those messages
for msg in messagesToQueue:
messageMatrix.append(msg)
for msg in messageMatrix:
msg.delay -= 1
dot += "}"
dots.append(dot)
for node in self.listOfNodes:
if node.rank == self.dimension:
print "Winner! {0}".format(node)
victor = node
break
dot = self.toDot()
dots.append(dot)
return dots
def toDot(self):
text = "digraph {\n\tlayout = circo\n"
for entry in self.listOfNodes:
text = text + entry.toDot(self.colourList)
text = text + "}"
self.dotString = text
return self.dotString
# now we need to draw all the leader directions...
# woohoo...
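# --- Illustrative sketch, not part of the original module ---
# A hedged example of driving the class above: build a dimension-2 cube
# (2**2 = 4 nodes) and run an election where the largest value wins. The
# node values are assumptions chosen only for illustration.
def example_election_run():
	cube = HyperCube(2, [3, 1, 4, 2])
	dots = cube.election(largestWins=True)  # one Graphviz snapshot per round
	return dots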
def split_list(a_list):
half = len(a_list)/2
return a_list[:half], a_list[half:]
| lgpl-2.1 | 453,523,443,621,024,500 | 33.251572 | 125 | 0.605215 | false |
ChillarAnand/junction | junction/base/emailer.py | 1 | 1236 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
# Standard Library
from os import path
# Third Party Stuff
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
def send_email(to, context, template_dir):
"""Render given templates and send email to `to`.
    :param to: User object to send email to.
    :param context: dict of context variables to pass to the django templates
:param template_dir: We expect files message.txt, subject.txt,
message.html etc in this folder.
    :returns: Number of successfully delivered messages (as returned by
        django.core.mail.send_mail)
    :rtype: int
"""
def to_str(template_name):
return render_to_string(path.join(template_dir, template_name), context).strip()
subject = to_str('subject.txt')
text_message = to_str('message.txt')
html_message = to_str('message.html')
from_email = settings.DEFAULT_FROM_EMAIL
recipient_list = [_format_email(to)]
return send_mail(subject, text_message, from_email, recipient_list, html_message=html_message)
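# --- Illustrative sketch, not part of the original module ---
# Example call, assuming a template directory such as "proposals/new" that
# contains subject.txt, message.txt and message.html. The user object and
# the context keys are assumptions used only for illustration.
def _example_send(user, proposal):
    return send_email(to=user,
                      context={'proposal': proposal},
                      template_dir='proposals/new')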
def _format_email(user):
return user.email if user.first_name and user.last_name else \
'"{} {}" <{}>'.format(user.first_name, user.last_name, user.email)
| mit | -6,158,837,254,071,177,000 | 31.526316 | 98 | 0.695793 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/status/cmd/devicequery/type_Params.py | 1 | 3783 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import array
PARAMS_DEVICE_TYPE_USER_SPECIFIC = 0
PARAMS_DEVICE_TYPE_U1394 = 1
PARAMS_DEVICE_TYPE_ADAPTER = 2
PARAMS_DEVICE_TYPE_ALL = 255
PARAMS_DEVICE_TYPE_APM_SUPPORT = 3
PARAMS_DEVICE_TYPE_BATTERY = 4
PARAMS_DEVICE_TYPE_CDROM = 5
PARAMS_DEVICE_TYPE_COMPUTER = 6
PARAMS_DEVICE_TYPE_DECODER = 7
PARAMS_DEVICE_TYPE_DISK_DRIVE = 8
PARAMS_DEVICE_TYPE_DISPLAY = 9
PARAMS_DEVICE_TYPE_FDC = 10
PARAMS_DEVICE_TYPE_FLOPPY = 11
PARAMS_DEVICE_TYPE_GPS = 12
PARAMS_DEVICE_TYPE_HDC = 13
PARAMS_DEVICE_TYPE_HID_CLASS = 14
PARAMS_DEVICE_TYPE_IMAGE = 15
PARAMS_DEVICE_TYPE_INFRARED = 16
PARAMS_DEVICE_TYPE_KEYBOARD = 17
PARAMS_DEVICE_TYPE_LEGACY_DRIVER = 18
PARAMS_DEVICE_TYPE_MEDIA = 19
PARAMS_DEVICE_TYPE_MEDIUM_CHANGER = 20
PARAMS_DEVICE_TYPE_MODEM = 21
PARAMS_DEVICE_TYPE_MONITOR = 22
PARAMS_DEVICE_TYPE_MOUSE = 23
PARAMS_DEVICE_TYPE_MTD = 24
PARAMS_DEVICE_TYPE_MULTIFUNCTION = 25
PARAMS_DEVICE_TYPE_MULTIPORT_SERIAL = 26
PARAMS_DEVICE_TYPE_NET = 27
PARAMS_DEVICE_TYPE_NET_CLIENT = 28
PARAMS_DEVICE_TYPE_NET_SERVICE = 29
PARAMS_DEVICE_TYPE_NET_TRANS = 30
PARAMS_DEVICE_TYPE_NO_DRIVER = 31
PARAMS_DEVICE_TYPE_PARALLEL = 32
PARAMS_DEVICE_TYPE_PCMCIA = 33
PARAMS_DEVICE_TYPE_PORTS = 34
PARAMS_DEVICE_TYPE_PRINTER = 35
PARAMS_DEVICE_TYPE_PRINTER_UPGRADE = 36
PARAMS_DEVICE_TYPE_SCSI_ADAPTER = 37
PARAMS_DEVICE_TYPE_SMART_CARD_READER = 38
PARAMS_DEVICE_TYPE_SOUND = 39
PARAMS_DEVICE_TYPE_STILL_IMAGE = 40
PARAMS_DEVICE_TYPE_SYSTEM = 41
PARAMS_DEVICE_TYPE_TAPE_DRIVE = 42
PARAMS_DEVICE_TYPE_UNKNOWN = 43
PARAMS_DEVICE_TYPE_USB = 44
PARAMS_DEVICE_TYPE_VOLUME = 45
PARAMS_DEVICE_TYPE_U1394DEBUG = 46
PARAMS_DEVICE_TYPE_U61883 = 47
PARAMS_DEVICE_TYPE_AVC = 48
PARAMS_DEVICE_TYPE_BIOMETRIC = 49
PARAMS_DEVICE_TYPE_BLUETOOTH = 50
PARAMS_DEVICE_TYPE_DOT4 = 51
PARAMS_DEVICE_TYPE_DOT4PRINT = 52
PARAMS_DEVICE_TYPE_ENUM1394 = 53
PARAMS_DEVICE_TYPE_INFINIBAND = 54
PARAMS_DEVICE_TYPE_PNPPRINTERS = 55
PARAMS_DEVICE_TYPE_PROCESSOR = 56
PARAMS_DEVICE_TYPE_SBP2 = 57
PARAMS_DEVICE_TYPE_SECURITYACCELERATOR = 58
PARAMS_DEVICE_TYPE_VOLUMESNAPSHOT = 59
PARAMS_DEVICE_TYPE_WCEUSBS = 60
PARAMS_GUID_LEN = 16
class Params:
def __init__(self):
self.__dict__['choice'] = PARAMS_DEVICE_TYPE_USER_SPECIFIC
self.__dict__['guid'] = array.array('B')
i = 0
while i < PARAMS_GUID_LEN:
self.__dict__['guid'].append(0)
i = i + 1
def __getattr__(self, name):
if name == 'choice':
return self.__dict__['choice']
if name == 'guid':
return self.__dict__['guid']
raise AttributeError("Attribute '%s' not found" % name)
def __setattr__(self, name, value):
if name == 'choice':
self.__dict__['choice'] = value
elif name == 'guid':
self.__dict__['guid'] = value
else:
raise AttributeError("Attribute '%s' not found" % name)
def Marshal(self, mmsg):
from mcl.object.Message import MarshalMessage
submsg = MarshalMessage()
submsg.AddU32(MSG_KEY_PARAMS_CHOICE, self.__dict__['choice'])
submsg.AddData(MSG_KEY_PARAMS_GUID, self.__dict__['guid'])
mmsg.AddMessage(MSG_KEY_PARAMS, submsg)
def Demarshal(self, dmsg, instance=-1):
import mcl.object.Message
msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
submsg = mcl.object.Message.DemarshalMessage(msgData)
self.__dict__['choice'] = submsg.FindU32(MSG_KEY_PARAMS_CHOICE)
try:
self.__dict__['guid'] = submsg.FindData(MSG_KEY_PARAMS_GUID)
except:
pass | unlicense | 8,606,701,729,567,228,000 | 32.785714 | 90 | 0.686228 | false |
metacloud/python-glanceclient | glanceclient/v1/image_members.py | 1 | 3610 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glanceclient.common import base
class ImageMember(base.Resource):
def __repr__(self):
return "<ImageMember %s>" % self._info
@property
def id(self):
return self.member_id
def delete(self):
self.manager.delete(self)
class ImageMemberManager(base.Manager):
resource_class = ImageMember
def get(self, image, member_id):
image_id = base.getid(image)
url = '/v1/images/%s/members/%s' % (image_id, member_id)
resp, body = self.api.json_request('GET', url)
member = body['member']
member['image_id'] = image_id
return ImageMember(self, member, loaded=True)
def list(self, image=None, member=None):
out = []
if image and member:
try:
out.append(self.get(image, member))
#TODO(bcwaldon): narrow this down to 404
except Exception:
pass
elif image:
out.extend(self._list_by_image(image))
elif member:
out.extend(self._list_by_member(member))
else:
#TODO(bcwaldon): figure out what is appropriate to do here as we
# are unable to provide the requested response
pass
return out
def _list_by_image(self, image):
image_id = base.getid(image)
url = '/v1/images/%s/members' % image_id
resp, body = self.api.json_request('GET', url)
out = []
for member in body['members']:
member['image_id'] = image_id
out.append(ImageMember(self, member, loaded=True))
return out
def _list_by_member(self, member):
member_id = base.getid(member)
url = '/v1/shared-images/%s' % member_id
resp, body = self.api.json_request('GET', url)
out = []
for member in body['shared_images']:
member['member_id'] = member_id
out.append(ImageMember(self, member, loaded=True))
return out
def delete(self, image_id, member_id):
self._delete("/v1/images/%s/members/%s" % (image_id, member_id))
def create(self, image, member_id, can_share=False):
"""Creates an image."""
url = '/v1/images/%s/members/%s' % (base.getid(image), member_id)
body = {'member': {'can_share': can_share}}
self._update(url, body=body)
def replace(self, image, members):
memberships = []
for member in members:
try:
obj = {
'member_id': member.member_id,
'can_share': member.can_share,
}
except AttributeError:
obj = {'member_id': member['member_id']}
if 'can_share' in member:
obj['can_share'] = member['can_share']
memberships.append(obj)
url = '/v1/images/%s/members' % base.getid(image)
self.api.json_request('PUT', url, {}, {'memberships': memberships})
| apache-2.0 | 5,861,809,855,356,957,000 | 34.048544 | 78 | 0.579778 | false |
Juniper/ceilometer | ceilometer/compute/virt/vmware/vsphere_operations.py | 1 | 10191 | # Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.vmware import vim_util
PERF_MANAGER_TYPE = "PerformanceManager"
PERF_COUNTER_PROPERTY = "perfCounter"
VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value'
# ESXi Servers sample performance data every 20 seconds. 20-second interval
# data is called instance data or real-time data. To retrieve instance data,
# we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId"
# property. In that case the "QueryPerf" method operates as a raw data feed
# that bypasses the vCenter database and instead retrieves performance data
# from an ESXi host.
# The following value is time interval for real-time performance stats
# in seconds and it is not configurable.
VC_REAL_TIME_SAMPLING_INTERVAL = 20
class VsphereOperations(object):
"""Class to invoke vSphere APIs calls.
vSphere APIs calls are required by various pollsters, collecting data from
VMware infrastructure.
"""
def __init__(self, api_session, max_objects):
self._api_session = api_session
self._max_objects = max_objects
# Mapping between "VM's Nova instance Id" -> "VM's MOID"
# In case a VM is deployed by Nova, then its name is instance ID.
# So this map essentially has VM names as keys.
self._vm_moid_lookup_map = {}
# Mapping from full name -> ID, for VC Performance counters
self._perf_counter_id_lookup_map = None
def _init_vm_moid_lookup_map(self):
session = self._api_session
result = session.invoke_api(vim_util, "get_objects", session.vim,
"VirtualMachine", self._max_objects,
[VM_INSTANCE_ID_PROPERTY],
False)
while result:
for vm_object in result.objects:
vm_moid = vm_object.obj.value
# propSet will be set only if the server provides value
if hasattr(vm_object, 'propSet') and vm_object.propSet:
vm_instance_id = vm_object.propSet[0].val
if vm_instance_id:
self._vm_moid_lookup_map[vm_instance_id] = vm_moid
result = session.invoke_api(vim_util, "continue_retrieval",
session.vim, result)
def get_vm_moid(self, vm_instance_id):
"""Method returns VC MOID of the VM by its NOVA instance ID."""
if vm_instance_id not in self._vm_moid_lookup_map:
self._init_vm_moid_lookup_map()
return self._vm_moid_lookup_map.get(vm_instance_id, None)
def _init_perf_counter_id_lookup_map(self):
# Query details of all the performance counters from VC
session = self._api_session
client_factory = session.vim.client.factory
perf_manager = session.vim.service_content.perfManager
prop_spec = vim_util.build_property_spec(
client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY])
obj_spec = vim_util.build_object_spec(
client_factory, perf_manager, None)
filter_spec = vim_util.build_property_filter_spec(
client_factory, [prop_spec], [obj_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = 1
prop_collector = session.vim.service_content.propertyCollector
result = session.invoke_api(session.vim, "RetrievePropertiesEx",
prop_collector, specSet=[filter_spec],
options=options)
perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo
# Extract the counter Id for each counter and populate the map
self._perf_counter_id_lookup_map = {}
for perf_counter_info in perf_counter_infos:
counter_group = perf_counter_info.groupInfo.key
counter_name = perf_counter_info.nameInfo.key
counter_rollup_type = perf_counter_info.rollupType
counter_id = perf_counter_info.key
counter_full_name = (counter_group + ":" + counter_name + ":" +
counter_rollup_type)
self._perf_counter_id_lookup_map[counter_full_name] = counter_id
def get_perf_counter_id(self, counter_full_name):
"""Method returns the ID of VC performance counter by its full name.
A VC performance counter is uniquely identified by the
tuple {'Group Name', 'Counter Name', 'Rollup Type'}.
It will have an id - counter ID (changes from one VC to another),
which is required to query performance stats from that VC.
This method returns the ID for a counter,
assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'.
"""
if not self._perf_counter_id_lookup_map:
self._init_perf_counter_id_lookup_map()
return self._perf_counter_id_lookup_map[counter_full_name]
# TODO([email protected]) Move this method to common library
# when it gets checked-in
def query_vm_property(self, vm_moid, property_name):
"""Method returns the value of specified property for a VM.
:param vm_moid: moid of the VM whose property is to be queried
:param property_name: path of the property
"""
vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine")
session = self._api_session
return session.invoke_api(vim_util, "get_object_property",
session.vim, vm_mobj, property_name)
def query_vm_aggregate_stats(self, vm_moid, counter_id, duration):
"""Method queries the aggregated real-time stat value for a VM.
This method should be used for aggregate counters.
:param vm_moid: moid of the VM
:param counter_id: id of the perf counter in VC
:param duration: in seconds from current time,
over which the stat value was applicable
:return: the aggregated stats value for the counter
"""
# For aggregate counters, device_name should be ""
stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration)
# Performance manager provides the aggregated stats value
# with device name -> None
return stats.get(None, 0)
def query_vm_device_stats(self, vm_moid, counter_id, duration):
"""Method queries the real-time stat values for a VM, for all devices.
This method should be used for device(non-aggregate) counters.
:param vm_moid: moid of the VM
:param counter_id: id of the perf counter in VC
:param duration: in seconds from current time,
over which the stat value was applicable
:return: a map containing the stat values keyed by the device ID/name
"""
# For device counters, device_name should be "*" to get stat values
# for all devices.
stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration)
# For some device counters, in addition to the per device value
# the Performance manager also returns the aggregated value.
# Just to be consistent, deleting the aggregated value if present.
stats.pop(None, None)
return stats
def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration):
"""Method queries the real-time stat values for a VM.
:param vm_moid: moid of the VM for which stats are needed
:param counter_id: id of the perf counter in VC
:param device_name: name of the device for which stats are to be
queried. For aggregate counters pass empty string ("").
For device counters pass "*", if stats are required over all
devices.
:param duration: in seconds from current time,
over which the stat value was applicable
:return: a map containing the stat values keyed by the device ID/name
"""
session = self._api_session
client_factory = session.vim.client.factory
# Construct the QuerySpec
metric_id = client_factory.create('ns0:PerfMetricId')
metric_id.counterId = counter_id
metric_id.instance = device_name
query_spec = client_factory.create('ns0:PerfQuerySpec')
query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine")
query_spec.metricId = [metric_id]
query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL
# We query all samples which are applicable over the specified duration
samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL)
if duration and
duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1)
query_spec.maxSample = samples_cnt
perf_manager = session.vim.service_content.perfManager
perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager,
querySpec=[query_spec])
stat_values = {}
if perf_stats:
entity_metric = perf_stats[0]
sample_infos = entity_metric.sampleInfo
if len(sample_infos) > 0:
for metric_series in entity_metric.value:
# Take the average of all samples to improve the accuracy
# of the stat value
stat_value = float(sum(metric_series.value)) / samples_cnt
device_id = metric_series.id.instance
stat_values[device_id] = stat_value
return stat_values
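# Usage sketch (illustrative only, not part of the original module). It assumes an
# oslo.vmware API session has been created elsewhere; the counter name, instance id
# and 60-second window below are hypothetical examples.
def _example_poll_cpu_usage(api_session):
    ops = VsphereOperations(api_session, max_objects=1000)
    counter_id = ops.get_perf_counter_id("cpu:usage:average")
    vm_moid = ops.get_vm_moid("3f1b7a2c-1234-5678-9abc-def012345678")
    if vm_moid is None:
        return None
    # Aggregate CPU usage averaged over the last three 20-second samples.
    return ops.query_vm_aggregate_stats(vm_moid, counter_id, duration=60)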
| apache-2.0 | -2,132,534,185,499,780,900 | 43.308696 | 79 | 0.631538 | false |
Kaushikpatnaik/Active-Learning-and-Best-Response-Dynamics | experiments.py | 1 | 8013 | import sys
from classrooms import *
from learners import *
from datasets import *
from stat_trackers import *
from svmutil import *
import math
# NOTE: dot/zeros and norm below are presumably also provided by the star imports
# above; importing them explicitly keeps the numpy dependency visible.
from numpy import dot, zeros
from numpy.linalg import norm
#from memory_profiler import profile
#from pympler import muppy
#from pympler import asizeof
#from pympler import summary
#from pympler.classtracker import ClassTracker
import copy
def compare_passive_learners(learner_types, dataset, num_trials, train_size, test_size, display = False):
trackers = [StatTracker() for l in learner_types]
for trial in range(num_trials):
dataset.initialize()
for tracker, learner_type in zip(trackers, learner_types):
learner = learner_type()
classroom = PassiveSupervisedBatchClassroom(learner, dataset, tracker, train_size, test_size)
classroom.learn()
classroom.test()
#classroom.separatorprint()
if display:
sys.stdout.write('.')
sys.stdout.flush()
if display:
print
return trackers
def compare_active_source_learners(learner_types, dataset, num_trials, label_budget, test_size, display = False):
trackers = [StatTracker() for l in learner_types]
for trial in range(num_trials):
dataset.initialize()
for tracker, learner_type in zip(trackers, learner_types):
learner = learner_type()
classroom = ActiveSourceClassroom(learner, dataset, tracker, label_budget, test_size)
classroom.learn()
classroom.test()
#dataset.iterate()
if display:
sys.stdout.write('.')
sys.stdout.flush()
if display:
print
return trackers
def compare_active_noise_linear_learners(learner_types, dataset, num_trials, label_budget, test_size, eps, display = False):
trackers = [StatTracker() for l in learner_types]
for trial in range(num_trials):
dataset.initialize()
for tracker, learner_type in zip(trackers, learner_types):
learner = learner_type()
classroom = ActiveNoiseLinearSourceClassroom(learner, dataset, tracker, label_budget, test_size, eps)
classroom.learn()
classroom.test()
if display:
sys.stdout.write('.')
sys.stdout.flush()
if display:
print
return trackers
def compare_active_batch_learners(learner_types, dataset, num_trials, num_unlabeled, label_budget, test_size, display = False):
trackers = [StatTracker() for l in learner_types]
for trial in range(num_trials):
dataset.initialize()
for tracker, learner_type in zip(trackers, learner_types):
dataset.initialize(shuffle = False)
learner = learner_type()
tracker.set_name(learner)
classroom = ActiveBatchClassroom(learner, dataset, tracker, num_unlabeled, label_budget, test_size)
classroom.learn()
classroom.test()
#classroom.separator_inner_iter_print()
if display:
sys.stdout.write('.')
sys.stdout.flush()
if display:
print
return trackers
def mistake_bound_run(learner_type, dataset, num_trials, num_iters):
tracker = MistakeTracker()
for i in range(num_trials):
dataset.initialize()
learner = learner_type()
classroom = IterativeTrackingClassroom(learner, dataset, tracker, num_iters)
classroom.learn()
return tracker
#@profile
def libsvm_angle_compare_test(learner_types, dataset, num_trials, num_unlabeled, label_budget, w_star, display = False):
#trackers = [StatTracker() for l in learner_types]
tracker1 = []
acc= []
for trial in range(num_trials):
local_data = copy.copy(dataset)
local_data.initialize()
learner1 = learner_types[0]()
classroom1 = ActiveBatchClassroom(learner1, local_data, tracker1, num_unlabeled, label_budget, 0)
classroom1.learn()
if display:
sys.stdout.write('.')
sys.stdout.flush()
# Calculating difference in angle for the theoretical algorithm
algo_w = learner1.w
#print algo_w
angle_diff_algo = 0
angle_diff_algo = dot(w_star, algo_w)
angle_diff_algo /= norm(w_star,2) * norm(algo_w,2)
angle_diff_algo = math.acos(angle_diff_algo)
#print angle_diff_algo
local_data.initialize(shuffle = False)
x_train =[]
y_train =[]
for i in range(label_budget):
temp1, temp2 = local_data.next()
x_train.append(temp1.tolist())
y_train.append(temp2)
prob = svm_problem(y_train, x_train)
param = svm_parameter('-q -t 0 -c 10')
m = svm_train(prob, param)
# libsvm returns the support vectors as a list of dictionaries
# and coefficients as a list of tuples
support = m.get_SV()
coef = m.get_sv_coef()
labels = m.get_nr_class()
support_arr = zeros((len(coef), 2))
coef_arr = zeros(len(coef))
angle_diff_svm = 0
svm_w = zeros(2)
#Calculating distance between the best separator and the svm and algorithm weight vectors
# for svm
# checked for non-zero bias - not present
# checked for mismatch between coef and support vector length - none
for i in range(len(coef)):
for key in support[i].keys():
if key != -1:
support_arr[i][key-1] = support[i][key]
#print support[i][key]
coef_arr[i] = coef[i][0]
if support[i][-1] != 0:
print support[i][-1]
svm_w += coef_arr[i] * support_arr[i]
angle_diff_svm = dot(w_star, svm_w)
angle_diff_svm /= norm(w_star, 2) * norm(svm_w, 2)
angle_diff_svm = math.acos(angle_diff_svm)
simple_margin_diff = 0
acc.append([angle_diff_svm, angle_diff_algo, simple_margin_diff])
#print angle_diff_algo, angle_diff_svm
local_data = None
if display:
print
return acc
def libsvm_compare_learners(learner_types, dataset, num_trials, num_unlabeled, label_budget, test_size, display = False):
trackers = [StatTracker() for l in learner_types]
acc= []
return_acc_tuple = []
d = dataset.d
for trial in range(num_trials):
local_data = copy.copy(dataset)
local_data.initialize()
avgpointsusage = []
for tracker1, learner_type1 in zip(trackers, learner_types):
local_data.initialize(shuffle = False)
learner1 = learner_type1()
classroom1 = ActiveBatchClassroom(learner1, local_data, tracker1, num_unlabeled, label_budget, test_size)
classroom1.learn()
classroom1.test()
if display:
sys.stdout.write('.')
sys.stdout.flush()
x = zeros((num_unlabeled, d))
y = zeros(num_unlabeled)
local_data.initialize(shuffle = False)
for i in range(num_unlabeled):
temp1, temp2 = local_data.next()
x[i] = temp1
y[i] = temp2
x_mod = x.tolist()
y_mod = y.tolist()
x_train = x_mod[0:label_budget]
y_train = y_mod[0:label_budget]
x_test = x_mod[label_budget:label_budget+test_size]
y_test = y_mod[label_budget:label_budget+test_size]
prob = svm_problem(y_train, x_train)
param = svm_parameter('-q -t 2 -g 0.1 -c 10')
m = svm_train(prob, param)
p_label, p_acc, p_val = svm_predict(y_test, x_test, m)
acc.append(p_acc[0])
    accuracy = sum(acc) / num_trials  # mean accuracy over all trials
print accuracy
return trackers
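# Illustrative driver (not part of the original script): the caller supplies learner
# classes and a dataset from learners.py / datasets.py; the trial counts and budgets
# below are arbitrary examples.
def example_compare(learner_types, dataset):
    trackers = compare_active_batch_learners(learner_types, dataset, num_trials=10,
                                             num_unlabeled=1000, label_budget=100,
                                             test_size=500, display=True)
    return trackers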
| mit | -7,848,801,689,367,619,000 | 29.011236 | 127 | 0.58168 | false |
YulongWu/my-utils | yidian/WYLDocFeatureDumpFetcher.py | 1 | 6233 | #coding: u8
import sys
reload(sys)
sys.setdefaultencoding('u8')
import urllib
import urllib2
import json
import traceback
import datetime
import re
# call format:
class WYLDocFeatureDumpFetcher(object):
serve_url = "http://10.111.0.54:8025/service/feature?docid={0}" #url for doc feature dump
serve_url = "http://10.111.0.54:8025/service/featuredump?docid={0}" #url for doc feature dump
cfb_cols = ['VClickDoc', 'VShareDoc', 'VViewComment', 'VAddComment', 'VLike', 'VDislike', 'VDWell', 'VDWellShortClick', 'VDWellClickDoc', 'ThumbUp', 'ThumbDown', 'RViewDoc']
docInfoBuffer = {}
def _getInfo(self, docid):
if docid in self.docInfoBuffer:
info = self.docInfoBuffer[docid]
else:
try:
req_url = self.serve_url.format(docid)
info = json.loads(urllib2.urlopen(req_url).read())
self.docInfoBuffer[docid] = info
except Exception, e:
print >> sys.stderr, "Error occured for docid: " + docid
print >> sys.stderr, traceback.format_exc()
return None
return info
def _getMetric(self, docid, f):
info = self._getInfo(docid)
if not info:
return -1
else:
clkbt = f(info)
return clkbt
def getDate(self, docid):
date_diff = self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^date'])
date = datetime.datetime.now() - datetime.timedelta(milliseconds = float(date_diff)*100000)
return date.strftime("%Y-%m-%d %H:%M:%S")
def getCFBFromDict(self, cfb_dict):
res_map = {}
for col in self.cfb_cols:
res_map[col] = -1 if col not in cfb_dict else cfb_dict[col]
return res_map
def getAllCFB(self, docid, prefix='all'):
res_map = {}
t = self._getMetric(docid, lambda info:info['result']['clickfeedbacks'][prefix])
if t and t != -1:
for col in self.cfb_cols:
res_map[col] = self._getMetric(docid, lambda info:info['result']['clickfeedbacks'][prefix][col])
return res_map
def _getSegmentCFBs(self, cfb_dict, cur_key, res_dict):
if not cfb_dict:
return
for key in cfb_dict.keys():
if key == 'stats':
                res_dict[cur_key] = self.getCFBFromDict(cfb_dict[key])
            elif key != 'all':
                self._getSegmentCFBs(cfb_dict[key], cur_key + '_' + key, res_dict)
return
def getCFBSegments(self, docid):
cfb_all = self._getMetric(docid, lambda info:info['result']['clickfeedbacks'])
res_dict = {}
self._getSegmentCFBs(cfb_all, '', res_dict)
return res_dict
    # Passed in as the third argument to getBestCFB()
    def bestCFB_getter(self, cfb_dict, numerator_key, denominator_key, denominator_bar):
        if numerator_key not in cfb_dict or denominator_key not in cfb_dict:
            return -1
        denominator = cfb_dict[denominator_key]
        if denominator == -1 or denominator < denominator_bar:
            return -1
        numerator = cfb_dict[numerator_key]
        return 1.0*numerator/denominator
def getBestCFB(self, docid, n_key, d_key, d_bar):
res_dict = self.getCFBSegments(docid)
best_key, best_value = '', 0
for key in res_dict.keys():
v = self.bestCFB_getter(res_dict[key], n_key, d_key, d_bar)
if v > best_value:
best_value = v
best_key = key
return best_key, best_value
def getClkbtScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^clkbt'])
def getLCRScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^plcr'])
def getSCRScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^pscr'])
def _fetchDocumentData(self, docid, pattern):
doc_s = self._getMetric(docid, lambda info:info['result']['documentData'])
if doc_s == -1:
return None
match = re.search(pattern, doc_s)
if match:
return match.group(1)
else:
return None
def getDemandType(self, docid):
return self._fetchDocumentData(docid, 'demandType=(\w+?),')
def getCategories(self, docid):
return self._fetchDocumentData(docid, ' cat=\[(.+?)\]')
    def fetchDictValue(self, d, keys):
        # Walk the nested dict following keys; return None if any level is missing.
        t = d
        for key in keys:
            if t and key in t:
                t = t[key]
            else:
                return None
        return t
def getVClickDoc(self, docid):
# return self._getMetric(docid, lambda info:info['result']['clickfeedbacks']['all']['stats']['VClickDoc'])
info = self._getInfo(docid)
res = self.fetchDictValue(info, ['result', 'clickfeedbacks', 'all', 'stats', 'VClickDoc'])
if not res:
return -1
return res
def getIndepthScore(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^indepth'])
def getBpctr(self, docid):
if self.serve_url.find('featuredump') != -1:
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^bpctr'])
else:
return self._fetchDocumentData(docid, ' sc_bpctr=([\d\.\-e]+?),')
def getTmsstScore(self, docid):
# for http://10.111.0.54:8025/service/featuredump?docid=
if self.serve_url.find('featuredump') != -1:
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^tmsst'])
# for http://10.111.0.54:8025/service/feature?docid=
else:
return self._fetchDocumentData(docid, ' tmsst=(\w+?),')
def getMHot(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^mhot'])
def getRnkc(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^rnkc'])
def getRnksc(self, docid):
return self._getMetric(docid, lambda info:info['result']['docFeatureMap']['d^^rnksc'])
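# Usage sketch (illustrative only): the docid below is a made-up example and the
# feature-dump service must be reachable for these calls to return real values.
if __name__ == '__main__':
    fetcher = WYLDocFeatureDumpFetcher()
    docid = '0AbCdEfG'  # hypothetical document id
    print fetcher.getDate(docid)
    print fetcher.getClkbtScore(docid)
    print fetcher.getVClickDoc(docid)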
| mit | -2,834,678,351,225,799,000 | 36.654545 | 177 | 0.589087 | false |
dplusic/daff | scripts/python23.py | 1 | 1683 | from __future__ import unicode_literals, print_function
try:
import builtins
except:
import __builtin__
builtins = __builtin__
import functools
if hasattr(builtins,'unicode'):
# python2 variant
hxunicode = builtins.unicode
hxunichr = builtins.unichr
hxrange = xrange
def hxnext(x):
return x.next()
if hasattr(functools,"cmp_to_key"):
hx_cmp_to_key = functools.cmp_to_key
else:
# stretch to support python2.6
def hx_cmp_to_key(mycmp):
class K(object):
def __init__(self, obj, *args):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
return K
else:
# python3 variant
hxunicode = str
hxrange = range
hxunichr = chr
def hxnext(x):
return x.__next__()
hx_cmp_to_key = functools.cmp_to_key
python_lib_Builtins = python_lib_Builtin = builtins
String = builtins.str
python_lib_Dict = builtins.dict
python_lib_Set = builtins.set
def get_stdout():
    # python_lib_Sys is expected to be bound by the surrounding daff-generated code
    # (an alias of the standard sys module); it is not defined in this file.
    return (python_lib_Sys.stdout.buffer if hasattr(python_lib_Sys.stdout,"buffer") else python_lib_Sys.stdout)
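# Usage sketch (illustrative, not part of daff): the hx* aliases let the same code
# run under Python 2 and Python 3.
def _example_usage():
    # build a unicode string on either interpreter
    text = hxunicode("").join(hxunichr(65 + i) for i in hxrange(3))  # "ABC"
    # sort with an old-style cmp function via hx_cmp_to_key
    data = sorted([3, 1, 2], key=hx_cmp_to_key(lambda a, b: (a > b) - (a < b)))
    return text, data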
| mit | 7,958,329,564,262,106,000 | 31.365385 | 111 | 0.54902 | false |
p4r4digm/todo-helper | src/todoLogging.py | 1 | 1872 | from src.todoMelvin import settings
from datetime import datetime
from subprocess import check_output
logSender = None
class WarningLevels:
Debug = {'level' : 0, 'tag' : 'DEBUG'}
Info = {'level' : 1, 'tag' : 'INFO'}
Warn = {'level' : 2, 'tag' : 'WARNING'}
Fatal = {'level' : 3, 'tag' : 'FATAL'}
def callWithLogging(callData):
dateTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
messageTag = "%s [%s] [CALL]"%(dateTime, logSender)
try:
with open(settings.logFile, "a") as myfile:
msg = "%s %s"%(messageTag, (' ').join(callData))
myfile.write(msg + "\n")
if settings.logPrintCalls.lower() == 'true':
print msg
output = check_output(callData)
for line in output.split('\n'):
if len(line) > 0:
msg = "%s %s"%(messageTag, line)
myfile.write(msg+ "\n")
if settings.logPrintCalls.lower() == 'true':
print msg
myfile.close()
except:
print "Unable to open logfile for subprocess call \'%s\'"%(' '.join(callData))
return
def log(warningLevel, message):
dateTime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
finalMessage = "%s [%s] [%s] %s"%(dateTime, logSender, warningLevel['tag'], message)
if int(settings.logStdoutWLevel) <= warningLevel['level']:
print finalMessage
if int(settings.logFileWLevel) <= warningLevel['level']:
try:
with open(settings.logFile, "a") as myfile:
myfile.write(finalMessage + "\n")
myfile.close()
except:
print "Unable to open logfile."
return
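# Usage sketch (illustrative only): assumes settings.logFile, settings.logStdoutWLevel,
# settings.logFileWLevel and settings.logPrintCalls are configured; 'ls -la' is just an
# example command.
if __name__ == '__main__':
    logSender = 'example'
    log(WarningLevels.Info, 'starting example run')
    callWithLogging(['ls', '-la'])
    log(WarningLevels.Debug, 'finished example run')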
| mit | -1,290,323,906,406,167,300 | 29.688525 | 88 | 0.498932 | false |
jiadaizhao/LeetCode | 0001-0100/0005-Longest Palindromic Substring/0005-Longest Palindromic Substring.py | 1 | 1407 | class Solution:
def longestPalindrome(self, s):
"""
:type s: str
:rtype: str
"""
n = len(s)
maxLen, maxStart = 0, 0
for i in range(n):
            # odd-length palindromes centered at s[i]
            l, left, right = 1, i - 1, i + 1
while left >= 0 and right < n and s[left] == s[right]:
left -= 1
right += 1
l += 2
if l > maxLen:
maxLen = l
maxStart = left + 1
            # even-length palindromes centered between s[i] and s[i+1]
            l, left, right = 0, i, i + 1
while left >= 0 and right < n and s[left] == s[right]:
left -= 1
right += 1
l += 2
if l > maxLen:
maxLen = l
maxStart = left + 1
return s[maxStart:maxStart + maxLen]
# O(n) - Manacher's algorithm
class Solution2:
    def longestPalindrome(self, s: str) -> str:
        # Interleave '#' and add '^'/'$' sentinels so every palindrome in s
        # (odd or even length) becomes an odd-length palindrome in T.
        T = '#'.join('^{}$'.format(s))
        n = len(T)
        P = [0] * n   # P[i] = palindrome radius centered at T[i]
        C = R = 0     # center and right edge of the rightmost palindrome found
        for i in range(1, n - 1):
            if R > i:
                # Reuse the mirror position's radius, capped by the right edge.
                P[i] = min(R - i, P[2*C - i])
            while T[i + 1 + P[i]] == T[i - 1 - P[i]]:
                P[i] += 1   # expand around center i
            if i + P[i] > R:
                C, R = i, i + P[i]
        maxLen, ci = max((l, i) for i, l in enumerate(P))
        # Map the center index/length in T back to a slice of s.
        return s[(ci - maxLen)//2 : (ci + maxLen)//2]
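# Quick self-check (illustrative): both implementations should find palindromes of
# the same length for any input.
if __name__ == '__main__':
    for s in ('babad', 'cbbd', 'a', ''):
        assert len(Solution().longestPalindrome(s)) == len(Solution2().longestPalindrome(s))
    print(Solution2().longestPalindrome('babad'))  # 'bab' or 'aba'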
| mit | -528,725,900,913,107,800 | 26.588235 | 66 | 0.340441 | false |
aio-libs/aiohttp | aiohttp/web_server.py | 1 | 2247 | """Low level HTTP server."""
import asyncio
import warnings
from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa
from .abc import AbstractStreamWriter
from .http_parser import RawRequestMessage
from .streams import StreamReader
from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler
from .web_request import BaseRequest
__all__ = ("Server",)
class Server:
def __init__(
self,
handler: _RequestHandler,
*,
request_factory: Optional[_RequestFactory] = None,
debug: Optional[bool] = None,
**kwargs: Any,
) -> None:
if debug is not None:
warnings.warn(
"debug argument is no-op since 4.0 " "and scheduled for removal in 5.0",
DeprecationWarning,
stacklevel=2,
)
self._loop = asyncio.get_running_loop()
self._connections = {} # type: Dict[RequestHandler, asyncio.Transport]
self._kwargs = kwargs
self.requests_count = 0
self.request_handler = handler
self.request_factory = request_factory or self._make_request
@property
def connections(self) -> List[RequestHandler]:
return list(self._connections.keys())
def connection_made(
self, handler: RequestHandler, transport: asyncio.Transport
) -> None:
self._connections[handler] = transport
def connection_lost(
self, handler: RequestHandler, exc: Optional[BaseException] = None
) -> None:
if handler in self._connections:
del self._connections[handler]
def _make_request(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: RequestHandler,
writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
) -> BaseRequest:
return BaseRequest(message, payload, protocol, writer, task, self._loop)
async def shutdown(self, timeout: Optional[float] = None) -> None:
coros = [conn.shutdown(timeout) for conn in self._connections]
await asyncio.gather(*coros)
self._connections.clear()
def __call__(self) -> RequestHandler:
return RequestHandler(self, loop=self._loop, **self._kwargs)
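# Usage sketch (illustrative, not part of aiohttp): drive this low-level Server with
# the documented runner API; the handler, host and port are arbitrary examples.
def _example_low_level_server() -> None:
    # Imports are local to avoid a circular import when this module is loaded.
    import asyncio
    from aiohttp import web

    async def handler(request):
        return web.Response(text="OK")

    async def main() -> None:
        server = Server(handler)
        runner = web.ServerRunner(server)
        await runner.setup()
        site = web.TCPSite(runner, "localhost", 8080)
        await site.start()
        await asyncio.sleep(3600)  # keep serving for an hour

    asyncio.run(main())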
| apache-2.0 | -5,668,502,912,688,113,000 | 32.044118 | 88 | 0.634179 | false |
Lukas-Stuehrk/selenese | selenese/patterns.py | 1 | 1351 | import re
from fnmatch import fnmatch
class Pattern(object):
def __init__(self, pattern_string):
self.pattern_string = pattern_string
class ExactPattern(Pattern):
def compare(self, string):
return self.pattern_string == string
class RegexPattern(Pattern):
def compare(self, string):
if not hasattr(self, '_regex'):
self._regex = re.compile(self.pattern_string)
return self._regex.sub('', string) == ''
class RegexIgnorecasePattern(Pattern):
def compare(self, string):
if not hasattr(self, '_regex'):
self._regex = re.compile(self.pattern_string, flags=re.IGNORECASE)
return self._regex.sub('', string) == ''
class GlobPattern(Pattern):
def compare(self, string):
return fnmatch(string, self.pattern_string)
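# Illustrative helper (not part of the original module): shows how the pattern
# prefixes map onto the classes above; create_pattern() is defined just below.
def _example_matches():
    return [
        create_pattern('exact:hello world').compare('hello world'),  # True
        create_pattern('glob:hel*').compare('hello'),                # True
        create_pattern('regexp:h.llo').compare('hello'),             # True
        create_pattern('Hello*').compare('Hello there'),             # no prefix -> glob
    ]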
def create_pattern(pattern_string):
if pattern_string.startswith('exact:'):
return ExactPattern(pattern_string[6:])
elif pattern_string.startswith('glob:'):
return GlobPattern(pattern_string[5:])
elif pattern_string.startswith('regexp:'):
return RegexPattern(pattern_string[7:])
elif pattern_string.startswith('regexpi:'):
return RegexIgnorecasePattern(pattern_string[8:])
    # if no pattern scheme is given, assume that it is a 'glob' pattern
return GlobPattern(pattern_string) | bsd-3-clause | 8,510,309,876,751,397,000 | 29.727273 | 78 | 0.670614 | false |
shear/rppy | test_ruger_hti.py | 2 | 2682 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 17:24:04 2015
@author: Sean
"""
import rppy
import numpy as np
import matplotlib.pyplot as plt
p1 = 2000
vp1 = 3000
vs1 = 1500
e1 = 0.0
d1 = 0.0
y1 = 0.0
p2 = 2200
vp2 = 4000
vs2 = 2000
y2 = 0.1
d2 = 0.1
e2 = 0.1
theta = 30
phi = np.arange(0, 90, 1)
phit = np.array([1.2500, 4.9342, 8.6184, 11.842, 15.526, 19.211, 22.664,
25.888, 28.421, 30.724, 34.638, 38.092, 41.546, 45.461,
49.375, 53.289, 56.974, 60.888, 65.493, 69.408, 73.783,
79.079, 84.375, 89.211])
exp = np.array([0.19816, 0.19816, 0.19678, 0.19539, 0.19263, 0.19056,
0.18711, 0.18365, 0.18020, 0.17813, 0.17329, 0.16845,
0.16431, 0.15878, 0.15326, 0.14842, 0.14359, 0.13875,
0.13391, 0.12977, 0.12632, 0.12286, 0.12079, 0.12010])
Rpp = np.zeros(np.shape(phi))
Rpo = np.zeros(np.shape(phi))
Rpk = np.zeros(np.shape(phi))
for ind, phiv in enumerate(phi):
Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y2,
theta, phiv)
Rpo[ind] = rppy.reflectivity.exact_ortho(rppy.reflectivity.Cij(vp1, vs1, p1, 0, 0, 0, e1, d1, y1, 0), p1,
rppy.reflectivity.Cij(vp2, vs2, p2, 0, 0, 0, e2, d2, y2, 0), p2,
0, 0, phiv, theta)
Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
phiv, theta)
plt.figure(1)
plt.plot(phi, Rpp, phi, Rpo, phi, Rpk)
plt.show()
theta = np.arange(0, 60, 1)
phi = 45
Rpp = np.zeros(np.shape(theta))
Rpo = np.zeros(np.shape(theta))
Rpk = np.zeros(np.shape(theta))
Rpa = np.zeros(np.shape(theta))
for ind, thetav in enumerate(theta):
Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
thetav, phi)
Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y1,
phi, thetav)
Rpo = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, theta)
Rpa = rppy.reflectivity.aki_richards(vp1, vs1, p1, vp2, vs2, p2, theta)
plt.figure(2)
plt.plot(theta, Rpp, theta, Rpo, theta, Rpk, theta, Rpa)
plt.xlim([0, 60])
plt.ylim([0.125, 0.275])
plt.legend(['Ruger', 'Zoe', 'Vavrycuk', 'A-R'])
plt.show()
| bsd-2-clause | -2,847,318,867,181,452,000 | 32.111111 | 109 | 0.503356 | false |
vjFaLk/frappe | frappe/core/doctype/communication/email.py | 1 | 19348 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
from six.moves import range
from six import string_types
import frappe
import json
from email.utils import formataddr
from frappe.core.utils import get_parent_doc
from frappe.utils import (get_url, get_formatted_email, cint,
validate_email_add, split_emails, time_diff_in_seconds, parse_addr, get_datetime)
from frappe.utils.file_manager import get_file, add_attachments
from frappe.email.queue import check_email_limit
from frappe.utils.scheduler import log
from frappe.email.email_body import get_message_id
import frappe.email.smtp
import time
from frappe import _
from frappe.utils.background_jobs import enqueue
# imports - third-party imports
import pymysql
from pymysql.constants import ER
@frappe.whitelist()
def make(doctype=None, name=None, content=None, subject=None, sent_or_received = "Sent",
sender=None, sender_full_name=None, recipients=None, communication_medium="Email", send_email=False,
print_html=None, print_format=None, attachments='[]', send_me_a_copy=False, cc=None, bcc=None,
flags=None, read_receipt=None, print_letterhead=True):
"""Make a new communication.
:param doctype: Reference DocType.
:param name: Reference Document name.
:param content: Communication body.
:param subject: Communication subject.
:param sent_or_received: Sent or Received (default **Sent**).
	:param sender: Communication sender (default current user).
	:param recipients: Communication recipients as list.
	:param communication_medium: Medium of communication (default **Email**).
	:param send_email: Send via email (default **False**).
:param print_html: HTML Print format to be sent as attachment.
:param print_format: Print Format name of parent document to be sent as attachment.
:param attachments: List of attachments as list of files or JSON string.
:param send_me_a_copy: Send a copy to the sender (default **False**).
"""
is_error_report = (doctype=="User" and name==frappe.session.user and subject=="Error Report")
send_me_a_copy = cint(send_me_a_copy)
if doctype and name and not is_error_report and not frappe.has_permission(doctype, "email", name) and not (flags or {}).get('ignore_doctype_permissions'):
raise frappe.PermissionError("You are not allowed to send emails related to: {doctype} {name}".format(
doctype=doctype, name=name))
if not sender:
sender = get_formatted_email(frappe.session.user)
comm = frappe.get_doc({
"doctype":"Communication",
"subject": subject,
"content": content,
"sender": sender,
"sender_full_name":sender_full_name,
"recipients": recipients,
"cc": cc or None,
"bcc": bcc or None,
"communication_medium": communication_medium,
"sent_or_received": sent_or_received,
"reference_doctype": doctype,
"reference_name": name,
"message_id":get_message_id().strip(" <>"),
"read_receipt":read_receipt,
"has_attachment": 1 if attachments else 0
})
comm.insert(ignore_permissions=True)
if not doctype:
# if no reference given, then send it against the communication
comm.db_set(dict(reference_doctype='Communication', reference_name=comm.name))
if isinstance(attachments, string_types):
attachments = json.loads(attachments)
# if not committed, delayed task doesn't find the communication
if attachments:
add_attachments("Communication", comm.name, attachments)
frappe.db.commit()
if cint(send_email):
frappe.flags.print_letterhead = cint(print_letterhead)
comm.send(print_html, print_format, attachments, send_me_a_copy=send_me_a_copy)
return {
"name": comm.name,
"emails_not_sent_to": ", ".join(comm.emails_not_sent_to) if hasattr(comm, "emails_not_sent_to") else None
}
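# Illustrative call (not part of the original module): send a document-linked email
# through `make`. The doctype, document name and addresses below are hypothetical.
def _example_make_email():
	return make(
		doctype="ToDo",
		name="TODO-0001",
		subject="Status update",
		content="<p>Please review.</p>",
		recipients="[email protected]",
		cc="[email protected]",
		send_email=True,
	)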
def validate_email(doc):
"""Validate Email Addresses of Recipients and CC"""
if not (doc.communication_type=="Communication" and doc.communication_medium == "Email") or doc.flags.in_receive:
return
# validate recipients
for email in split_emails(doc.recipients):
validate_email_add(email, throw=True)
# validate CC
for email in split_emails(doc.cc):
validate_email_add(email, throw=True)
for email in split_emails(doc.bcc):
validate_email_add(email, throw=True)
# validate sender
def notify(doc, print_html=None, print_format=None, attachments=None,
recipients=None, cc=None, bcc=None, fetched_from_email_account=False):
"""Calls a delayed task 'sendmail' that enqueus email in Email Queue queue
:param print_html: Send given value as HTML attachment
:param print_format: Attach print format of parent document
:param attachments: A list of filenames that should be attached when sending this email
:param recipients: Email recipients
:param cc: Send email as CC to
:param bcc: Send email as BCC to
:param fetched_from_email_account: True when pulling email, the notification shouldn't go to the main recipient
"""
recipients, cc, bcc = get_recipients_cc_and_bcc(doc, recipients, cc, bcc,
fetched_from_email_account=fetched_from_email_account)
if not recipients and not cc:
return
doc.emails_not_sent_to = set(doc.all_email_addresses) - set(doc.sent_email_addresses)
if frappe.flags.in_test:
# for test cases, run synchronously
doc._notify(print_html=print_html, print_format=print_format, attachments=attachments,
recipients=recipients, cc=cc, bcc=None)
else:
check_email_limit(list(set(doc.sent_email_addresses)))
enqueue(sendmail, queue="default", timeout=300, event="sendmail",
communication_name=doc.name,
print_html=print_html, print_format=print_format, attachments=attachments,
recipients=recipients, cc=cc, bcc=bcc, lang=frappe.local.lang,
session=frappe.local.session, print_letterhead=frappe.flags.print_letterhead)
def _notify(doc, print_html=None, print_format=None, attachments=None,
recipients=None, cc=None, bcc=None):
prepare_to_notify(doc, print_html, print_format, attachments)
if doc.outgoing_email_account.send_unsubscribe_message:
unsubscribe_message = _("Leave this conversation")
else:
unsubscribe_message = ""
frappe.sendmail(
recipients=(recipients or []),
cc=(cc or []),
bcc=(bcc or []),
expose_recipients="header",
sender=doc.sender,
reply_to=doc.incoming_email_account,
subject=doc.subject,
content=doc.content,
reference_doctype=doc.reference_doctype,
reference_name=doc.reference_name,
attachments=doc.attachments,
message_id=doc.message_id,
unsubscribe_message=unsubscribe_message,
delayed=True,
communication=doc.name,
read_receipt=doc.read_receipt,
is_notification=True if doc.sent_or_received =="Received" else False,
print_letterhead=frappe.flags.print_letterhead
)
def update_parent_mins_to_first_response(doc):
"""Update mins_to_first_communication of parent document based on who is replying."""
parent = get_parent_doc(doc)
if not parent:
return
# update parent mins_to_first_communication only if we create the Email communication
# ignore in case of only Comment is added
if doc.communication_type == "Comment":
return
status_field = parent.meta.get_field("status")
if status_field:
options = (status_field.options or '').splitlines()
# if status has a "Replied" option, then update the status for received communication
if ('Replied' in options) and doc.sent_or_received=="Received":
parent.db_set("status", "Open")
else:
# update the modified date for document
parent.update_modified()
update_mins_to_first_communication(parent, doc)
parent.run_method('notify_communication', doc)
parent.notify_update()
def get_recipients_cc_and_bcc(doc, recipients, cc, bcc, fetched_from_email_account=False):
doc.all_email_addresses = []
doc.sent_email_addresses = []
doc.previous_email_sender = None
if not recipients:
recipients = get_recipients(doc, fetched_from_email_account=fetched_from_email_account)
if not cc:
cc = get_cc(doc, recipients, fetched_from_email_account=fetched_from_email_account)
if not bcc:
bcc = get_bcc(doc, recipients, fetched_from_email_account=fetched_from_email_account)
if fetched_from_email_account:
# email was already sent to the original recipient by the sender's email service
original_recipients, recipients = recipients, []
# send email to the sender of the previous email in the thread which this email is a reply to
#provides erratic results and can send external
#if doc.previous_email_sender:
# recipients.append(doc.previous_email_sender)
# cc that was received in the email
original_cc = split_emails(doc.cc)
# don't cc to people who already received the mail from sender's email service
cc = list(set(cc) - set(original_cc) - set(original_recipients))
remove_administrator_from_email_list(cc)
original_bcc = split_emails(doc.bcc)
bcc = list(set(bcc) - set(original_bcc) - set(original_recipients))
remove_administrator_from_email_list(bcc)
remove_administrator_from_email_list(recipients)
return recipients, cc, bcc
def remove_administrator_from_email_list(email_list):
if 'Administrator' in email_list:
email_list.remove('Administrator')
def prepare_to_notify(doc, print_html=None, print_format=None, attachments=None):
"""Prepare to make multipart MIME Email
:param print_html: Send given value as HTML attachment.
:param print_format: Attach print format of parent document."""
view_link = frappe.utils.cint(frappe.db.get_value("Print Settings", "Print Settings", "attach_view_link"))
if print_format and view_link:
doc.content += get_attach_link(doc, print_format)
set_incoming_outgoing_accounts(doc)
if not doc.sender:
doc.sender = doc.outgoing_email_account.email_id
if not doc.sender_full_name:
doc.sender_full_name = doc.outgoing_email_account.name or _("Notification")
if doc.sender:
# combine for sending to get the format 'Jane <[email protected]>'
doc.sender = formataddr([doc.sender_full_name, doc.sender])
doc.attachments = []
if print_html or print_format:
doc.attachments.append({"print_format_attachment":1, "doctype":doc.reference_doctype,
"name":doc.reference_name, "print_format":print_format, "html":print_html})
if attachments:
if isinstance(attachments, string_types):
attachments = json.loads(attachments)
for a in attachments:
if isinstance(a, string_types):
# is it a filename?
try:
# keep this for error handling
file = get_file(a)
# these attachments will be attached on-demand
# and won't be stored in the message
doc.attachments.append({"fid": a})
except IOError:
frappe.throw(_("Unable to find attachment {0}").format(a))
else:
doc.attachments.append(a)
def set_incoming_outgoing_accounts(doc):
doc.incoming_email_account = doc.outgoing_email_account = None
if not doc.incoming_email_account and doc.sender:
doc.incoming_email_account = frappe.db.get_value("Email Account",
{"email_id": doc.sender, "enable_incoming": 1}, "email_id")
if not doc.incoming_email_account and doc.reference_doctype:
doc.incoming_email_account = frappe.db.get_value("Email Account",
{"append_to": doc.reference_doctype, }, "email_id")
doc.outgoing_email_account = frappe.db.get_value("Email Account",
{"append_to": doc.reference_doctype, "enable_outgoing": 1},
["email_id", "always_use_account_email_id_as_sender", "name",
"always_use_account_name_as_sender_name"], as_dict=True)
if not doc.incoming_email_account:
doc.incoming_email_account = frappe.db.get_value("Email Account",
{"default_incoming": 1, "enable_incoming": 1}, "email_id")
if not doc.outgoing_email_account:
# if from address is not the default email account
doc.outgoing_email_account = frappe.db.get_value("Email Account",
{"email_id": doc.sender, "enable_outgoing": 1},
["email_id", "always_use_account_email_id_as_sender", "name",
"send_unsubscribe_message", "always_use_account_name_as_sender_name"], as_dict=True) or frappe._dict()
if not doc.outgoing_email_account:
doc.outgoing_email_account = frappe.db.get_value("Email Account",
{"default_outgoing": 1, "enable_outgoing": 1},
["email_id", "always_use_account_email_id_as_sender", "name",
"send_unsubscribe_message", "always_use_account_name_as_sender_name"],as_dict=True) or frappe._dict()
if doc.sent_or_received == "Sent":
doc.db_set("email_account", doc.outgoing_email_account.name)
def get_recipients(doc, fetched_from_email_account=False):
"""Build a list of email addresses for To"""
# [EDGE CASE] doc.recipients can be None when an email is sent as BCC
recipients = split_emails(doc.recipients)
#if fetched_from_email_account and doc.in_reply_to:
# add sender of previous reply
#doc.previous_email_sender = frappe.db.get_value("Communication", doc.in_reply_to, "sender")
#recipients.append(doc.previous_email_sender)
if recipients:
recipients = filter_email_list(doc, recipients, [])
return recipients
def get_cc(doc, recipients=None, fetched_from_email_account=False):
"""Build a list of email addresses for CC"""
# get a copy of CC list
cc = split_emails(doc.cc)
if doc.reference_doctype and doc.reference_name:
if fetched_from_email_account:
# if it is a fetched email, add follows to CC
cc.append(get_owner_email(doc))
cc += get_assignees(doc)
if getattr(doc, "send_me_a_copy", False) and doc.sender not in cc:
cc.append(doc.sender)
if cc:
# exclude unfollows, recipients and unsubscribes
exclude = [] #added to remove account check
exclude += [d[0] for d in frappe.db.get_all("User", ["email"], {"thread_notify": 0}, as_list=True)]
exclude += [(parse_addr(email)[1] or "").lower() for email in recipients]
if fetched_from_email_account:
# exclude sender when pulling email
exclude += [parse_addr(doc.sender)[1]]
if doc.reference_doctype and doc.reference_name:
exclude += [d[0] for d in frappe.db.get_all("Email Unsubscribe", ["email"],
{"reference_doctype": doc.reference_doctype, "reference_name": doc.reference_name}, as_list=True)]
cc = filter_email_list(doc, cc, exclude, is_cc=True)
return cc
def get_bcc(doc, recipients=None, fetched_from_email_account=False):
"""Build a list of email addresses for BCC"""
bcc = split_emails(doc.bcc)
if bcc:
exclude = []
exclude += [d[0] for d in frappe.db.get_all("User", ["email"], {"thread_notify": 0}, as_list=True)]
exclude += [(parse_addr(email)[1] or "").lower() for email in recipients]
if fetched_from_email_account:
# exclude sender when pulling email
exclude += [parse_addr(doc.sender)[1]]
if doc.reference_doctype and doc.reference_name:
exclude += [d[0] for d in frappe.db.get_all("Email Unsubscribe", ["email"],
{"reference_doctype": doc.reference_doctype, "reference_name": doc.reference_name}, as_list=True)]
bcc = filter_email_list(doc, bcc, exclude, is_bcc=True)
return bcc
def filter_email_list(doc, email_list, exclude, is_cc=False, is_bcc=False):
# temp variables
filtered = []
email_address_list = []
for email in list(set(email_list)):
email_address = (parse_addr(email)[1] or "").lower()
if not email_address:
continue
# this will be used to eventually find email addresses that aren't sent to
doc.all_email_addresses.append(email_address)
if (email in exclude) or (email_address in exclude):
continue
if is_cc:
is_user_enabled = frappe.db.get_value("User", email_address, "enabled")
if is_user_enabled==0:
# don't send to disabled users
continue
if is_bcc:
is_user_enabled = frappe.db.get_value("User", email_address, "enabled")
if is_user_enabled==0:
continue
# make sure of case-insensitive uniqueness of email address
if email_address not in email_address_list:
# append the full email i.e. "Human <[email protected]>"
filtered.append(email)
email_address_list.append(email_address)
doc.sent_email_addresses.extend(email_address_list)
return filtered
def get_owner_email(doc):
owner = get_parent_doc(doc).owner
return get_formatted_email(owner) or owner
def get_assignees(doc):
return [( get_formatted_email(d.owner) or d.owner ) for d in
frappe.db.get_all("ToDo", filters={
"reference_type": doc.reference_doctype,
"reference_name": doc.reference_name,
"status": "Open"
}, fields=["owner"])
]
def get_attach_link(doc, print_format):
"""Returns public link for the attachment via `templates/emails/print_link.html`."""
return frappe.get_template("templates/emails/print_link.html").render({
"url": get_url(),
"doctype": doc.reference_doctype,
"name": doc.reference_name,
"print_format": print_format,
"key": get_parent_doc(doc).get_signature()
})
def sendmail(communication_name, print_html=None, print_format=None, attachments=None,
recipients=None, cc=None, bcc=None, lang=None, session=None, print_letterhead=None):
try:
if lang:
frappe.local.lang = lang
if session:
# hack to enable access to private files in PDF
session['data'] = frappe._dict(session['data'])
frappe.local.session.update(session)
if print_letterhead:
frappe.flags.print_letterhead = print_letterhead
# upto 3 retries
for i in range(3):
try:
communication = frappe.get_doc("Communication", communication_name)
communication._notify(print_html=print_html, print_format=print_format, attachments=attachments,
recipients=recipients, cc=cc, bcc=bcc)
except pymysql.InternalError as e:
# deadlock, try again
if e.args[0] == ER.LOCK_DEADLOCK:
frappe.db.rollback()
time.sleep(1)
continue
else:
raise
else:
break
except:
traceback = log("frappe.core.doctype.communication.email.sendmail", frappe.as_json({
"communication_name": communication_name,
"print_html": print_html,
"print_format": print_format,
"attachments": attachments,
"recipients": recipients,
"cc": cc,
"bcc": bcc,
"lang": lang
}))
frappe.logger(__name__).error(traceback)
raise
def update_mins_to_first_communication(parent, communication):
if parent.meta.has_field('mins_to_first_response') and not parent.get('mins_to_first_response'):
if frappe.db.get_all('User', filters={'email': communication.sender,
'user_type': 'System User', 'enabled': 1}, limit=1):
first_responded_on = communication.creation
if parent.meta.has_field('first_responded_on'):
parent.db_set('first_responded_on', first_responded_on)
			parent.db_set('mins_to_first_response', round(time_diff_in_seconds(first_responded_on, parent.creation) / 60, 2))
@frappe.whitelist(allow_guest=True)
def mark_email_as_seen(name=None):
try:
if name and frappe.db.exists("Communication", name) and not frappe.db.get_value("Communication", name, "read_by_recipient"):
frappe.db.set_value("Communication", name, "read_by_recipient", 1)
frappe.db.set_value("Communication", name, "delivery_status", "Read")
frappe.db.set_value("Communication", name, "read_by_recipient_on", get_datetime())
frappe.db.commit()
except Exception:
frappe.log_error(frappe.get_traceback())
finally:
# Return image as response under all circumstances
from PIL import Image
import io
im = Image.new('RGBA', (1, 1))
im.putdata([(255,255,255,0)])
buffered_obj = io.BytesIO()
im.save(buffered_obj, format="PNG")
frappe.response["type"] = 'binary'
frappe.response["filename"] = "imaginary_pixel.png"
frappe.response["filecontent"] = buffered_obj.getvalue()
| mit | 5,878,466,721,758,545,000 | 34.896104 | 155 | 0.723641 | false |
apanda/modeling | tests/num_dnode_test.py | 1 | 1303 | import z3
from z3 import is_true, is_false
from examples import *
import time
import mcnet.components as components
import random
import sys
"""Check time as increase in nodes"""
def ResetZ3 ():
z3._main_ctx = None
z3.main_ctx()
z3.set_param('auto_config', False)
z3.set_param('smt.mbqi', True)
z3.set_param('model.compact', True)
z3.set_param('smt.pull_nested_quantifiers', True)
z3.set_param('smt.mbqi.max_iterations', 10000)
z3.set_param('smt.random_seed', random.SystemRandom().randint(0, sys.maxint))
iters = 10
bad_in_row = 0
for sz in xrange(2, 200):
times = []
all_bad = True
for it in xrange(0, iters):
ResetZ3()
obj = NumDumbNodesTest (sz)
start = time.time()
# Set timeout to some largish number
obj.check.solver.set(timeout=10000000)
ret = obj.check.CheckIsolationProperty(obj.e_0, obj.e_1)
bad = False
if z3.sat != ret.result:
bad = True
stop = time.time()
if not bad:
times.append(stop - start)
all_bad = False
print "%d %s %s"%(sz, ' '.join(map(str, times)), "bad" if all_bad else "good")
if all_bad:
bad_in_row += 1
else:
bad_in_row = 0
assert bad_in_row <= 5, \
"Too many failures"
| bsd-3-clause | 4,604,562,846,646,764,000 | 29.302326 | 82 | 0.594014 | false |
h2oloopan/easymerge | EasyMerge/tests/reddit/scripts/migrate/backfill/subreddit_images.py | 1 | 2325 |
# The contents of this file are subject to the Common Public Attribution
# License Version 1.0. (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://code.reddit.com/LICENSE. The License is based on the Mozilla Public
# License Version 1.1, but Sections 14 and 15 have been added to cover use of
# software over a computer network and provide for limited attribution for the
# Original Developer. In addition, Exhibit A has been modified to be consistent
# with Exhibit B.
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License for
# the specific language governing rights and limitations under the License.
#
# The Original Code is reddit.
#
# The Original Developer is the Initial Developer. The Initial Developer of
# the Original Code is reddit Inc.
#
# All portions of the code written by reddit are Copyright (c) 2006-2013 reddit
# Inc. All Rights Reserved.
###############################################################################
import urllib2
from pylons import g
from r2.lib.db.operators import desc
from r2.lib.utils import fetch_things2
from r2.lib.media import upload_media
from r2.models.subreddit import Subreddit
from r2.models.wiki import WikiPage, ImagesByWikiPage
all_subreddits = Subreddit._query(sort=desc("_date"))
for sr in fetch_things2(all_subreddits):
images = sr.images.copy()
images.pop("/empties/", None)
if not images:
continue
print 'Processing /r/%s (id36: %s)' % (sr.name, sr._id36)
# upgrade old-style image ids to urls
for name, image_url in images.items():
if not isinstance(image_url, int):
continue
print " upgrading image %r" % image_url
url = "http://%s/%s_%d.png" % (g.s3_old_thumb_bucket,
sr._fullname, image_url)
image_data = urllib2.urlopen(url).read()
new_url = upload_media(image_data, file_type=".png")
images[name] = new_url
# use a timestamp of zero to make sure that we don't overwrite any changes
# from live dual-writes.
rowkey = WikiPage.id_for(sr, "config/stylesheet")
ImagesByWikiPage._cf.insert(rowkey, images, timestamp=0)
| mit | -8,692,393,015,163,003,000 | 37.75 | 79 | 0.68 | false |
wyoder/pubs | ui/fields.py | 1 | 14935 | '''
Fields that are used in our UI
#.. todo: Make a field specifically for lists
'''
from pubs.ui import *
import pubs.ui.models as models
import pubs.pGraph as pGraph
import pubs.pNode as pNode
class BaseField(QtWidgets.QWidget):
def __init__(self, label, value = None, description = str(), parent = None, attribute = None):
super(BaseField, self).__init__(parent)
self.__label = QtWidgets.QLabel(label)
self.__label.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Fixed)
self.__value = value
#self.__description = self.setAccessibleDescription(description)
self.__attribute= attribute
self.setContentsMargins(0,2,0,2)
def label(self):
return self.__label
def attribute(self):
return self.__attribute
def labelText(self):
return self.__label.text()
def setLabel(self, value):
self.__label.setText(value)
def value(self):
return self.__value
def setValue(self,value):
self.__value = value
if self.__attribute:
self.__attribute.setValue(value)
def setDescription(self, value):
'''
Sets the description of the current field
@param value: String describing the field
@type value: *str* or *QString*
'''
#Check type
if not isinstance(value, basestring) and not isinstance(value, QtCore.QString):
raise TypeError('%s must be a string or QString' % value)
        #set values
        self.__description = value
        self.setAccessibleDescription(value)  # expose the description on the widget as well
class LineEditField(BaseField):
def __init__(self, *args, **kwargs):
super(LineEditField, self).__init__(*args,**kwargs)
self._layout = QtWidgets.QHBoxLayout()
self._lineEdit = QtWidgets.QLineEdit()
self._lineEdit.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding,
QtWidgets.QSizePolicy.Policy.Fixed)
#set text if any value
if self.value():
self.setText(self.value())
#self._lineEdit.setMaximumHeight(20)
self._lineEdit.setMinimumHeight(40)
self._lineEdit.setMinimumWidth(200)
self._lineEdit.textChanged.connect(self.setText)
self._layout.addWidget(self.label())
self._layout.addWidget(self._lineEdit)
self._layout.addStretch()
self.setLayout(self._layout)
def setText(self, value):
'''
Sets the text for the QLineEdit
'''
if not isinstance(value, basestring) and not isinstance(value, QtCore.QString):
            raise TypeError('%s must be a string' % value)
#get the souce of the call for setText function
source = self.sender()
#set the value on field
self.setValue(str(value))
#set lineEdit text
if not source == self._lineEdit:
self._lineEdit.setText(value)
def getLineEdit(self):
return self._lineEdit
class DirBrowserField(LineEditField):
def __init__(self, *args, **kwargs):
super(DirBrowserField, self).__init__(*args, **kwargs)
self._dirBrowseButton = QtWidgets.QPushButton(QtGui.QIcon( os.path.join(os.path.dirname( __file__ ), 'icons/folder.png') ),'')
self._dirBrowseButton.clicked.connect(self._getDir)
self._layout.addWidget(self._dirBrowseButton)
self._layout.setContentsMargins(0,0,0,0)
def _getDir(self,index):
dir = QtWidgets.QFileDialog.getExistingDirectory(self, 'open', str(os.getcwd()))
self.setText(str(dir))
class FileBrowserField(LineEditField):
def __init__(self, mode = 'open', filter = "", *args, **kwargs):
super(FileBrowserField, self).__init__(*args, **kwargs)
self.__mode = mode.lower()
self.__filter = filter
self._fileBrowseButton = QtWidgets.QPushButton(QtGui.QIcon( os.path.join(os.path.dirname( __file__ ), 'icons/folder.png') ),'')
self._fileBrowseButton.clicked.connect(self._getFile)
self._layout.addWidget(self._fileBrowseButton)
self._layout.addStretch()
self._layout.setContentsMargins(0,0,0,0)
def _getFile(self,*args):
if self.__mode == 'save':
file = QtWidgets.QFileDialog.getSaveFileName(self, 'save', str(os.getcwd()), self.__filter)[0]
else:
file = QtWidgets.QFileDialog.getOpenFileName(self, 'open', str(os.getcwd()), self.__filter)[0]
if file:
self.setText(str(file))
class ListField(BaseField):
def __init__(self, *args, **kwargs):
super(ListField, self).__init__(*args, **kwargs)
self.listGraph = pGraph.PGraph('listGraph')
for value in self.value():
self.listGraph.addNode(value)
self._model = models.LayerGraphModel(self.listGraph)
self._layout = QtWidgets.QHBoxLayout()
self._listView = QtWidgets.QListView()
self._listView.setModel(self._model)
self._listView.setMaximumHeight(100)
#self._listView.setMaximumWidth(100)
self._layout.addWidget(self.label())
self._layout.addWidget(self._listView)
self.setLayout(self._layout)
self._layout.setContentsMargins(0,0,0,0)
self._layout.addStretch()
#CONTEXT MENU
self._listView.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.connect(self._listView, QtCore.SIGNAL("customContextMenuRequested(const QPoint &)"), self.showCustomContextMenu)
def showCustomContextMenu(self, pos):
'''
Show the context menu at the position of the curser
:param pos: The point where the curser is on the screen
:type pos: QtCore.QPoint
'''
index = self._listView.indexAt(pos)
if not index.isValid():
return
node = self._model.itemFromIndex(index)
#If node is disabled, return
if not node.isActive():
return
#construct menus
mainMenu = QtWidgets.QMenu(self)
#main menu actions
mainMenu.addSeparator()
addNodeAction = mainMenu.addAction('Add Item')
removeNodeAction = mainMenu.addAction('Remove Item')
QtCore.QObject.connect(addNodeAction, QtCore.SIGNAL('triggered()'), self.__addDialog)
QtCore.QObject.connect(removeNodeAction, QtCore.SIGNAL('triggered()'), self._removeSelectedNode)
mainMenu.popup(QtGui.QCursor.pos())
def _removeSelectedNode(self):
index = self._listView.currentIndex()
node = self._selectedNode()
#self._model.removeRows(index.row(), 1, self._model)
if node:
self._model.beginRemoveRows( index.parent(), index.row(), index.row()+1-1 )
self.listGraph.removeNode(node)
self._model.endRemoveRows()
del node
self.setValue(self.listGraph.nodeNames())
def _addNode(self,value):
if not isinstance(self.value(),list):
self.setValue([value])
        else:
            values = self.value()
            values.append(value)  # list.append() returns None, so append first, then set
            self.setValue(values)
self.listGraph.addNode(value)
self._model = models.LayerGraphModel(self.listGraph)
self._listView.setModel(self._model)
def __addDialog(self,*args):
dialog = QtWidgets.QDialog(self)
dialog.exec_()
def _selectedNode(self):
'''
Returns the selected node
'''
index = self._listView.currentIndex()
if not index.isValid():
return None
return self._model.itemFromIndex(index)
class TextEditField(BaseField):
def __init__(self, *args, **kwargs):
super(TextEditField, self).__init__(*args, **kwargs)
self._textEdit = QtWidgets.QPlainTextEdit(self.value())
self._layout = QtWidgets.QVBoxLayout()
self._layout.addWidget(self.label())
self._layout.addWidget(self._textEdit)
self._layout.addStretch()
self.setLayout(self._layout)
self._textEdit.textChanged.connect(self.setText)
self._layout.setContentsMargins(0,0,0,0)
def setText(self):
self.setValue(str(self._textEdit.toPlainText()))
class IntField(BaseField):
def __init__(self, label, value = 0, description = str(), parent = None, min = -100, max = 100, **kwargs):
super(IntField, self).__init__(label, value, description, parent, **kwargs)
self._layout = QtWidgets.QHBoxLayout()
self._intBox = QtWidgets.QSpinBox()
self._intBox.setRange(min,max)
self._layout.addWidget(self.label())
if value:
self._intBox.setValue(value)
self._intBox.valueChanged.connect(self.setValue)
self._layout.addWidget(self._intBox)
self._layout.addStretch()
self.setLayout(self._layout)
self._layout.setContentsMargins(0,0,0,0)
def setValue(self, value):
'''
Sets the text for the QLineEdit
'''
if not isinstance(value, int):
raise TypeError('%s must be an integer' % value)
#get the source of where the function is being called
source = self.sender()
#set field value
super(IntField, self).setValue(value)
#set spinBox value
if not source == self._intBox:
self._intBox.setValue(value)
def value(self):
value = self._intBox.value()
super(IntField, self).setValue(int(value))
return super(IntField,self).value()
class VectorField(BaseField):
def __init__(self, *args, **kwargs):
super(VectorField, self).__init__(*args,**kwargs)
#create layouts
self._layout = QtWidgets.QHBoxLayout()
self._valueLayout = QtWidgets.QVBoxLayout()
#create widgets
self._xField = LineEditField(label = 'X')
self._yField = LineEditField(label = 'Y')
self._zField = LineEditField(label = 'Z')
#set line edit widths
self._xField.getLineEdit().setMaximumWidth(55)
self._xField.getLineEdit().setMinimumWidth(55)
self._xField.getLineEdit().setMaximumHeight(20)
self._xField.getLineEdit().setMinimumHeight(20)
self._yField.getLineEdit().setMaximumWidth(55)
self._yField.getLineEdit().setMinimumWidth(55)
self._yField.getLineEdit().setMaximumHeight(20)
self._yField.getLineEdit().setMinimumHeight(20)
self._zField.getLineEdit().setMaximumWidth(55)
self._zField.getLineEdit().setMinimumWidth(55)
self._zField.getLineEdit().setMaximumHeight(20)
self._zField.getLineEdit().setMinimumHeight(20)
#set validators for line edits
self._xField.getLineEdit().setValidator(QtGui.QDoubleValidator())
self._yField.getLineEdit().setValidator(QtGui.QDoubleValidator())
self._zField.getLineEdit().setValidator(QtGui.QDoubleValidator())
#connect line edits to set value methods
self._xField.getLineEdit().editingFinished.connect(self._setValue)
self._yField.getLineEdit().editingFinished.connect(self._setValue)
self._zField.getLineEdit().editingFinished.connect(self._setValue)
#add widgets to the layout
self._valueLayout.addWidget(self._xField)
self._valueLayout.addWidget(self._yField)
self._valueLayout.addWidget(self._zField)
#self._valueLayout.addStretch()
self._layout.addWidget(self.label())
self._layout.addLayout(self._valueLayout)
self._valueLayout.setContentsMargins(0,0,0,0)
self._layout.setContentsMargins(0,0,0,0)
#set text if any value
if self.value():
if isinstance(self.value(), list) or isinstance(self.value(), tuple):
                if len(self.value()) != 3:
raise TypeError('%s must be a list of 3 values' % self.value())
#set the values on the individual fields
self._xField.getLineEdit().setText('%.4f' % float(self.value()[0]))
self._yField.getLineEdit().setText('%.4f' % float(self.value()[1]))
self._zField.getLineEdit().setText('%.4f' % float(self.value()[2]))
else:
raise TypeError('%s must be a list of 3 values' % self.value())
else:
self.setValue(['%.4f' % float(0.0),'%.4f' % float(0.0),'%.4f' % float(0.0)])
self.setLayout(self._layout)
self._layout.addStretch()
def setValue(self, value):
self._xField.getLineEdit().setText('%.4f' % float(value[0]))
self._yField.getLineEdit().setText('%.4f' % float(value[1]))
self._zField.getLineEdit().setText('%.4f' % float(value[2]))
super(VectorField, self).setValue(*value)
def _setValue(self, *args):
sender = self.sender()
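        # Only update the component whose line edit emitted the signal;
        # the other two components keep their stored values.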
if sender == self._xField.getLineEdit():
value = self._xField.getLineEdit().text()
self._xField.getLineEdit().setText('%.4f' % float(value))
super(VectorField, self).setValue((float(value),self.value()[1],self.value()[2]))
if sender == self._yField.getLineEdit():
value = self._yField.getLineEdit().text()
self._yField.getLineEdit().setText('%.4f' % float(value))
super(VectorField, self).setValue((self.value()[0], float(value), self.value()[2]))
if sender == self._zField.getLineEdit():
value = self._zField.getLineEdit().text()
self._zField.getLineEdit().setText('%.4f' % float(value))
super(VectorField, self).setValue((self.value()[0],self.value()[1], float(value)))
class BooleanField(BaseField):
def __init__(self, *args, **kwargs):
super(BooleanField, self).__init__(*args, **kwargs)
self._layout = QtWidgets.QHBoxLayout()
self._checkBox = QtWidgets.QCheckBox()
self._checkBox.toggled.connect(self.setValue)
self._layout.addWidget(self.label())
#self._layout.addStretch()
self._layout.addWidget(self._checkBox)
self._layout.addStretch()
self.setValue(self.value())
self._layout.setContentsMargins(0,0,0,0)
self.setLayout(self._layout)
def setValue(self, value):
super(BooleanField, self).setValue(value)
self._checkBox.blockSignals(True)
if value:
self._checkBox.setCheckState(QtCore.Qt.Checked)
else:
self._checkBox.setCheckState(QtCore.Qt.Unchecked)
self._checkBox.blockSignals(False)
| gpl-3.0 | 696,080,950,196,650,100 | 37.196931 | 135 | 0.600737 | false |
migonzalvar/threaded-launcher | watchdog.py | 1 | 1246 | #!/bin/env python3.4
from threading import Thread
import os
import subprocess
import time
def watchdog():
"""Launch all the scripts in a folder and wait until completion."""
scripts_processes = []
base_dir = os.path.join(os.path.dirname(__file__), 'modules')
# Launch scripts
for script in os.listdir(base_dir):
script = os.path.join(base_dir, script)
print('** Executing {}'.format(script))
process = subprocess.Popen(['{}'.format(script)], shell=True, stdout=subprocess.PIPE)
scripts_processes.append(process)
# Wait for script completion
while scripts_processes:
time.sleep(1)
        # Iterate over a copy so that removing finished processes is safe
        for process in list(scripts_processes):
ret_code = process.poll()
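            # poll() returns None while the script is still running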
if ret_code is not None:
scripts_processes.remove(process)
print('** {} finished with code {}'.format(process, ret_code))
print('** {} start output'.format(process))
print(process.stdout.read())
print('** {} end output'.format(process))
else:
print('** {} Still running'.format(process))
t = Thread(target=watchdog)
print('## Start watchdog')
t.start()
t.join()
print('## Finish watchdog')
| mit | 5,560,709,927,552,056,000 | 29.390244 | 93 | 0.599518 | false |
sorki/faf | src/pyfaf/bugtrackers/bugzilla.py | 1 | 23495 | # Copyright (C) 2013 ABRT Team
# Copyright (C) 2013 Red Hat, Inc.
#
# This file is part of faf.
#
# faf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# faf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with faf. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import datetime
import bugzilla
from pyfaf import queries
from pyfaf.common import FafError
from pyfaf.utils.decorators import retry
from pyfaf.utils.date import daterange
from pyfaf.storage import column_len
from pyfaf.storage.bugzilla import (BzBug,
BzUser,
BzBugCc,
BzComment,
BzAttachment,
BzBugHistory)
from pyfaf.bugtrackers import BugTracker
from xmlrpclib import Fault
__all__ = ["Bugzilla"]
class Bugzilla(BugTracker):
"""
Proxy over python-bugzilla library handling bug downloading,
creation and updates.
"""
name = "abstract_bugzilla"
report_backref_name = "bz_bugs"
def __init__(self):
"""
Load required configuration based on instance name.
"""
super(Bugzilla, self).__init__()
# load config for corresponding bugzilla (e.g. fedorabz.api_url,
# rhelbz.user, xyzbz.password)
self.load_config_to_self("api_url", "{0}.api_url".format(self.name))
self.load_config_to_self("web_url", "{0}.web_url".format(self.name))
self.load_config_to_self("new_bug_url", "{0}.new_bug_url"
.format(self.name))
self.load_config_to_self("user", "{0}.user".format(self.name))
self.load_config_to_self("password", "{0}.password".format(self.name))
self.connected = False
if not self.api_url:
self.log_error("No api_url specified for '{0}' bugzilla instance".
format(self.name))
return
# url has to be string not unicode due to pycurl
self.api_url = str(self.api_url)
def _connect(self):
if self.connected:
return
self.log_debug("Opening bugzilla connection for '{0}'"
.format(self.name))
self.bz = bugzilla.Bugzilla(url=str(self.api_url), cookiefile=None)
if self.user and self.password:
self.log_debug("Logging into bugzilla '{0}' as '{1}'"
.format(self.name, self.user))
self.bz.login(self.user, self.password)
self.connected = True
def download_bug_to_storage_no_retry(self, db, bug_id):
"""
Download and save single bug identified by `bug_id`.
"""
self.log_debug(u"Downloading bug #{0}".format(bug_id))
self._connect()
try:
bug = self.bz.getbug(bug_id)
except Fault as ex:
if int(ex.faultCode) == 102:
# Access denied to a private bug
raise FafError(ex.faultString)
else:
raise
return self._save_bug(db, bug)
@retry(3, delay=10, backoff=3, verbose=True)
def download_bug_to_storage(self, db, bug_id):
return self.download_bug_to_storage_no_retry(db, bug_id)
def list_bugs(self, from_date=datetime.date.today(),
to_date=datetime.date(2000, 1, 1),
step=7,
stop_after_empty_steps=10,
updated_first=False,
custom_fields=dict()):
"""
        Fetch all bugs by creation or modification date, starting at
        `from_date` and going backwards until no more bugs are found
        or `to_date` is reached.
Bugs are pulled in date ranges defined by `step`
not to hit bugzilla timeouts.
Number of empty queries required before we stop querying is
controlled by `stop_after_empty_steps`.
If `updated_first` is True, recently modified bugs
are queried first.
`custom_fields` dictionary can be used to create more specific
bugzilla queries.
"""
if not updated_first:
custom_fields.update(dict(chfield="[Bug creation]"))
empty = 0
over_days = list(daterange(from_date, to_date, step, desc=True))
prev = over_days[0]
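        # Walk backwards in windows of `step` days, paging through each window
        # with limit/offset so a single huge query does not time out.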
for current in over_days[1:]:
limit = 100
offset = 0
fetched_per_date_range = 0
while True:
try:
result = self._query_bugs(
prev, current, limit, offset, custom_fields)
except Exception as e:
self.log_error("Exception after multiple attempts: {0}."
" Ignoring".format(e.message))
continue
count = len(result)
fetched_per_date_range += count
self.log_debug("Got {0} bugs".format(count))
for bug in result:
yield bug.bug_id
if not count:
self.log_debug("No more bugs in this date range")
break
offset += limit
if not fetched_per_date_range:
empty += 1
if empty >= stop_after_empty_steps:
break
else:
empty = 0
prev = current - datetime.timedelta(1)
@retry(3, delay=10, backoff=3, verbose=True)
def _query_bugs(self, to_date, from_date,
limit=100, offset=0, custom_fields=dict()):
"""
Perform bugzilla query for bugs since `from_date` to `to_date`.
Use `custom_fields` to perform additional filtering.
"""
target = "bugs modified"
if "chfield" in custom_fields:
target = "bugs created"
self.log_debug("Fetching {0} between "
"{1} and {2}, offset is: {3}".format(target, from_date,
to_date, offset))
que = dict(
chfieldto=to_date.strftime("%Y-%m-%d"),
chfieldfrom=from_date.strftime("%Y-%m-%d"),
query_format="advanced",
limit=limit,
offset=offset,
)
que.update(custom_fields)
self._connect()
return self.bz.query(que)
def _convert_datetime(self, bz_datetime):
"""
Convert `bz_datetime` returned by python-bugzilla
to standard datetime.
"""
return datetime.datetime.fromtimestamp(
time.mktime(bz_datetime.timetuple()))
def _preprocess_bug(self, bug):
"""
        Process the bug instance and return a dictionary with the
        fields required by the lower-level logic.
Returns `None` if there are missing fields.
"""
required_fields = [
"bug_id",
"creation_time",
"last_change_time",
"product",
"version",
"component",
"summary",
"status",
"resolution",
"cc",
"status_whiteboard",
"reporter",
"comments",
"attachments",
]
bug_dict = dict()
for field in required_fields:
if not hasattr(bug, field):
self.log_error("Missing bug field {0}".format(field))
return None
bug_dict[field] = getattr(bug, field)
for field in ["creation_time", "last_change_time"]:
bug_dict[field] = self._convert_datetime(bug_dict[field])
history = bug.get_history()
bug_dict["history"] = history["bugs"][0]["history"]
if bug.resolution == "DUPLICATE":
bug_dict["dupe_id"] = bug.dupe_id
return bug_dict
def _save_bug(self, db, bug):
"""
        Save the downloaded bug to the database.
If bug is marked as duplicate, the duplicate bug is downloaded
as well.
"""
bug_dict = self._preprocess_bug(bug)
if not bug_dict:
self.log_error("Bug pre-processing failed")
return
self.log_debug("Saving bug #{0}: {1}".format(bug_dict["bug_id"],
bug_dict["summary"]))
bug_id = bug_dict["bug_id"]
# check if we already have this bug up-to-date
old_bug = (
db.session.query(BzBug)
.filter(BzBug.id == bug_id)
.filter(BzBug.last_change_time == bug_dict["last_change_time"])
.first())
if old_bug:
self.log_info("Bug already up-to-date")
return old_bug
tracker = queries.get_bugtracker_by_name(db, self.name)
if not tracker:
self.log_error("Tracker with name '{0}' is not installed"
.format(self.name))
return
opsysrelease = queries.get_osrelease(db, bug_dict["product"],
bug_dict["version"])
if not opsysrelease:
self.log_error("Unable to save this bug due to unknown "
"release '{0} {1}'".format(bug_dict["product"],
bug_dict["version"]))
return
relcomponent = queries.get_component_by_name_release(
db, opsysrelease, bug_dict["component"])
if not relcomponent:
self.log_error("Unable to save this bug due to unknown "
"component '{0}'".format(bug_dict["component"]))
return
component = relcomponent.component
reporter = queries.get_bz_user(db, bug_dict["reporter"])
if not reporter:
self.log_debug("Creator {0} not found".format(
bug_dict["reporter"]))
downloaded = self._download_user(bug_dict["reporter"])
if not downloaded:
self.log_error("Unable to download user, skipping.")
return
reporter = self._save_user(db, downloaded)
new_bug = BzBug()
new_bug.id = bug_dict["bug_id"]
new_bug.summary = bug_dict["summary"]
new_bug.status = bug_dict["status"]
new_bug.creation_time = bug_dict["creation_time"]
new_bug.last_change_time = bug_dict["last_change_time"]
if bug_dict["status"] == "CLOSED":
new_bug.resolution = bug_dict["resolution"]
if bug_dict["resolution"] == "DUPLICATE":
if not queries.get_bz_bug(db, bug_dict["dupe_id"]):
self.log_debug("Duplicate #{0} not found".format(
bug_dict["dupe_id"]))
dup = self.download_bug_to_storage(db, bug_dict["dupe_id"])
if dup:
new_bug.duplicate = dup.id
new_bug.tracker_id = tracker.id
new_bug.component_id = component.id
new_bug.opsysrelease_id = opsysrelease.id
new_bug.creator_id = reporter.id
new_bug.whiteboard = bug_dict["status_whiteboard"]
# the bug itself might be downloaded during duplicate processing
# exit in this case - it would cause duplicate database entry
if queries.get_bz_bug(db, bug_dict["bug_id"]):
self.log_debug("Bug #{0} already exists in storage,"
" updating".format(bug_dict["bug_id"]))
bugdict = {}
for col in new_bug.__table__._columns:
bugdict[col.name] = getattr(new_bug, col.name)
(db.session.query(BzBug)
.filter(BzBug.id == bug_id).update(bugdict))
new_bug = queries.get_bz_bug(db, bug_dict["bug_id"])
else:
db.session.add(new_bug)
db.session.flush()
self._save_ccs(db, bug_dict["cc"], new_bug.id)
self._save_history(db, bug_dict["history"], new_bug.id)
self._save_attachments(db, bug_dict["attachments"], new_bug.id)
self._save_comments(db, bug_dict["comments"], new_bug.id)
return new_bug
def _save_ccs(self, db, ccs, new_bug_id):
"""
        Save CC'ed users to the database.
Expects list of emails (`ccs`) and ID of the bug as `new_bug_id`.
"""
total = len(ccs)
for num, user_email in enumerate(ccs):
self.log_debug("Processing CC: {0}/{1}".format(num + 1, total))
cc = (
db.session.query(BzBugCc)
.join(BzUser)
.filter((BzUser.email == user_email) &
(BzBugCc.bug_id == new_bug_id)).first())
if cc:
self.log_debug("CC'ed user {0} already"
" exists".format(user_email))
continue
cced = queries.get_bz_user(db, user_email)
if not cced:
self.log_debug("CC'ed user {0} not found,"
" adding.".format(user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
cced = self._save_user(db, downloaded)
new = BzBugCc()
new.bug_id = new_bug_id
new.user = cced
db.session.add(new)
db.session.flush()
def _save_history(self, db, events, new_bug_id):
"""
Save bug history to the database.
Expects list of `events` and ID of the bug as `new_bug_id`.
"""
total = len(events)
for num, event in enumerate(events):
self.log_debug("Processing history event {0}/{1}".format(num + 1,
total))
user_email = event["who"]
user = queries.get_bz_user(db, user_email)
if not user:
self.log_debug("History changed by unknown user #{0}".format(
user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
user = self._save_user(db, downloaded)
for change in event["changes"]:
chtime = self._convert_datetime(event["when"])
ch = (
db.session.query(BzBugHistory)
.filter((BzBugHistory.user == user) &
(BzBugHistory.time == chtime) &
(BzBugHistory.field == change["field_name"]) &
(BzBugHistory.added == change["added"]) &
(BzBugHistory.removed == change["removed"]))
.first())
if ch:
self.log_debug("Skipping existing history event "
"#{0}".format(ch.id))
continue
new = BzBugHistory()
new.bug_id = new_bug_id
new.user = user
new.time = chtime
new.field = change["field_name"]
new.added = change["added"][:column_len(BzBugHistory, "added")]
new.removed = change["removed"][:column_len(BzBugHistory, "removed")]
db.session.add(new)
db.session.flush()
def _save_attachments(self, db, attachments, new_bug_id):
"""
Save bug attachments to the database.
Expects list of `attachments` and ID of the bug as `new_bug_id`.
"""
total = len(attachments)
for num, attachment in enumerate(attachments):
self.log_debug("Processing attachment {0}/{1}".format(num + 1,
total))
if queries.get_bz_attachment(db, attachment["id"]):
self.log_debug("Skipping existing attachment #{0}".format(
attachment["id"]))
continue
user_email = attachment["attacher"]
user = queries.get_bz_user(db, user_email)
if not user:
self.log_debug("Attachment from unknown user {0}".format(
user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
user = self._save_user(db, downloaded)
new = BzAttachment()
new.id = attachment["id"]
new.bug_id = new_bug_id
new.mimetype = attachment["content_type"]
new.description = attachment["description"]
new.filename = attachment["file_name"]
new.is_private = bool(attachment["is_private"])
new.is_patch = bool(attachment["is_patch"])
new.is_obsolete = bool(attachment["is_obsolete"])
new.creation_time = self._convert_datetime(
attachment["creation_time"])
new.last_change_time = self._convert_datetime(
attachment["last_change_time"])
new.user = user
db.session.add(new)
self._connect()
data = self.bz.openattachment(attachment["id"])
# save_lob is inherited method which cannot be seen by pylint
# because of sqlalchemy magic
# pylint: disable=E1101
new.save_lob("content", data, truncate=True, overwrite=True)
data.close()
db.session.flush()
def _save_comments(self, db, comments, new_bug_id):
"""
Save bug comments to the database.
Expects list of `comments` and ID of the bug as `new_bug_id`.
"""
total = len(comments)
for num, comment in enumerate(comments):
self.log_debug("Processing comment {0}/{1}".format(num + 1,
total))
if queries.get_bz_comment(db, comment["id"]):
self.log_debug("Skipping existing comment #{0}".format(
comment["id"]))
continue
self.log_debug("Downloading comment #{0}".format(comment["id"]))
user_email = comment["creator"]
user = queries.get_bz_user(db, user_email)
if not user:
self.log_debug("History changed by unknown user #{0}".format(
user_email))
downloaded = self._download_user(user_email)
if not downloaded:
self.log_error("Unable to download user, skipping.")
continue
user = self._save_user(db, downloaded)
new = BzComment()
new.id = comment["id"]
new.bug_id = new_bug_id
new.creation_time = self._convert_datetime(comment["time"])
new.is_private = comment["is_private"]
if "attachment_id" in comment:
attachment = queries.get_bz_attachment(
db, comment["attachment_id"])
if attachment:
new.attachment = attachment
else:
self.log_warning("Comment is referencing an attachment"
" which is not accessible.")
new.number = num
new.user = user
db.session.add(new)
if not isinstance(comment["text"], basestring):
comment["text"] = str(comment["text"])
# save_lob is inherited method which cannot
# be seen by pylint because of sqlalchemy magic
# pylint: disable=E1101
new.save_lob("content", comment["text"].encode("utf-8"),
overwrite=True)
db.session.flush()
@retry(3, delay=10, backoff=3, verbose=True)
def _download_user(self, user_email):
"""
Return user with `user_email` downloaded from bugzilla.
"""
self.log_debug("Downloading user {0}".format(user_email))
self._connect()
user = self.bz.getuser(user_email)
return user
def _save_user(self, db, user):
"""
Save bugzilla `user` to the database. Return persisted
BzUser object.
"""
# We need to account for case when user has changed
# the email address.
dbuser = (db.session.query(BzUser)
.filter(BzUser.id == user.userid).first())
if not dbuser:
dbuser = BzUser(id=user.userid)
for field in ["name", "email", "can_login", "real_name"]:
setattr(dbuser, field, getattr(user, field))
db.session.add(dbuser)
db.session.flush()
return dbuser
@retry(3, delay=10, backoff=3, verbose=True)
def create_bug(self, **data):
"""
Create new bugzilla ticket using `data` dictionary.
"""
self._connect()
return self.bz.createbug(**data)
@retry(2, delay=60, backoff=1, verbose=True)
def clone_bug(self, orig_bug_id, new_product, new_version):
self._connect()
origbug = self.bz.getbug(orig_bug_id)
desc = ["+++ This bug was initially created as a clone "
"of Bug #{0} +++".format(orig_bug_id)]
private = False
first = True
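        # Rebuild the description from the original comment thread; the cloned
        # description is marked private if any source comment was private.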
for comment in origbug.longdescs:
if comment["is_private"]:
private = True
if not first:
desc.append("--- Additional comment from {0} on {1} ---"
.format(comment["author"], comment["time"]))
if "extra_data" in comment:
desc.append("*** This bug has been marked as a duplicate "
"of bug {0} ***".format(comment["extra_data"]))
else:
desc.append(comment["text"])
first = False
data = {
'product': new_product,
'component': origbug.component,
'version': new_version,
'op_sys': origbug.op_sys,
'platform': origbug.platform,
'summary': origbug.summary,
'description': "\n\n".join(desc),
'comment_is_private': private,
'priority': origbug.priority,
'bug_severity': origbug.bug_severity,
'blocked': origbug.blocked,
'whiteboard': origbug.whiteboard,
'keywords': origbug.keywords,
'cf_clone_of': str(orig_bug_id),
'cf_verified': ['Any'],
'cf_environment': origbug.cf_environment,
'groups': origbug.groups
}
        # Drop unset fields; iterate over a copy so popping keys is safe
        for key in list(data):
            if data[key] is None:
                data.pop(key)
newbug = self.bz.createbug(**data)
return newbug
| gpl-3.0 | 7,789,109,973,879,203,000 | 32.805755 | 85 | 0.520962 | false |
adeverteuil/backup | backup/config.py | 1 | 9546 | # Alexandre's backup script
# Copyright © 2014 Alexandre A. de Verteuil
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Get configuration options.
This module defines the Configuration class which handles all the configuration
and parsing tasks.
Option values are the first ones found in the following places:
Command line arguments
Configuration files
Environment variables
Hard coded default values
The Configuration instance configures two logging handlers:
1. A stream handler that writes to stdout and stderr;
2. A memory handler that memorizes output from the rsync subprocess for
post-processing;
"""
import argparse
import atexit
import collections.abc
import configparser
import io
import logging
import os
import os.path
import sys
from . import _logging
from .dry_run import if_not_dry_run
from .version import __version__
def _make_sources_list():
"""Return a default string of colon-separated paths to back up.
Start with the list of direct children of "/".
Remove virtual filesystems from the list.
"""
sources = os.listdir('/')
for d in ("sys", "proc", "dev", "lost+found"):
try:
sources.remove(d)
except ValueError:
continue
return ":".join(sorted(["/"+s for s in sources]))
DEFAULTS = {
'configfile': "/etc/backup",
'configdir': "/etc/backup.d",
'rsync': "/usr/bin/rsync",
'ssh': "/usr/bin/ssh",
'ssh_port': "22",
'sourcehost': "localhost",
'sourcedirs': _make_sources_list(),
'dest': "/root/var/backups",
'hourlies': "24",
'dailies': "31",
#'weeklies': "8",
'warn bytes transferred': str(1 * 10**8), # 100MB
'bw_warn': "0",
'bw_err': "0",
'force': "False",
}
class Configuration(_logging.Logging):
"""Collects options from command line arguments and configuration files."""
def __init__(self, argv=None, environ=None, **kwargs):
"""Instantiates ConfigParser with defaults and ArgumentParser.
Parameters:
argv -- If not None, will be parsed instead of sys.argv[1:].
environ -- If not None, will be used insted of os.environ.
"""
super().__init__(**kwargs)
self.argv = argv if argv is not None else sys.argv[1:]
self.args = None # This will hold the return value of parse_args().
self.environ = environ if environ is not None else os.environ
self.config = configparser.ConfigParser(
defaults=DEFAULTS,
default_section="default",
)
self.argumentparser = argparse.ArgumentParser(add_help=False)
self._configure_argumentparser()
def _configure_argumentparser(self):
parser = self.argumentparser
parser.add_argument("--help", "-h",
# The only change from the default is a capital S and a full stop.
action="help",
help="Show this help and exit.",
)
parser.add_argument("--version",
action="version",
version="%(prog)s {}".format(__version__),
help="Show program's version number and exit.",
)
parser.add_argument("--verbose", "-v",
action="count",
help=("Set verbosity to INFO. This option may be repeated once for"
" verbosity level DEBUG."),
)
parser.add_argument("--print-rsync", "-p",
help=("Also print the output of rsync to stdout. Otherwise, only "
"log its output to the log file. Ineffective if -v option "
"is not given."),
action="store_true",
)
parser.add_argument("--configfile", "-c",
help="Use this file rather than the default.",
)
parser.add_argument("--configdir", "-d",
help="Use this directory rather than the default.",
)
parser.add_argument("--dry-run", "-n",
help="Perform a trial run with no changes made.",
action="store_true",
)
parser.add_argument("--force", "-f",
help="Disable any bw_err trigger.",
action="store_true",
)
parser.add_argument("-e",
metavar="EXECUTABLE",
help=argparse.SUPPRESS,
const="echo",
nargs="?",
#help=("Executable to use instead of rsync. "
# "Use echo when debugging. "
# "echo is the default if this option is used but no "
# "executable is specified."),
)
parser.add_argument("hosts",
nargs="*",
help=("List of hosts to do a backup of. Hosts are defined through "
"configuration files in /etc/backup.d. If no hosts are "
"specified, all defined hosts are backed up sequentially."),
metavar="host",
)
def configure(self):
"""Executes all the configurations tasks in the right order.
Returns the ConfigParser object with all the collected options.
"""
self._parse_environ()
self._parse_args()
self._do_early_logging_config()
self._read_config()
self._merge_args_with_config()
self._logger.debug(
"Hosts defined: {}".format(self.config.sections())
)
return self.config
def _parse_environ(self):
"""Overrides some defaults with environment variables."""
if 'BACKUP_CONFIGFILE' in self.environ:
self.config.defaults()['configfile'] = \
self.environ['BACKUP_CONFIGFILE']
self._logger.debug(
"From env: BACKUP_CONFIGFILE = {}".format(
self.environ['BACKUP_CONFIGFILE']
)
)
if 'BACKUP_CONFIGDIR' in self.environ:
self.config.defaults()['configdir'] = \
self.environ['BACKUP_CONFIGDIR']
self._logger.debug(
"From env: BACKUP_CONFIGDIR = {}".format(
self.environ['BACKUP_CONFIGDIR']
)
)
def _parse_args(self):
"""Adds arguments to the ArgumentParser instance and parses args."""
self.args = self.argumentparser.parse_args(self.argv)
self._logger.debug("Parsed args: {}".format(vars(self.args)))
def _do_early_logging_config(self):
"""Configures early logging according to the --verbose option."""
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# The handler will be configured, but not added to the Logger. This
# must be done in backup.controller.main() so that logging will not
# interfere with unit tests.
#logger.addHandler(_handlers['stream'])
atexit.register(logging.shutdown)
lvl = "WARNING"
if self.args.verbose:
if self.args.verbose >= 2:
_logging.handlers['stream'].setLevel(logging.DEBUG)
lvl = "DEBUG"
elif self.args.verbose == 1:
_logging.handlers['stream'].setLevel(logging.INFO)
lvl = "INFO"
logger.addHandler(_logging.handlers['memory'])
self._logger.debug("stdout log level set to {}".format(lvl))
logging.getLogger("rsync").propagate = False
if self.args.print_rsync:
logging.getLogger("rsync").addHandler(_logging.handlers['stream'])
# The logging FileHandler will be added to the "rsync" logger by
# the Controller object so the output will be recorded in any case.
def _read_config(self):
"""Finds and reads the config files. Uses the --configfile option."""
if self.args.configfile:
configfile = self.args.configfile
else:
configfile = self.config.defaults()['configfile']
with open(configfile) as fh:
self._logger.debug(
"Reading configuration from {}.".format(configfile)
)
self.config.read_file(fh)
def _merge_args_with_config(self):
# --configfile has already been parsed in _read_config().
if self.args.hosts:
self.config.defaults()['hosts'] = " ".join(self.args.hosts)
elif 'hosts' not in self.config.defaults():
# If the hosts key in the default section is not defined and no
# hosts were specified on the command line, build the hosts list
# from the sections of the configuration file.
self.config.defaults()['hosts'] = " ".join(self.config.sections())
self.config.defaults()['dry-run'] = str(self.args.dry_run)
if_not_dry_run.dry_run = self.args.dry_run
if self.args.e is not None:
self.config.defaults()['rsync'] = self.args.e
self.config.defaults()['force'] = str(self.args.force)
| gpl-3.0 | -1,983,022,879,159,606,500 | 36.727273 | 79 | 0.590885 | false |
mfsteen/CIQTranslate-Kristian | openpyxl/comments/reader.py | 1 | 1558 | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
import os.path
from openpyxl.comments import Comment
from openpyxl.xml.constants import (
PACKAGE_WORKSHEET_RELS,
COMMENTS_NS,
PACKAGE_XL,
)
from openpyxl.xml.functions import fromstring
from .properties import CommentSheet
def read_comments(ws, xml_source):
"""Given a worksheet and the XML of its comments file, assigns comments to cells"""
root = fromstring(xml_source)
comments = CommentSheet.from_tree(root)
authors = comments.authors.author
for comment in comments.commentList:
author = authors[comment.authorId]
ref = comment.ref
comment = Comment(comment.content, author)
ws.cell(coordinate=ref).comment = comment
def get_comments_file(worksheet_path, archive, valid_files):
"""Returns the XML filename in the archive which contains the comments for
the spreadsheet with codename sheet_codename. Returns None if there is no
such file"""
sheet_codename = os.path.split(worksheet_path)[-1]
rels_file = PACKAGE_WORKSHEET_RELS + '/' + sheet_codename + '.rels'
if rels_file not in valid_files:
return None
rels_source = archive.read(rels_file)
root = fromstring(rels_source)
for i in root:
if i.attrib['Type'] == COMMENTS_NS:
comments_file = os.path.split(i.attrib['Target'])[-1]
comments_file = PACKAGE_XL + '/' + comments_file
if comments_file in valid_files:
return comments_file
return None
| gpl-3.0 | 159,124,037,491,564,830 | 31.458333 | 87 | 0.679076 | false |
janelia-flyem/pydvid | tests/test_keyvalue.py | 1 | 3531 | import os
import shutil
import tempfile
import httplib
import h5py
from pydvid import keyvalue
from mockserver.h5mockserver import H5MockServer, H5MockServerDataFile
class TestKeyValue(object):
@classmethod
def setupClass(cls):
"""
Override. Called by nosetests.
- Create an hdf5 file to store the test data
- Start the mock server, which serves the test data from the file.
"""
cls._tmp_dir = tempfile.mkdtemp()
cls.test_filepath = os.path.join( cls._tmp_dir, "test_data.h5" )
cls._generate_testdata_h5(cls.test_filepath)
cls.server_proc, cls.shutdown_event = cls._start_mockserver( cls.test_filepath, same_process=True )
cls.client_connection = httplib.HTTPConnection( "localhost:8000" )
@classmethod
def teardownClass(cls):
"""
Override. Called by nosetests.
"""
shutil.rmtree(cls._tmp_dir)
cls.shutdown_event.set()
cls.server_proc.join()
@classmethod
def _generate_testdata_h5(cls, test_filepath):
"""
Generate a temporary hdf5 file for the mock server to use (and us to compare against)
"""
# Choose names
cls.dvid_dataset = "datasetA"
cls.data_uuid = "abcde"
cls.data_name = "my_keyvalue_stuff"
cls.keyvalue_store_location = "/datasets/{dvid_dataset}/volumes/{data_name}".format( **cls.__dict__ )
cls.node_location = "/datasets/{dvid_dataset}/nodes/{data_uuid}".format( **cls.__dict__ )
# Write to h5 file
with H5MockServerDataFile( test_filepath ) as test_h5file:
test_h5file.add_node( cls.dvid_dataset, cls.data_uuid )
# test_h5file.add_keyvalue_group( cls.dvid_dataset, cls.data_name )
@classmethod
def _start_mockserver(cls, h5filepath, same_process=False, disable_server_logging=True):
"""
Start the mock DVID server in a separate process.
h5filepath: The file to serve up.
same_process: If True, start the server in this process as a
separate thread (useful for debugging).
Otherwise, start the server in its own process (default).
disable_server_logging: If true, disable the normal HttpServer logging of every request.
"""
return H5MockServer.create_and_start( h5filepath, "localhost", 8000, same_process, disable_server_logging )
def test_basic(self):
# (1) NEW
keyvalue.create_new( self.client_connection, self.data_uuid, self.data_name )
# Manually check that the keyvalue store was created by checking the underlying hdf5 file...
with h5py.File(self.test_filepath, 'r') as f:
assert self.keyvalue_store_location in f
# (2) PUT
keyvalue.put_value( self.client_connection, self.data_uuid, self.data_name, 'key_abc', 'abcdefghijklmnopqrstuvwxyz' )
# Manual check...
with h5py.File(self.test_filepath, 'r') as f:
assert self.keyvalue_store_location + '/key_abc' in f
# (3) GET
value = keyvalue.get_value( self.client_connection, self.data_uuid, self.data_name, 'key_abc' )
assert value == 'abcdefghijklmnopqrstuvwxyz'
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
nose.run(defaultTest=__file__)
| bsd-3-clause | 6,521,098,699,988,964,000 | 38.674157 | 125 | 0.636647 | false |
beni55/rinohtype | rinoh/annotation.py | 1 | 1363 | # This file is part of RinohType, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from .util import Decorator
from .text import MixedStyledText
__all__ = ['NamedDestination', 'NamedDestinationLink', 'HyperLink',
'AnnotatedSpan', 'AnnotatedText']
class Annotation(object):
pass
class NamedDestination(Annotation):
type = 'NamedDestination'
def __init__(self, name):
self.name = name
class NamedDestinationLink(Annotation):
type = 'NamedDestinationLink'
def __init__(self, name):
self.name = name
class HyperLink(Annotation):
type = 'URI'
def __init__(self, target):
self.target = target
class AnnotatedSpan(Decorator):
def __init__(self, span, annotation):
super().__init__(span)
self.annotation = annotation
class AnnotatedText(MixedStyledText):
def __init__(self, text_or_items, annotation, style=None, parent=None):
super().__init__(text_or_items, style=style, parent=parent)
self.annotation = annotation
def spans(self, document):
return (AnnotatedSpan(span, self.annotation)
for item in self for span in item.spans(document))
| agpl-3.0 | 4,685,080,658,187,097,000 | 22.912281 | 75 | 0.668379 | false |
felipessalvatore/CNNexample | src/tunning/fc.py | 1 | 2217 | import os
import sys
from random import randint
import numpy as np
import inspect
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from util import run_test, get_data_4d, get_time
from CNN import CNNModel, train_model, check_valid
from DataHolder import DataHolder
from Config import Config
train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d()
my_dataholder = DataHolder(train_dataset,
train_labels,
valid_dataset,
valid_labels,
test_dataset,
test_labels)
FC = [5, 10, 15, 20, 30, 40, 60, 200]
number_of_exp = len(FC)
results = []
duration = []
info = []
for i, fc in enumerate(FC):
print("\n ({0} of {1})".format(i + 1, number_of_exp))
my_config = Config(tunning=True, hidden_nodes_1=3 * fc,
hidden_nodes_2=2 * fc,
hidden_nodes_3=fc)
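    # The fully connected layers shrink 3*fc -> 2*fc -> fc; fc is the tuned size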
attrs = vars(my_config)
config_info = ["%s: %s" % item for item in attrs.items()]
info.append(config_info)
my_model = CNNModel(my_config, my_dataholder)
train_model(my_model, my_dataholder, 10001, 1000, False)
current_dur = get_time(train_model, 10001)
score = check_valid(my_model)
results.append(score)
duration.append(current_dur)
best_result = max(list(zip(results, FC, duration, info)))
result_string = """In an experiment with {0} fully connected sizes
the best one is {1} with valid accuracy = {2}.
\nThe training takes {3:.2f} seconds using the following params:
\n{4}""".format(number_of_exp,
best_result[1],
best_result[0],
best_result[2],
best_result[3])
file = open("final.txt", "w")
file.write(result_string)
file.close()
plt.plot(FC, results)
plt.xlabel("hidden_nodes_3")
plt.ylabel("valid acc")
plt.savefig("fc.png")
plt.clf()
plt.plot(FC, duration)
plt.xlabel("hidden_nodes_3")
plt.ylabel("duration (s)")
plt.savefig("fc_du.png")
plt.clf()
| mit | -7,174,310,966,665,166,000 | 29.369863 | 99 | 0.630582 | false |
google-research/dreamer | dreamer/models/base.py | 1 | 2449 | # Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from dreamer import tools
class Base(tf.nn.rnn_cell.RNNCell):
def __init__(self, transition_tpl, posterior_tpl, reuse=None):
super(Base, self).__init__(_reuse=reuse)
self._posterior_tpl = posterior_tpl
self._transition_tpl = transition_tpl
self._debug = False
@property
def state_size(self):
raise NotImplementedError
@property
def updates(self):
return []
@property
def losses(self):
return []
@property
def output_size(self):
return (self.state_size, self.state_size)
def zero_state(self, batch_size, dtype):
return tools.nested.map(
lambda size: tf.zeros([batch_size, size], dtype),
self.state_size)
def features_from_state(self, state):
raise NotImplementedError
def dist_from_state(self, state, mask=None):
raise NotImplementedError
def divergence_from_states(self, lhs, rhs, mask=None):
lhs = self.dist_from_state(lhs, mask)
rhs = self.dist_from_state(rhs, mask)
divergence = tfd.kl_divergence(lhs, rhs)
if mask is not None:
divergence = tools.mask(divergence, mask)
return divergence
def call(self, inputs, prev_state):
obs, prev_action, use_obs = inputs
if self._debug:
with tf.control_dependencies([tf.assert_equal(use_obs, use_obs[0, 0])]):
use_obs = tf.identity(use_obs)
use_obs = use_obs[0, 0]
zero_obs = tools.nested.map(tf.zeros_like, obs)
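    # The prior transitions without looking at the observation; the posterior
    # conditions on it, but is only used when `use_obs` is set.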
prior = self._transition_tpl(prev_state, prev_action, zero_obs)
posterior = tf.cond(
use_obs,
lambda: self._posterior_tpl(prev_state, prev_action, obs),
lambda: prior)
return (prior, posterior), posterior
| apache-2.0 | 3,964,951,076,290,957,300 | 29.6125 | 78 | 0.695794 | false |
jolyonb/edx-platform | common/lib/xmodule/xmodule/video_module/transcripts_utils.py | 1 | 37083 | """
Utility functions for transcripts.
++++++++++++++++++++++++++++++++++
"""
from __future__ import absolute_import
import copy
import json
import logging
import os
from functools import wraps
import requests
import six
from django.conf import settings
from lxml import etree
from pysrt import SubRipFile, SubRipItem, SubRipTime
from pysrt.srtexc import Error
from six import text_type
from six.moves import range, zip
from six.moves.html_parser import HTMLParser # pylint: disable=import-error
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from .bumper_utils import get_bumper_settings
try:
from edxval import api as edxval_api
except ImportError:
edxval_api = None
log = logging.getLogger(__name__)
NON_EXISTENT_TRANSCRIPT = 'non_existent_dummy_file_name'
class TranscriptException(Exception): # pylint: disable=missing-docstring
pass
class TranscriptsGenerationException(Exception): # pylint: disable=missing-docstring
pass
class GetTranscriptsFromYouTubeException(Exception): # pylint: disable=missing-docstring
pass
class TranscriptsRequestValidationException(Exception): # pylint: disable=missing-docstring
pass
def exception_decorator(func):
"""
Generate NotFoundError for TranscriptsGenerationException, UnicodeDecodeError.
Args:
`func`: Input function
Returns:
'wrapper': Decorated function
"""
@wraps(func)
def wrapper(*args, **kwds):
try:
return func(*args, **kwds)
except (TranscriptsGenerationException, UnicodeDecodeError) as ex:
log.exception(text_type(ex))
raise NotFoundError
return wrapper
def generate_subs(speed, source_speed, source_subs):
"""
Generate transcripts from one speed to another speed.
Args:
`speed`: float, for this speed subtitles will be generated,
`source_speed`: float, speed of source_subs
`source_subs`: dict, existing subtitles for speed `source_speed`.
Returns:
`subs`: dict, actual subtitles.
"""
if speed == source_speed:
return source_subs
coefficient = 1.0 * speed / source_speed
subs = {
'start': [
int(round(timestamp * coefficient)) for
timestamp in source_subs['start']
],
'end': [
int(round(timestamp * coefficient)) for
timestamp in source_subs['end']
],
'text': source_subs['text']}
return subs
def save_to_store(content, name, mime_type, location):
"""
Save named content to store by location.
Returns location of saved content.
"""
content_location = Transcript.asset_location(location, name)
content = StaticContent(content_location, name, mime_type, content)
contentstore().save(content)
return content_location
def save_subs_to_store(subs, subs_id, item, language='en'):
"""
Save transcripts into `StaticContent`.
Args:
`subs_id`: str, subtitles id
`item`: video module instance
`language`: two chars str ('uk'), language of translation of transcripts
Returns: location of saved subtitles.
"""
filedata = json.dumps(subs, indent=2)
filename = subs_filename(subs_id, language)
return save_to_store(filedata, filename, 'application/json', item.location)
def youtube_video_transcript_name(youtube_text_api):
"""
Get the transcript name from available transcripts of video
with respect to language from youtube server
"""
utf8_parser = etree.XMLParser(encoding='utf-8')
transcripts_param = {'type': 'list', 'v': youtube_text_api['params']['v']}
lang = youtube_text_api['params']['lang']
# get list of transcripts of specific video
# url-form
# http://video.google.com/timedtext?type=list&v={VideoId}
youtube_response = requests.get('http://' + youtube_text_api['url'], params=transcripts_param)
if youtube_response.status_code == 200 and youtube_response.text:
youtube_data = etree.fromstring(youtube_response.content, parser=utf8_parser)
# iterate all transcripts information from youtube server
for element in youtube_data:
# search specific language code such as 'en' in transcripts info list
if element.tag == 'track' and element.get('lang_code', '') == lang:
return element.get('name')
return None
def get_transcripts_from_youtube(youtube_id, settings, i18n, youtube_transcript_name=''):
"""
Gets transcripts from youtube for youtube_id.
Parses only utf-8 encoded transcripts.
Other encodings are not supported at the moment.
Returns (status, transcripts): bool, dict.
"""
_ = i18n.ugettext
utf8_parser = etree.XMLParser(encoding='utf-8')
youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])
youtube_text_api['params']['v'] = youtube_id
# if the transcript name is not empty on youtube server we have to pass
# name param in url in order to get transcript
# example http://video.google.com/timedtext?lang=en&v={VideoId}&name={transcript_name}
youtube_transcript_name = youtube_video_transcript_name(youtube_text_api)
if youtube_transcript_name:
youtube_text_api['params']['name'] = youtube_transcript_name
data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])
if data.status_code != 200 or not data.text:
msg = _("Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.").format(
youtube_id=youtube_id,
status_code=data.status_code
)
raise GetTranscriptsFromYouTubeException(msg)
sub_starts, sub_ends, sub_texts = [], [], []
xmltree = etree.fromstring(data.content, parser=utf8_parser)
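    # Each <text start="..." dur="..."> element becomes one entry; timestamps
    # are converted from seconds to milliseconds.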
for element in xmltree:
if element.tag == "text":
start = float(element.get("start"))
duration = float(element.get("dur", 0)) # dur is not mandatory
text = element.text
end = start + duration
if text:
# Start and end should be ints representing the millisecond timestamp.
sub_starts.append(int(start * 1000))
sub_ends.append(int((end + 0.0001) * 1000))
sub_texts.append(text.replace('\n', ' '))
return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}
def download_youtube_subs(youtube_id, video_descriptor, settings):
"""
Download transcripts from Youtube.
Args:
youtube_id: str, actual youtube_id of the video.
video_descriptor: video descriptor instance.
    We save transcripts for 1.0 speed only; conversion to other speeds is done on the front-end.
Returns:
Serialized sjson transcript content, if transcripts were successfully downloaded and saved.
Raises:
GetTranscriptsFromYouTubeException, if fails.
"""
i18n = video_descriptor.runtime.service(video_descriptor, "i18n")
_ = i18n.ugettext
subs = get_transcripts_from_youtube(youtube_id, settings, i18n)
return json.dumps(subs, indent=2)
def remove_subs_from_store(subs_id, item, lang='en'):
"""
Remove from store, if transcripts content exists.
"""
filename = subs_filename(subs_id, lang)
Transcript.delete_asset(item.location, filename)
def generate_subs_from_source(speed_subs, subs_type, subs_filedata, item, language='en'):
"""Generate transcripts from source files (like SubRip format, etc.)
and save them to assets for `item` module.
    We expect the speed of the source subs to be equal to 1.
:param speed_subs: dictionary {speed: sub_id, ...}
:param subs_type: type of source subs: "srt", ...
    :param subs_filedata: unicode, content of source subs.
:param item: module object.
:param language: str, language of translation of transcripts
:returns: True, if all subs are generated and saved successfully.
"""
_ = item.runtime.service(item, "i18n").ugettext
if subs_type.lower() != 'srt':
raise TranscriptsGenerationException(_("We support only SubRip (*.srt) transcripts format."))
try:
srt_subs_obj = SubRipFile.from_string(subs_filedata)
except Exception as ex:
msg = _("Something wrong with SubRip transcripts file during parsing. Inner message is {error_message}").format(
error_message=text_type(ex)
)
raise TranscriptsGenerationException(msg)
if not srt_subs_obj:
raise TranscriptsGenerationException(_("Something wrong with SubRip transcripts file during parsing."))
sub_starts = []
sub_ends = []
sub_texts = []
for sub in srt_subs_obj:
sub_starts.append(sub.start.ordinal)
sub_ends.append(sub.end.ordinal)
sub_texts.append(sub.text.replace('\n', ' '))
subs = {
'start': sub_starts,
'end': sub_ends,
'text': sub_texts}
for speed, subs_id in six.iteritems(speed_subs):
save_subs_to_store(
generate_subs(speed, 1, subs),
subs_id,
item,
language
)
return subs
def generate_srt_from_sjson(sjson_subs, speed):
"""Generate transcripts with speed = 1.0 from sjson to SubRip (*.srt).
:param sjson_subs: "sjson" subs.
:param speed: speed of `sjson_subs`.
:returns: "srt" subs.
"""
output = ''
equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])
if not equal_len:
return output
sjson_speed_1 = generate_subs(speed, 1, sjson_subs)
for i in range(len(sjson_speed_1['start'])):
item = SubRipItem(
index=i,
start=SubRipTime(milliseconds=sjson_speed_1['start'][i]),
end=SubRipTime(milliseconds=sjson_speed_1['end'][i]),
text=sjson_speed_1['text'][i]
)
output += (six.text_type(item))
output += '\n'
return output
def generate_sjson_from_srt(srt_subs):
"""
Generate transcripts from sjson to SubRip (*.srt).
Arguments:
srt_subs(SubRip): "SRT" subs object
Returns:
Subs converted to "SJSON" format.
"""
sub_starts = []
sub_ends = []
sub_texts = []
for sub in srt_subs:
sub_starts.append(sub.start.ordinal)
sub_ends.append(sub.end.ordinal)
sub_texts.append(sub.text.replace('\n', ' '))
sjson_subs = {
'start': sub_starts,
'end': sub_ends,
'text': sub_texts
}
return sjson_subs
def copy_or_rename_transcript(new_name, old_name, item, delete_old=False, user=None):
"""
Renames `old_name` transcript file in storage to `new_name`.
If `old_name` is not found in storage, raises `NotFoundError`.
If `delete_old` is True, removes `old_name` files from storage.
"""
filename = u'subs_{0}.srt.sjson'.format(old_name)
content_location = StaticContent.compute_location(item.location.course_key, filename)
transcripts = contentstore().find(content_location).data
save_subs_to_store(json.loads(transcripts), new_name, item)
item.sub = new_name
item.save_with_metadata(user)
if delete_old:
remove_subs_from_store(old_name, item)
def get_html5_ids(html5_sources):
"""
    Helper method to parse out an HTML5 source into its id.
    NOTE: This assumes that '/' is not in the filename
"""
html5_ids = [x.split('/')[-1].rsplit('.', 1)[0] for x in html5_sources]
return html5_ids
def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):
"""
    Does some specific things that can only be done on save.
Video player item has some video fields: HTML5 ones and Youtube one.
If value of `sub` field of `new_item` is cleared, transcripts should be removed.
    `item` is the video module instance with updated values of fields,
    but it has not actually been saved to the store yet.
`old_metadata` contains old values of XFields.
# 1.
    If the value of the `sub` field of `new_item` differs from the values of the video fields of
    `new_item`, and the `new_item.sub` file is present, then this function creates copies of the
    `new_item.sub` file under new names equal to the values of the video fields of `new_item`.
    After that, the `sub` field of `new_item` is changed to one of the values of the video fields.
    This ensures that after the user changes the video fields, proper `sub` files, corresponding
    to the new values of the video fields, will be present in the system.
# 2. convert /static/filename.srt to filename.srt in self.transcripts.
(it is done to allow user to enter both /static/filename.srt and filename.srt)
# 3. Generate transcripts translation only when user clicks `save` button, not while switching tabs.
a) delete sjson translation for those languages, which were removed from `item.transcripts`.
Note: we are not deleting old SRT files to give user more flexibility.
b) For all SRT files in`item.transcripts` regenerate new SJSON files.
(To avoid confusing situation if you attempt to correct a translation by uploading
a new version of the SRT file with same name).
"""
_ = item.runtime.service(item, "i18n").ugettext
# # 1.
# html5_ids = get_html5_ids(item.html5_sources)
# # Youtube transcript source should always have a higher priority than html5 sources. Appending
# # `youtube_id_1_0` at the end helps achieve this when we read transcripts list.
# possible_video_id_list = html5_ids + [item.youtube_id_1_0]
# sub_name = item.sub
# for video_id in possible_video_id_list:
# if not video_id:
# continue
# if not sub_name:
# remove_subs_from_store(video_id, item)
# continue
# # copy_or_rename_transcript changes item.sub of module
# try:
# # updates item.sub with `video_id`, if it is successful.
# copy_or_rename_transcript(video_id, sub_name, item, user=user)
# except NotFoundError:
# # subtitles file `sub_name` is not presented in the system. Nothing to copy or rename.
# log.debug(
# "Copying %s file content to %s name is failed, "
# "original file does not exist.",
# sub_name, video_id
# )
# 2.
if generate_translation:
for lang, filename in item.transcripts.items():
item.transcripts[lang] = os.path.split(filename)[-1]
# 3.
if generate_translation:
old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()
new_langs = set(item.transcripts)
html5_ids = get_html5_ids(item.html5_sources)
possible_video_id_list = html5_ids + [item.youtube_id_1_0]
for lang in old_langs.difference(new_langs): # 3a
for video_id in possible_video_id_list:
if video_id:
remove_subs_from_store(video_id, item, lang)
reraised_message = ''
for lang in new_langs: # 3b
try:
generate_sjson_for_all_speeds(
item,
item.transcripts[lang],
{speed: subs_id for subs_id, speed in six.iteritems(youtube_speed_dict(item))},
lang,
)
            except TranscriptException as ex:
                # Collect the error so it can be re-raised after saving
                reraised_message += ' ' + text_type(ex)
if reraised_message:
item.save_with_metadata(user)
raise TranscriptException(reraised_message)
def youtube_speed_dict(item):
"""
Returns {speed: youtube_ids, ...} dict for existing youtube_ids
"""
yt_ids = [item.youtube_id_0_75, item.youtube_id_1_0, item.youtube_id_1_25, item.youtube_id_1_5]
yt_speeds = [0.75, 1.00, 1.25, 1.50]
youtube_ids = {p[0]: p[1] for p in zip(yt_ids, yt_speeds) if p[0]}
return youtube_ids
def subs_filename(subs_id, lang='en'):
"""
Generate proper filename for storage.
"""
if lang == 'en':
return u'subs_{0}.srt.sjson'.format(subs_id)
else:
return u'{0}_subs_{1}.srt.sjson'.format(lang, subs_id)
def generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, lang):
"""
Generates sjson from srt for given lang.
`item` is module object.
"""
_ = item.runtime.service(item, "i18n").ugettext
try:
srt_transcripts = contentstore().find(Transcript.asset_location(item.location, user_filename))
except NotFoundError as ex:
raise TranscriptException(_("{exception_message}: Can't find uploaded transcripts: {user_filename}").format(
exception_message=text_type(ex),
user_filename=user_filename
))
if not lang:
lang = item.transcript_language
# Used utf-8-sig encoding type instead of utf-8 to remove BOM(Byte Order Mark), e.g. U+FEFF
generate_subs_from_source(
result_subs_dict,
os.path.splitext(user_filename)[1][1:],
srt_transcripts.data.decode('utf-8-sig'),
item,
lang
)
def get_or_create_sjson(item, transcripts):
"""
Get sjson if already exists, otherwise generate it.
Generate sjson with subs_id name, from user uploaded srt.
Subs_id is extracted from srt filename, which was set by user.
Args:
        transcripts (dict): dictionary of (language: file) pairs.
Raises:
TranscriptException: when srt subtitles do not exist,
and exceptions from generate_subs_from_source.
`item` is module object.
"""
user_filename = transcripts[item.transcript_language]
user_subs_id = os.path.splitext(user_filename)[0]
source_subs_id, result_subs_dict = user_subs_id, {1.0: user_subs_id}
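    # Only the 1.0-speed sjson is generated here; the subs id is derived from
    # the uploaded srt filename.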
try:
sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
except NotFoundError: # generating sjson from srt
generate_sjson_for_all_speeds(item, user_filename, result_subs_dict, item.transcript_language)
sjson_transcript = Transcript.asset(item.location, source_subs_id, item.transcript_language).data
return sjson_transcript
def get_video_ids_info(edx_video_id, youtube_id_1_0, html5_sources):
"""
    Returns whether the video is external along with the list of video ids.
Arguments:
edx_video_id (unicode): edx_video_id
youtube_id_1_0 (unicode): youtube id
html5_sources (list): html5 video ids
Returns:
tuple: external or internal, video ids list
"""
clean = lambda item: item.strip() if isinstance(item, six.string_types) else item
external = not bool(clean(edx_video_id))
video_ids = [edx_video_id, youtube_id_1_0] + get_html5_ids(html5_sources)
# video_ids cleanup
video_ids = [item for item in video_ids if bool(clean(item))]
return external, video_ids
def clean_video_id(edx_video_id):
"""
Cleans an edx video ID.
Arguments:
edx_video_id(unicode): edx-val's video identifier
"""
return edx_video_id and edx_video_id.strip()
def get_video_transcript_content(edx_video_id, language_code):
"""
    Gets video transcript content for the given video and language, if available.
Arguments:
language_code(unicode): Language code of the requested transcript
edx_video_id(unicode): edx-val's video identifier
Returns:
A dict containing transcript's file name and its sjson content.
"""
transcript = None
edx_video_id = clean_video_id(edx_video_id)
if edxval_api and edx_video_id:
transcript = edxval_api.get_video_transcript_data(edx_video_id, language_code)
return transcript
def get_available_transcript_languages(edx_video_id):
"""
Gets available transcript languages for a video.
Arguments:
edx_video_id(unicode): edx-val's video identifier
Returns:
A list containing distinct transcript language codes against all the passed video ids.
"""
available_languages = []
edx_video_id = clean_video_id(edx_video_id)
if edxval_api and edx_video_id:
available_languages = edxval_api.get_available_transcript_languages(video_id=edx_video_id)
return available_languages
def convert_video_transcript(file_name, content, output_format):
"""
Convert video transcript into desired format
Arguments:
file_name: name of transcript file along with its extension
content: transcript content stream
output_format: the format in which transcript will be converted
Returns:
A dict containing the new transcript filename and the content converted into desired format.
"""
name_and_extension = os.path.splitext(file_name)
basename, input_format = name_and_extension[0], name_and_extension[1][1:]
filename = u'{base_name}.{ext}'.format(base_name=basename, ext=output_format)
converted_transcript = Transcript.convert(content, input_format=input_format, output_format=output_format)
return dict(filename=filename, content=converted_transcript)
class Transcript(object):
"""
Container for transcript methods.
"""
SRT = 'srt'
TXT = 'txt'
SJSON = 'sjson'
mime_types = {
SRT: 'application/x-subrip; charset=utf-8',
TXT: 'text/plain; charset=utf-8',
SJSON: 'application/json',
}
@staticmethod
def convert(content, input_format, output_format):
"""
Convert transcript `content` from `input_format` to `output_format`.
Accepted input formats: sjson, srt.
Accepted output format: srt, txt, sjson.
Raises:
TranscriptsGenerationException: On parsing the invalid srt content during conversion from srt to sjson.
"""
assert input_format in ('srt', 'sjson')
assert output_format in ('txt', 'srt', 'sjson')
if input_format == output_format:
return content
if input_format == 'srt':
if output_format == 'txt':
text = SubRipFile.from_string(content.decode('utf8')).text
return HTMLParser().unescape(text)
elif output_format == 'sjson':
try:
# With error handling (set to 'ERROR_RAISE'), we will be getting
# the exception if something went wrong in parsing the transcript.
srt_subs = SubRipFile.from_string(
# Skip byte order mark(BOM) character
content.decode('utf-8-sig'),
error_handling=SubRipFile.ERROR_RAISE
)
except Error as ex: # Base exception from pysrt
raise TranscriptsGenerationException(text_type(ex))
return json.dumps(generate_sjson_from_srt(srt_subs))
if input_format == 'sjson':
if output_format == 'txt':
text = json.loads(content)['text']
text_without_none = [line if line else '' for line in text]
return HTMLParser().unescape("\n".join(text_without_none))
elif output_format == 'srt':
return generate_srt_from_sjson(json.loads(content), speed=1.0)
@staticmethod
def asset(location, subs_id, lang='en', filename=None):
"""
Get asset from contentstore, asset location is built from subs_id and lang.
`location` is module location.
"""
# HACK Warning! this is temporary and will be removed once edx-val take over the
# transcript module and contentstore will only function as fallback until all the
# data is migrated to edx-val. It will be saving a contentstore hit for a hardcoded
# dummy-non-existent-transcript name.
if NON_EXISTENT_TRANSCRIPT in [subs_id, filename]:
raise NotFoundError
asset_filename = subs_filename(subs_id, lang) if not filename else filename
return Transcript.get_asset(location, asset_filename)
@staticmethod
def get_asset(location, filename):
"""
Return asset by location and filename.
"""
return contentstore().find(Transcript.asset_location(location, filename))
@staticmethod
def asset_location(location, filename):
"""
Return asset location. `location` is module location.
"""
# If user transcript filename is empty, raise `TranscriptException` to avoid `InvalidKeyError`.
if not filename:
raise TranscriptException("Transcript not uploaded yet")
return StaticContent.compute_location(location.course_key, filename)
@staticmethod
def delete_asset(location, filename):
"""
Delete asset by location and filename.
"""
try:
contentstore().delete(Transcript.asset_location(location, filename))
log.info("Transcript asset %s was removed from store.", filename)
except NotFoundError:
pass
return StaticContent.compute_location(location.course_key, filename)
class VideoTranscriptsMixin(object):
"""Mixin class for transcript functionality.
This is necessary for both VideoModule and VideoDescriptor.
"""
def available_translations(self, transcripts, verify_assets=None, is_bumper=False):
"""
Return a list of language codes for which we have transcripts.
Arguments:
verify_assets (boolean): If True, checks to ensure that the transcripts
really exist in the contentstore. If False, we just look at the
VideoDescriptor fields and do not query the contentstore. One reason
we might do this is to avoid slamming contentstore() with queries
when trying to make a listing of videos and their languages.
Defaults to `not FALLBACK_TO_ENGLISH_TRANSCRIPTS`.
transcripts (dict): A dict with all transcripts and a sub.
include_val_transcripts(boolean): If True, adds the edx-val transcript languages as well.
"""
translations = []
if verify_assets is None:
verify_assets = not settings.FEATURES.get('FALLBACK_TO_ENGLISH_TRANSCRIPTS')
sub, other_langs = transcripts["sub"], transcripts["transcripts"]
if verify_assets:
all_langs = dict(**other_langs)
if sub:
all_langs.update({'en': sub})
for language, filename in six.iteritems(all_langs):
try:
# for bumper videos, transcripts are stored in content store only
if is_bumper:
get_transcript_for_video(self.location, filename, filename, language)
else:
get_transcript(self, language)
except NotFoundError:
continue
translations.append(language)
else:
# If we're not verifying the assets, we just trust our field values
translations = list(other_langs)
if not translations or sub:
translations += ['en']
# to clean redundant language codes.
return list(set(translations))
def get_transcript(self, transcripts, transcript_format='srt', lang=None):
"""
Returns transcript, filename and MIME type.
transcripts (dict): A dict with all transcripts and a sub.
Raises:
- NotFoundError if cannot find transcript file in storage.
- ValueError if transcript file is empty or incorrect JSON.
- KeyError if transcript file has incorrect format.
If language is 'en', self.sub should be correct subtitles name.
If language is 'en', but if self.sub is not defined, this means that we
should search for video name in order to get proper transcript (old style courses).
If language is not 'en', give back transcript in proper language and format.
"""
if not lang:
lang = self.get_default_transcript_language(transcripts)
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if lang == 'en':
if sub: # HTML5 case and (Youtube case for new style videos)
transcript_name = sub
elif self.youtube_id_1_0: # old courses
transcript_name = self.youtube_id_1_0
else:
log.debug("No subtitles for 'en' language")
raise ValueError
data = Transcript.asset(self.location, transcript_name, lang).data
filename = u'{}.{}'.format(transcript_name, transcript_format)
content = Transcript.convert(data, 'sjson', transcript_format)
else:
data = Transcript.asset(self.location, None, None, other_lang[lang]).data
filename = u'{}.{}'.format(os.path.splitext(other_lang[lang])[0], transcript_format)
content = Transcript.convert(data, 'srt', transcript_format)
if not content:
log.debug('no subtitles produced in get_transcript')
raise ValueError
return content, filename, Transcript.mime_types[transcript_format]
def get_default_transcript_language(self, transcripts):
"""
Returns the default transcript language for this video module.
Args:
transcripts (dict): A dict with all transcripts and a sub.
"""
sub, other_lang = transcripts["sub"], transcripts["transcripts"]
if self.transcript_language in other_lang:
transcript_language = self.transcript_language
elif sub:
transcript_language = u'en'
elif len(other_lang) > 0:
transcript_language = sorted(other_lang)[0]
else:
transcript_language = u'en'
return transcript_language
def get_transcripts_info(self, is_bumper=False):
"""
Returns a transcript dictionary for the video.
Arguments:
is_bumper(bool): If True, the request is for the bumper transcripts
include_val_transcripts(bool): If True, include edx-val transcripts as well
"""
if is_bumper:
transcripts = copy.deepcopy(get_bumper_settings(self).get('transcripts', {}))
sub = transcripts.pop("en", "")
else:
transcripts = self.transcripts if self.transcripts else {}
sub = self.sub
# Only attach transcripts that are not empty.
transcripts = {
language_code: transcript_file
for language_code, transcript_file in transcripts.items() if transcript_file != ''
}
# bumper transcripts are stored in content store so we don't need to include val transcripts
if not is_bumper:
transcript_languages = get_available_transcript_languages(edx_video_id=self.edx_video_id)
# HACK Warning! this is temporary and will be removed once edx-val take over the
# transcript module and contentstore will only function as fallback until all the
# data is migrated to edx-val.
for language_code in transcript_languages:
if language_code == 'en' and not sub:
sub = NON_EXISTENT_TRANSCRIPT
elif not transcripts.get(language_code):
transcripts[language_code] = NON_EXISTENT_TRANSCRIPT
return {
"sub": sub,
"transcripts": transcripts,
}
@exception_decorator
def get_transcript_from_val(edx_video_id, lang=None, output_format=Transcript.SRT):
"""
Get video transcript from edx-val.
Arguments:
edx_video_id (unicode): video identifier
lang (unicode): transcript language
output_format (unicode): transcript output format
Returns:
tuple containing content, filename, mimetype
"""
transcript = get_video_transcript_content(edx_video_id, lang)
if not transcript:
raise NotFoundError(u'Transcript not found for {}, lang: {}'.format(edx_video_id, lang))
transcript_conversion_props = dict(transcript, output_format=output_format)
transcript = convert_video_transcript(**transcript_conversion_props)
filename = transcript['filename']
content = transcript['content']
mimetype = Transcript.mime_types[output_format]
return content, filename, mimetype
def get_transcript_for_video(video_location, subs_id, file_name, language):
"""
Get video transcript from content store.
    NOTE: Transcripts can be searched in the content store in two ways:
    1. by an id (a.k.a. subs_id), which is used to construct the transcript filename
    2. by providing the transcript filename directly
Arguments:
video_location (Locator): Video location
subs_id (unicode): id for a transcript in content store
file_name (unicode): file_name for a transcript in content store
language (unicode): transcript language
Returns:
tuple containing transcript input_format, basename, content
"""
try:
if subs_id is None:
raise NotFoundError
content = Transcript.asset(video_location, subs_id, language).data
base_name = subs_id
input_format = Transcript.SJSON
except NotFoundError:
content = Transcript.asset(video_location, None, language, file_name).data
base_name = os.path.splitext(file_name)[0]
input_format = Transcript.SRT
return input_format, base_name, content
@exception_decorator
def get_transcript_from_contentstore(video, language, output_format, transcripts_info, youtube_id=None):
"""
Get video transcript from content store.
Arguments:
video (Video Descriptor): Video descriptor
language (unicode): transcript language
output_format (unicode): transcript output format
transcripts_info (dict): transcript info for a video
youtube_id (unicode): youtube video id
Returns:
tuple containing content, filename, mimetype
"""
input_format, base_name, transcript_content = None, None, None
if output_format not in (Transcript.SRT, Transcript.SJSON, Transcript.TXT):
raise NotFoundError('Invalid transcript format `{output_format}`'.format(output_format=output_format))
sub, other_languages = transcripts_info['sub'], transcripts_info['transcripts']
transcripts = dict(other_languages)
# this is sent in case of a translation dispatch and we need to use it as our subs_id.
possible_sub_ids = [youtube_id, sub, video.youtube_id_1_0] + get_html5_ids(video.html5_sources)
for sub_id in possible_sub_ids:
try:
transcripts[u'en'] = sub_id
input_format, base_name, transcript_content = get_transcript_for_video(
video.location,
subs_id=sub_id,
file_name=transcripts[language],
language=language
)
break
except (KeyError, NotFoundError):
continue
if transcript_content is None:
raise NotFoundError('No transcript for `{lang}` language'.format(
lang=language
))
# add language prefix to transcript file only if language is not None
language_prefix = '{}_'.format(language) if language else ''
transcript_name = u'{}{}.{}'.format(language_prefix, base_name, output_format)
transcript_content = Transcript.convert(transcript_content, input_format=input_format, output_format=output_format)
if not transcript_content.strip():
raise NotFoundError('No transcript content')
if youtube_id:
youtube_ids = youtube_speed_dict(video)
transcript_content = json.dumps(
generate_subs(youtube_ids.get(youtube_id, 1), 1, json.loads(transcript_content))
)
return transcript_content, transcript_name, Transcript.mime_types[output_format]
def get_transcript(video, lang=None, output_format=Transcript.SRT, youtube_id=None):
"""
Get video transcript from edx-val or content store.
Arguments:
video (Video Descriptor): Video Descriptor
lang (unicode): transcript language
output_format (unicode): transcript output format
youtube_id (unicode): youtube video id
Returns:
tuple containing content, filename, mimetype
"""
transcripts_info = video.get_transcripts_info()
if not lang:
lang = video.get_default_transcript_language(transcripts_info)
try:
edx_video_id = clean_video_id(video.edx_video_id)
if not edx_video_id:
raise NotFoundError
return get_transcript_from_val(edx_video_id, lang, output_format)
except NotFoundError:
return get_transcript_from_contentstore(
video,
lang,
youtube_id=youtube_id,
output_format=output_format,
transcripts_info=transcripts_info
)
| agpl-3.0 | -4,293,292,588,554,137,600 | 35.178537 | 120 | 0.641183 | false |
mlk/thefuck | thefuck/rules/apt_get.py | 1 | 1174 | from thefuck.specific.apt import apt_available
from thefuck.utils import memoize, which
from thefuck.shells import shell
try:
from CommandNotFound import CommandNotFound
command_not_found = CommandNotFound()
enabled_by_default = apt_available
except ImportError:
enabled_by_default = False
def _get_executable(command):
if command.script_parts[0] == 'sudo':
return command.script_parts[1]
else:
return command.script_parts[0]
@memoize
def get_package(executable):
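    # Ask command-not-found which packages provide `executable` and return the first match, or None if there is none.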
try:
packages = command_not_found.getPackages(executable)
return packages[0][0]
except IndexError:
# IndexError is thrown when no matching package is found
return None
def match(command):
if 'not found' in command.stderr or 'not installed' in command.stderr:
executable = _get_executable(command)
return not which(executable) and get_package(executable)
else:
return False
def get_new_command(command):
executable = _get_executable(command)
name = get_package(executable)
formatme = shell.and_('sudo apt-get install {}', '{}')
return formatme.format(name, command.script)
| mit | -3,930,856,329,572,652,500 | 26.302326 | 74 | 0.69506 | false |
IlyaGusev/PoetryCorpus | poetry/apps/corpus/views/comparison_view.py | 1 | 4872 | from collections import namedtuple
from typing import List
from django.views.generic import TemplateView, View
from django.http import HttpResponse
from braces.views import LoginRequiredMixin, GroupRequiredMixin
from poetry.apps.corpus.models import Poem, MarkupVersion
from rupo.main.markup import Markup
def get_accents(markup: Markup):
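    # Flatten the markup into one boolean per syllable: True where the syllable carries a stress.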
accents = []
for line in markup.lines:
for word in line.words:
for syllable in word.syllables:
accents.append(syllable.stress != -1)
return accents
def get_accuracy(standard_accents: List[bool], test_accents: List[bool]):
l = min(len(standard_accents), len(test_accents))
hits = sum([1 for standard_accent, test_accent in zip(standard_accents, test_accents)
if standard_accent == test_accent])
return float(hits) / l
def get_precision(standard_accents: List[bool], test_accents: List[bool]):
tp = sum([1 for standard_accent, test_accent in zip(standard_accents, test_accents)
if standard_accent == test_accent == 1])
tp_fp = sum([1 for accent in test_accents if accent == 1])
return float(tp) / tp_fp
def get_recall(standard_accents: List[bool], test_accents: List[bool]):
tp = sum([1 for standard_accent, test_accent in zip(standard_accents, test_accents)
if standard_accent == test_accent == 1])
tp_fn = sum([1 for accent in standard_accents if accent == 1])
return float(tp) / tp_fn
def get_comparison(poem, standard_pk, test_pk):
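    # Locate the standard and test markups for the poem and score the test stress marks against the standard ones.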
test_markup = None
standard_markup = None
for markup in poem.markups.all():
if markup.markup_version.pk == standard_pk:
standard_markup = markup
if markup.markup_version.pk == test_pk:
test_markup = markup
assert test_markup.get_markup().text == standard_markup.get_markup().text
standard_accents = get_accents(standard_markup.get_markup())
test_accents = get_accents(test_markup.get_markup())
accuracy = get_accuracy(standard_accents, test_accents)
precision = get_precision(standard_accents, test_accents)
recall = get_recall(standard_accents, test_accents)
f1 = 2*precision*recall/(precision+recall)
Comparison = namedtuple("Comparison", "poem test standard accuracy precision recall f1")
return Comparison(poem=poem, test=test_markup, standard=standard_markup, accuracy=accuracy,
precision=precision, recall=recall, f1=f1)
def get_all_comparisons(standard_pk, test_pk):
standard_markup_version = MarkupVersion.objects.get(pk=standard_pk)
poems = list(set([markup.poem for markup in standard_markup_version.markups.filter(
poem__markups__markup_version=test_pk)]))
return [get_comparison(poem, standard_pk, test_pk) for poem in poems]
class ComparisonView(LoginRequiredMixin, GroupRequiredMixin, TemplateView):
template_name = 'comparison.html'
group_required = "Approved"
def get_context_data(self, **kwargs):
context = super(ComparisonView, self).get_context_data(**kwargs)
test_pk = int(self.request.GET["test"])
standard_pk = int(self.request.GET["standard"])
document_pk = self.request.GET.get("document", None)
if document_pk is None:
comparisons = get_all_comparisons(standard_pk, test_pk)
else:
comparisons = [get_comparison(Poem.objects.get(pk=document_pk), standard_pk, test_pk)]
context["comparisons"] = comparisons
context["avg_accuracy"] = sum([comparison.accuracy for comparison in comparisons])/len(comparisons)
context["avg_f1"] = sum([comparison.f1 for comparison in comparisons]) / len(comparisons)
return context
class ComparisonCSVView(LoginRequiredMixin, GroupRequiredMixin, View):
group_required = "Approved"
def get(self, request, *args, **kwargs):
standard_pk = int(request.GET["standard"])
test_pk = int(request.GET["test"])
response = HttpResponse()
comparisons = get_all_comparisons(standard_pk, test_pk)
content = "poem,test,standard,accuracy,precision,recall,f1\n"
for comparison in comparisons:
content += ",".join([comparison.poem.name.replace(",", ""),
comparison.test.author.replace(",", ""),
comparison.standard.author.replace(",", ""),
"{:.3f}".format(comparison.accuracy),
"{:.3f}".format(comparison.precision),
"{:.3f}".format(comparison.recall),
"{:.3f}".format(comparison.f1)]) + "\n"
response.content = content
response["Content-Disposition"] = "attachment; filename={0}".format(
"comparison" + str(standard_pk) + "-" + str(test_pk) + ".csv")
return response
| apache-2.0 | 5,808,079,583,847,912,000 | 43.697248 | 107 | 0.649015 | false |
samuelmaudo/yepes | yepes/view_mixins/cache.py | 1 | 2980 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import hashlib
from django.contrib import messages
from django.http import HttpResponsePermanentRedirect
from django.utils.encoding import force_bytes
from yepes.cache import MintCache
from yepes.conf import settings
from yepes.utils.minifier import minify_html_response
class CacheMixin(object):
"""
Provides the ability to cache the response to save resources in
further requests.
By default, it only caches responses for GET and HEAD requests,
and only if the response status code is 200, 301 or 404. However,
it is highly customizable.
"""
cache_alias = None
cached_methods = ('GET', 'HEAD')
cached_statuses = (200, 301, 404)
delay = None
timeout = None
use_cache = True
def __init__(self, *args, **kwargs):
super(CacheMixin, self).__init__(*args, **kwargs)
self._cache = MintCache(
self.cache_alias or settings.VIEW_CACHE_ALIAS,
timeout=self.timeout or settings.VIEW_CACHE_SECONDS,
delay=self.delay or settings.VIEW_CACHE_DELAY_SECONDS)
def get_cache_hash(self, request):
return '{0}://{1}{2}'.format(
'https' if request.is_secure() else 'http',
request.get_host(),
request.path)
def get_cache_key(self, request):
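        # Namespace the key by the view class name and an MD5 of scheme://host/path.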
class_name = self.__class__.__name__
hash = hashlib.md5(force_bytes(self.get_cache_hash(request)))
return 'yepes.views.{0}.{1}'.format(class_name, hash.hexdigest())
def dispatch(self, request, *args, **kwargs):
super_dispatch = super(CacheMixin, self).dispatch
self.request = request
self.args = args
self.kwargs = kwargs
if (settings.VIEW_CACHE_AVAILABLE
and self.get_use_cache(request)):
key = self.get_cache_key(request)
response = self._cache.get(key)
if response is None:
response = super_dispatch(request, *args, **kwargs)
if response.status_code not in self.cached_statuses:
return response
if (hasattr(response, 'render')
and callable(response.render)):
def update_cache(resp):
resp = minify_html_response(resp)
return self._cache.set(key, resp)
response.add_post_render_callback(update_cache)
else:
self._cache.set(key, minify_html_response(response))
return response
else:
return super_dispatch(request, *args, **kwargs)
def get_use_cache(self, request):
if not self.use_cache:
return False
if request.method.upper() not in self.cached_methods:
return False
try:
return not request.user.is_staff
except AttributeError:
return True
| bsd-3-clause | -7,693,372,715,246,965,000 | 31.391304 | 73 | 0.591611 | false |
evvers/git-pre-commit-hook-utils | tests/test_git_pre_commit_hook_utils.py | 1 | 2519 | import git_pre_commit_hook_utils as utils
import scripttest
import os
import copy
def test_git_mode():
m = utils.GitMode('120000')
assert m.is_symlink()
assert not m.is_gitlink()
def test_with_empty_repo(tmpdir):
os_environ = copy.deepcopy(os.environ)
os_environ['GIT_DIR'] = str(tmpdir) + '/.git'
os_environ['GIT_WORK_TREE'] = str(tmpdir)
env = scripttest.TestFileEnvironment(
str(tmpdir),
start_clear=False,
template_path='data',
environ=os_environ,
)
env.writefile('empty_file', content='')
env.run('git', 'init', str(tmpdir))
env.run('git', 'add', 'empty_file')
files_staged_for_commit = list(utils.files_staged_for_commit())
assert len(files_staged_for_commit) == 1
file_at_index = files_staged_for_commit[0]
assert file_at_index.path == 'empty_file'
assert file_at_index.contents == ''
assert file_at_index.size == 0
assert file_at_index.status == 'A'
env.run('git', 'commit', '-m', 'Initial commit')
env.run('ln', '-s', 'empty_file', 'link_to_empty_file')
env.run('git', 'add', 'link_to_empty_file')
files_staged_for_commit = list(utils.files_staged_for_commit())
assert len(files_staged_for_commit) == 1
file_at_index = files_staged_for_commit[0]
assert file_at_index.is_symlink()
def test_is_python_code_by_path():
file_at_index = utils.FileAtIndex(
contents='',
size=0,
mode='',
sha1='',
status='',
path='some/path/main.py',
)
assert file_at_index.is_python_code()
def test_is_python_code_by_contents():
file_at_index = utils.FileAtIndex(
contents='#!/usr/bin/env/python\nprint "hello"\n',
size=0,
mode='',
sha1='',
status='',
path='some/path/python_script',
)
assert file_at_index.is_python_code()
def test_is_not_python_code():
file_at_index = utils.FileAtIndex(
contents='some text with python\n',
size=0,
mode='',
sha1='',
status='',
path='some/path/not_python_script.cpp',
)
assert not file_at_index.is_python_code()
def test_is_fnmatch():
file_at_index = utils.FileAtIndex(
contents='some text with python\n',
size=0,
mode='',
sha1='',
status='',
path='some/path/not_python_script.cpp',
)
assert file_at_index.is_fnmatch('*.cpp')
assert file_at_index.is_fnmatch('*')
assert not file_at_index.is_fnmatch('*.py')
| mit | -2,185,100,609,934,616,600 | 27.303371 | 67 | 0.593886 | false |
rohit01/sethji | sethji/util.py | 1 | 2148 | import datetime
import calendar
import re
def validate_email(email, email_regex_csv):
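    # Accept the address if it matches any of the comma-separated patterns; '*' is the only wildcard supported.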
regex_list = [e.strip() for e in email_regex_csv.split(',')]
for user_regex in regex_list:
## Only * is allowed in user email regex
match_regex = re.escape(user_regex)
match_regex = "^%s$" % match_regex.replace('\\*', '.*')
if re.match(match_regex, email):
return True
return False
def convert_none_into_blank_values(details):
for k, v in details.items():
if v == None:
details[k] = ''
return details
def pretty_date(time_object=False):
"""
    Get a datetime object or an int() epoch timestamp and return a
    pretty string like 'an hour ago', 'Yesterday', '3 months ago',
    'just now', etc.
"""
now = datetime.datetime.now()
if type(time_object) is int:
diff = now - datetime.datetime.fromtimestamp(time_object)
elif isinstance(time_object, datetime.datetime):
diff = now - time_object
elif not time_object:
return ''
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 10:
return "just now"
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(second_diff / 60) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(second_diff / 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
if day_diff < 31:
return str(day_diff / 7) + " weeks ago"
if day_diff < 365:
return str(day_diff / 30) + " months ago"
return str(day_diff / 365) + " years ago"
def get_current_month_day_count():
now = datetime.datetime.now()
return calendar.monthrange(now.year, now.month)[1]
def get_current_month_and_year():
now = datetime.datetime.now()
return now.strftime("%B"), now.year
| mit | 1,566,480,901,819,199,500 | 28.833333 | 66 | 0.582868 | false |
jmaher/randomtools | wget/wget_helper.py | 1 | 4423 | import subprocess
import re
import os
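# Path of the saved HTML page whose resource URLs will be rewritten to the local files/ copies; set before running.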
filename = ''
def findAndGet(url, lines):
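    # Scan the page for a src="..." or CSS url(...) reference that starts with `url` and return the full URL, unescaping '&amp;'.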
retVal = None
fname, root, query = getFilename(url)
resrc = re.compile('.*src="%s(.*)".*' % url)
rebg = re.compile('.*\(%s(.*)\).*' % url)
for line in lines:
match = resrc.match(line)
if match:
retVal = url + match.group(1).split('"')[0]
break
match = rebg.match(line)
if match:
retVal = url + match.group(1).split('"')[0]
break
if retVal:
retVal = retVal.replace("&", "&")
return retVal
def findEscapedUrl(url, lines):
#look for the \/ version of the url
retVal = None
fname, root, query = getFilename(url)
refname = re.compile('.*[=:]"https:(.*)%s(.*)".*' % fname)
refname2 = re.compile('.*src=https:(.*)%s(.*)".*' % fname)
for line in lines:
match = refname.match(line)
if match:
first = match.group(1).split('"')[-1]
if first.startswith('files/'):
break
retVal = 'https:' + first + fname + match.group(2).split('"')[0]
print "matched on refname: %s" % retVal
break
match = refname2.match(line)
if match:
first = match.group(1).split('"')[-1]
if first.startswith('files/'):
break
retVal = 'https:' + first + fname + match.group(2).split('"')[0]
print "matched on refname2: %s" % retVal
break
if retVal:
retVal = retVal.replace("&", "&")
return retVal
def getFilename(url):
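    # Split a URL into (basename, directory root, query string).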
parts = url.split('?')
query = ""
if len(parts) > 1:
query = '?'.join(parts[1:])
dirparts = parts[0].split('/')
root = '/'.join(dirparts[:-1])
fname = dirparts[-1]
return fname, root, query
def wgetFile(filename, url):
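    # Fetch `url` into files/<filename> with wget, skipping files that already exist with non-zero size; return wget's output, appending an ERROR line when the download is empty.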
try:
url.index('&')
url = '"%s"' % url
except:
pass
if os.path.exists('files/%s' % filename):
stats = os.stat('files/%s' % filename)
if stats.st_size > 0:
return ""
url = url.replace('\/', '/')
cmd = 'wget --user-agent=Firefox -O files/%s %s' % (filename, url)
print cmd
# NOTE: using subprocess fails for wget as it has a scheme error
os.system('%s > wget.out' % cmd)
with open('wget.out', 'r') as fHandle:
stderr = fHandle.read()
if os.path.exists('files/%s' % filename):
stats = os.stat('files/%s' % filename)
if stats.st_size <= 0:
stderr = "%s\nERROR: file %s is size 0" % (stderr, filename)
os.system('rm files/%s' % filename)
return stderr
def replaceLines(query, root, lines):
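    # Strip the query string and rewrite the remote root to the local files/ directory in every line of the page.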
newlines = []
newline = ""
for line in lines:
if query:
newline = line.replace('%s' % query, '')
else:
newline = line
newline = newline.replace('%s' % root, 'files')
newlines.append(newline)
return newlines
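# Main pass: read the URL list from f.txt, download each resource into files/, rewrite the references in the saved page, and collect URLs that still fail in `redo`.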
with open('f.txt', 'r') as fHandle:
urls = fHandle.readlines()
with open(filename, 'r') as fHandle:
lines = fHandle.readlines()
redo = []
for url in urls:
url = url.split(' ')[0]
url = url.strip('\n')
if url.strip(' ') == "":
continue
if url.startswith('file://'):
continue
fname, root, query = getFilename(url)
stderr = wgetFile(fname, url)
replace = True
rewget = re.compile('.*ERROR.*', re.MULTILINE|re.DOTALL)
if rewget.match(stderr):
found = findAndGet(url, lines)
if not found:
redo.append(url)
replace = False
else:
url = found
fname, root, query = getFilename(url)
stderr = wgetFile(fname, url)
if rewget.match(stderr):
redo.append(url)
replace = False
if replace:
lines = replaceLines(query, root, lines)
# Handle second pass for escaped urls
found = findEscapedUrl(url, lines)
if found:
fname, root, query = getFilename(found)
stderr = wgetFile(fname, found)
if rewget.match(stderr):
            if url not in redo:
                redo.append(url)
else:
lines = replaceLines(query, root, lines)
with open(filename, 'w') as fHandle:
for line in lines:
fHandle.write(line)
print "\n\n:Files that didn't work out so well:"
for r in redo:
print r
| mpl-2.0 | 6,678,670,892,547,276,000 | 25.644578 | 76 | 0.530861 | false |
CartoDB/cartoframes | tests/unit/viz/test_map.py | 1 | 12097 | from cartoframes.auth import Credentials
from cartoframes.viz import Map, Layer, popup_element, constants
from cartoframes.viz.source import Source
from cartoframes.io.managers.context_manager import ContextManager
from .utils import build_geodataframe
from ..mocks.kuviz_mock import KuvizPublisherMock
def setup_mocks(mocker):
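    # Stub out the Kuviz publisher and ContextManager queries so publish() can run without a real CARTO backend.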
mocker.patch('cartoframes.viz.map._get_publisher', return_value=KuvizPublisherMock())
mocker.patch.object(ContextManager, 'compute_query', return_value='select * from fake_table')
mocker.patch.object(ContextManager, 'get_geom_type', return_value='point')
mocker.patch.object(ContextManager, 'get_bounds', return_value=None)
class TestMap(object):
def test_is_defined(self):
"""Map"""
assert Map is not None
class TestMapInitialization(object):
def test_size(self):
"""Map should set the size by default"""
map = Map()
assert map.size is None
def test__init(self):
"""Map should return a valid template"""
map = Map()
map._repr_html_()
assert map.bounds is not None
assert map._html_map is not None
def test_bounds(self):
"""Map should set the bounds"""
map = Map(bounds={
'west': -10,
'east': 10,
'north': -10,
'south': 10
})
assert map.bounds == [[-10, 10], [10, -10]]
def test_bounds_clamp(self):
"""Map should set the bounds clamped"""
map = Map(bounds={
'west': -1000,
'east': 1000,
'north': -1000,
'south': 1000
})
assert map.bounds == [[-180, 90], [180, -90]]
class TestMapLayer(object):
def test_one_layer(self):
"""Map layer should be able to initialize one layer"""
source = Source(build_geodataframe([-10, 0], [-10, 0]))
layer = Layer(source)
map = Map(layer)
assert map.layers == [layer]
layer_def = map.layers[0].get_layer_def()
assert layer_def.get('interactivity') == []
assert layer_def.get('credentials') is None
assert layer_def.get('legends') is not None
assert layer_def.get('widgets') is not None
assert layer_def.get('data') is not None
assert layer_def.get('type') == 'GeoJSON'
assert layer_def.get('viz') is not None
def test_two_layers(self):
"""Map layer should be able to initialize two layers in the correct order"""
source_1 = Source(build_geodataframe([-10, 0], [-10, 0]))
source_2 = Source(build_geodataframe([0, 10], [10, 0]))
layer_1 = Layer(source_1)
layer_2 = Layer(source_2)
map = Map([layer_1, layer_2])
assert map.layers == [layer_1, layer_2]
def test_interactive_layer(self):
"""Map layer should indicate if the layer has interactivity configured"""
source_1 = Source(build_geodataframe([-10, 0], [-10, 0], ['pop', 'name']))
layer = Layer(
source_1,
popup_click=[
popup_element('pop'),
popup_element('name')
],
popup_hover=[
popup_element('pop', 'Pop')
]
)
map = Map(layer)
layer_def = map.layers[0].get_layer_def()
assert layer_def.get('interactivity') == [
{
'event': 'click',
'attrs': {
'name': 'v6ae999',
'title': 'name',
'format': None
}
}, {
'event': 'click',
'attrs': {
'name': 'v4f197c',
'title': 'pop',
'format': None
}
}, {
'event': 'hover',
'attrs': {
'name': 'v4f197c',
'title': 'Pop',
'format': None
}
}
]
def test_default_interactive_layer(self):
"""Map layer should get the default event if the interactivity is set to []"""
source_1 = Source(build_geodataframe([-10, 0], [-10, 0]))
layer = Layer(
source_1
)
map = Map(layer)
layer_def = map.layers[0].get_layer_def()
assert layer_def.get('interactivity') == []
class TestMapDevelopmentPath(object):
def test_default_carto_vl_path(self):
"""Map dev path should use default paths if none are given"""
map = Map()
map._repr_html_()
template = map._html_map.html
assert constants.CARTO_VL_URL in template
def test_custom_carto_vl_path(self):
"""Map dev path should use custom paths"""
_carto_vl_path = 'custom_carto_vl_path'
map = Map(_carto_vl_path=_carto_vl_path)
map._repr_html_()
template = map._html_map.html
assert _carto_vl_path + constants.CARTO_VL_DEV in template
def test_default_airship_path(self):
"""Map dev path should use default paths if none are given"""
map = Map()
map._repr_html_()
template = map._html_map.html
assert constants.AIRSHIP_COMPONENTS_URL in template
assert constants.AIRSHIP_BRIDGE_URL in template
assert constants.AIRSHIP_STYLES_URL in template
assert constants.AIRSHIP_MODULE_URL in template
assert constants.AIRSHIP_ICONS_URL in template
def test_custom_airship_path(self):
"""Map dev path should use custom paths"""
_airship_path = 'custom_airship_path'
map = Map(_airship_path=_airship_path)
map._repr_html_()
template = map._html_map.html
assert _airship_path + constants.AIRSHIP_COMPONENTS_DEV in template
assert _airship_path + constants.AIRSHIP_BRIDGE_DEV in template
assert _airship_path + constants.AIRSHIP_STYLES_DEV in template
assert _airship_path + constants.AIRSHIP_MODULE_DEV in template
assert _airship_path + constants.AIRSHIP_ICONS_DEV in template
class TestMapPublication(object):
def setup_method(self):
self.username = 'fake_username'
self.api_key = 'fake_api_key'
self.credentials = Credentials(username=self.username, api_key=self.api_key)
self.test_geojson = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
-3.1640625,
42.032974332441405
]
}
}
]
}
def assert_kuviz_dict(self, kuviz_dict, name, privacy):
assert kuviz_dict['id'] is not None
assert kuviz_dict['url'] is not None
assert kuviz_dict['name'] == name
assert kuviz_dict['privacy'] == privacy
def test_map_publish_remote_default(self, mocker):
setup_mocks(mocker)
mock_set_content = mocker.patch('cartoframes.viz.html.html_map.HTMLMap.set_content')
vmap = Map(Layer('fake_table', credentials=self.credentials))
name = 'cf_publish'
kuviz_dict = vmap.publish(name, None, self.credentials)
self.assert_kuviz_dict(kuviz_dict, name, 'public')
mock_set_content.assert_called_once_with(
_airship_path=None,
_carto_vl_path=None,
basemap='Positron',
bounds=[[-180, -90], [180, 90]],
camera=None,
description=None,
is_embed=True,
is_static=None,
layer_selector=False,
layers=[{
'credentials': {
'username': 'fake_username',
'api_key': 'fake_api_key',
'base_url': 'https://fake_username.carto.com'
},
'interactivity': [],
'legends': [],
'has_legend_list': True,
'encode_data': True,
'widgets': [],
'data': 'select * from fake_table',
'type': 'Query',
'title': None,
'options': {},
'map_index': 0,
'source': 'select * from fake_table',
'viz': '''color: hex("#EE4D5A")
strokeColor: opacity(#222,ramp(linear(zoom(),0,18),[0,0.6]))
strokeWidth: ramp(linear(zoom(),0,18),[0,1])
width: ramp(linear(zoom(),0,18),[2,10])
'''}],
show_info=False,
size=None,
theme=None,
title='cf_publish'
)
def test_map_publish_remote_params(self, mocker):
setup_mocks(mocker)
mock_set_content = mocker.patch('cartoframes.viz.html.html_map.HTMLMap.set_content')
vmap = Map(
Layer('fake_table', credentials=self.credentials),
basemap='yellow',
bounds={'west': 1, 'east': 2, 'north': 3, 'south': 4},
viewport={'zoom': 5, 'lat': 50, 'lng': -10},
is_static=True,
layer_selector=False,
theme='dark',
title='title',
description='description'
)
name = 'cf_publish'
kuviz_dict = vmap.publish(name, None, self.credentials, maps_api_key='1234567890')
self.assert_kuviz_dict(kuviz_dict, name, 'public')
mock_set_content.assert_called_once_with(
_airship_path=None,
_carto_vl_path=None,
basemap='yellow',
bounds=[[1, 2], [4, 3]],
camera={'bearing': None, 'center': [-10, 50], 'pitch': None, 'zoom': 5},
description='description',
is_embed=True,
is_static=True,
layer_selector=False,
layers=[{
'credentials': {
'username': 'fake_username',
'api_key': '1234567890',
'base_url': 'https://fake_username.carto.com'
},
'interactivity': [],
'legends': [],
'has_legend_list': True,
'encode_data': True,
'widgets': [],
'data': 'select * from fake_table',
'type': 'Query',
'title': None,
'options': {},
'map_index': 0,
'source': 'select * from fake_table',
'viz': '''color: hex("#EE4D5A")
strokeColor: opacity(#222,ramp(linear(zoom(),0,18),[0,0.6]))
strokeWidth: ramp(linear(zoom(),0,18),[0,1])
width: ramp(linear(zoom(),0,18),[2,10])
'''}],
show_info=False,
size=None,
theme='dark',
title='cf_publish'
)
def test_map_publish_with_password(self, mocker):
setup_mocks(mocker)
map = Map(Layer(Source('fake_table', credentials=self.credentials)))
name = 'cf_publish'
kuviz_dict = map.publish(name, '1234', credentials=self.credentials)
self.assert_kuviz_dict(kuviz_dict, name, 'password')
def test_map_publish_update_name(self, mocker):
setup_mocks(mocker)
map = Map(Layer(Source('fake_table', credentials=self.credentials)))
name = 'cf_publish'
map.publish(name, None, credentials=self.credentials)
new_name = 'cf_update'
kuviz_dict = map.update_publication(new_name, password=None)
self.assert_kuviz_dict(kuviz_dict, new_name, 'public')
def test_map_publish_update_password(self, mocker):
setup_mocks(mocker)
map = Map(Layer(Source('fake_table', credentials=self.credentials)))
name = 'cf_publish'
map.publish(name, None, credentials=self.credentials)
kuviz_dict = map.update_publication(name, '1234"')
self.assert_kuviz_dict(kuviz_dict, name, 'password')
| bsd-3-clause | -3,972,619,867,122,570,000 | 34.371345 | 97 | 0.523766 | false |