repo_name (stringlengths 5–92) | path (stringlengths 4–221) | copies (stringclasses, 19 values) | size (stringlengths 4–6) | content (stringlengths 766–896k) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 32–997) | alpha_frac (float64, 0.25–0.96) | autogenerated (bool, 1 class) | ratio (float64, 1.5–13.6) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
franciscomoura/data-science-and-bigdata | introducao-linguagens-estatisticas/mineracao-dados-python/codigo-fonte/code-06.py | 1 | 2285 |
# -*- coding: utf-8 -*-
# code-06.py
"""
Dependência: Matplotlib, NumPy
Executar no prompt: pip install matplotlib
Executar no prompt: pip install numpy
Executar no prompt: pip install scikit-learn
Executar no prompt: pip install scipy
*** Atenção:
Este arquivo deverá executado no mesmo diretório do arquivo iris.csv
"""
import numpy as np
# lê as primeiras 4 colunas
data = np.genfromtxt('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))
# lê a quinta coluna(última)
target_names = np.genfromtxt('iris.csv', delimiter=',', usecols=(4), dtype=str)
# converter o vetor de strings que contêm a classe em números inteiros
target = np.zeros(len(target_names), dtype=np.int)
target[target_names == 'setosa'] = 0
target[target_names == 'versicolor'] = 1
target[target_names == 'virginica'] = 2
# parte 1
from sklearn.cluster import KMeans
# inicialização correta para o cluster mostrar o mesmo resultado a cada execução
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=3425)
kmeans.fit(data)
# parte 2
clusters = kmeans.predict(data)
# parte 3
print("Completude e homogeneidade:")
from sklearn.metrics import completeness_score, homogeneity_score
print(completeness_score(target, clusters))
# Saída: 0.764986151449
print(homogeneity_score(target, clusters))
# Saída: 0.751485402199
# parte 4 - revisada
print("Gera o gráfico de dispersão")
import pylab as pl
pl.figure()
pl.subplot(211) # topo, figura com as classes reais
pl.plot(data[target == 0, 2], data[target == 0, 3], 'bo', alpha=.7) # 0 setosa
pl.plot(data[target == 1, 2], data[target == 1, 3], 'ro', alpha=.7) # 1 versicolor
pl.plot(data[target == 2, 2], data[target == 2, 3], 'go', alpha=.7) # 2 virginica
pl.xlabel('Comprimento da petala - cm')
pl.ylabel('Largura da petala - cm')
pl.axis([0.5, 7, 0, 3])
pl.subplot(212) # embaixo, figura com as classes atribuídas automaticamente
pl.plot(data[clusters == 0, 2], data[clusters == 0, 3], 'go', alpha=.7) # clusters 0 verginica
pl.plot(data[clusters == 1, 2], data[clusters == 1, 3], 'bo', alpha=.7) # clusters 1 setosa
pl.plot(data[clusters == 2, 2], data[clusters == 2, 3], 'ro', alpha=.7) # clusters 2 versicolor
pl.xlabel('Comprimento da petala - cm')
pl.ylabel('Largura da petala - cm')
pl.axis([0.5, 7, 0, 3])
pl.show()
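# --- Appended editorial sketch (not part of the original script): a hedged way
# --- to check the cluster-to-class correspondence assumed in the comments
# --- above, using only scikit-learn, which the header already lists.
from sklearn.metrics import confusion_matrix
# Rows are the true classes (setosa, versicolor, virginica); columns are the
# k-means cluster ids. The dominant count in each row shows which cluster id
# this particular run assigned to that class.
print(confusion_matrix(target, clusters))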
| apache-2.0 | -8,049,449,182,533,556,000 | 31.371429 | 96 | 0.700353 | false | 2.66275 | false | true | false |
mozaik-association/mozaik | odoo_addons/mozaik_sample_accounting/__openerp__.py | 1 | 1795 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_sample_accounting, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_sample_accounting is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_sample_accounting is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_sample_accounting.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'MOZAIK: Sample Accounting Localization',
'version': '8.0.1.0.0',
"author": "ACSONE SA/NV",
"maintainer": "ACSONE SA/NV",
"website": "http://www.acsone.eu",
'category': 'Political Association',
'depends': [
'mozaik_sample_customization',
'l10n_mozaik',
],
'description': """
MOZAIK Sample Accounting Localization
=====================================
""",
'images': [
],
'data': [
'../mozaik_account/tests/data/account_installer.xml',
'data/retrocession_validation.xml'
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'sequence': 150,
'auto_install': False,
'installable': True,
}
| agpl-3.0 | 5,638,437,834,411,976,000 | 31.053571 | 78 | 0.558217 | false | 3.763103 | false | false | false |
wengzhilai/family | iSoft/dal/QueryDal.py | 1 | 3612 |
from iSoft.entity.model import db, FaQuery
import math
import json
from iSoft.model.AppReturnDTO import AppReturnDTO
from iSoft.core.Fun import Fun
import re
class QueryDal(FaQuery):
def __init__(self):
pass
def query_findall(self, pageIndex, pageSize, criterion, where):
relist, is_succ = Fun.model_findall(FaQuery, pageIndex, pageSize,
criterion, where)
return relist, is_succ
def query_Save(self, in_dict, saveKeys):
jsonStr = re.sub(r'\r|\n| ', "", in_dict["QUERY_CFG_JSON"])
jsonStr = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", jsonStr)
jsonStr = re.sub(r',},', ",", jsonStr)
try:
x = json.loads(jsonStr)
except :
return None, AppReturnDTO(False, "列配置信息有误")
relist, is_succ = Fun.model_save(FaQuery, self, in_dict, saveKeys)
return relist, is_succ
def query_delete(self, key):
is_succ = Fun.model_delete(FaQuery, key)
return is_succ
def query_single(self, key):
relist, is_succ = Fun.model_single(FaQuery, key)
return relist, is_succ
def query_singleByCode(self, code):
db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
if db_ent is None:
return db_ent, AppReturnDTO(False, "代码不存在")
return db_ent, AppReturnDTO(True)
    # view the data
def query_queryByCode(self, code, pageIndex, pageSize, criterion, where):
sql, cfg, msg = self.query_GetSqlByCode(code, criterion, where)
if not msg.IsSuccess:
return sql, msg
relist = db.session.execute(sql)
num = relist.rowcount
relist.close()
        if pageIndex < 1:
            pageIndex = 1
        if pageSize < 1:
            pageSize = 10
        # maximum page number
        max_page = math.ceil(num / pageSize)  # round up
if pageIndex > max_page:
return None, AppReturnDTO(True, num)
pageSql = "{0} LIMIT {1},{2}".format(sql, (pageIndex - 1) * pageSize,
pageSize)
allData, msg = Fun.sql_to_dict(pageSql)
if msg.IsSuccess:
msg.Msg = num
# relist = relist.paginate(pageIndex, per_page=pageSize).items
return allData, msg
def query_GetSqlByCode(self, code, criterion, where):
"""
根据查询代码运算出查询的SQL
用于导出数据,并统一管理配置的SQL
返回SQL和配置
"""
db_ent = FaQuery.query.filter(FaQuery.CODE == code).first()
if db_ent is None:
return "", "", AppReturnDTO(False, "代码不存在")
sql = db_ent.QUERY_CONF
orderArr = []
for order in criterion:
orderArr.append("T.%(Key)s %(Value)s" % order)
whereArr = []
for search in where:
if search["Type"] == "like":
whereArr.append("T.%(Key)s like ('%%%(Value)s%%')" % search)
else:
whereArr.append("T.%(Key)s %(Type)s %(Value)s " % search)
sql = "SELECT * FROM ({0}) T{1}{2}".format(
sql,
" WHERE " + " AND ".join(whereArr) if len(whereArr) > 0 else "",
" ORDER BY " + " , ".join(orderArr) if len(orderArr) > 0 else "",
)
jsonStr = re.sub(r'\r|\n| ', "", db_ent.QUERY_CFG_JSON)
jsonStr = re.sub(r'"onComponentInitFunction"((.|\n)+?)},', "", jsonStr)
jsonStr = re.sub(r',},', ",", jsonStr)
return sql, json.loads(jsonStr), AppReturnDTO(True)
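# --- Appended editorial sketch (not part of the original module): a hedged
# --- illustration of the `criterion` and `where` shapes that
# --- query_GetSqlByCode appears to expect, inferred from the string
# --- formatting above. The field names and values are made up.
if __name__ == '__main__':
    example_criterion = [{"Key": "ID", "Value": "DESC"}]
    example_where = [
        {"Key": "NAME", "Type": "like", "Value": "abc"},
        {"Key": "STATUS", "Type": "=", "Value": 1},
    ]
    # Rendered the same way query_GetSqlByCode renders them:
    print(" ORDER BY " + " , ".join("T.%(Key)s %(Value)s" % o for o in example_criterion))
    print(" WHERE " + " AND ".join(
        "T.%(Key)s like ('%%%(Value)s%%')" % w if w["Type"] == "like"
        else "T.%(Key)s %(Type)s %(Value)s " % w
        for w in example_where))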
| bsd-3-clause | 4,524,635,679,267,799,000 | 32.557692 | 79 | 0.547564 | false | 3.283161 | false | false | false |
heromod/migrid | mig/mig-xsss/jobmanager.py | 1 | 4660 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# jobmanager - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
import time
import pickle
import os
G_WEEKDICTFILE = './data/weekdict.dat'
G_ACTIVELOGFILE = './log/active'
# Returns a tuple of the form: ("Year", "Month", "MonthDay", "WeekNr", "WeekDay", "Hour", "Minutes")
def getTimeTuppel():
year = time.strftime('%Y', time.localtime())
month = time.strftime('%m', time.localtime())
monthday = time.strftime('%d', time.localtime())
weeknr = time.strftime('%U', time.localtime())
weekday = time.strftime('%w', time.localtime())
hour = time.strftime('%H', time.localtime())
minutes = time.strftime('%M', time.localtime())
return (
year,
month,
monthday,
weeknr,
weekday,
hour,
minutes,
)
# Get the dictionary with estimated times
def getWeekDict():
input = open(G_WEEKDICTFILE, 'r')
weekDict = pickle.load(input)
input.close()
return weekDict
# Write the dictionary with estimated times
def writeWeekDict(param_WeekDict):
output = open(G_WEEKDICTFILE, 'w')
pickle.dump(param_WeekDict, output)
output.close()
# Log when the screensaver was activated,
# how long it was expected to be active and how long it actually was active.
# log syntax: YEAR MONTH MONTHDAY WEEKNR WEEKDAY HOURS MINUTES ACTIVE_MINUTES EXPECTED_ACTIVE_MINUTES
def writeActiveLog(param_tStartTime, param_iNumOfMinutes,
param_iExpectedTime):
logline = '' + param_tStartTime[0] + '\t' + param_tStartTime[1]\
+ '\t' + param_tStartTime[2] + '\t' + param_tStartTime[3]\
+ '\t' + param_tStartTime[4] + '\t' + param_tStartTime[5]\
+ '\t' + param_tStartTime[6] + '\t' + str(param_iNumOfMinutes)\
+ '\t' + str(param_iExpectedTime) + '\n'
output = open(G_ACTIVELOGFILE, 'a')
output.write(logline)
output.close()
# Returns the expected number of minutes screensaver will
# be active.
#
# param_tActivated[4]: Weekday
# param_tActivated[5]: Hour
def getExpectedActiveMinutes(param_tActivated):
weekDict = getWeekDict()
return weekDict[int(param_tActivated[4])][int(param_tActivated[5])]
# Get the time difference in minutes between the
# time tuples param_tStartTime and param_tEndTime
def getTimeDiff(param_tStartTime, param_tEndTime):
iNumOfWeeks = int(param_tEndTime[3]) - int(param_tStartTime[3])
iNumOfDays = int(param_tEndTime[4]) - int(param_tStartTime[4])
iNumOfHours = int(param_tEndTime[5]) - int(param_tStartTime[5])
iNumOfMinutes = int(param_tEndTime[6]) - int(param_tStartTime[6])
if iNumOfWeeks < 0:
iNumOfWeeks = 53 + iNumOfWeeks
if iNumOfDays < 0:
iNumOfWeeks = iNumOfWeeks - 1
iNumOfDays = 7 + iNumOfDays
if iNumOfHours < 0:
iNumOfDays = iNumOfDays - 1
iNumOfHours = 24 + iNumOfHours
if iNumOfMinutes < 0:
iNumOfHours = iNumOfHours - 1
iNumOfMinutes = 60 + iNumOfMinutes
iNumOfMinutes = ((iNumOfWeeks * 7 + iNumOfDays) * 24 + iNumOfHours)\
* 60 + iNumOfMinutes
return iNumOfMinutes
# Log the time the screensaver has been active
def logTimeActive(param_tActivated, param_tDeActivated,
param_fExpectedTimeFactor):
iNumOfMinutes = getTimeDiff(param_tActivated, param_tDeActivated)
weekDict = getWeekDict()
iLastExpectedTime = \
weekDict[int(param_tActivated[4])][int(param_tActivated[5])]
writeActiveLog(param_tActivated, iNumOfMinutes, iLastExpectedTime)
iThisExpectedTime = param_fExpectedTimeFactor * iNumOfMinutes + (1
- param_fExpectedTimeFactor) * iLastExpectedTime
weekDict[int(param_tActivated[4])][int(param_tActivated[5])] = \
iThisExpectedTime
writeWeekDict(weekDict)
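# --- Appended editorial sketch (not part of the original module): a hedged
# --- example of the tuple layout the functions above work with, using two
# --- hand-made tuples in the (year, month, monthday, weeknr, weekday, hour,
# --- minutes) order returned by getTimeTuppel.
if __name__ == '__main__':
    started = ('2009', '03', '02', '09', '1', '10', '15')
    stopped = ('2009', '03', '02', '09', '1', '11', '05')
    # Only the weeknr/weekday/hour/minutes fields are used by getTimeDiff.
    print(getTimeDiff(started, stopped))  # 50 (minutes)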
| gpl-2.0 | 4,537,091,169,991,236,600 | 27.242424 | 101 | 0.674893 | false | 3.265592 | false | false | false |
rjfarmer/mesaTest | main.py | 1 | 1816 |
#!/usr/bin/env python
#Note: this is both python2.7 and 3 compatible (other versions may work)
#Copyright (c) 2015, Robert Farmer [email protected]
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from __future__ import print_function,unicode_literals
import inputCfg
import config
import checkout as c
import log as l
import build as b
import test as t
cfg=config.config()
inp=inputCfg.inputProcess(cfg)
#test suites to run
cfg.test_names=['0.001M_tau1_atm','15M_dynamo']
#cfg.version_list=["cabecd188bb18003ada7c9470d005ac007d1be2c","597e4d662bb9f56cc9f1005d00210293072b5066"]
#List of versions
cfg.version_list=["7518","7525"]
#Results
cfg.log_file='/home/rob/Desktop/mesaTest.log'
#Somewhere to build MESA
cfg.temp_fold='/media/data/mesa/temp/'
cfg.mesasdk_root='/media/data/mesa/sdk/mesasdk-20141212'
cfg.omp_num_threads='8'
#Ignore for now
cfg.vcs_mode='svn'
cfg.vcs_git_base_folder='/media/data/mesa/mesa/dev/'
for cfg.version in cfg.version_list:
print("Running "+cfg.version)
cfg.setDefaults()
cfg.setPaths()
log=l.logger(cfg)
check=c.checkout(cfg)
gb=b.build(cfg)
tt=t.test(cfg)
log.writeLog(cfg)
cfg.cleanup()
print("Done "+cfg.version)
| gpl-2.0 | -4,912,147,528,989,903,000 | 28.290323 | 105 | 0.764317 | false | 3.052101 | false | false | false |
YACOWS/opps-polls | opps/polls/admin.py | 1 | 3227 |
# -*- coding: utf-8 -*-
from django.contrib import admin
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import (Poll, Choice, PollPost, PollBox,
PollBoxPolls, PollConfig)
from opps.core.admin import PublishableAdmin
from redactor.widgets import RedactorEditor
class PollAdminForm(forms.ModelForm):
class Meta:
model = Poll
widgets = {"headline": RedactorEditor()}
class ChoiceInline(admin.TabularInline):
model = Choice
fk_name = 'poll'
raw_id_fields = ['image']
action = None
extra = 1
fieldsets = [(None, {'fields': ('choice', 'image', 'order', 'votes')})]
class PollPostInline(admin.TabularInline):
model = PollPost
fk_name = 'poll'
raw_id_fields = ['post']
actions = None
extra = 1
classes = ('collapse',)
class PollAdmin(PublishableAdmin):
form = PollAdminForm
prepopulated_fields = {"slug": ["question"]}
list_display = ['question', 'channel', 'date_available', 'date_end', 'published']
list_filter = ["date_end", "date_available", "published", "channel"]
search_fields = ["question", "headline"]
exclude = ('user',)
raw_id_fields = ['main_image', 'channel']
inlines = [ChoiceInline, PollPostInline]
fieldsets = (
(_(u'Identification'), {
'fields': ('question', 'slug')}),
(_(u'Content'), {
'fields': ('headline', 'main_image', 'tags')}),
(_(u'Relationships'), {
'fields': ('channel',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', ('date_available', 'date_end'),
'order', 'multiple_choices', ('min_multiple_choices',
'max_multiple_choices'), 'display_choice_images',
'show_results')}),
)
class PollBoxPollsInline(admin.TabularInline):
model = PollBoxPolls
fk_name = 'pollbox'
raw_id_fields = ['poll']
actions = None
extra = 1
fieldsets = [(None, {
'classes': ('collapse',),
'fields': ('poll', 'order')})]
class PollBoxAdmin(PublishableAdmin):
prepopulated_fields = {"slug": ["name"]}
list_display = ['name', 'date_available', 'published']
list_filter = ['date_available', 'published']
inlines = [PollBoxPollsInline]
exclude = ('user',)
raw_id_fields = ['channel', 'article']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'name', 'slug')}),
(_(u'Relationships'), {
'fields': (('channel', 'article'),)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available')}),
)
class PollConfigAdmin(PublishableAdmin):
list_display = ['key', 'key_group', 'channel', 'date_insert',
'date_available', 'published']
list_filter = ["key", 'key_group', "channel", "published"]
search_fields = ["key", "key_group", "value"]
raw_id_fields = ['poll', 'channel', 'article']
exclude = ('user',)
admin.site.register(Poll, PollAdmin)
admin.site.register(PollBox, PollBoxAdmin)
admin.site.register(PollConfig, PollConfigAdmin)
| mit | 2,595,985,477,295,994,400 | 29.733333 | 85 | 0.579795 | false | 3.761072 | false | false | false |
softwaresaved/SSINetworkGraphics | Fellows/Python/map_fellows_network.py | 1 | 3548 |
import os
import ast
import requests, gspread
import numpy as np
import matplotlib.pyplot as plt
from oauth2client.client import SignedJwtAssertionCredentials
from mpl_toolkits.basemap import Basemap
#Google Authorisation section and getting a worksheet from Google Spreadsheet
def authenticate_google_docs():
f = file(os.path.join('SSI Network Graphics-3357cb9f30de.p12'), 'rb')
SIGNED_KEY = f.read()
f.close()
scope = ['https://spreadsheets.google.com/feeds', 'https://docs.google.com/feeds']
credentials = SignedJwtAssertionCredentials('[email protected]', SIGNED_KEY, scope)
data = {
'refresh_token' : '1/NM56uCG7uFT6VVAAYX3B5TbcMk43wn1xE8Wr-7dsb7lIgOrJDtdun6zK6XiATCKT',
'client_id' : '898367260-pmm78rtfct8af7e0utis686bv78eqmqs.apps.googleusercontent.com',
'client_secret' : 'Cby-rjWDg_wWTSQw_8DDKb3v',
'grant_type' : 'refresh_token',
}
r = requests.post('https://accounts.google.com/o/oauth2/token', data = data)
credentials.access_token = ast.literal_eval(r.text)['access_token']
gc = gspread.authorize(credentials)
return gc
gc_ret = authenticate_google_docs()
sh = gc_ret.open_by_url('https://docs.google.com/spreadsheets/d/13_ZIdeF7oS0xwp_nhGRoVTv7PaXvfLMwVxvgt_hNOkg/edit#gid=383409775')
worksheet_list = sh.worksheets() # Get list of worksheets
#Print the names of first and second worksheets
print "First 2 worksheets of Fellows data Google spreadsheet are:", worksheet_list[0], worksheet_list[1]
# Get all values from the first, seventh and eight columns of Sample datset
values_list_names = worksheet_list[0].col_values(1)
destination_lat_values = worksheet_list[0].col_values(7)
destination_lon_values = worksheet_list[0].col_values(8)
print "Names of SSI fellows are:",values_list_names
print "Destination Latitude values are:",destination_lat_values
print "Destination Longitude values are:", destination_lon_values
# get all values from first, fourth and fifth columns of Home Institutions worksheet
fellows_list_names = worksheet_list[1].col_values(1)
home_lat_values = worksheet_list[1].col_values(4)
home_lon_values = worksheet_list[1].col_values(5)
print "Names of SSI fellows are:",fellows_list_names
print "Home Institution Latitude values are:",home_lat_values
print "Home Institution Longitude values are:", home_lon_values
# create new figure, axes instances.
fig=plt.figure()
ax=fig.add_axes([0.1,0.1,0.8,0.8])
# setup mercator map projection.
m = Basemap(llcrnrlon=-150.,llcrnrlat=-40.,urcrnrlon=150.,urcrnrlat=80.,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',projection='merc',\
lat_0=40.,lon_0=-20.,lat_ts=20.)
#Plotting fellows routes on map
print "No. of unique fellows are:", (len(worksheet_list[1].col_values(1))-1)
colcode = ['b','r','g','y','m','c','k','w']
i = 1
j = 1
print "No. of destination entries in the Sample datasheet:", (len(worksheet_list[0].col_values(7))-1)
while i < len(worksheet_list[1].col_values(1)):
while j < len(worksheet_list[0].col_values(7)):
m.drawgreatcircle(float(home_lon_values[i]),float(home_lat_values[i]),float(destination_lon_values[j]),float(destination_lat_values[j]),linewidth=2,color=colcode[i-1])
j = j + 1
i = i + 1
#label=fellows_list_names[i]
m.drawcoastlines()
m.fillcontinents()
# draw parallels
m.drawparallels(np.arange(10,90,20),labels=[1,1,0,1])
# draw meridians
m.drawmeridians(np.arange(-180,180,30),labels=[1,1,0,1])
ax.set_title('SSI Fellows Impact')
plt.legend()
plt.show()
| bsd-3-clause | 2,741,477,577,881,294,300 | 35.958333 | 176 | 0.721251 | false | 2.840673 | false | false | false |
klahnakoski/JsonSchemaToMarkdown | vendor/mo_hg/hg_mozilla_org.py | 1 | 28441 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import re
from collections import Mapping
from copy import copy
import mo_threads
from mo_dots import set_default, Null, coalesce, unwraplist, listwrap, wrap, Data
from mo_future import text_type, binary_type
from mo_hg.parse import diff_to_json, diff_to_moves
from mo_hg.repos.changesets import Changeset
from mo_hg.repos.pushs import Push
from mo_hg.repos.revisions import Revision, revision_schema
from mo_json import json2value
from mo_kwargs import override
from mo_logs import Log, strings, machine_metadata
from mo_logs.exceptions import Explanation, assert_no_exception, Except, suppress_exception
from mo_logs.strings import expand_template
from mo_math.randoms import Random
from mo_threads import Thread, Lock, Queue, THREAD_STOP, Till
from mo_times.dates import Date
from mo_times.durations import SECOND, Duration, HOUR, MINUTE, DAY
from pyLibrary.env import http, elasticsearch
from pyLibrary.meta import cache
_hg_branches = None
_OLD_BRANCH = None
def _count(values):
return len(list(values))
def _late_imports():
global _hg_branches
global _OLD_BRANCH
from mo_hg import hg_branches as _hg_branches
from mo_hg.hg_branches import OLD_BRANCH as _OLD_BRANCH
_ = _hg_branches
_ = _OLD_BRANCH
DEFAULT_LOCALE = "en-US"
DEBUG = False
DAEMON_DEBUG = False
DAEMON_HG_INTERVAL = 30 * SECOND # HOW LONG TO WAIT BETWEEN HG REQUESTS (MAX)
DAEMON_WAIT_AFTER_TIMEOUT = 10 * MINUTE # IF WE SEE A TIMEOUT, THEN WAIT
WAIT_AFTER_NODE_FAILURE = 10 * MINUTE # IF WE SEE A NODE FAILURE OR CLUSTER FAILURE, THEN WAIT
WAIT_AFTER_CACHE_MISS = 30 * SECOND # HOW LONG TO WAIT BETWEEN CACHE MISSES
DAEMON_DO_NO_SCAN = ["try"] # SOME BRANCHES ARE NOT WORTH SCANNING
DAEMON_QUEUE_SIZE = 2 ** 15
DAEMON_RECENT_HG_PULL = 2 * SECOND # DETERMINE IF WE GOT DATA FROM HG (RECENT), OR ES (OLDER)
MAX_TODO_AGE = DAY # THE DAEMON WILL NEVER STOP SCANNING; DO NOT ADD OLD REVISIONS TO THE todo QUEUE
MIN_ETL_AGE = Date("03may2018").unix # ARTIFACTS OLDER THAN THIS IN ES ARE REPLACED
UNKNOWN_PUSH = "Unknown push {{revision}}"
MAX_DIFF_SIZE = 1000
DIFF_URL = "{{location}}/raw-rev/{{rev}}"
FILE_URL = "{{location}}/raw-file/{{rev}}{{path}}"
last_called_url = {}
class HgMozillaOrg(object):
"""
USE hg.mozilla.org FOR REPO INFORMATION
USE ES AS A FASTER CACHE FOR THE SAME
"""
@override
def __init__(
self,
hg=None, # CONNECT TO hg
repo=None, # CONNECTION INFO FOR ES CACHE
branches=None, # CONNECTION INFO FOR ES CACHE
use_cache=False, # True IF WE WILL USE THE ES FOR DOWNLOADING BRANCHES
timeout=30 * SECOND,
kwargs=None
):
if not _hg_branches:
_late_imports()
self.es_locker = Lock()
self.todo = mo_threads.Queue("todo for hg daemon", max=DAEMON_QUEUE_SIZE)
self.settings = kwargs
self.timeout = Duration(timeout)
# VERIFY CONNECTIVITY
with Explanation("Test connect with hg"):
response = http.head(self.settings.hg.url)
if branches == None:
self.branches = _hg_branches.get_branches(kwargs=kwargs)
self.es = None
return
self.last_cache_miss = Date.now()
set_default(repo, {"schema": revision_schema})
self.es = elasticsearch.Cluster(kwargs=repo).get_or_create_index(kwargs=repo)
def setup_es(please_stop):
with suppress_exception:
self.es.add_alias()
with suppress_exception:
self.es.set_refresh_interval(seconds=1)
Thread.run("setup_es", setup_es)
self.branches = _hg_branches.get_branches(kwargs=kwargs)
self.timeout = timeout
Thread.run("hg daemon", self._daemon)
def _daemon(self, please_stop):
while not please_stop:
with Explanation("looking for work"):
try:
branch, revisions = self.todo.pop(till=please_stop)
except Exception as e:
if please_stop:
break
else:
raise e
if branch.name in DAEMON_DO_NO_SCAN:
continue
revisions = set(revisions)
                # FIND THE REVISIONS ON THIS BRANCH
for r in list(revisions):
try:
rev = self.get_revision(Revision(branch=branch, changeset={"id": r}))
if DAEMON_DEBUG:
Log.note("found revision with push date {{date|datetime}}", date=rev.push.date)
revisions.discard(r)
if rev.etl.timestamp > Date.now()-DAEMON_RECENT_HG_PULL:
# SOME PUSHES ARE BIG, RUNNING THE RISK OTHER MACHINES ARE
# ALSO INTERESTED AND PERFORMING THE SAME SCAN. THIS DELAY
# WILL HAVE SMALL EFFECT ON THE MAJORITY OF SMALL PUSHES
# https://bugzilla.mozilla.org/show_bug.cgi?id=1417720
Till(seconds=Random.float(DAEMON_HG_INTERVAL).seconds*2).wait()
except Exception as e:
Log.warning(
"Scanning {{branch}} {{revision|left(12)}}",
branch=branch.name,
revision=r,
cause=e
)
if "Read timed out" in e:
Till(seconds=DAEMON_WAIT_AFTER_TIMEOUT.seconds).wait()
# FIND ANY BRANCH THAT MAY HAVE THIS REVISION
for r in list(revisions):
self._find_revision(r)
@cache(duration=HOUR, lock=True)
def get_revision(self, revision, locale=None, get_diff=False, get_moves=True):
"""
EXPECTING INCOMPLETE revision OBJECT
RETURNS revision
"""
rev = revision.changeset.id
if not rev:
return Null
elif rev == "None":
return Null
elif revision.branch.name == None:
return Null
locale = coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)
output = self._get_from_elasticsearch(revision, locale=locale, get_diff=get_diff)
if output:
if not get_diff: # DIFF IS BIG, DO NOT KEEP IT IF NOT NEEDED
output.changeset.diff = None
if not get_moves:
output.changeset.moves = None
DEBUG and Log.note("Got hg ({{branch}}, {{locale}}, {{revision}}) from ES", branch=output.branch.name, locale=locale, revision=output.changeset.id)
if output.push.date >= Date.now()-MAX_TODO_AGE:
self.todo.add((output.branch, listwrap(output.parents)))
self.todo.add((output.branch, listwrap(output.children)))
if output.push.date:
return output
# RATE LIMIT CALLS TO HG (CACHE MISSES)
next_cache_miss = self.last_cache_miss + (Random.float(WAIT_AFTER_CACHE_MISS.seconds * 2) * SECOND)
self.last_cache_miss = Date.now()
if next_cache_miss > self.last_cache_miss:
Log.note("delaying next hg call for {{seconds|round(decimal=1)}}", seconds=next_cache_miss - self.last_cache_miss)
Till(till=next_cache_miss.unix).wait()
found_revision = copy(revision)
if isinstance(found_revision.branch, (text_type, binary_type)):
lower_name = found_revision.branch.lower()
else:
lower_name = found_revision.branch.name.lower()
if not lower_name:
Log.error("Defective revision? {{rev|json}}", rev=found_revision.branch)
b = found_revision.branch = self.branches[(lower_name, locale)]
if not b:
b = found_revision.branch = self.branches[(lower_name, DEFAULT_LOCALE)]
if not b:
Log.warning("can not find branch ({{branch}}, {{locale}})", branch=lower_name, locale=locale)
return Null
if Date.now() - Date(b.etl.timestamp) > _OLD_BRANCH:
self.branches = _hg_branches.get_branches(kwargs=self.settings)
push = self._get_push(found_revision.branch, found_revision.changeset.id)
url1 = found_revision.branch.url.rstrip("/") + "/json-info?node=" + found_revision.changeset.id[0:12]
url2 = found_revision.branch.url.rstrip("/") + "/json-rev/" + found_revision.changeset.id[0:12]
with Explanation("get revision from {{url}}", url=url1, debug=DEBUG):
raw_rev2 = Null
try:
raw_rev1 = self._get_raw_json_info(url1, found_revision.branch)
raw_rev2 = self._get_raw_json_rev(url2, found_revision.branch)
except Exception as e:
if "Hg denies it exists" in e:
raw_rev1 = Data(node=revision.changeset.id)
else:
raise e
output = self._normalize_revision(set_default(raw_rev1, raw_rev2), found_revision, push, get_diff, get_moves)
if output.push.date >= Date.now()-MAX_TODO_AGE:
self.todo.add((output.branch, listwrap(output.parents)))
self.todo.add((output.branch, listwrap(output.children)))
if not get_diff: # DIFF IS BIG, DO NOT KEEP IT IF NOT NEEDED
output.changeset.diff = None
if not get_moves:
output.changeset.moves = None
return output
def _get_from_elasticsearch(self, revision, locale=None, get_diff=False, get_moves=True):
rev = revision.changeset.id
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"term": {"changeset.id12": rev[0:12]}},
{"term": {"branch.name": revision.branch.name}},
{"term": {"branch.locale": coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}
}},
"size": 20
}
else:
query = {
"query": {"bool": {"must": [
{"term": {"changeset.id12": rev[0:12]}},
{"term": {"branch.name": revision.branch.name}},
{"term": {"branch.locale": coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}},
"size": 20
}
for attempt in range(3):
try:
with self.es_locker:
docs = self.es.search(query).hits.hits
if len(docs) == 0:
return None
best = docs[0]._source
if len(docs) > 1:
for d in docs:
if d._id.endswith(d._source.branch.locale):
best = d._source
Log.warning("expecting no more than one document")
return best
except Exception as e:
e = Except.wrap(e)
if "EsRejectedExecutionException[rejected execution (queue capacity" in e:
(Till(seconds=Random.int(30))).wait()
continue
else:
Log.warning("Bad ES call, waiting for {{num}} seconds", num=WAIT_AFTER_NODE_FAILURE.seconds, cause=e)
Till(seconds=WAIT_AFTER_NODE_FAILURE.seconds).wait()
continue
Log.warning("ES did not deliver, fall back to HG")
return None
@cache(duration=HOUR, lock=True)
def _get_raw_json_info(self, url, branch):
raw_revs = self._get_and_retry(url, branch)
if "(not in 'served' subset)" in raw_revs:
Log.error("Tried {{url}}. Hg denies it exists.", url=url)
if isinstance(raw_revs, text_type) and raw_revs.startswith("unknown revision '"):
Log.error("Tried {{url}}. Hg denies it exists.", url=url)
if len(raw_revs) != 1:
Log.error("do not know what to do")
return raw_revs.values()[0]
@cache(duration=HOUR, lock=True)
def _get_raw_json_rev(self, url, branch):
raw_rev = self._get_and_retry(url, branch)
return raw_rev
@cache(duration=HOUR, lock=True)
def _get_push(self, branch, changeset_id):
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"term": {"branch.name": branch.name}},
{"prefix": {"changeset.id": changeset_id[0:12]}}
]}
}},
"size": 1
}
else:
query = {
"query": {"bool": {"must": [
{"term": {"branch.name": branch.name}},
{"prefix": {"changeset.id": changeset_id[0:12]}}
]}},
"size": 1
}
try:
# ALWAYS TRY ES FIRST
with self.es_locker:
response = self.es.search(query)
json_push = response.hits.hits[0]._source.push
if json_push:
return json_push
except Exception:
pass
url = branch.url.rstrip("/") + "/json-pushes?full=1&changeset=" + changeset_id
with Explanation("Pulling pushlog from {{url}}", url=url, debug=DEBUG):
Log.note(
"Reading pushlog from {{url}}",
url=url,
changeset=changeset_id
)
data = self._get_and_retry(url, branch)
# QUEUE UP THE OTHER CHANGESETS IN THE PUSH
self.todo.add((branch, [c.node for cs in data.values().changesets for c in cs]))
pushes = [
Push(id=int(index), date=_push.date, user=_push.user)
for index, _push in data.items()
]
if len(pushes) == 0:
return Null
elif len(pushes) == 1:
return pushes[0]
else:
Log.error("do not know what to do")
def _normalize_revision(self, r, found_revision, push, get_diff, get_moves):
new_names = set(r.keys()) - KNOWN_TAGS
if new_names and not r.tags:
Log.warning(
"hg is returning new property names {{names|quote}} for {{changeset}} from {{url}}",
names=new_names,
changeset=r.node,
url=found_revision.branch.url
)
changeset = Changeset(
id=r.node,
id12=r.node[0:12],
author=r.user,
description=strings.limit(coalesce(r.description, r.desc), 2000),
date=parse_hg_date(r.date),
files=r.files,
backedoutby=r.backedoutby if r.backedoutby else None,
bug=self._extract_bug_id(r.description)
)
rev = Revision(
branch=found_revision.branch,
index=r.rev,
changeset=changeset,
parents=unwraplist(list(set(r.parents))),
children=unwraplist(list(set(r.children))),
push=push,
phase=r.phase,
bookmarks=unwraplist(r.bookmarks),
landingsystem=r.landingsystem,
etl={"timestamp": Date.now().unix, "machine": machine_metadata}
)
r.pushuser = None
r.pushdate = None
r.pushid = None
r.node = None
r.user = None
r.desc = None
r.description = None
r.date = None
r.files = None
r.backedoutby = None
r.parents = None
r.children = None
r.bookmarks = None
r.landingsystem = None
set_default(rev, r)
# ADD THE DIFF
if get_diff:
rev.changeset.diff = self._get_json_diff_from_hg(rev)
if get_moves:
rev.changeset.moves = self._get_moves_from_hg(rev)
try:
_id = coalesce(rev.changeset.id12, "") + "-" + rev.branch.name + "-" + coalesce(rev.branch.locale, DEFAULT_LOCALE)
with self.es_locker:
self.es.add({"id": _id, "value": rev})
except Exception as e:
e = Except.wrap(e)
Log.warning("Did not save to ES, waiting {{duration}}", duration=WAIT_AFTER_NODE_FAILURE, cause=e)
Till(seconds=WAIT_AFTER_NODE_FAILURE.seconds).wait()
if "FORBIDDEN/12/index read-only" in e:
pass # KNOWN FAILURE MODE
return rev
def _get_and_retry(self, url, branch, **kwargs):
"""
requests 2.5.0 HTTPS IS A LITTLE UNSTABLE
"""
kwargs = set_default(kwargs, {"timeout": self.timeout.seconds})
try:
output = _get_url(url, branch, **kwargs)
return output
except Exception as e:
if UNKNOWN_PUSH in e:
Log.error("Tried {{url}} and failed", {"url": url}, cause=e)
try:
(Till(seconds=5)).wait()
return _get_url(url.replace("https://", "http://"), branch, **kwargs)
except Exception as f:
pass
path = url.split("/")
if path[3] == "l10n-central":
# FROM https://hg.mozilla.org/l10n-central/tr/json-pushes?full=1&changeset=a6eeb28458fd
# TO https://hg.mozilla.org/mozilla-central/json-pushes?full=1&changeset=a6eeb28458fd
path = path[0:3] + ["mozilla-central"] + path[5:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 5 and path[5] == "mozilla-aurora":
# FROM https://hg.mozilla.org/releases/l10n/mozilla-aurora/pt-PT/json-pushes?full=1&changeset=b44a8c68fc60
# TO https://hg.mozilla.org/releases/mozilla-aurora/json-pushes?full=1&changeset=b44a8c68fc60
path = path[0:4] + ["mozilla-aurora"] + path[7:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 5 and path[5] == "mozilla-beta":
# FROM https://hg.mozilla.org/releases/l10n/mozilla-beta/lt/json-pushes?full=1&changeset=03fbf7556c94
# TO https://hg.mozilla.org/releases/mozilla-beta/json-pushes?full=1&changeset=b44a8c68fc60
path = path[0:4] + ["mozilla-beta"] + path[7:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 7 and path[5] == "mozilla-release":
# FROM https://hg.mozilla.org/releases/l10n/mozilla-release/en-GB/json-pushes?full=1&changeset=57f513ab03308adc7aa02cc2ea8d73fe56ae644b
# TO https://hg.mozilla.org/releases/mozilla-release/json-pushes?full=1&changeset=57f513ab03308adc7aa02cc2ea8d73fe56ae644b
path = path[0:4] + ["mozilla-release"] + path[7:]
return self._get_and_retry("/".join(path), branch, **kwargs)
elif len(path) > 5 and path[4] == "autoland":
# FROM https://hg.mozilla.org/build/autoland/json-pushes?full=1&changeset=3ccccf8e5036179a3178437cabc154b5e04b333d
# TO https://hg.mozilla.org/integration/autoland/json-pushes?full=1&changeset=3ccccf8e5036179a3178437cabc154b5e04b333d
path = path[0:3] + ["try"] + path[5:]
return self._get_and_retry("/".join(path), branch, **kwargs)
Log.error("Tried {{url}} twice. Both failed.", {"url": url}, cause=[e, f])
@cache(duration=HOUR, lock=True)
def _find_revision(self, revision):
please_stop = False
locker = Lock()
output = []
queue = Queue("branches", max=2000)
queue.extend(b for b in self.branches if b.locale == DEFAULT_LOCALE and b.name in ["try", "mozilla-inbound", "autoland"])
queue.add(THREAD_STOP)
problems = []
def _find(please_stop):
for b in queue:
if please_stop:
return
try:
url = b.url + "json-info?node=" + revision
rev = self.get_revision(Revision(branch=b, changeset={"id": revision}))
with locker:
output.append(rev)
Log.note("Revision found at {{url}}", url=url)
except Exception as f:
problems.append(f)
threads = []
for i in range(3):
threads.append(Thread.run("find changeset " + text_type(i), _find, please_stop=please_stop))
for t in threads:
with assert_no_exception:
t.join()
return output
def _extract_bug_id(self, description):
"""
LOOK INTO description to FIND bug_id
"""
if description == None:
return None
match = re.findall(r'[Bb](?:ug)?\s*([0-9]{5,7})', description)
if match:
return int(match[0])
return None
def _get_json_diff_from_hg(self, revision):
"""
:param revision: INCOMPLETE REVISION OBJECT
:return:
"""
@cache(duration=MINUTE, lock=True)
def inner(changeset_id):
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}
}},
"size": 1
}
else:
query = {
"query": {"bool": {"must": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}},
"size": 1
}
try:
# ALWAYS TRY ES FIRST
with self.es_locker:
response = self.es.search(query)
json_diff = response.hits.hits[0]._source.changeset.diff
if json_diff:
return json_diff
except Exception as e:
pass
url = expand_template(DIFF_URL, {"location": revision.branch.url, "rev": changeset_id})
DEBUG and Log.note("get unified diff from {{url}}", url=url)
try:
response = http.get(url)
diff = response.content.decode("utf8")
json_diff = diff_to_json(diff)
num_changes = _count(c for f in json_diff for c in f.changes)
if json_diff:
if revision.changeset.description.startswith("merge "):
return None # IGNORE THE MERGE CHANGESETS
elif num_changes < MAX_DIFF_SIZE:
return json_diff
else:
Log.warning("Revision at {{url}} has a diff with {{num}} changes, ignored", url=url, num=num_changes)
for file in json_diff:
file.changes = None
return json_diff
except Exception as e:
Log.warning("could not get unified diff from {{url}}", url=url, cause=e)
return inner(revision.changeset.id)
def _get_moves_from_hg(self, revision):
"""
:param revision: INCOMPLETE REVISION OBJECT
:return:
"""
@cache(duration=MINUTE, lock=True)
def inner(changeset_id):
if self.es.cluster.version.startswith("1.7."):
query = {
"query": {"filtered": {
"query": {"match_all": {}},
"filter": {"and": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}
}},
"size": 1
}
else:
query = {
"query": {"bool": {"must": [
{"prefix": {"changeset.id": changeset_id}},
{"range": {"etl.timestamp": {"gt": MIN_ETL_AGE}}}
]}},
"size": 1
}
try:
# ALWAYS TRY ES FIRST
with self.es_locker:
response = self.es.search(query)
moves = response.hits.hits[0]._source.changeset.moves
if moves:
return moves
except Exception as e:
pass
url = expand_template(DIFF_URL, {"location": revision.branch.url, "rev": changeset_id})
DEBUG and Log.note("get unified diff from {{url}}", url=url)
try:
moves = http.get(url).content.decode('latin1') # THE ENCODING DOES NOT MATTER BECAUSE WE ONLY USE THE '+', '-' PREFIXES IN THE DIFF
return diff_to_moves(text_type(moves))
except Exception as e:
Log.warning("could not get unified diff from {{url}}", url=url, cause=e)
return inner(revision.changeset.id)
def _get_source_code_from_hg(self, revision, file_path):
response = http.get(expand_template(FILE_URL, {"location": revision.branch.url, "rev": revision.changeset.id, "path": file_path}))
return response.content.decode("utf8", "replace")
def _trim(url):
return url.split("/json-pushes?")[0].split("/json-info?")[0].split("/json-rev/")[0]
def _get_url(url, branch, **kwargs):
with Explanation("get push from {{url}}", url=url, debug=DEBUG):
response = http.get(url, **kwargs)
data = json2value(response.content.decode("utf8"))
if isinstance(data, (text_type, str)) and data.startswith("unknown revision"):
Log.error(UNKNOWN_PUSH, revision=strings.between(data, "'", "'"))
branch.url = _trim(url) # RECORD THIS SUCCESS IN THE BRANCH
return data
def parse_hg_date(date):
if isinstance(date, text_type):
return Date(date)
elif isinstance(date, list):
# FIRST IN TUPLE (timestamp, time_zone) TUPLE, WHERE timestamp IS GMT
return Date(date[0])
else:
Log.error("Can not deal with date like {{date|json}}", date=date)
def minimize_repo(repo):
"""
RETURN A MINIMAL VERSION OF THIS CHANGESET
"""
if repo == None:
return Null
output = wrap(_copy_but(repo, _exclude_from_repo))
output.changeset.description = strings.limit(output.changeset.description, 1000)
return output
_exclude_from_repo = Data()
for k in [
"changeset.files",
"changeset.diff",
"changeset.moves",
"etl",
"branch.last_used",
"branch.description",
"branch.etl",
"branch.parent_name",
"children",
"parents",
"phase",
"bookmarks",
"tags"
]:
_exclude_from_repo[k] = True
_exclude_from_repo = _exclude_from_repo
def _copy_but(value, exclude):
output = {}
for k, v in value.items():
e = exclude.get(k, {})
if e!=True:
if isinstance(v, Mapping):
v2 = _copy_but(v, e)
if v2 != None:
output[k] = v2
elif v != None:
output[k] = v
return output if output else None
KNOWN_TAGS = {
"rev",
"node",
"user",
"description",
"desc",
"date",
"files",
"backedoutby",
"parents",
"children",
"branch",
"tags",
"pushuser",
"pushdate",
"pushid",
"phase",
"bookmarks",
"landingsystem"
}
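# --- Appended editorial sketch (not part of the original module): a hedged,
# --- minimal illustration of how the class above appears to be used, based
# --- only on the code in this file. The connection settings are placeholders
# --- (real callers supply hg/ES configuration), so it is left commented out.
# hg = HgMozillaOrg(hg={"url": "https://hg.mozilla.org"}, use_cache=False)
# rev = hg.get_revision(Revision(
#     branch={"name": "mozilla-central", "locale": DEFAULT_LOCALE},
#     changeset={"id": "a6eeb28458fd"},
# ))
# print(minimize_repo(rev))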
| mpl-2.0 | -8,912,115,382,508,336,000 | 37.800819 | 159 | 0.53451 | false | 3.828891 | false | false | false |
matthewbentley/teenlink | callhandler.py | 1 | 5899 |
import webapp2
import jinja2
import os
from twilio import twiml
from twilio.rest import TwilioRestClient
from twilio.util import RequestValidator
from google.appengine.ext import ndb
import logging
import json
from private import account_sid, auth_token
from common import make_template
class Call(ndb.Model):
"""Model for the calls db"""
calls = ndb.StringProperty(indexed=True)
class User(ndb.Model):
"""Model for the user db"""
fullname = ndb.StringProperty(indexed=True)
phone_number = ndb.StringProperty(indexed=True)
phone_worker = ndb.BooleanProperty()
can_text = ndb.BooleanProperty()
PAB = ndb.BooleanProperty()
class Group(ndb.Model):
"""Model for groups db"""
groupname = ndb.StringProperty(indexed=True)
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
from common import add_header
class StartHere(webapp2.RequestHandler):
"""XML response for the initial recording call"""
def get(self):
validator = RequestValidator(auth_token)
url = self.request.url
params = {}
try:
twilio_signature = self.request.headers["X-Twilio-Signature"]
except:
twilio_signature = ""
r = twiml.Response()
if validator.validate(url, params, twilio_signature):
logging.debug(self.request.get('to_call'))
r.record(action="/handle-recording?to_call=%s" % (self.request.get('to_call')), method="GET")
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
class HandleRecording(webapp2.RedirectHandler):
"""Makes calls to everyone who needs to get the call, points twilio to make-calls"""
def get(self):
client = TwilioRestClient(account_sid, auth_token)
validator = RequestValidator(auth_token)
url = self.request.url
params = {}
try:
twilio_signature = self.request.headers["X-Twilio-Signature"]
logging.debug(twilio_signature)
except:
twilio_signature = ""
if validator.validate(url, params, twilio_signature):
logging.debug("Validated")
call_id = self.request.get('to_call')
print call_id
infos = Call.query(Call.key==ndb.Key(Call, int(call_id))).fetch()
print infos
for info in infos:
print info
for i in json.loads(info.calls):
print i
call_out = client.calls.create(to=i, from_="2065576875",
url="https://teen-link.appspot.com/make-calls?RecordingUrl=" + self.request.get("RecordingUrl"),
method="GET",
if_machine="Continue")
print call_out
else:
self.response.headers['Content-Type'] = 'text/html'
self.response.write("Please don't try to hack me.")
class MakeCalls(webapp2.RedirectHandler):
"""Returns XML to play the recording"""
def get(self):
validator = RequestValidator(auth_token)
url = self.request.url
params = {}
try:
twilio_signature = self.request.headers["X-Twilio-Signature"]
except:
twilio_signature = ""
if validator.validate(url, params, twilio_signature):
r = twiml.Response()
r.play(self.request.get("RecordingUrl"))
self.response.headers['Content-Type'] = 'text/xml'
self.response.write(str(r))
else:
self.response.headers['Content-Type'] = 'text/html'
self.response.write("Please don't try to hack me.")
class MainPage(webapp2.RequestHandler):
"""Main landing page with links to different pages"""
def get(self):
template_values = make_template(self)
pages={"Add User":"/users/manage", "List and Edit Users":"/users/list", "Make New Call":"/action/newcall"}
template_values['pages'] = pages
template = JINJA_ENVIRONMENT.get_template('home.jinja')
self.response.write(template.render(template_values))
#class Test(webapp2.RequestHandler):
# def get(self):
## if self.request.get('group'):
## group=Group(key_name=self.request.get('group'))
## group.groupname = self.request.get('group')
## group.put()
# if self.request.get('user') and self.request.get('group'):
## print self.request.get('user')
## print self.request.get('group')
# info = ndb.gql("SELECT * FROM User WHERE fullname=:1", self.request.get('user'))
# group=Group(parent=info.next().key())
# group.groupname = self.request.get('group')
# group.put()
## info = ndb.GqlQuery("SELECT * FROM Group WHERE groupname=:1", self.request.get('group'))
## print info.next().parent().fullname
## print info.next()
## info = ndb.GqlQuery("SELECT * FROM User WHERE fullname=:1", self.request.get('user'))
## key = info.next()
## infog = Group.all().next().parent()
## info = User.all().filter("fullname ==", self.request.get('user'))
## info2 = info
## print infog.fullname
## print dir(infog.ancestor(key).next())
app = webapp2.WSGIApplication([
('/twiml', StartHere),
('/handle-recording', HandleRecording),
('/make-calls', MakeCalls),
('/index', MainPage),
('/', MainPage)],
debug=True)
# Test comment please ignore
| mit | 6,147,632,002,842,502,000 | 39.129252 | 149 | 0.577556 | false | 4.04318 | false | false | false |
google/nerfactor | third_party/xiuminglib/xiuminglib/io/objmtl.py | 1 | 21250 |
# pylint: disable=len-as-condition
from os.path import basename, dirname, join
from shutil import copy
import numpy as np
from .. import os as xm_os
from ..imprt import preset_import
from ..log import get_logger
logger = get_logger()
class Obj:
"""Wavefront .obj Object.
Face, vertex, or other indices here all start from 1.
Attributes:
o (str)
v (numpy.ndarray)
f (list)
vn (numpy.ndarray)
fn (list)
vt (numpy.ndarray)
ft (list)
s (bool)
mtllib (str)
usemtl (str)
diffuse_map_path (str)
diffuse_map_scale (float)
"""
def __init__(
self, o=None, v=None, f=None, vn=None, fn=None, vt=None, ft=None,
s=False, mtllib=None, usemtl=None, diffuse_map_path=None,
diffuse_map_scale=1):
"""
Args:
o (str, optional): Object name.
v (numpy.ndarray, optional): Vertex coordinates.
f (list, optional): Faces' vertex indices (1-indexed), e.g.,
``[[1, 2, 3], [4, 5, 6], [7, 8, 9, 10], ...]``.
vn (numpy.ndarray, optional): Vertex normals of shape N-by-3,
normalized or not.
fn (list, optional): Faces' vertex normal indices, e.g.,
``[[1, 1, 1], [], [2, 2, 2, 2], ...]``. Must be of the same
length as ``f``.
vt (numpy.ndarray, optional): Vertex texture coordinates of shape
N-by-2. Coordinates must be normalized to :math:`[0, 1]`.
ft (list, optional): Faces' texture vertex indices, e.g.,
``[[1, 2, 3], [4, 5, 6], [], ...]``. Must be of the same length
as ``f``.
s (bool, optional): Group smoothing.
mtllib (str, optional): Material file name, e.g., ``'cube.mtl'``.
usemtl (str, optional): Material name (defined in .mtl file).
diffuse_map_path (str, optional): Path to diffuse texture map.
diffuse_map_scale (float, optional): Scale of diffuse texture map.
"""
self.mtllib = mtllib
self.o = o
# Vertices
if v is not None:
assert (len(v.shape) == 2 and v.shape[1] == 3), "'v' must be *-by-3"
if vt is not None:
assert (len(vt.shape) == 2 and vt.shape[1] == 2), \
"'vt' must be *-by-2"
if vn is not None:
assert (len(vn.shape) == 2 and vn.shape[1] == 3), \
"'vn' must be *-by-3"
self.v = v
self.vt = vt
self.vn = vn
# Faces
if f is not None:
if ft is not None:
assert (len(ft) == len(f)), \
"'ft' must be of the same length as 'f' (use '[]' to fill)"
if fn is not None:
assert (len(fn) == len(f)), \
"'fn' must be of the same length as 'f' (use '[]' to fill)"
self.f = f
self.ft = ft
self.fn = fn
self.usemtl = usemtl
self.s = s
self.diffuse_map_path = diffuse_map_path
self.diffuse_map_scale = diffuse_map_scale
def load_file(self, obj_file):
"""Loads a (basic) .obj file as an object.
Populates attributes with contents read from file.
Args:
obj_file (str): Path to .obj file.
"""
fid = open(obj_file, 'r')
lines = [l.strip('\n') for l in fid.readlines()]
lines = [l for l in lines if len(l) > 0] # remove empty lines
# Check if there's only one object
n_o = len([l for l in lines if l[0] == 'o'])
if n_o > 1:
raise ValueError((
".obj file containing multiple objects is not supported "
"-- consider using ``assimp`` instead"))
# Count for array initializations
n_v = len([l for l in lines if l[:2] == 'v '])
n_vt = len([l for l in lines if l[:3] == 'vt '])
n_vn = len([l for l in lines if l[:3] == 'vn '])
lines_f = [l for l in lines if l[:2] == 'f ']
n_f = len(lines_f)
# Initialize arrays
mtllib = None
o = None
v = np.zeros((n_v, 3))
vt = np.zeros((n_vt, 2))
vn = np.zeros((n_vn, 3))
usemtl = None
s = False
f = [None] * n_f
# If there's no 'ft' or 'fn' for a 'f', a '[]' is inserted as a
# placeholder. This guarantees 'f[i]' always corresponds to 'ft[i]'
# and 'fn[i]'
ft = [None] * n_f
fn = [None] * n_f
# Load data line by line
n_ft, n_fn = 0, 0
i_v, i_vt, i_vn, i_f = 0, 0, 0, 0
for l in lines:
if l[0] == '#': # comment
pass
elif l[:7] == 'mtllib ': # mtl file
mtllib = l[7:]
elif l[:2] == 'o ': # object name
o = l[2:]
elif l[:2] == 'v ': # geometric vertex
v[i_v, :] = [float(x) for x in l[2:].split(' ')]
i_v += 1
elif l[:3] == 'vt ': # texture vertex
vt[i_vt, :] = [float(x) for x in l[3:].split(' ')]
i_vt += 1
elif l[:3] == 'vn ': # normal vector
vn[i_vn, :] = [float(x) for x in l[3:].split(' ')]
i_vn += 1
elif l[:7] == 'usemtl ': # material name
usemtl = l[7:]
elif l[:2] == 's ': # group smoothing
if l[2:] == 'on':
s = True
elif l[:2] == 'f ': # face
n_slashes = l[2:].split(' ')[0].count('/')
if n_slashes == 0: # just f (1 2 3)
f[i_f] = [int(x) for x in l[2:].split(' ')]
ft[i_f] = []
fn[i_f] = []
elif n_slashes == 1: # f and ft (1/1 2/2 3/3)
f[i_f] = [int(x.split('/')[0]) for x in l[2:].split(' ')]
ft[i_f] = [int(x.split('/')[1]) for x in l[2:].split(' ')]
fn[i_f] = []
n_ft += 1
elif n_slashes == 2:
if l[2:].split(' ')[0].count('//') == 1:
# f and fn (1//1 2//1 3//1)
f[i_f] = [
int(x.split('//')[0]) for x in l[2:].split(' ')]
ft[i_f] = []
fn[i_f] = [
int(x.split('//')[1]) for x in l[2:].split(' ')]
n_fn += 1
else:
# f, ft and fn (1/1/1 2/2/1 3/3/1)
f[i_f] = [
int(x.split('/')[0]) for x in l[2:].split(' ')]
ft[i_f] = [
int(x.split('/')[1]) for x in l[2:].split(' ')]
fn[i_f] = [
int(x.split('/')[2]) for x in l[2:].split(' ')]
n_ft += 1
n_fn += 1
i_f += 1
else:
raise ValueError("Unidentified line type: %s" % l)
# Update self
self.mtllib = mtllib
self.o = o
self.v = v
self.vt = vt if vt.shape[0] > 0 else None
self.vn = vn if vn.shape[0] > 0 else None
self.f = f
self.ft = ft if any(ft) else None # any member list not empty
self.fn = fn if any(fn) else None
self.usemtl = usemtl
self.s = s
# Print model info
def print_info(self):
# Basic stats
mtllib = self.mtllib
o = self.o
n_v = self.v.shape[0] if self.v is not None else 0
n_vt = self.vt.shape[0] if self.vt is not None else 0
n_vn = self.vn.shape[0] if self.vn is not None else 0
usemtl = self.usemtl
s = self.s
diffuse_map_path = self.diffuse_map_path
diffuse_map_scale = self.diffuse_map_scale
n_f = len(self.f) if self.f is not None else 0
if self.ft is not None:
n_ft = sum(len(x) > 0 for x in self.ft)
else:
n_ft = 0
if self.fn is not None:
n_fn = sum(len(x) > 0 for x in self.fn)
else:
n_fn = 0
logger.info("-------------------------------------------------------")
logger.info("Object name 'o' %s", o)
logger.info("Material file 'mtllib' %s", mtllib)
logger.info("Material 'usemtl' %s", usemtl)
logger.info("Diffuse texture map 'map_Kd' %s", diffuse_map_path)
logger.info("Diffuse map scale %f", diffuse_map_scale)
logger.info("Group smoothing 's' %r", s)
logger.info("# geometric vertices 'v' %d", n_v)
logger.info("# texture vertices 'vt' %d", n_vt)
logger.info("# normal vectors 'vn' %d", n_vn)
logger.info("# geometric faces 'f x/o/o' %d", n_f)
logger.info("# texture faces 'f o/x/o' %d", n_ft)
logger.info("# normal faces 'f o/o/x' %d", n_fn)
# How many triangles, quads, etc.
if n_f > 0:
logger.info("")
logger.info("Among %d faces:", n_f)
vert_counts = [len(x) for x in self.f]
for c in np.unique(vert_counts):
howmany = vert_counts.count(c)
logger.info(" - %d are formed by %d vertices", howmany, c)
logger.info("-------------------------------------------------------")
# Set vn and fn according to v and f
def set_face_normals(self):
"""Sets face normals according to geometric vertices and their orders
in forming faces.
Returns:
tuple:
- **vn** (*numpy.ndarray*) -- Normal vectors.
- **fn** (*list*) -- Normal faces. Each member list consists of
the same integer, e.g., ``[[1, 1, 1], [2, 2, 2, 2], ...]``.
"""
n_f = len(self.f)
vn = np.zeros((n_f, 3))
fn = [None] * n_f
# For each face
for i, verts_id in enumerate(self.f):
# Vertices must be coplanar to be valid, so we can just pick the
# first three
ind = [x - 1 for x in verts_id[:3]] # in .obj, index starts from 1,
# not 0
verts = self.v[ind, :]
p1p2 = verts[1, :] - verts[0, :]
p1p3 = verts[2, :] - verts[0, :]
normal = np.cross(p1p2, p1p3)
if np.linalg.norm(normal) == 0:
raise ValueError((
"Normal vector of zero length probably due to numerical "
"issues?"))
vn[i, :] = normal / np.linalg.norm(normal) # normalize
fn[i] = [i + 1] * len(verts_id)
# Set normals and return
self.vn = vn
self.fn = fn
logger.info((
"Face normals recalculated with 'v' and 'f' -- 'vn' and 'fn' "
"updated"))
return vn, fn
# Output object to file
def write_file(self, objpath):
"""Writes the current model to a .obj file.
Args:
objpath (str): Path to the output .obj.
Writes
- Output .obj file.
"""
mtllib = self.mtllib
o = self.o
v, vt, vn = self.v, self.vt, self.vn
usemtl = self.usemtl
s = self.s
f, ft, fn = self.f, self.ft, self.fn
# mkdir if necessary
outdir = dirname(objpath)
xm_os.makedirs(outdir)
# Write .obj
with open(objpath, 'w') as fid:
# Material file
if mtllib is not None:
fid.write('mtllib %s\n' % mtllib)
# Object name
fid.write('o %s\n' % o)
# Vertices
for i in range(v.shape[0]):
fid.write('v %f %f %f\n' % tuple(v[i, :]))
if vt is not None:
for i in range(vt.shape[0]):
fid.write('vt %f %f\n' % tuple(vt[i, :]))
if vn is not None:
for i in range(vn.shape[0]):
fid.write('vn %f %f %f\n' % tuple(vn[i, :]))
# Material name
if usemtl is not None:
fid.write('usemtl %s\n' % usemtl)
# Group smoothing
if s:
fid.write('s on\n')
else:
fid.write('s off\n')
# Faces
if ft is None and fn is None: # just f (1 2 3)
for v_id in f:
fid.write(('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
elif ft is not None and fn is None:
# f and ft (1/1 2/2 3/3 or 1 2 3)
for i, v_id in enumerate(f):
vt_id = ft[i]
if len(vt_id) == len(v_id):
fid.write((
'f' + ' %d/%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vt_id) for x in pair]))
elif not vt_id:
fid.write(
('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
else:
raise ValueError((
"'ft[%d]', not empty, doesn't match length of "
"'f[%d]'") % (i, i))
elif ft is None and fn is not None:
# f and fn (1//1 2//1 3//1 or 1 2 3)
for i, v_id in enumerate(f):
vn_id = fn[i]
if len(vn_id) == len(v_id):
fid.write((
'f' + ' %d//%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vn_id) for x in pair]))
elif not vn_id:
fid.write(
('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
else:
raise ValueError((
"'fn[%d]', not empty, doesn't match length of "
"'f[%d]'") % (i, i))
elif ft is not None and fn is not None:
# f, ft and fn (1/1/1 2/2/1 3/3/1 or 1/1 2/2 3/3 or
# 1//1 2//1 3//1 or 1 2 3)
for i, v_id in enumerate(f):
vt_id = ft[i]
vn_id = fn[i]
if len(vt_id) == len(v_id) and len(vn_id) == len(v_id):
fid.write((
'f' + ' %d/%d/%d' * len(v_id) + '\n') % tuple(
[x for triple in zip(v_id, vt_id, vn_id)
for x in triple]))
elif len(vt_id) == len(v_id) and not vn_id:
fid.write((
'f' + ' %d/%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vt_id) for x in pair]))
elif not vt_id and len(vn_id) == len(v_id):
fid.write((
'f' + ' %d//%d' * len(v_id) + '\n') % tuple(
[x for pair in zip(v_id, vn_id) for x in pair]))
elif not vt_id and not vn_id:
fid.write(
('f' + ' %d' * len(v_id) + '\n') % tuple(v_id))
else:
raise ValueError((
"If not empty, 'ft[%d]' or 'fn[%d]' doesn't match "
"length of 'f[%d]'") % (i, i, i))
logger.info("Done writing to %s", objpath)
class Mtl:
r"""Wavefront .mtl object.
Attributes:
mtlfile (str): Material file name, set to ``obj.mtllib``.
newmtl (str): Material name, set to ``obj.usemtl``.
map_Kd_path (str): Path to the diffuse map, set to
``obj.diffuse_map_path``.
map_Kd_scale (float): Scale of the diffuse map, set to
``obj.diffuse_map_scale``.
Ns (float)
Ka (tuple)
Kd (tuple)
Ks (tuple)
Ni (float)
d (float)
illum (int)
"""
def __init__(
self, obj, Ns=96.078431, Ka=(1, 1, 1), Kd=(0.64, 0.64, 0.64),
Ks=(0.5, 0.5, 0.5), Ni=1, d=1, illum=2):
r"""
Args:
obj (Obj): ``Obj`` object for which this ``Mtl`` object is created.
Ns (float, optional): Specular exponent, normally
:math:`\in[0, 1000]`.
Ka (tuple, optional): Ambient reflectivity, each float normally
:math:`\in[0, 1]`. Values outside increase or decrease
relectivity accordingly.
Kd (tuple, optional): Diffuse reflectivity. Same range as ``Ka``.
Ks (tuple, optional): Specular reflectivity. Same range as ``Ka``.
Ni (float, optional): Optical density, a.k.a. index of refraction
:math:`\in[0.001, 10]`. 1 means light doesn't bend as it passes
through. Increasing it increases the amount of bending. Glass
has an index of refraction of about 1.5. Values of less than 1.0
produce bizarre results and are not recommended.
d (float, optional): Amount this material dissolves into the
background :math:`\in[0, 1]`. 1.0 is fully opaque (default),
and 0 is fully dissolved (completely transparent). Unlike a real
transparent material, the dissolve does not depend upon material
thickness, nor does it have any spectral character. Dissolve
works on all illumination models.
illum (int, optional): Illumination model
:math:`\in[0, 1, ..., 10]`.
"""
self.mtlfile = obj.mtllib
self.newmtl = obj.usemtl
self.map_Kd_path = obj.diffuse_map_path
self.map_Kd_scale = obj.diffuse_map_scale
self.Ns = Ns
self.Ka = Ka
self.Kd = Kd
self.Ks = Ks
self.Ni = Ni
self.d = d
self.illum = illum
def print_info(self):
logger.info("---------------------------------------------------------")
logger.info("Material file %s", self.mtlfile)
logger.info("Material name 'newmtl' %s", self.newmtl)
logger.info("Diffuse texture map 'map_Kd' %s", self.map_Kd_path)
logger.info("Diffuse map scale %f", self.map_Kd_scale)
logger.info("Specular exponent 'Ns' %f", self.Ns)
logger.info("Ambient reflectivity 'Ka' %s", self.Ka)
logger.info("Diffuse reflectivity 'Kd' %s", self.Kd)
logger.info("Specular reflectivity 'Ks' %s", self.Ks)
logger.info("Refraction index 'Ni' %s", self.Ni)
logger.info("Dissolve 'd' %f", self.d)
logger.info("Illumination model 'illum' %d", self.illum)
logger.info("---------------------------------------------------------")
def write_file(self, outdir):
"""Unit tests that can also serve as example usage.
Args:
outdir (str): Output directory.
Writes
- Output .mtl file.
"""
cv2 = preset_import('cv2', assert_success=True)
# Validate inputs
assert (self.mtlfile is not None and self.newmtl is not None), \
"'mtlfile' and 'newmtl' must not be 'None'"
# mkdir if necessary
xm_os.makedirs(outdir)
# Write .mtl
mtlpath = join(outdir, self.mtlfile)
with open(mtlpath, 'w') as fid:
fid.write('newmtl %s\n' % self.newmtl)
fid.write('Ns %f\n' % self.Ns)
fid.write('Ka %f %f %f\n' % self.Ka)
fid.write('Kd %f %f %f\n' % self.Kd)
fid.write('Ks %f %f %f\n' % self.Ks)
fid.write('Ni %f\n' % self.Ni)
fid.write('d %f\n' % self.d)
fid.write('illum %d\n' % self.illum)
map_Kd_path = self.map_Kd_path
map_Kd_scale = self.map_Kd_scale
if map_Kd_path is not None:
fid.write('map_Kd %s\n' % basename(map_Kd_path))
if map_Kd_scale == 1:
copy(map_Kd_path, outdir)
else:
im = cv2.imread(map_Kd_path, cv2.IMREAD_UNCHANGED) # TODO: switch to xm.io.img
im = cv2.resize(im, None, fx=map_Kd_scale, fy=map_Kd_scale) # TODO: switch to xm.img
cv2.imwrite(join(outdir, basename(map_Kd_path)), im) # TODO: switch to xm.io.img
logger.info("Done writing to %s", mtlpath)
def main():
"""Unit tests that can also serve as example usage."""
objf = '../../../toy-data/obj-mtl_cube/cube.obj'
myobj = Obj()
myobj.print_info()
myobj.load_file(objf)
myobj.print_info()
objf_reproduce = objf.replace('.obj', '_reproduce.obj')
myobj.write_file(objf_reproduce)
myobj.set_face_normals()
myobj.print_info()
if __name__ == '__main__':
main()
|
apache-2.0
| -7,813,962,791,409,773,000 | 40.830709 | 104 | 0.438729 | false | 3.488181 | false | false | false |
pybursa/homeworks
|
s_shybkoy/hw5/hw5_task1.py
|
1
|
2975
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Задание 1: классный Человек.
УСЛОВИЕ:
Реализовать класс Person, который отображает запись в книге контактов.
Класс имеет 4 атрибута:
- surname - строка - фамилия контакта (обязательный)
- first_name - строка - имя контакта (обязательный)
- nickname - строка - псевдоним (опциональный)
- birth_date - объект datetime.date (обязательный)
Каждый вызов класса должен создавать экземпляр (инстанс) класса с указанными
атрибутами.
Также класс имеет 2 метода:
- get_age() - считает возраст контакта в полных годах на дату вызова и
возвращает строку вида: "27";
- get_fullname() - возвращает строку, отражающую полное имя (фамилия + имя)
контакта;
"""
__author__ = "Sergei Shybkoi"
__copyright__ = "Copyright 2014, The Homework Project"
__email__ = "[email protected]"
__status__ = "Production"
__date__ = "2014-11-18"
import datetime
class Person(object):
u"""Класс Person"""
def __init__(self, surname, first_name, birth_date, nickname=None):
u"""Инишн класса"""
try:
var_date = datetime.datetime.strptime(birth_date, "%Y-%m-%d")
res_date = datetime.date(var_date.year,
var_date.month, var_date.day)
except TypeError:
print "Incorrect type of birthday date!"
res_date = None
except ValueError:
print "Wrong value of birthday date!"
res_date = None
self.surname = surname
self.first_name = first_name
self.birth_date = res_date
if nickname is not None:
self.nickname = nickname
def get_age(self):
u"""Метод класса подсчитывает и выводит количество полных лет"""
if self.birth_date is not None:
today_date = datetime.date.today()
delta = today_date.year - self.birth_date.year
            if (today_date.month, today_date.day) < \
                    (self.birth_date.month, self.birth_date.day):
                delta -= 1
print "Age:", delta
return str(delta)
else:
print "No correct data about person's birthday."
return "0"
def get_fullname(self):
u"""Метод выводит и возвращаем полное имя экземпляра класса Person"""
print self.surname, self.first_name
return self.surname + " " + self.first_name
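# A minimal usage sketch (hypothetical contact data):
#     person = Person("Ivanov", "Ivan", "1990-05-10", nickname="vanya")
#     person.get_fullname()   # -> "Ivanov Ivan"
#     person.get_age()        # -> e.g. "34", depending on the current date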
|
gpl-2.0
| -1,922,213,597,879,849,000 | 32.814286 | 77 | 0.60197 | false | 2.307765 | false | false | false |
pmisik/buildbot
|
master/buildbot/changes/gitpoller.py
|
1
|
17640
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import re
import stat
from urllib.parse import quote as urlquote
from twisted.internet import defer
from twisted.python import log
from buildbot import config
from buildbot.changes import base
from buildbot.util import bytes2unicode
from buildbot.util import private_tempdir
from buildbot.util import runprocess
from buildbot.util.git import GitMixin
from buildbot.util.git import getSshKnownHostsContents
from buildbot.util.misc import writeLocalFile
from buildbot.util.state import StateMixin
class GitError(Exception):
"""Raised when git exits with code 128."""
class GitPoller(base.PollingChangeSource, StateMixin, GitMixin):
"""This source will poll a remote git repo for changes and submit
them to the change master."""
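    # A typical master.cfg usage sketch (hypothetical repository URL and values):
    #     c['change_source'].append(GitPoller(
    #         'git://example.org/project.git',
    #         branches=['master'], pollInterval=5 * 60))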
compare_attrs = ("repourl", "branches", "workdir", "pollInterval", "gitbin", "usetimestamps",
"category", "project", "pollAtLaunch", "buildPushesWithNoCommits",
"sshPrivateKey", "sshHostKey", "sshKnownHosts", "pollRandomDelayMin",
"pollRandomDelayMax")
secrets = ("sshPrivateKey", "sshHostKey", "sshKnownHosts")
def __init__(self, repourl, branches=None, branch=None, workdir=None, pollInterval=10 * 60,
gitbin="git", usetimestamps=True, category=None, project=None, pollinterval=-2,
fetch_refspec=None, encoding="utf-8", name=None, pollAtLaunch=False,
buildPushesWithNoCommits=False, only_tags=False, sshPrivateKey=None,
sshHostKey=None, sshKnownHosts=None, pollRandomDelayMin=0, pollRandomDelayMax=0):
# for backward compatibility; the parameter used to be spelled with 'i'
if pollinterval != -2:
pollInterval = pollinterval
if name is None:
name = repourl
super().__init__(name=name, pollInterval=pollInterval, pollAtLaunch=pollAtLaunch,
pollRandomDelayMin=pollRandomDelayMin,
pollRandomDelayMax=pollRandomDelayMax, sshPrivateKey=sshPrivateKey,
sshHostKey=sshHostKey, sshKnownHosts=sshKnownHosts)
if project is None:
project = ''
if only_tags and (branch or branches):
config.error("GitPoller: can't specify only_tags and branch/branches")
if branch and branches:
config.error("GitPoller: can't specify both branch and branches")
elif branch:
branches = [branch]
elif not branches:
if only_tags:
branches = lambda ref: ref.startswith('refs/tags/') # noqa: E731
else:
branches = ['master']
self.repourl = repourl
self.branches = branches
self.encoding = encoding
self.buildPushesWithNoCommits = buildPushesWithNoCommits
self.gitbin = gitbin
self.workdir = workdir
self.usetimestamps = usetimestamps
self.category = category if callable(
category) else bytes2unicode(category, encoding=self.encoding)
self.project = bytes2unicode(project, encoding=self.encoding)
self.changeCount = 0
self.lastRev = {}
self.sshPrivateKey = sshPrivateKey
self.sshHostKey = sshHostKey
self.sshKnownHosts = sshKnownHosts
self.setupGit(logname='GitPoller')
if fetch_refspec is not None:
config.error("GitPoller: fetch_refspec is no longer supported. "
"Instead, only the given branches are downloaded.")
if self.workdir is None:
self.workdir = 'gitpoller-work'
@defer.inlineCallbacks
def _checkGitFeatures(self):
stdout = yield self._dovccmd('--version', [])
self.parseGitFeatures(stdout)
if not self.gitInstalled:
raise EnvironmentError('Git is not installed')
if (self.sshPrivateKey is not None and
not self.supportsSshPrivateKeyAsEnvOption):
raise EnvironmentError('SSH private keys require Git 2.3.0 or newer')
@defer.inlineCallbacks
def activate(self):
# make our workdir absolute, relative to the master's basedir
if not os.path.isabs(self.workdir):
self.workdir = os.path.join(self.master.basedir, self.workdir)
log.msg("gitpoller: using workdir '{}'".format(self.workdir))
try:
self.lastRev = yield self.getState('lastRev', {})
super().activate()
except Exception as e:
log.err(e, 'while initializing GitPoller repository')
def describe(self):
str = ('GitPoller watching the remote git repository ' +
bytes2unicode(self.repourl, self.encoding))
if self.branches:
if self.branches is True:
str += ', branches: ALL'
elif not callable(self.branches):
str += ', branches: ' + ', '.join(self.branches)
if not self.master:
str += " [STOPPED - check log]"
return str
def _getBranches(self):
d = self._dovccmd('ls-remote', ['--refs', self.repourl])
@d.addCallback
def parseRemote(rows):
branches = []
for row in rows.splitlines():
if '\t' not in row:
# Not a useful line
continue
sha, ref = row.split("\t")
branches.append(ref)
return branches
return d
def _headsFilter(self, branch):
"""Filter out remote references that don't begin with 'refs/heads'."""
return branch.startswith("refs/heads/")
def _removeHeads(self, branch):
"""Remove 'refs/heads/' prefix from remote references."""
if branch.startswith("refs/heads/"):
branch = branch[11:]
return branch
def _trackerBranch(self, branch):
# manually quote tilde for Python 3.7
url = urlquote(self.repourl, '').replace('~', '%7E')
return "refs/buildbot/{}/{}".format(url, self._removeHeads(branch))
def poll_should_exit(self):
# A single gitpoller loop may take a while on a loaded master, which would block
# reconfiguration, so we try to exit early.
return not self.doPoll.running
@defer.inlineCallbacks
def poll(self):
yield self._checkGitFeatures()
try:
yield self._dovccmd('init', ['--bare', self.workdir])
except GitError as e:
log.msg(e.args[0])
return
branches = self.branches if self.branches else []
remote_refs = yield self._getBranches()
if self.poll_should_exit():
return
if branches is True or callable(branches):
if callable(self.branches):
branches = [b for b in remote_refs if self.branches(b)]
else:
branches = [b for b in remote_refs if self._headsFilter(b)]
elif branches and remote_refs:
remote_branches = [self._removeHeads(b) for b in remote_refs]
branches = sorted(list(set(branches) & set(remote_branches)))
refspecs = [
'+{}:{}'.format(self._removeHeads(branch), self._trackerBranch(branch))
for branch in branches
]
try:
yield self._dovccmd('fetch', [self.repourl] + refspecs,
path=self.workdir)
except GitError as e:
log.msg(e.args[0])
return
revs = {}
log.msg('gitpoller: processing changes from "{}"'.format(self.repourl))
for branch in branches:
try:
if self.poll_should_exit(): # pragma: no cover
# Note that we still want to update the last known revisions for the branches
# we did process
break
rev = yield self._dovccmd(
'rev-parse', [self._trackerBranch(branch)], path=self.workdir)
revs[branch] = bytes2unicode(rev, self.encoding)
yield self._process_changes(revs[branch], branch)
except Exception:
log.err(_why="trying to poll branch {} of {}".format(
branch, self.repourl))
self.lastRev.update(revs)
yield self.setState('lastRev', self.lastRev)
def _get_commit_comments(self, rev):
args = ['--no-walk', r'--format=%s%n%b', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
return d
def _get_commit_timestamp(self, rev):
# unix timestamp
args = ['--no-walk', r'--format=%ct', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
@d.addCallback
def process(git_output):
if self.usetimestamps:
try:
stamp = int(git_output)
except Exception as e:
log.msg(('gitpoller: caught exception converting output \'{}\' to timestamp'
).format(git_output))
raise e
return stamp
return None
return d
def _get_commit_files(self, rev):
args = ['--name-only', '--no-walk', r'--format=%n', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
def decode_file(file):
# git use octal char sequences in quotes when non ASCII
match = re.match('^"(.*)"$', file)
if match:
file = bytes2unicode(match.groups()[0], encoding=self.encoding,
errors='unicode_escape')
return bytes2unicode(file, encoding=self.encoding)
@d.addCallback
def process(git_output):
fileList = [decode_file(file)
for file in
[s for s in git_output.splitlines() if len(s)]]
return fileList
return d
def _get_commit_author(self, rev):
args = ['--no-walk', r'--format=%aN <%aE>', rev, '--']
d = self._dovccmd('log', args, path=self.workdir)
@d.addCallback
def process(git_output):
if not git_output:
raise EnvironmentError('could not get commit author for rev')
return git_output
return d
@defer.inlineCallbacks
def _get_commit_committer(self, rev):
args = ['--no-walk', r'--format=%cN <%cE>', rev, '--']
res = yield self._dovccmd('log', args, path=self.workdir)
if not res:
raise EnvironmentError('could not get commit committer for rev')
return res
@defer.inlineCallbacks
def _process_changes(self, newRev, branch):
"""
Read changes since last change.
- Read list of commit hashes.
- Extract details from each commit.
- Add changes to database.
"""
# initial run, don't parse all history
if not self.lastRev:
return
# get the change list
revListArgs = (['--ignore-missing'] +
['--format=%H', '{}'.format(newRev)] +
['^' + rev
for rev in sorted(self.lastRev.values())] +
['--'])
self.changeCount = 0
results = yield self._dovccmd('log', revListArgs, path=self.workdir)
# process oldest change first
revList = results.split()
revList.reverse()
if self.buildPushesWithNoCommits and not revList:
existingRev = self.lastRev.get(branch)
if existingRev != newRev:
revList = [newRev]
if existingRev is None:
# This branch was completely unknown, rebuild
log.msg('gitpoller: rebuilding {} for new branch "{}"'.format(
newRev, branch))
else:
# This branch is known, but it now points to a different
# commit than last time we saw it, rebuild.
log.msg('gitpoller: rebuilding {} for updated branch "{}"'.format(
newRev, branch))
self.changeCount = len(revList)
self.lastRev[branch] = newRev
if self.changeCount:
log.msg('gitpoller: processing {} changes: {} from "{}" branch "{}"'.format(
self.changeCount, revList, self.repourl, branch))
for rev in revList:
dl = defer.DeferredList([
self._get_commit_timestamp(rev),
self._get_commit_author(rev),
self._get_commit_committer(rev),
self._get_commit_files(rev),
self._get_commit_comments(rev),
], consumeErrors=True)
results = yield dl
# check for failures
failures = [r[1] for r in results if not r[0]]
if failures:
for failure in failures:
log.err(
failure, "while processing changes for {} {}".format(newRev, branch))
# just fail on the first error; they're probably all related!
failures[0].raiseException()
timestamp, author, committer, files, comments = [r[1] for r in results]
yield self.master.data.updates.addChange(
author=author,
committer=committer,
revision=bytes2unicode(rev, encoding=self.encoding),
files=files, comments=comments, when_timestamp=timestamp,
branch=bytes2unicode(self._removeHeads(branch)),
project=self.project,
repository=bytes2unicode(self.repourl, encoding=self.encoding),
category=self.category, src='git')
def _isSshPrivateKeyNeededForCommand(self, command):
commandsThatNeedKey = [
'fetch',
'ls-remote',
]
if self.sshPrivateKey is not None and command in commandsThatNeedKey:
return True
return False
def _downloadSshPrivateKey(self, keyPath):
# We change the permissions of the key file to be user-readable only so
# that ssh does not complain. This is not used for security because the
# parent directory will have proper permissions.
writeLocalFile(keyPath, self.sshPrivateKey, mode=stat.S_IRUSR)
def _downloadSshKnownHosts(self, path):
if self.sshKnownHosts is not None:
contents = self.sshKnownHosts
else:
contents = getSshKnownHostsContents(self.sshHostKey)
writeLocalFile(path, contents)
def _getSshPrivateKeyPath(self, ssh_data_path):
return os.path.join(ssh_data_path, 'ssh-key')
def _getSshKnownHostsPath(self, ssh_data_path):
return os.path.join(ssh_data_path, 'ssh-known-hosts')
@defer.inlineCallbacks
def _dovccmd(self, command, args, path=None):
if self._isSshPrivateKeyNeededForCommand(command):
with private_tempdir.PrivateTemporaryDirectory(
dir=self.workdir, prefix='.buildbot-ssh') as tmp_path:
stdout = yield self._dovccmdImpl(command, args, path, tmp_path)
else:
stdout = yield self._dovccmdImpl(command, args, path, None)
return stdout
@defer.inlineCallbacks
def _dovccmdImpl(self, command, args, path, ssh_workdir):
full_args = []
full_env = os.environ.copy()
if self._isSshPrivateKeyNeededForCommand(command):
key_path = self._getSshPrivateKeyPath(ssh_workdir)
self._downloadSshPrivateKey(key_path)
known_hosts_path = None
if self.sshHostKey is not None or self.sshKnownHosts is not None:
known_hosts_path = self._getSshKnownHostsPath(ssh_workdir)
self._downloadSshKnownHosts(known_hosts_path)
self.adjustCommandParamsForSshPrivateKey(full_args, full_env,
key_path, None,
known_hosts_path)
full_args += [command] + args
res = yield runprocess.run_process(self.master.reactor, [self.gitbin] + full_args, path,
env=full_env)
(code, stdout, stderr) = res
stdout = bytes2unicode(stdout, self.encoding)
stderr = bytes2unicode(stderr, self.encoding)
if code != 0:
if code == 128:
raise GitError('command {} in {} on repourl {} failed with exit code {}: {}'.format(
full_args, path, self.repourl, code, stderr))
raise EnvironmentError(('command {} in {} on repourl {} failed with exit code {}: {}'
).format(full_args, path, self.repourl, code, stderr))
return stdout.strip()
|
gpl-2.0
| -7,909,465,922,960,029,000 | 37.940397 | 100 | 0.579762 | false | 4.377171 | false | false | false |
tboyce1/home-assistant
|
homeassistant/helpers/template.py
|
2
|
17501
|
"""Template helper methods for rendering strings with Home Assistant data."""
from datetime import datetime
import json
import logging
import math
import random
import re
import jinja2
from jinja2 import contextfilter
from jinja2.sandbox import ImmutableSandboxedEnvironment
from homeassistant.const import (
ATTR_LATITUDE, ATTR_LONGITUDE, ATTR_UNIT_OF_MEASUREMENT, MATCH_ALL,
STATE_UNKNOWN)
from homeassistant.core import State
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import location as loc_helper
from homeassistant.loader import bind_hass, get_component
from homeassistant.util import convert
from homeassistant.util import dt as dt_util
from homeassistant.util import location as loc_util
from homeassistant.util.async import run_callback_threadsafe
_LOGGER = logging.getLogger(__name__)
_SENTINEL = object()
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
_RE_NONE_ENTITIES = re.compile(r"distance\(|closest\(", re.I | re.M)
_RE_GET_ENTITIES = re.compile(
r"(?:(?:states\.|(?:is_state|is_state_attr|states)"
r"\((?:[\ \'\"]?))([\w]+\.[\w]+)|([\w]+))", re.I | re.M
)
@bind_hass
def attach(hass, obj):
"""Recursively attach hass to all template instances in list and dict."""
if isinstance(obj, list):
for child in obj:
attach(hass, child)
elif isinstance(obj, dict):
for child in obj.values():
attach(hass, child)
elif isinstance(obj, Template):
obj.hass = hass
def render_complex(value, variables=None):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [render_complex(item, variables)
for item in value]
elif isinstance(value, dict):
return {key: render_complex(item, variables)
for key, item in value.items()}
return value.async_render(variables)
def extract_entities(template, variables=None):
"""Extract all entities for state_changed listener from template string."""
if template is None or _RE_NONE_ENTITIES.search(template):
return MATCH_ALL
extraction = _RE_GET_ENTITIES.findall(template)
extraction_final = []
for result in extraction:
if result[0] == 'trigger.entity_id' and 'trigger' in variables and \
'entity_id' in variables['trigger']:
extraction_final.append(variables['trigger']['entity_id'])
elif result[0]:
extraction_final.append(result[0])
if variables and result[1] in variables and \
isinstance(variables[result[1]], str):
extraction_final.append(variables[result[1]])
if extraction_final:
return list(set(extraction_final))
return MATCH_ALL
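# For illustration of extract_entities() above (hypothetical entity ids):
#     extract_entities("{{ is_state('light.kitchen', 'on') }}") -> ['light.kitchen']
#     extract_entities("{{ distance(states.zone.home) }}")      -> MATCH_ALL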
class Template(object):
"""Class to hold a template and manage caching and rendering."""
def __init__(self, template, hass=None):
"""Instantiate a template."""
if not isinstance(template, str):
raise TypeError('Expected template to be a string')
self.template = template
self._compiled_code = None
self._compiled = None
self.hass = hass
def ensure_valid(self):
"""Return if template is valid."""
if self._compiled_code is not None:
return
try:
self._compiled_code = ENV.compile(self.template)
except jinja2.exceptions.TemplateSyntaxError as err:
raise TemplateError(err)
def extract_entities(self, variables=None):
"""Extract all entities for state_changed listener."""
return extract_entities(self.template, variables)
def render(self, variables=None, **kwargs):
"""Render given template."""
if variables is not None:
kwargs.update(variables)
return run_callback_threadsafe(
self.hass.loop, self.async_render, kwargs).result()
def async_render(self, variables=None, **kwargs):
"""Render given template.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
if variables is not None:
kwargs.update(variables)
try:
return self._compiled.render(kwargs).strip()
except jinja2.TemplateError as err:
raise TemplateError(err)
def render_with_possible_json_value(self, value, error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_render_with_possible_json_value, value,
error_value).result()
# pylint: disable=invalid-name
def async_render_with_possible_json_value(self, value,
error_value=_SENTINEL):
"""Render template with value exposed.
If valid JSON will expose value_json too.
This method must be run in the event loop.
"""
if self._compiled is None:
self._ensure_compiled()
variables = {
'value': value
}
try:
variables['value_json'] = json.loads(value)
except ValueError:
pass
try:
return self._compiled.render(variables).strip()
except jinja2.TemplateError as ex:
_LOGGER.error("Error parsing value: %s (value: %s, template: %s)",
ex, value, self.template)
return value if error_value is _SENTINEL else error_value
def _ensure_compiled(self):
"""Bind a template to a specific hass instance."""
self.ensure_valid()
assert self.hass is not None, 'hass variable not set on template'
template_methods = TemplateMethods(self.hass)
global_vars = ENV.make_globals({
'closest': template_methods.closest,
'distance': template_methods.distance,
'is_state': self.hass.states.is_state,
'is_state_attr': template_methods.is_state_attr,
'states': AllStates(self.hass),
})
self._compiled = jinja2.Template.from_code(
ENV, self._compiled_code, global_vars, None)
return self._compiled
def __eq__(self, other):
"""Compare template with another."""
return (self.__class__ == other.__class__ and
self.template == other.template and
self.hass == other.hass)
class AllStates(object):
"""Class to expose all HA states as attributes."""
def __init__(self, hass):
"""Initialize all states."""
self._hass = hass
def __getattr__(self, name):
"""Return the domain state."""
return DomainStates(self._hass, name)
def __iter__(self):
"""Return all states."""
return iter(
_wrap_state(state) for state in
sorted(self._hass.states.async_all(),
key=lambda state: state.entity_id))
def __len__(self):
"""Return number of states."""
return len(self._hass.states.async_entity_ids())
def __call__(self, entity_id):
"""Return the states."""
state = self._hass.states.get(entity_id)
return STATE_UNKNOWN if state is None else state.state
class DomainStates(object):
"""Class to expose a specific HA domain as attributes."""
def __init__(self, hass, domain):
"""Initialize the domain states."""
self._hass = hass
self._domain = domain
def __getattr__(self, name):
"""Return the states."""
return _wrap_state(
self._hass.states.get('{}.{}'.format(self._domain, name)))
def __iter__(self):
"""Return the iteration over all the states."""
return iter(sorted(
(_wrap_state(state) for state in self._hass.states.async_all()
if state.domain == self._domain),
key=lambda state: state.entity_id))
def __len__(self):
"""Return number of states."""
return len(self._hass.states.async_entity_ids(self._domain))
class TemplateState(State):
"""Class to represent a state object in a template."""
# Inheritance is done so functions that check against State keep working
# pylint: disable=super-init-not-called
def __init__(self, state):
"""Initialize template state."""
self._state = state
@property
def state_with_unit(self):
"""Return the state concatenated with the unit if available."""
state = object.__getattribute__(self, '_state')
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if unit is None:
return state.state
return "{} {}".format(state.state, unit)
def __getattribute__(self, name):
"""Return an attribute of the state."""
if name in TemplateState.__dict__:
return object.__getattribute__(self, name)
else:
return getattr(object.__getattribute__(self, '_state'), name)
def __repr__(self):
"""Representation of Template State."""
rep = object.__getattribute__(self, '_state').__repr__()
return '<template ' + rep[1:]
def _wrap_state(state):
"""Wrap a state."""
return None if state is None else TemplateState(state)
class TemplateMethods(object):
"""Class to expose helpers to templates."""
def __init__(self, hass):
"""Initialize the helpers."""
self._hass = hass
def closest(self, *args):
"""Find closest entity.
Closest to home:
closest(states)
closest(states.device_tracker)
closest('group.children')
closest(states.group.children)
Closest to a point:
closest(23.456, 23.456, 'group.children')
closest('zone.school', 'group.children')
closest(states.zone.school, 'group.children')
"""
if len(args) == 1:
latitude = self._hass.config.latitude
longitude = self._hass.config.longitude
entities = args[0]
elif len(args) == 2:
point_state = self._resolve_state(args[0])
if point_state is None:
_LOGGER.warning("Closest:Unable to find state %s", args[0])
return None
elif not loc_helper.has_location(point_state):
_LOGGER.warning(
"Closest:State does not contain valid location: %s",
point_state)
return None
latitude = point_state.attributes.get(ATTR_LATITUDE)
longitude = point_state.attributes.get(ATTR_LONGITUDE)
entities = args[1]
else:
latitude = convert(args[0], float)
longitude = convert(args[1], float)
if latitude is None or longitude is None:
_LOGGER.warning(
"Closest:Received invalid coordinates: %s, %s",
args[0], args[1])
return None
entities = args[2]
if isinstance(entities, (AllStates, DomainStates)):
states = list(entities)
else:
if isinstance(entities, State):
gr_entity_id = entities.entity_id
else:
gr_entity_id = str(entities)
group = get_component('group')
states = [self._hass.states.get(entity_id) for entity_id
in group.expand_entity_ids(self._hass, [gr_entity_id])]
return _wrap_state(loc_helper.closest(latitude, longitude, states))
def distance(self, *args):
"""Calculate distance.
Will calculate distance from home to a point or between points.
Points can be passed in using state objects or lat/lng coordinates.
"""
locations = []
to_process = list(args)
while to_process:
value = to_process.pop(0)
if isinstance(value, State):
latitude = value.attributes.get(ATTR_LATITUDE)
longitude = value.attributes.get(ATTR_LONGITUDE)
if latitude is None or longitude is None:
_LOGGER.warning(
"Distance:State does not contains a location: %s",
value)
return None
else:
# We expect this and next value to be lat&lng
if not to_process:
_LOGGER.warning(
"Distance:Expected latitude and longitude, got %s",
value)
return None
value_2 = to_process.pop(0)
latitude = convert(value, float)
longitude = convert(value_2, float)
if latitude is None or longitude is None:
_LOGGER.warning("Distance:Unable to process latitude and "
"longitude: %s, %s", value, value_2)
return None
locations.append((latitude, longitude))
if len(locations) == 1:
return self._hass.config.distance(*locations[0])
return self._hass.config.units.length(
loc_util.distance(*locations[0] + locations[1]), 'm')
def is_state_attr(self, entity_id, name, value):
"""Test if a state is a specific attribute."""
state_obj = self._hass.states.get(entity_id)
return state_obj is not None and \
state_obj.attributes.get(name) == value
def _resolve_state(self, entity_id_or_state):
"""Return state or entity_id if given."""
if isinstance(entity_id_or_state, State):
return entity_id_or_state
elif isinstance(entity_id_or_state, str):
return self._hass.states.get(entity_id_or_state)
return None
def forgiving_round(value, precision=0):
"""Round accepted strings."""
try:
value = round(float(value), precision)
return int(value) if precision == 0 else value
except (ValueError, TypeError):
# If value can't be converted to float
return value
def multiply(value, amount):
"""Filter to convert value to float and multiply it."""
try:
return float(value) * amount
except (ValueError, TypeError):
# If value can't be converted to float
return value
def logarithm(value, base=math.e):
"""Filter to get logarithm of the value with a specific base."""
try:
return math.log(float(value), float(base))
except (ValueError, TypeError):
return value
def timestamp_custom(value, date_format=DATE_STR_FORMAT, local=True):
"""Filter to convert given timestamp to format."""
try:
date = dt_util.utc_from_timestamp(value)
if local:
date = dt_util.as_local(date)
return date.strftime(date_format)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_local(value):
"""Filter to convert given timestamp to local date/time."""
try:
return dt_util.as_local(
dt_util.utc_from_timestamp(value)).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def timestamp_utc(value):
"""Filter to convert given timestamp to UTC date/time."""
try:
return dt_util.utc_from_timestamp(value).strftime(DATE_STR_FORMAT)
except (ValueError, TypeError):
# If timestamp can't be converted
return value
def forgiving_as_timestamp(value):
"""Try to convert value to timestamp."""
try:
return dt_util.as_timestamp(value)
except (ValueError, TypeError):
return None
def strptime(string, fmt):
"""Parse a time string to datetime."""
try:
return datetime.strptime(string, fmt)
except (ValueError, AttributeError):
return string
def fail_when_undefined(value):
"""Filter to force a failure when the value is undefined."""
if isinstance(value, jinja2.Undefined):
value()
return value
def forgiving_float(value):
"""Try to convert value to a float."""
try:
return float(value)
except (ValueError, TypeError):
return value
@contextfilter
def random_every_time(context, values):
"""Choose a random value.
Unlike Jinja's random filter,
this is context-dependent to avoid caching the chosen value.
"""
return random.choice(values)
class TemplateEnvironment(ImmutableSandboxedEnvironment):
"""The Home Assistant template environment."""
def is_safe_callable(self, obj):
"""Test if callback is safe."""
return isinstance(obj, AllStates) or super().is_safe_callable(obj)
ENV = TemplateEnvironment()
ENV.filters['round'] = forgiving_round
ENV.filters['multiply'] = multiply
ENV.filters['log'] = logarithm
ENV.filters['timestamp_custom'] = timestamp_custom
ENV.filters['timestamp_local'] = timestamp_local
ENV.filters['timestamp_utc'] = timestamp_utc
ENV.filters['is_defined'] = fail_when_undefined
ENV.filters['max'] = max
ENV.filters['min'] = min
ENV.filters['random'] = random_every_time
ENV.globals['log'] = logarithm
ENV.globals['float'] = forgiving_float
ENV.globals['now'] = dt_util.now
ENV.globals['utcnow'] = dt_util.utcnow
ENV.globals['as_timestamp'] = forgiving_as_timestamp
ENV.globals['relative_time'] = dt_util.get_age
ENV.globals['strptime'] = strptime
|
apache-2.0
| 3,488,710,536,227,762,000 | 30.994516 | 79 | 0.602366 | false | 4.274792 | false | false | false |
garrettkatz/directional-fibers
|
dfibers/experiments/levy_opt/levy_opt.py
|
1
|
6952
|
"""
Measure global optimization performance of Levy function
"""
import sys, time
import numpy as np
import matplotlib.pyplot as pt
import multiprocessing as mp
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.logging_utilities as lu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
import dfibers.examples.levy as lv
from mpl_toolkits.mplot3d import Axes3D
def run_trial(args):
basename, sample, timeout = args
stop_time = time.clock() + timeout
logfile = open("%s_s%d.log"%(basename,sample),"w")
# Set up fiber arguments
np.random.seed()
v = 20*np.random.rand(2,1) - 10 # random point in domain
c = lv.f(v) # direction at that point
c = c + 0.1*np.random.randn(2,1) # perturb for more variability
fiber_kwargs = {
"f": lv.f,
"ef": lv.ef,
"Df": lv.Df,
"compute_step_amount": lambda trace: (0.0001, 0),
"v": v,
"c": c,
"stop_time": stop_time,
"terminate": lambda trace: (np.fabs(trace.x[:-1]) > 10).any(),
"max_solve_iterations": 2**5,
}
solve_start = time.clock()
# Run in one direction
solution = sv.fiber_solver(
logger=lu.Logger(logfile).plus_prefix("+: "),
**fiber_kwargs)
X1 = np.concatenate(solution["Fiber trace"].points, axis=1)
V1 = solution["Fixed points"]
z = solution["Fiber trace"].z_initial
# print("Status: %s\n"%solution["Fiber trace"].status)
# Run in other direction (negate initial tangent)
solution = sv.fiber_solver(
z= -z,
logger=lu.Logger(logfile).plus_prefix("-: "),
**fiber_kwargs)
X2 = np.concatenate(solution["Fiber trace"].points, axis=1)
V2 = solution["Fixed points"]
# print("Status: %s\n"%solution["Fiber trace"].status)
# Join fiber segments
fiber = np.concatenate((np.fliplr(X1), X2), axis=1)
# Union solutions
fxpts = fx.sanitize_points(
np.concatenate((V1, V2), axis=1),
f = lv.f,
ef = lv.ef,
Df = lv.Df,
duplicates = lambda V, v: (np.fabs(V - v) < 10**-6).all(axis=0),
)
# Save results
with open("%s_s%d.npz"%(basename,sample), 'w') as rf: np.savez(rf, **{
"fxpts": fxpts,
"fiber": fiber,
"runtime": time.clock() - solve_start })
logfile.close()
def run_experiment(basename, num_samples, timeout, num_procs=0):
pool_args = []
for sample in range(num_samples):
pool_args.append((basename, sample, timeout))
if num_procs > 0:
num_procs = min(num_procs, mp.cpu_count())
print("using %d processes..."%num_procs)
pool = mp.Pool(processes=num_procs)
pool.map(run_trial, pool_args)
pool.close()
pool.join()
else:
for pa in pool_args: run_trial(pa)
def compile_results(basename, num_samples):
L = []
F = []
runtimes = []
for sample in range(num_samples):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
Fs = np.fabs(lv.f(fxpts)).max(axis=0)
Ls = lv.levy(fxpts)
within = (np.fabs(fxpts) < 10).all(axis=0)
mean_within = Ls[within].mean() if within.any() else np.nan
print("sample %d: %d secs, %d solns, mean %f, mean within %f, min %f"%(
sample, data["runtime"], len(Ls), Ls.mean(), mean_within, Ls.min()))
L.append(Ls)
F.append(Fs)
runtimes.append(data["runtime"])
counts = np.array([len(Ls) for Ls in L])
bests = np.array([Ls.min() for Ls in L])
resids = np.array([Fs.max() for Fs in F])
runtimes = np.array(runtimes)
print("avg count = %d, avg best = %f, avg resid = %f, best best = %f"%(
counts.mean(), bests.mean(), resids.mean(), bests.min()))
return counts, bests, runtimes
def plot_results(basename, num_samples, counts, bests, runtimes, timeout):
### Optimization order stats
pt.figure(figsize=(5,4))
pt.subplot(2,1,1)
pt.plot(np.sort(bests), '-k.')
pt.xlabel("Ordered samples")
pt.ylabel("Best objective value")
##### Work complexity
pt.subplot(2,1,2)
terms = (runtimes < timeout)
pt.plot(runtimes[terms], bests[terms], 'k+', markerfacecolor='none')
pt.plot(runtimes[~terms], bests[~terms], 'ko', markerfacecolor='none')
pt.legend(["terminated","timed out"])
pt.xlabel("Runtime (seconds)")
pt.ylabel("Best objective value")
pt.tight_layout()
pt.show()
### Fiber visuals
pt.figure(figsize=(4,7))
# objective fun
X_surface, Y_surface = np.mgrid[-10:10:100j,-10:10:100j]
L = lv.levy(np.array([X_surface.flatten(), Y_surface.flatten()])).reshape(X_surface.shape)
ax_surface = pt.gcf().add_subplot(2,1,1,projection="3d")
ax_surface.plot_surface(X_surface, Y_surface, L, linewidth=0, antialiased=False, color='gray')
ax_surface.set_xlabel("v0")
ax_surface.set_ylabel("v1")
ax_surface.set_zlabel("levy(v)")
ax_surface.view_init(azim=-80, elev=20)
# fibers
ax = pt.gcf().add_subplot(2,1,2)
X_grid, Y_grid = np.mgrid[-10:10:60j,-10:10:60j]
XY = np.array([X_grid.flatten(), Y_grid.flatten()])
C_XY = lv.f(XY)
ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
scale=10,units='xy',angles='xy')
num_plot_samples = 3
sort_idx = np.argsort(bests)
plot_idx = [0] + list(np.random.permutation(num_samples)[:num_plot_samples-1])
samples = sort_idx[plot_idx]
# samples = [41,73,20] # all through global
# samples = [41, 97, 11] # two through global
# samples = [41, 49, 13] # two through global, one horiz not through
# samples = [41, 46, 70] # one through global, one horiz
# samples = [41, 96, 27] # two through global, one almost horiz
samples = [41, 63, 28] # two through global, all interesting
print("samples:")
print(samples)
for i,sample in enumerate(samples[::-1]):
with open("%s_s%d.npz"%(basename,sample), 'r') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
fiber = data["fiber"][:,::]
L = lv.levy(fxpts).min()
col = 0.5*float(num_plot_samples-i-1)/num_plot_samples
print(sample,col)
ax.plot(fiber[0],fiber[1],color=(col,col,col,1), linestyle='-', linewidth=1)
pt.plot(fxpts[0],fxpts[1], 'o', color=(col,col,col,1))
pt.xlabel("v0")
pt.ylabel("v1",rotation=0)
pt.yticks(np.linspace(-10,10,5))
pt.xlim([-10,10])
pt.ylim([-10,10])
pt.tight_layout()
pt.show()
if __name__ == "__main__":
basename = "levy_opt"
num_samples = 100
num_plot_samples = 3
timeout = 60*30
num_procs = 10
# run_experiment(basename, num_samples=num_samples, timeout=timeout, num_procs=num_procs)
counts, bests, runtimes = compile_results(basename, num_samples)
plot_results(basename, num_samples, counts, bests, runtimes, timeout)
|
mit
| 6,812,665,237,747,678,000 | 32.423077 | 98 | 0.597957 | false | 3.037134 | false | false | false |
pedro-aaron/stego-chi-2
|
embeddingRgb.py
|
1
|
2081
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Watermarkero, Mario, Ariel
"""
from PIL import Image
import random
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def marcarPixel(color, bitporinsertar):
if (color%2)==1:
if bitporinsertar==0:
color=color-1
elif (color%2)==0:
if bitporinsertar==1:
color=color+1
return color
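# For illustration: marcarPixel(200, 1) -> 201 and marcarPixel(201, 0) -> 200,
# i.e. the pixel value's least-significant bit is forced to the embedded bit.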
def plotLsbRgb(img):
fig, (ax1, ax2) = plt.subplots(2, 1)
ax1.set_title('Imagen RGB')
ax1.imshow(img)
ax2.set_title('LSB RGB')
img=255*(img%2)
ax2.imshow(img)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10,
right=0.95, hspace=0.3,wspace=0.35)
# original image
path="img3.jpg"
imgOriginal = np.array(Image.open(path))
nFilas, nCols, nCanales = imgOriginal.shape
# watermark
key=41196
random.seed(key)
porcentajeDeimagenPorMarcar=50
sizeMarca = nCols*int(porcentajeDeimagenPorMarcar*(nFilas/100))
#marca = [random.randint(0,1) for i in range(sizeMarca)]
plotLsbRgb(imgOriginal)
# watermark embedding process
imgMarcada = imgOriginal.copy()
cont = 1  # counter of the number of embedded bits
# Embedding loop
for fila in range(0,nFilas):
for columna in range(0,nCols):
pixel=imgOriginal[fila,columna]
newPixel = [marcarPixel(
pixel[0],random.randint(0,1)),
marcarPixel(pixel[1],random.randint(0,1)),
marcarPixel(pixel[2],random.randint(0,1))]
imgMarcada[fila,columna] = newPixel
if cont >= sizeMarca:
break
cont = cont +1
if cont >= sizeMarca:
break
plotLsbRgb(imgMarcada)
image = Image.fromarray(imgMarcada, 'RGB')
image.save('ImagenMarcada.bmp')
print('Porciento de la imagen marcada: ' + str(porcentajeDeimagenPorMarcar)+'%')
print('bits incrustados: ' + str(sizeMarca*3))
print('Bytes incrustados: ' + str(sizeMarca*3/8))
print('KiloBytes incrustados: ' + str(sizeMarca*3/8/1024))
print('MegaBytes incrustados: ' + str(sizeMarca*3/8/1024/1024))
|
mit
| 1,626,415,533,977,255,400 | 28.309859 | 80 | 0.658818 | false | 2.71671 | false | false | false |
akshaykr/oracle_cb
|
RegretExp.py
|
1
|
6615
|
import numpy as np
import sklearn.linear_model
import sklearn.tree
import Simulators, Logger, Evaluators, Semibandits, Metrics
import warnings
import argparse
import pickle
import sys
class RegretExp(object):
def __init__(self, weight=None, link="linear", K=10, L=5, T=1000, dataset="synth", feat_noise=0.25, reward_noise=1.0, policies="finite", structure='none'):
self.T = T
self.K = K
self.L = L
if weight == None:
weight = np.arange(1,self.L+1)
self.weight = weight
self.link = link
self.feat_noise = feat_noise
self.reward_noise = reward_noise
self.dataset = dataset
self.policies = policies
self.structure = structure
if self.dataset == "synth":
print("----Generating Semibandit Simulator----")
self.Sim = Simulators.OrderedSBSim(100,100,self.K,
self.L,self.feat_noise,
w_vec=self.weight,
link=self.link,
one_pass=False)
print("----Done----")
elif self.dataset == "mq2007":
print("----Generating MQ2007 Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='mq2007',
metric=Metrics.NavigationalTTS,
## metric=Metrics.NDCG,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./mq2007_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
elif self.dataset == "mq2008":
print("----Generating MQ2008 Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='mq2008',
metric=Metrics.NavigationalTTS,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./mq2008_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
elif self.dataset == 'yahoo':
print("----Generating Yahoo Simulator----")
self.Sim = Simulators.DatasetBandit(self.L,loop=True,
dataset='yahoo',
## metric=Metrics.NDCG,
metric=Metrics.NavigationalTTS,
structure=self.structure)
if self.policies == "finite":
trees = pickle.load(open('./yahoo_trees.pkl', 'rb'))
self.Sim.set_policies(trees)
print("----Done----")
else:
print("Error invalid dataset")
sys.exit(1)
def run_alg(self, Alg, params={}):
A = Alg(self.Sim)
(reward, regret) = A.play(self.T,params=params,verbose=False)
return (reward, regret)
if __name__=='__main__':
warnings.simplefilter("ignore")
parser = argparse.ArgumentParser()
parser.add_argument('--T', action='store',
default=1000,
help='number of rounds')
parser.add_argument('--link', action='store', choices=['linear', 'logistic'], default='linear')
parser.add_argument('--dataset', action='store', choices=['synth','mq2007','mq2008', 'yahoo'])
parser.add_argument('--policies', action='store', choices=['finite', 'tree', 'linear'], default='linear')
parser.add_argument('--K', action='store', default=10)
parser.add_argument('--L', action='store', default=5)
parser.add_argument('--structure', action='store', default='none', choices=['none','cluster'])
Args = parser.parse_args(sys.argv[1:])
print(Args)
Args.T = int(Args.T)
Args.K = int(Args.K)
Args.L = int(Args.L)
weight = np.arange(1,Args.L+1)[::-1] ## np.arange(1,Args.L+1,1)[::-1] ## /np.sum(np.arange(1,Args.L+1))
Algs = {
## 'EELS': Semibandits.EELS,
'EELS2': Semibandits.EELS2,
## 'Eps': Semibandits.EpsGreedy,
'EpsOracle': Semibandits.EpsGreedy,
## 'Random': Semibandits.Semibandit
}
Params = {
'EELS': {
'link': Args.link,
},
'EELS2': {
'link': Args.link,
},
'Eps': {
'reward': True,
},
'EpsOracle': {
'reward': False,
'weight': weight,
'link': Args.link
},
'Random': {}
}
if Args.dataset != "synth" and Args.policies == 'tree':
Params['EELS']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['EELS2']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['Eps']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
Params['EpsOracle']['learning_alg'] = sklearn.tree.DecisionTreeRegressor
if Args.dataset != "synth" and Args.policies == 'linear':
Params['EELS']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['EELS2']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['Eps']['learning_alg'] = sklearn.linear_model.LinearRegression
Params['EpsOracle']['learning_alg'] = sklearn.linear_model.LinearRegression
Out = {
'EELS': [],
'EELS_regret': [],
'EELS2': [],
'EELS2_regret': [],
'Eps': [],
'Eps_regret': [],
'EpsOracle': [],
'EpsOracle_regret': [],
'Random': [],
'Random_regret': []
}
Exp = RegretExp(weight = weight, link=Args.link, K=Args.K, L=Args.L, T=Args.T, dataset=Args.dataset, policies=Args.policies,structure=Args.structure)
for i in range(10):
print('----Iter %d----' % (i))
for (k,v) in Algs.items():
print('----Running %s with params %s----' % (k, Params[k]))
(reward, regret) = Exp.run_alg(v, params=Params[k])
Out[k].append(reward)
Out[k+"_regret"].append(regret)
print('%s final: %0.3f' % (k, reward[-1]))
pickle.dump(Out, open("./data/%s_%s_%s_link=%s_T=%d_K=%d_L=%d.pkl" %(Args.dataset, Args.policies, Args.structure, Args.link, Args.T, Args.K, Args.L), "wb"))
|
mit
| 1,477,917,713,475,098,000 | 41.133758 | 160 | 0.503099 | false | 3.891176 | false | false | false |
eallik/spinoff
|
spinoff/util/logging/logging.py
|
1
|
7740
|
# coding: utf8
from __future__ import print_function, absolute_import
import datetime
import inspect
import re
import sys
import time
import traceback
import types
import os
import multiprocessing
from collections import defaultdict
from spinoff.util.python import dump_method_call
try:
import colorama
except ImportError:
colorama = None
WIN32 = sys.platform == 'win32'
_lock = multiprocessing.Lock()
if WIN32:
from .win32fix import fix_unicode_on_win32
fix_unicode_on_win32()
if colorama:
import colorama.initialise
# colorama remembers those at import time, so we need to set them again after our unicode fix
colorama.initialise.orig_stdout = sys.stdout
colorama.initialise.orig_stderr = sys.stderr
# colorama doesn't touch stuff that is not .isatty()==True for some reason
try:
sys.stdout.isatty = sys.stderr.isatty = lambda: True
except AttributeError:
pass
# see also: http://code.google.com/p/colorama/issues/detail?id=41
colorama.init()
else:
print("Colored log output disabled on WIN32; easy_install colorama to enable")
if not WIN32 or colorama:
BLUE = '\x1b[1;34m'
CYAN = '\x1b[1;36m'
GREEN = '\x1b[1;32m'
RED = '\x1b[1;31m'
DARK_RED = '\x1b[0;31m'
RESET_COLOR = '\x1b[0m'
YELLOW = '\x1b[1;33m'
BLINK = '\x1b[5;31m'
else:
BLUE = ''
CYAN = ''
GREEN = ''
RED = ''
DARK_RED = ''
RESET_COLOR = ''
YELLOW = ''
BLINK = ''
OUTFILE = sys.stderr
LEVEL = 0
ENABLE_ONLY = False
LEVELS = [
('dbg', GREEN),
('log', GREEN),
('log', GREEN),
('log', GREEN),
('log', GREEN),
('fail', YELLOW),
('flaw', YELLOW),
('err', RED),
('err', RED),
('panic', BLINK + RED),
('fatal', BLINK + RED),
]
LEVELS = [(name.ljust(5), style) for name, style in LEVELS]
def dbg(*args, **kwargs):
_write(0, *args, **kwargs)
def dbg_call(fn, *args, **kwargs):
t0 = time.time()
ret = fn(*args, **kwargs)
t1 = time.time()
_write(0, "%sms for %s => %r" % (round((t1 - t0) * 1000), dump_method_call(fn.__name__, args, kwargs), ret))
return ret
def dbg1(*args, **kwargs):
_write(0, end='', *args, **kwargs)
# def dbg2(*args, **kwargs):
# _write(0, end='.', *args, **kwargs)
def dbg3(*args, **kwargs):
_write(0, end='\n', *args, **kwargs)
def log(*args, **kwargs):
_write(1, *args, **kwargs)
def fail(*args, **kwargs):
_write(5, *args, **kwargs)
def flaw(*args, **kwargs):
"""Logs a failure that is more important to the developer than a regular failure because there might be a static
programming flaw in the code as opposed to a state/conflict/interaction induced one.
"""
_write(6, *args, **kwargs)
def err(*args, **kwargs):
_write(7, *((RED,) + args + (RESET_COLOR,)), **kwargs)
def panic(*args, **kwargs):
_write(9, *((RED,) + args + (RESET_COLOR,)), **kwargs)
def fatal(*args, **kwargs):
_write(10, *((RED,) + args + (RESET_COLOR,)), **kwargs)
_pending_end = defaultdict(bool)
_logstrings = {}
def get_calling_context(frame):
caller = frame.f_locals.get('self', frame.f_locals.get('cls', None))
f_code = frame.f_code
file, lineno, caller_name = f_code.co_filename, frame.f_lineno, f_code.co_name
file = file.rsplit('/', 1)[-1]
return file, lineno, caller_name, caller
def _write(level, *args, **kwargs):
_lock.acquire()
try:
if level >= LEVEL:
frame = sys._getframe(2)
file, lineno, caller_name, caller = get_calling_context(frame)
if caller:
caller_module = caller.__module__
cls_name = caller.__name__ if isinstance(caller, type) else type(caller).__name__
caller_full_path = '%s.%s' % (caller_module, cls_name)
else:
# TODO: find a faster way to get the module than inspect.getmodule
caller = inspect.getmodule(frame)
if caller:
caller_full_path = caller_module = caller.__name__
else:
caller_full_path = caller_module = '' # .pyc
if ENABLE_ONLY and not any(re.match(x, caller_full_path) for x in ENABLE_ONLY):
return
caller_fn = getattr(caller, caller_name, None)
logstring = getattr(caller_fn, '_r_logstring', None) if caller_fn else None
if not logstring:
# TODO: add logstring "inheritance"
logstring = getattr(caller_fn, '_logstring', None)
if logstring:
if isinstance(logstring, unicode):
logstring = logstring.encode('utf8')
else:
logstring = caller_name + (':' if args else '')
logstring = YELLOW + logstring + RESET_COLOR
# cache it
if isinstance(caller_fn, types.MethodType):
caller_fn.im_func._r_logstring = logstring
elif caller_fn:
caller_fn._r_logstring = logstring
logname = getattr(caller, '_r_logname', None) if caller else ''
if logname is None:
logname = CYAN + get_logname(caller) + RESET_COLOR
if not hasattr(caller, '__slots__'):
caller._r_logname = logname
statestr = GREEN + ' '.join(k for k, v in get_logstate(caller).items() if v) + RESET_COLOR
comment = get_logcomment(caller)
file = os.path.split(file)[-1]
loc = "%s:%s" % (file, lineno)
if level >= 9: # blink for panics
loc = BLINK + loc + RESET_COLOR
levelname = LEVELS[level][1] + LEVELS[level][0] + RESET_COLOR
dump_parent_caller = kwargs.pop('caller', False)
# args = tuple(x.encode('utf-8') for x in args if isinstance(x, unicode))
print(("%s %s %s %s %s %s in %s" %
(datetime.datetime.strftime(datetime.datetime.utcfromtimestamp(time.time() - time.timezone), "%X.%f"), os.getpid(), levelname, loc, logname, statestr, logstring)),
file=OUTFILE, *(args + (comment,)))
if dump_parent_caller:
parent_frame = frame
for i in range(dump_parent_caller):
parent_frame = parent_frame.f_back
if not parent_frame:
break
file_, lineno, caller_name, caller = get_calling_context(parent_frame)
loc = "%s:%s" % (file_, lineno)
print(" " * (i + 1) + "(invoked by) %s %s %s" % (get_logname(caller), caller_name, loc), file=OUTFILE)
except Exception:
# from nose.tools import set_trace; set_trace()
print(RED, "!!%d: (logger failure)" % (level,), file=sys.stderr, *args, **kwargs)
print(RED, "...while trying to log", repr(args), repr(comment) if 'comment' in locals() else '')
print(traceback.format_exc(), RESET_COLOR, file=sys.stderr)
finally:
_lock.release()
def get_logname(obj):
return (obj.__name__
if isinstance(obj, type) else
repr(obj).strip('<>')
if not isinstance(obj, types.ModuleType) else
obj.__name__)
def get_logstate(obj):
try:
return obj.logstate()
except AttributeError:
return {}
def get_logcomment(obj):
try:
x = obj.logcomment
except AttributeError:
return ''
else:
return ' ' + x()
def logstring(logstr):
def dec(fn):
fn._logstring = logstr
return fn
return dec
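# A usage sketch (hypothetical method), assuming the decorator above:
#     @logstring(u"processing item:")
#     def handle(self, item):
#         dbg(item)   # logged with "processing item:" as the log string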
|
bsd-2-clause
| -4,722,331,085,763,889,000 | 27.043478 | 181 | 0.555297 | false | 3.537477 | false | false | false |
hsghost/ntm
|
ntm/sentiment/sa.py
|
1
|
1863
|
#!/usr/bin/python
import nltk
import nltk.data
from nltk.util import ngrams
from nltk.tokenize import word_tokenize
import MySQLdb
mHost = "10.240.119.20"
mUser = "root"
mPasswd = "cis700fall2014"
mDb = "cis700"
mCharset = "utf8"
conn = MySQLdb.connect(host=mHost,user=mUser,passwd=mPasswd,db=mDb,charset=mCharset)
cur = conn.cursor()
classifier = nltk.data.load("classifiers/movie_reviews_NaiveBayes.pickle")
def sa_text (raw_text):
dtext = raw_text.decode('utf-8')
text = word_tokenize(dtext)
feats = dict([(word, True) for word in text + list(ngrams(text, 2))])
return classifier.classify(feats)
# @param tweet_id
# @return sentiment towords it
def sa_by_tweet_id (tweet_id):
cur.execute("select content from tweets where id=%s", tweet_id)
res = cur.fetchall()
if len(res) == 0:
return "nul"
tweet_text = res[0]
return sa_text(tweet_text[0])
def get_uid (tweet_id):
cur.execute("select user from tweets where id=%s", tweet_id)
res = cur.fetchall()
if len(res) == 0:
return "nul"
return res[0]
def sa_on_word (word):
cur.execute("select id from tweets_newterm where word=%s", word)
res = cur.fetchall()
pos = []
neg = []
for tid in res:
sent = sa_by_tweet_id(tid)
uid = get_uid(tid)
if sent == "pos":
pos += uid
elif sent == "neg":
neg += uid
ret = [word, pos, neg]
return ret
# main entry
# get top 'num' of new term and do SA
# @para num
# @return list[word, pos, neg]
def sa_main(num = 20):
cur.execute("select word,freq from newterm where count>10 and analyzed_time=0 order by freq DESC limit %s", num)
res = cur.fetchall()
sa = []
for r in res:
sow=sa_on_word(r[0])
sow.append(r[1])
sa.append(sow)
print sa
return sa
# print sa_main(10)
|
mit
| -4,181,793,751,906,841,000 | 21.719512 | 116 | 0.618894 | false | 2.976038 | false | false | false |
mementum/backtrader
|
samples/observer-benchmark/observer-benchmark.py
|
1
|
7280
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015-2020 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import datetime
import random
import backtrader as bt
class St(bt.Strategy):
params = (
('period', 10),
('printout', False),
('stake', 1000),
)
def __init__(self):
sma = bt.indicators.SMA(self.data, period=self.p.period)
self.crossover = bt.indicators.CrossOver(self.data, sma)
def start(self):
if self.p.printout:
txtfields = list()
txtfields.append('Len')
txtfields.append('Datetime')
txtfields.append('Open')
txtfields.append('High')
txtfields.append('Low')
txtfields.append('Close')
txtfields.append('Volume')
txtfields.append('OpenInterest')
print(','.join(txtfields))
def next(self):
if self.p.printout:
# Print only 1st data ... is just a check that things are running
txtfields = list()
txtfields.append('%04d' % len(self))
txtfields.append(self.data.datetime.datetime(0).isoformat())
txtfields.append('%.2f' % self.data0.open[0])
txtfields.append('%.2f' % self.data0.high[0])
txtfields.append('%.2f' % self.data0.low[0])
txtfields.append('%.2f' % self.data0.close[0])
txtfields.append('%.2f' % self.data0.volume[0])
txtfields.append('%.2f' % self.data0.openinterest[0])
print(','.join(txtfields))
if self.position:
if self.crossover < 0.0:
if self.p.printout:
                    print('CLOSE {} @%{}'.format(self.position.size,
                                                 self.data.close[0]))
self.close()
else:
if self.crossover > 0.0:
self.buy(size=self.p.stake)
if self.p.printout:
print('BUY {} @%{}'.format(self.p.stake,
self.data.close[0]))
TIMEFRAMES = {
None: None,
'days': bt.TimeFrame.Days,
'weeks': bt.TimeFrame.Weeks,
'months': bt.TimeFrame.Months,
'years': bt.TimeFrame.Years,
'notimeframe': bt.TimeFrame.NoTimeFrame,
}
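# Illustrative note (not part of the original sample): typical invocations are
#   python observer-benchmark.py --benchdata1 --timeframe weeks --plot
#   python observer-benchmark.py --timereturn --timeframe years
# where data paths and dates fall back to the defaults declared in parse_args().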
def runstrat(args=None):
args = parse_args(args)
cerebro = bt.Cerebro()
cerebro.broker.set_cash(args.cash)
dkwargs = dict()
if args.fromdate:
fromdate = datetime.datetime.strptime(args.fromdate, '%Y-%m-%d')
dkwargs['fromdate'] = fromdate
if args.todate:
todate = datetime.datetime.strptime(args.todate, '%Y-%m-%d')
dkwargs['todate'] = todate
data0 = bt.feeds.YahooFinanceCSVData(dataname=args.data0, **dkwargs)
cerebro.adddata(data0, name='Data0')
cerebro.addstrategy(St,
period=args.period,
stake=args.stake,
printout=args.printout)
if args.timereturn:
cerebro.addobserver(bt.observers.TimeReturn,
timeframe=TIMEFRAMES[args.timeframe])
else:
benchdata = data0
if args.benchdata1:
data1 = bt.feeds.YahooFinanceCSVData(dataname=args.data1, **dkwargs)
cerebro.adddata(data1, name='Data1')
benchdata = data1
cerebro.addobserver(bt.observers.Benchmark,
data=benchdata,
timeframe=TIMEFRAMES[args.timeframe])
cerebro.run()
if args.plot:
pkwargs = dict()
if args.plot is not True: # evals to True but is not True
pkwargs = eval('dict(' + args.plot + ')') # args were passed
cerebro.plot(**pkwargs)
def parse_args(pargs=None):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Benchmark/TimeReturn Observers Sample')
parser.add_argument('--data0', required=False,
default='../../datas/yhoo-1996-2015.txt',
help='Data0 to be read in')
parser.add_argument('--data1', required=False,
default='../../datas/orcl-1995-2014.txt',
help='Data1 to be read in')
parser.add_argument('--benchdata1', required=False, action='store_true',
help=('Benchmark against data1'))
parser.add_argument('--fromdate', required=False,
default='2005-01-01',
help='Starting date in YYYY-MM-DD format')
parser.add_argument('--todate', required=False,
default='2006-12-31',
help='Ending date in YYYY-MM-DD format')
parser.add_argument('--printout', required=False, action='store_true',
help=('Print data lines'))
parser.add_argument('--cash', required=False, action='store',
type=float, default=50000,
help=('Cash to start with'))
parser.add_argument('--period', required=False, action='store',
type=int, default=30,
help=('Period for the crossover moving average'))
parser.add_argument('--stake', required=False, action='store',
type=int, default=1000,
help=('Stake to apply for the buy operations'))
parser.add_argument('--timereturn', required=False, action='store_true',
default=None,
help=('Use TimeReturn observer instead of Benchmark'))
parser.add_argument('--timeframe', required=False, action='store',
default=None, choices=TIMEFRAMES.keys(),
help=('TimeFrame to apply to the Observer'))
# Plot options
parser.add_argument('--plot', '-p', nargs='?', required=False,
metavar='kwargs', const=True,
help=('Plot the read data applying any kwargs passed\n'
'\n'
'For example:\n'
'\n'
' --plot style="candle" (to plot candles)\n'))
if pargs:
return parser.parse_args(pargs)
return parser.parse_args()
if __name__ == '__main__':
runstrat()
|
gpl-3.0
| 5,036,608,712,308,105,000 | 34.339806 | 80 | 0.542308 | false | 4.067039 | false | false | false |
ecreall/nova-ideo
|
novaideo/content/processes/novaideo_abstract_process/behaviors.py
|
1
|
8566
|
# -*- coding: utf8 -*-
# Copyright (c) 2014 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# licence: AGPL
# author: Amen Souissi
import pytz
import datetime
from pyramid.httpexceptions import HTTPFound
from persistent.list import PersistentList
from dace.objectofcollaboration.principal import User
from dace.objectofcollaboration.principal.util import (
has_role,
get_current)
from dace.processinstance.activity import InfiniteCardinality
from ..user_management.behaviors import global_user_processsecurity
from novaideo.content.interface import (
INovaIdeoApplication, ISearchableEntity,
IEmojiable)
from novaideo import _, nothing
from novaideo.utilities.util import update_ajax_action
def select_roles_validation(process, context):
return has_role(role=('Member',))
def select_processsecurity_validation(process, context):
user = get_current()
return user is not context and \
context not in getattr(user, 'selections', []) and \
global_user_processsecurity()
def select_state_validation(process, context):
return context.is_published
class SelectEntity(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'communication-action'
style_interaction = 'ajax-action'
style_interaction_type = 'direct'
style_picto = 'glyphicon glyphicon-star-empty'
style_order = 100
isSequential = False
context = ISearchableEntity
roles_validation = select_roles_validation
processsecurity_validation = select_processsecurity_validation
state_validation = select_state_validation
def get_title(self, context, request, nb_only=False):
len_selections = getattr(context, 'len_selections', 0)
if nb_only:
return str(len_selections)
return _("${title} (${number})",
mapping={'number': len_selections,
'title': request.localizer.translate(self.title)})
def start(self, context, request, appstruct, **kw):
user = get_current()
user.addtoproperty('selections', context)
if not isinstance(context, User):
channel = getattr(context, 'channel', None)
if channel and user not in channel.members:
channel.addtoproperty('members', user)
user.reindex()
context.reindex()
return {}
def redirect(self, context, request, **kw):
return nothing
def selecta_roles_validation(process, context):
return has_role(role=('Anonymous',), ignore_superiors=True)
def selecta_processsecurity_validation(process, context):
return True
class SelectEntityAnonymous(SelectEntity):
roles_validation = selecta_roles_validation
processsecurity_validation = selecta_processsecurity_validation
style_interaction = 'ajax-action'
style_interaction_type = 'popover'
behavior_id = 'select_anonymous'
def start(self, context, request, appstruct, **kw):
return {}
def deselect_roles_validation(process, context):
return has_role(role=('Member',))
def deselect_processsecurity_validation(process, context):
user = get_current()
return (context in getattr(user, 'selections', [])) and \
global_user_processsecurity()
class DeselectEntity(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'communication-action'
style_interaction = 'ajax-action'
style_interaction_type = 'direct'
style_picto = 'glyphicon glyphicon-star'
style_order = 101
isSequential = False
context = ISearchableEntity
roles_validation = deselect_roles_validation
processsecurity_validation = deselect_processsecurity_validation
state_validation = select_state_validation
def get_title(self, context, request, nb_only=False):
len_selections = getattr(context, 'len_selections', 0)
if nb_only:
return str(len_selections)
return _("${title} (${number})",
mapping={'number': len_selections,
'title': request.localizer.translate(self.title)})
def start(self, context, request, appstruct, **kw):
user = get_current()
user.delfromproperty('selections', context)
if not isinstance(context, User):
channel = getattr(context, 'channel', None)
if channel:
channel.delfromproperty('members', user)
user.reindex()
context.reindex()
return {}
def redirect(self, context, request, **kw):
return nothing
def addr_roles_validation(process, context):
return has_role(role=('Member',))
def addr_state_validation(process, context):
return 'published' in context.state
def addr_processsecurity_validation(process, context):
security = global_user_processsecurity()
if security:
can_add_reaction = False
if hasattr(context, 'can_add_reaction'):
can_add_reaction = context.can_add_reaction(get_current(), process)
return can_add_reaction
return False
class AddReaction(InfiniteCardinality):
style = 'button' #TODO add style abstract class
style_descriminator = 'communication-body-action'
style_interaction = 'ajax-action'
style_interaction_type = 'popover'
style_picto = 'novaideo-icon icon-add-emoji'
template = 'novaideo:views/templates/actions/add_reaction_idea.pt'
context = IEmojiable
roles_validation = addr_roles_validation
state_validation = addr_state_validation
processsecurity_validation = addr_processsecurity_validation
def get_update_action(self, context, request):
actions_data = update_ajax_action(
context, request, self.process_id, 'updatereaction')
if actions_data and actions_data[0]:
return actions_data[0][0]
return None
def start(self, context, request, appstruct, **kw):
reaction = appstruct.get('reaction', None)
context.add_emoji(reaction, get_current(request))
return {}
def redirect(self, context, request, **kw):
return nothing
class UpdateReaction(AddReaction):
style = 'button' #TODO add style abstract class
style_descriminator = 'controled-action'
style_interaction = 'ajax-action'
style_interaction_type = 'direct'
style_picto = 'none'
template = None
def get_title(self, selected):
return selected and _('Remove my reaction') or _('Add a reaction')
def deadline_roles_validation(process, context):
return has_role(role=('Examiner', ))
def adddeadline_processsecurity_validation(process, context):
return getattr(context, 'content_to_examine', []) and\
datetime.datetime.now(tz=pytz.UTC) >= \
context.deadlines[-1].replace(tzinfo=pytz.UTC) and \
global_user_processsecurity()
class AddDeadLine(InfiniteCardinality):
style_descriminator = 'admin-action'
style_picto = 'glyphicon glyphicon-time'
style_order = 9
submission_title = _('Save')
isSequential = False
context = INovaIdeoApplication
roles_validation = deadline_roles_validation
processsecurity_validation = adddeadline_processsecurity_validation
def start(self, context, request, appstruct, **kw):
if hasattr(context, 'deadlines'):
context.deadlines.append(appstruct['deadline'])
else:
context.deadlines = PersistentList([appstruct['deadline']])
return {}
def redirect(self, context, request, **kw):
return HTTPFound(request.resource_url(context))
def editdeadline_processsecurity_validation(process, context):
return getattr(context, 'content_to_examine', []) and\
global_user_processsecurity() and \
getattr(context, 'deadlines', [])
class EditDeadLine(InfiniteCardinality):
style_descriminator = 'admin-action'
style_picto = 'glyphicon glyphicon-time'
style_order = 9
submission_title = _('Save')
isSequential = False
context = INovaIdeoApplication
roles_validation = deadline_roles_validation
processsecurity_validation = editdeadline_processsecurity_validation
def start(self, context, request, appstruct, **kw):
current = context.deadlines[-1]
context.deadlines.remove(current)
context.deadlines.append(appstruct['deadline'])
return {}
def redirect(self, context, request, **kw):
return HTTPFound(request.resource_url(context))
#TODO behaviors
|
agpl-3.0
| -7,422,076,832,301,350,000 | 31.082397 | 79 | 0.681882 | false | 4.040566 | false | false | false |
edwardekstrom/BZRflag
|
bzagents/pf_agent.py
|
1
|
10127
|
#!/usr/bin/python -tt
# An incredibly simple agent. All we do is find the closest enemy tank, drive
# towards it, and shoot. Note that if friendly fire is allowed, you will very
# often kill your own tanks with this code.
#################################################################
# NOTE TO STUDENTS
# This is a starting point for you. You will need to greatly
# modify this code if you want to do anything useful. But this
# should help you to know how to interact with BZRC in order to
# get the information you need.
#
# After starting the bzrflag server, this is one way to start
# this code:
# python agent0.py [hostname] [port]
#
# Often this translates to something like the following (with the
# port name being printed out by the bzrflag server):
# python agent0.py localhost 49857
#################################################################
import sys
import math
import time
from bzrc import BZRC, Command
from pFields import PField
class PFAgent(object):
"""Class handles all command and control logic for a teams tanks."""
def __init__(self, bzrc):
self.bzrc = bzrc
self.constants = self.bzrc.get_constants()
self.commands = []
self.potentialFields = []
self.flag_sphere = 400
self.obstacle_sphere = 1000
self.enemy_sphere = 100
self.obstacles = bzrc.get_obstacles()
self.obstacle_centers = []
for ob in self.obstacles:
totalX = 0
totalY = 0
for corner in ob:
totalX += corner[0]
totalY += corner[1]
averageX = totalX / len(ob)
averageY = totalY / len(ob)
for corner in ob:
if self.dist(averageX,averageY,corner[0],corner[1]) > self.obstacle_sphere:
self.obstacle_sphere = self.dist(averageX,averageY,corner[0],corner[1])
# print self.obstacle_sphere
tup = (averageX,averageY)
self.obstacle_centers.append(tup)
# print ""
# for o in self.bzrc.get_obstacles():
# print o
# print ""
def tick(self, time_diff):
"""Some time has passed; decide what to do next."""
# print
mytanks, othertanks, flags, shots = self.bzrc.get_lots_o_stuff()
self.mytanks = mytanks
self.othertanks = othertanks
self.flags = flags
self.shots = shots
self.enemies = [tank for tank in othertanks if tank.color !=
self.constants['team']]
self.commands = []
for tank in mytanks:
if tank.status != 'dead':
pfo = None
obstacle_x, obstacle_y, d = self.closest_obstacle(tank)
if d < self.obstacle_sphere:
# print str(d)
pfo = PField(obstacle_x, obstacle_y, 0, self.obstacle_sphere, 'tangent')
pfe = None
enemy_x, enemy_y, enemy_dist = self.closest_enemy(tank, self.enemies)
if enemy_dist < self.enemy_sphere:
#print enemy_dist
pfe = PField(enemy_x, enemy_y, 0, self.enemy_sphere, 'repel')
# if flag possession, then put a pf on the home_base
pf = None
if(tank.flag == '-'):
best_flag = self.choose_best_flag(tank)
pf = PField(best_flag.x, best_flag.y, 0, self.flag_sphere, 'attract')
# if not possessed, then put a pf on a flag
else:
home_base_x, home_base_y = self.find_home_base(tank)
pf = PField(home_base_x, home_base_y, 0, self.flag_sphere, 'attract')
self.pf_move(tank, pf, pfo, pfe)
#for tank in mytanks:
#self.attack_enemies(tank)
#for tank in mytanks:
#self.run_to_flag(tank)
results = self.bzrc.do_commands(self.commands)
def pf_move(self, tank, pf, pfo, pfe):
final_angle = 0
if pfo != None:
# print 'pfo != None'
#print self.constants['team'] + " tank: %d = pfo" % tank.index
speedmod, angle = pfo.calc_vector(tank.x, tank.y)
elif pfe != None:
# print 'pfe ! = None'
#print self.constants['team'] + " tank: %d = pfe" % tank.index
speedmod, angle = pfe.calc_vector(tank.x, tank.y)
else:
# print 'else'
#print self.constants['team'] + " tank: %d = pf" % tank.index
speedmod, angle = pf.calc_vector(tank.x, tank.y)
angle = self.normalize_angle(angle - tank.angle)
if final_angle == 0:
final_angle = angle
else:
final_angle = (float(final_angle) + float(angle)) / 2.0
# current_tank_speed = math.sqrt(float(tank.vx**2) + float(tank.vy**2))
# print current_tank_speed
#command = Command(tank.index, speedmod * current_tank_speed, 2 * final_angle, True)
command = Command(tank.index, speedmod, 2 * final_angle, True)
self.commands.append(command)
def closest_obstacle(self, tank):
closest_x = (2 * float(self.constants['worldsize']))**2
closest_y = (2 * float(self.constants['worldsize']))**2
best_d = (2 * float(self.constants['worldsize']))**2
# obstacles = self.bzrc.get_obstacles()
for o in self.obstacle_centers:
x,y = o
d = self.dist(x, y, tank.x, tank.y)
if d < best_d:
best_d = d
closest_x = x
closest_y = y
return (closest_x, closest_y, best_d)
def closest_enemy(self, tank, enemies):
closest_x = (2 * float(self.constants['worldsize']))**2
closest_y = (2 * float(self.constants['worldsize']))**2
best_d = (2 * float(self.constants['worldsize']))**2
for e in enemies:
d = self.dist(e.x, e.y, tank.x, tank.y)
if d < best_d:
best_d = d
closest_x = e.x
closest_y = e.y
return (closest_x, closest_y, best_d)
    def dist(self, x1, y1, x2, y2):
        # Note: this is the *squared* Euclidean distance (no sqrt is taken).
        return (x1 - x2)**2 + (y1 - y2)**2
def find_home_base(self, tank):
bases = self.bzrc.get_bases()
for base in bases:
if base.color == self.constants['team']:
xdist = abs(base.corner1_x - base.corner3_x) / 2.0
ydist = abs(base.corner1_y - base.corner3_y) / 2.0
base_x = max(base.corner1_x, base.corner3_x) - (xdist/2.0)
base_y = max(base.corner1_y, base.corner3_y) - (ydist/2.0)
return (base_x, base_y)
def choose_best_flag(self, tank):
best_flag = None
best_flag_dist = 2 * float(self.constants['worldsize'])
for f in self.flags:
# print str(len(self.flags))
if f.color != self.constants['team'] and f.poss_color != self.constants['team']:
dist = math.sqrt((f.x - tank.x)**2 + (f.y - tank.y)**2)
if dist < best_flag_dist:
best_flag_dist = dist
best_flag = f
if best_flag is None:
return self.flags[0]
else:
return best_flag
# return self.flags[2]
def run_to_flag(self, tank):
best_flag = None
best_flag_dist = 2 * float(self.constants['worldsize'])
for f in self.flags:
if f.color != self.constants['team']:
dist = math.sqrt((f.x - tank.x)**2 + (f.y - tank.y)**2)
if dist < best_flag_dist:
best_flag_dist = dist
best_flag = f
if best_flag is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_flag.x, best_flag.y)
def attack_enemies(self, tank):
"""Find the closest enemy and chase it, shooting as you go."""
best_enemy = None
best_dist = 2 * float(self.constants['worldsize'])
for enemy in self.enemies:
if enemy.status != 'alive':
continue
dist = math.sqrt((enemy.x - tank.x)**2 + (enemy.y - tank.y)**2)
if dist < best_dist:
best_dist = dist
best_enemy = enemy
if best_enemy is None:
command = Command(tank.index, 0, 0, False)
self.commands.append(command)
else:
self.move_to_position(tank, best_enemy.x, best_enemy.y)
def move_to_position(self, tank, target_x, target_y):
"""Set command to move to given coordinates."""
target_angle = math.atan2(target_y - tank.y,
target_x - tank.x)
relative_angle = self.normalize_angle(target_angle - tank.angle)
# index, speed, angvel, shoot
command = Command(tank.index, 1, 2 * relative_angle, False)
self.commands.append(command)
def normalize_angle(self, angle):
"""Make any angle be between +/- pi."""
angle -= 2 * math.pi * int (angle / (2 * math.pi))
if angle <= -math.pi:
angle += 2 * math.pi
elif angle > math.pi:
angle -= 2 * math.pi
return angle
def main():
# Process CLI arguments.
try:
execname, host, port = sys.argv
except ValueError:
execname = sys.argv[0]
print >>sys.stderr, '%s: incorrect number of arguments' % execname
print >>sys.stderr, 'usage: %s hostname port' % sys.argv[0]
sys.exit(-1)
# Connect.
#bzrc = BZRC(host, int(port), debug=True)
bzrc = BZRC(host, int(port))
agent = PFAgent(bzrc)
prev_time = time.time()
# Run the agent
try:
while True:
time_diff = time.time() - prev_time
agent.tick(time_diff)
except KeyboardInterrupt:
print "Exiting due to keyboard interrupt."
bzrc.close()
if __name__ == '__main__':
main()
# vim: et sw=4 sts=4
|
gpl-3.0
| -4,527,369,260,058,859,000 | 34.533333 | 92 | 0.530661 | false | 3.441047 | false | false | false |
planetarymike/IDL-Colorbars
|
IDL_py_test/018_Pastels.py
|
1
|
5628
|
from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 0., 0.282353],
[1., 0., 0.282353],
[1., 0., 0.290196],
[1., 0., 0.298039],
[1., 0., 0.305882],
[1., 0., 0.313725],
[1., 0., 0.321569],
[1., 0., 0.329412],
[1., 0., 0.337255],
[1., 0., 0.345098],
[1., 0., 0.352941],
[1., 0., 0.356863],
[1., 0., 0.364706],
[1., 0., 0.372549],
[1., 0., 0.380392],
[1., 0., 0.388235],
[1., 0., 0.396078],
[1., 0., 0.403922],
[1., 0., 0.411765],
[1., 0., 0.419608],
[1., 0., 0.427451],
[1., 0., 0.435294],
[1., 0., 0.443137],
[1., 0., 0.45098],
[1., 0., 0.458824],
[1., 0., 0.466667],
[1., 0., 0.47451],
[1., 0., 0.482353],
[1., 0., 0.490196],
[1., 0., 0.498039],
[1., 0., 0.505882],
[1., 0., 0.513725],
[1., 0., 0.521569],
[1., 0., 0.529412],
[1., 0., 0.537255],
[1., 0., 0.545098],
[1., 0., 0.552941],
[1., 0., 0.556863],
[1., 0., 0.564706],
[1., 0., 0.572549],
[1., 0., 0.580392],
[1., 0., 0.588235],
[1., 0., 0.596078],
[1., 0., 0.603922],
[1., 0., 0.611765],
[1., 0., 0.619608],
[1., 0., 0.627451],
[1., 0., 0.635294],
[1., 0., 0.643137],
[1., 0., 0.65098],
[1., 0., 0.658824],
[1., 0., 0.666667],
[1., 0., 0.67451],
[1., 0., 0.682353],
[1., 0., 0.690196],
[1., 0., 0.698039],
[1., 0., 0.705882],
[1., 0., 0.713725],
[1., 0., 0.721569],
[1., 0., 0.729412],
[1., 0., 0.737255],
[1., 0., 0.745098],
[1., 0., 0.74902],
[1., 0., 0.756863],
[1., 0., 0.764706],
[1., 0., 0.772549],
[1., 0., 0.780392],
[1., 0., 0.788235],
[1., 0., 0.796078],
[1., 0., 0.803922],
[1., 0., 0.811765],
[1., 0., 0.819608],
[1., 0., 0.827451],
[1., 0., 0.835294],
[1., 0., 0.843137],
[1., 0., 0.85098],
[1., 0., 0.858824],
[1., 0., 0.866667],
[1., 0., 0.87451],
[1., 0., 0.882353],
[1., 0., 0.890196],
[1., 0., 0.898039],
[1., 0., 0.905882],
[1., 0., 0.913725],
[1., 0., 0.921569],
[1., 0., 0.929412],
[1., 0., 0.937255],
[1., 0., 0.945098],
[1., 0., 0.94902],
[1., 0., 0.956863],
[1., 0., 0.964706],
[1., 0., 0.972549],
[1., 0., 0.980392],
[1., 0., 0.988235],
[1., 0., 0.996078],
[0.992157, 0., 1.],
[0.984314, 0., 1.],
[0.976471, 0., 1.],
[0.968627, 0., 1.],
[0.960784, 0., 1.],
[0.952941, 0., 1.],
[0.945098, 0., 1.],
[0.937255, 0., 1.],
[0.929412, 0., 1.],
[0.921569, 0., 1.],
[0.913725, 0., 1.],
[0.905882, 0., 1.],
[0.898039, 0., 1.],
[0.890196, 0., 1.],
[0.882353, 0., 1.],
[0.87451, 0., 1.],
[0.866667, 0., 1.],
[0.858824, 0., 1.],
[0.85098, 0., 1.],
[0.847059, 0., 1.],
[0.839216, 0., 1.],
[0.831373, 0., 1.],
[0.823529, 0., 1.],
[0.815686, 0., 1.],
[0.807843, 0., 1.],
[0.8, 0., 1.],
[0.792157, 0., 1.],
[0.784314, 0., 1.],
[0.776471, 0., 1.],
[0.768627, 0., 1.],
[0.760784, 0., 1.],
[0.752941, 0., 1.],
[0.745098, 0., 1.],
[0.737255, 0., 1.],
[0.729412, 0., 1.],
[0., 0.54902, 1.],
[0., 0.572549, 1.],
[0., 0.596078, 1.],
[0., 0.615686, 1.],
[0., 0.639216, 1.],
[0., 0.662745, 1.],
[0., 0.682353, 1.],
[0., 0.705882, 1.],
[0., 0.729412, 1.],
[0., 0.752941, 1.],
[0., 0.772549, 1.],
[0., 0.796078, 1.],
[0., 0.819608, 1.],
[0., 0.839216, 1.],
[0., 0.862745, 1.],
[0., 0.886275, 1.],
[0., 0.909804, 1.],
[0., 0.929412, 1.],
[0., 0.952941, 1.],
[0., 0.976471, 1.],
[0., 1., 1.],
[0., 1., 0.976471],
[0., 1., 0.952941],
[0., 1., 0.929412],
[0., 1., 0.909804],
[0., 1., 0.886275],
[0., 1., 0.862745],
[0., 1., 0.839216],
[0., 1., 0.819608],
[0., 1., 0.796078],
[0., 1., 0.772549],
[0., 1., 0.752941],
[0., 1., 0.729412],
[0., 1., 0.705882],
[0., 1., 0.682353],
[0., 1., 0.662745],
[0., 1., 0.639216],
[0., 1., 0.615686],
[0., 1., 0.596078],
[0., 1., 0.572549],
[0., 1., 0.54902],
[0., 1., 0.52549],
[0., 1., 0.505882],
[0., 1., 0.482353],
[0., 1., 0.458824],
[0., 1., 0.439216],
[0., 1., 0.415686],
[0., 1., 0.392157],
[0., 1., 0.368627],
[0., 1., 0.34902],
[0., 1., 0.32549],
[0., 1., 0.301961],
[0., 1., 0.278431],
[0., 1., 0.258824],
[0., 1., 0.235294],
[0., 1., 0.211765],
[0., 1., 0.192157],
[0., 1., 0.168627],
[0., 1., 0.145098],
[0., 1., 0.121569],
[0., 1., 0.101961],
[0., 1., 0.0784314],
[0., 1., 0.054902],
[0., 1., 0.0352941],
[0., 1., 0.0117647],
[0.00784314, 1., 0.],
[0.0313725, 1., 0.],
[0.0509804, 1., 0.],
[0.0745098, 1., 0.],
[0.0980392, 1., 0.],
[0.117647, 1., 0.],
[0.141176, 1., 0.],
[0.164706, 1., 0.],
[0.188235, 1., 0.],
[0.207843, 1., 0.],
[0.231373, 1., 0.],
[0.254902, 1., 0.],
[0.278431, 1., 0.],
[0.298039, 1., 0.],
[0.321569, 1., 0.],
[0.345098, 1., 0.],
[0.364706, 1., 0.],
[0.388235, 1., 0.],
[0.411765, 1., 0.],
[0.435294, 1., 0.],
[0.454902, 1., 0.],
[0.478431, 1., 0.],
[0.501961, 1., 0.],
[0.521569, 1., 0.],
[0.545098, 1., 0.],
[0.568627, 1., 0.],
[0.592157, 1., 0.],
[0.611765, 1., 0.],
[0.635294, 1., 0.],
[0.658824, 1., 0.],
[0.678431, 1., 0.],
[0.701961, 1., 0.],
[0.72549, 1., 0.],
[0.74902, 1., 0.],
[0.768627, 1., 0.],
[0.792157, 1., 0.],
[0.815686, 1., 0.],
[0.839216, 1., 0.],
[0.858824, 1., 0.],
[0.882353, 1., 0.],
[0.905882, 1., 0.],
[0.92549, 1., 0.],
[0.94902, 1., 0.],
[0.972549, 1., 0.],
[0.996078, 1., 0.],
[1., 0.980392, 0.],
[1., 0.956863, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.890196, 0.],
[1., 0.866667, 0.],
[1., 0.843137, 0.],
[1., 0.823529, 0.],
[1., 0.8, 0.],
[1., 0.776471, 0.],
[1., 0.756863, 0.],
[1., 0.733333, 0.],
[1., 0.709804, 0.],
[1., 0.686275, 0.],
[1., 0.666667, 0.],
[1., 0.666667, 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
gpl-2.0
| 6,271,818,433,585,241,000 | 19.540146 | 69 | 0.465885 | false | 1.82372 | false | true | false |
jlublin/landpatterngen
|
target_svg.py
|
1
|
4266
|
#!/usr/bin/env python3
import xml.etree.ElementTree as ET
def get_target():
return SVG()
class SVG:
def __init__(self):
self.svg = ET.parse('skeleton.svg')
self.mmpx = 3.543307
    # NOTE: this method is shadowed by the second output() defined below,
    # which renders self.package before writing the SVG tree.
    def output(self, path):
        self.svg.write(path)
def add_package(self, package):
'''
Target SVG only handles one drawing at a time, only last added drawing will be part of output
'''
self.svg = ET.parse('skeleton.svg')
self.package = \
{
'name': package['name'],
'pads': [],
'mnt_pads': [],
'holes': [],
'lines': [],
'circles': [],
'rectangles': [] ,
'texts': []
}
def output(self, fout):
package = self.package
for pad in package['pads']:
self.gen_pac_pad(pad)
for mnt_pad in package['mnt_pads']: # TODO, adding mnt_pads not done
self.gen_pac_mnt_pad(mnt_pad)
for hole in package['holes']:
self.gen_pac_hole(hole)
for line in package['lines']:
self.gen_pac_line(line)
if(0):
for circle in package['circles']:
self.gen_pac_circle(circle)
for rect in package['rectangles']:
self.gen_pac_rectangle(rect)
for text in package['texts']:
self.gen_pac_text(text)
self.svg.write(fout)
def add_pac_pad(self, type, angle, size, pos, number):
self.package['pads'].append(
{
'type': type,
'angle': angle,
'size': size,
'pos': pos,
'number': number
})
def add_pac_hole(self, diameter, pos):
self.package['holes'].append(
{
'd': diameter,
'pos': pos
})
def add_pac_line(self, layer, width, vertices):
self.package['lines'].append(
{
'layer': layer,
'width': width,
'vertices': vertices
})
def gen_pac_pad(self, pad): # type, angle, size, pos, number
top_layer = self.svg.find('.//g[@id="Top"]')
# TODO: Types and angle
el = ET.SubElement(top_layer, 'rect')
el.set('style', 'fill:#ff0000;fill-opacity:1;stroke:none;stroke-width:10;stroke-linecap:square;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1')
el.set('id', 'pin_{}'.format(pad['number']))
el.set('width', '{}'.format(pad['size'][0]*self.mmpx))
el.set('height', '{}'.format(pad['size'][1]*self.mmpx))
el.set('x', '{}'.format((pad['pos'][0] - pad['size'][0]/2)*self.mmpx))
el.set('y', '{}'.format((pad['pos'][1] - pad['size'][1]/2)*self.mmpx))
def gen_pac_hole(self, hole):
top_layer = self.svg.find('.//g[@id="Holes"]')
circle = ET.SubElement(top_layer, 'circle')
circle.set('style', 'fill:#eeee00;fill-opacity:1;stroke:none;stroke-width:0.0;stroke-linecap:square;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"')
circle.set('cx', '{}'.format(hole['pos'][0]*self.mmpx))
circle.set('cy', '{}'.format(hole['pos'][1]*self.mmpx))
circle.set('r', '{}'.format(hole['d']/2*self.mmpx))
def gen_pac_line(self, line):
layer = self.svg.find('.//g[@id="{}"]'.format(line['layer']))
if(line['layer'] == 'Courtyard'):
color = '#e63a81'
elif(line['layer'] == 'Silk'):
color = '#111111'
else:
color = '#000000'
el = ET.SubElement(layer, 'path')
el.set('style', 'fill:none;fill-rule:evenodd;stroke:{color};stroke-width:{}mm;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;stroke-miterlimit:4;stroke-dasharray:none'.format(line['width'], color=color))
pathdata = ''
first = True
for (x,y) in line['vertices']:
if(first):
pathdata += 'M ' + '{},{}'.format(x*self.mmpx,y*self.mmpx)
first = False
elif(x == 'end'):
pathdata += ' z'
else:
pathdata += ' L ' + '{},{}'.format(x*self.mmpx,y*self.mmpx)
el.set('d', pathdata)
def gen_circle(self, layer_name, diameter, pos):
layer = self.svg.find('.//g[@id="{}"]'.format(layer_name))
if(layer_name == 'Courtyard'):
color = '#e63a81'
elif(layer_name == 'Silk'):
color = '#111111'
else:
color = '#000000'
circle = ET.SubElement(layer, 'circle')
circle.set('style', 'fill:#{color};fill-opacity:1;stroke:none;stroke-width:0.0;stroke-linecap:square;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1"'.format(color=color))
circle.set('cx', '{}'.format(pos[0]*self.mmpx))
circle.set('cy', '{}'.format(pos[1]*self.mmpx))
circle.set('r', '{}'.format(diameter/2*self.mmpx))
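# Illustrative sketch (not in the original module): a driver script would
# typically describe a footprint and then render it. The pad sizes/positions
# below are invented, and skeleton.svg is assumed to provide the expected
# layer groups (Top, Holes, Courtyard, Silk).
#   svg = get_target()
#   svg.add_package({'name': 'R0603'})
#   svg.add_pac_pad('smd', 0, (1.0, 1.2), (-0.75, 0), 1)
#   svg.add_pac_pad('smd', 0, (1.0, 1.2), (0.75, 0), 2)
#   svg.output('r0603.svg')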
if(__name__ == '__main__'):
	target = get_target()
	# output() renders self.package, so register a (minimal) package first.
	target.add_package({'name': 'test'})
	target.output('test.svg')
|
gpl-3.0
| -2,957,371,261,297,704,000 | 23.517241 | 217 | 0.61158 | false | 2.657944 | false | false | false |
GaretJax/ipd
|
ipd/metadata/resource.py
|
1
|
7569
|
import json
from uuid import UUID
from twisted.internet import defer
from twisted.web import resource, server
class RecursiveResource(resource.Resource, object):
isLeaf = False
def getChild(self, name, request):
if name == '':
child = self
else:
try:
child = self.children[name]
except KeyError:
child = super(RecursiveResource, self).getChild(name, request)
return child
class MetadataMixin(object):
def __init__(self, server):
super(MetadataMixin, self).__init__()
self.meta_server = server
def get_metadata_from_request(self, request):
h = request.requestHeaders
hypervisor = h.getRawHeaders('X-Tenant-ID')[0]
domain_uuid = UUID(hex=h.getRawHeaders('X-Instance-ID')[0])
#domain_ip = h.getRawHeaders('X-Forwarded-For')[0]
return self.meta_server.get_metadata_for_uuid(hypervisor, domain_uuid)
class DelayedRendererMixin(object):
def _delayed_renderer(self, request):
raise NotImplementedError
def finish_write(self, res, request):
request.write(res)
request.finish()
def finish_err(self, failure, request):
request.setResponseCode(500)
request.write('500: Internal server error')
request.finish()
return failure
def render_GET(self, request):
d = self._delayed_renderer(request)
d.addCallback(self.finish_write, request)
d.addErrback(self.finish_err, request)
return server.NOT_DONE_YET
class UserdataResource(DelayedRendererMixin, resource.Resource, object):
isLeaf = True
def __init__(self, server):
super(UserdataResource, self).__init__()
self.meta_server = server
def get_userdata_from_request(self, request):
h = request.requestHeaders
hypervisor = h.getRawHeaders('X-Tenant-ID')[0]
domain_uuid = UUID(hex=h.getRawHeaders('X-Instance-ID')[0])
#domain_ip = h.getRawHeaders('X-Forwarded-For')[0]
return self.meta_server.get_userdata_for_uuid(hypervisor, domain_uuid)
def _delayed_renderer(self, request):
return self.get_userdata_from_request(request)
class AtomResource(DelayedRendererMixin, MetadataMixin, resource.Resource,
object):
def _delayed_renderer(self, request):
d = self.get_metadata_from_request(request)
d.addCallback(self.get_value)
return d
def get_value(self, metadata):
raise NotImplementedError()
class KeyedAtomResource(AtomResource):
isLeaf = True
def __init__(self, server, key):
super(KeyedAtomResource, self).__init__(server)
self._key = key
def get_value(self, metadata):
val = metadata
for k in self._key:
val = val[k]
return str(val)
class KeysResource(AtomResource):
isLeaf = False
formats = {
'openssh-key': 'OPENSSH'
}
def get_value(self, metadata):
keys = ('{}={}'.format(i, k[0])
for i, k in enumerate(metadata['public_keys']))
return '\n'.join(keys)
def getChild(self, name, request):
if not name:
return self
key = int(name)
fmt = self.formats[request.postpath[0]]
return KeyRenderer(self.meta_server, key, fmt)
class KeyRenderer(KeyedAtomResource):
def __init__(self, server, key, fmt):
super(KeyedAtomResource, self).__init__(server)
self._key = key
self._format = fmt
def get_value(self, metadata):
key = metadata['public_keys'][self._key][1]
return key.toString(self._format)
class IndexResource(RecursiveResource):
isleaf = False
def render_GET(self, request):
for k, v in sorted(self.children.items()):
request.write(k)
if not v.isLeaf:
request.write('/\n')
else:
request.write('\n')
request.finish()
return server.NOT_DONE_YET
class EC2MetadataAPI(IndexResource):
isLeaf = False
version = '2009-04-04'
def __init__(self, server):
super(EC2MetadataAPI, self).__init__()
meta = IndexResource()
meta.putChild('hostname', KeyedAtomResource(server, ['hostname']))
meta.putChild('instance-id', KeyedAtomResource(server, ['uuid']))
meta.putChild('public-keys', KeysResource(server))
self.putChild('meta-data', meta)
self.putChild('user-data', UserdataResource(server))
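# Illustrative note (not in the original module): with the wiring above a guest
# can fetch, for example, /2009-04-04/meta-data/hostname,
# /2009-04-04/meta-data/public-keys/0/openssh-key or /2009-04-04/user-data,
# mirroring the EC2-style metadata layout.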
class OpenstackMetadataAPI(IndexResource):
version = '2012-08-10'
def __init__(self, server):
super(OpenstackMetadataAPI, self).__init__()
self.putChild('meta_data.json', OpenstackMetadata(server))
self.putChild('user_data', UserdataResource(server))
class OpenstackMetadata(DelayedRendererMixin, MetadataMixin, resource.Resource,
object):
isLeaf = True
@defer.inlineCallbacks
def _delayed_renderer(self, request):
metadata = yield self.get_metadata_from_request(request)
metadata['uuid'] = str(metadata['uuid'])
metadata['public_keys'] = {
k: v.toString('OPENSSH')
for k, v in metadata['public_keys']
}
defer.returnValue(json.dumps(metadata))
class APIVersionsIndex(RecursiveResource):
def register_api(self, res):
self.putChild(res.version, res)
latest = self.children.get('latest', None)
if not latest or res.version > latest.version:
self.putChild('latest', res)
def render_GET(self, request):
versions = sorted(self.children)
if versions:
return '\n'.join(versions) + '\n'
else:
return ''
class InstanceCallback(DelayedRendererMixin, resource.Resource):
isLeaf = True
def __init__(self, server):
self._server = server
@defer.inlineCallbacks
def _delayed_renderer(self, request):
instance_uuid = request.postpath[0]
data = yield self._server.get_instancedata_for_uuid(instance_uuid)
defer.returnValue(json.dumps(data))
def render_POST(self, request):
setip = 'nosetip' not in request.args
instance_id = request.args['instance_id'][0]
hostname = request.args['hostname'][0]
data = {
'hostname': hostname,
'status': 'running',
}
if setip:
ip = request.requestHeaders.getRawHeaders('X-Forwarded-For')[0]
data['ip_address'] = ip
for k, v in request.args.iteritems():
if k.startswith('pub_key_'):
try:
data[k] = v[0].strip()
except:
pass
self._server.add_instancedata_for_uuid(instance_id, data)
return ''
class MetadataRootResource(RecursiveResource):
isLeaf = False
def __init__(self, server):
super(MetadataRootResource, self).__init__()
self._server = server
self.ec2 = APIVersionsIndex()
self.ec2.register_api(EC2MetadataAPI(server))
self.openstack = APIVersionsIndex()
self.openstack.register_api(OpenstackMetadataAPI(server))
self.instancedata = InstanceCallback(self._server)
def getChild(self, name, request):
if name == 'openstack':
child = self.openstack
elif name == 'instancedata':
child = self.instancedata
else:
child = self.ec2.getChild(name, request)
return child
|
mit
| 9,081,998,082,272,325,000 | 27.454887 | 79 | 0.608271 | false | 3.994195 | false | false | false |
yen223/mclass-sky
|
mclearn/photometry.py
|
1
|
10079
|
""" Procedures specific to photometric data. """
import os
import numpy as np
from urllib.request import urlopen
from urllib.parse import urlencode
from .tools import load_results
def reddening_correction_sfd98(extinction_r):
""" Compute the reddening values using the SFD98 correction set.
Parameters
----------
extinction_r : array
The uncorrected extinction values in the r-band.
Returns
-------
A_u : array
The corrected extinction values in the u-band.
A_g : array
The corrected extinction values in the g-band.
A_r : array
The corrected extinction values in the r-band.
A_i : array
The corrected extinction values in the i-band.
A_z : array
The corrected extinction values in the z-band.
"""
E_BV = extinction_r / 2.751
A_u = E_BV * 5.155
A_g = E_BV * 3.793
A_r = E_BV * 2.751
A_i = E_BV * 2.086
A_z = E_BV * 1.479
return (A_u, A_g, A_r, A_i, A_z)
def reddening_correction_sf11(extinction_r):
""" Compute the reddening values using the SF11 correction set.
Parameters
----------
extinction_r : array
The uncorrected extinction values in the r-band.
Returns
-------
A_u : array
The corrected extinction values in the u-band.
A_g : array
The corrected extinction values in the g-band.
A_r : array
The corrected extinction values in the r-band.
A_i : array
The corrected extinction values in the i-band.
A_z : array
The corrected extinction values in the z-band.
"""
E_BV = extinction_r / 2.751
A_u = E_BV * 4.239
A_g = E_BV * 3.303
A_r = E_BV * 2.285
A_i = E_BV * 1.698
A_z = E_BV * 1.263
return (A_u, A_g, A_r, A_i, A_z)
def reddening_correction_w14(extinction_r):
""" Compute the reddening values using the W14 correction set.
Parameters
----------
extinction_r : array
The uncorrected extinction values in the r-band.
Returns
-------
A_u : array
The corrected extinction values in the u-band.
A_g : array
The corrected extinction values in the g-band.
A_r : array
The corrected extinction values in the r-band.
A_i : array
The corrected extinction values in the i-band.
A_z : array
The corrected extinction values in the z-band.
"""
E_BV = extinction_r / 2.751
region_2 = np.logical_and(E_BV >= 0.04, E_BV < 0.08)
region_3 = E_BV >= 0.08
E_BV[region_2] = E_BV[region_2] + 0.5 * (E_BV[region_2] - 0.04)
E_BV[region_3] = E_BV[region_3] + 0.02
A_u = E_BV * 4.305
A_g = E_BV * 3.288
A_r = E_BV * 2.261
A_i = E_BV * 1.714
A_z = E_BV * 1.263
return (A_u, A_g, A_r, A_i, A_z)
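# Illustrative sketch (not part of the original module, and not invoked here):
# comparing the three correction sets on a made-up array of r-band extinctions.
def _demo_reddening_corrections():
    example_extinction_r = np.array([0.05, 0.15, 0.30])
    for correct in (reddening_correction_sfd98,
                    reddening_correction_sf11,
                    reddening_correction_w14):
        A_u, A_g, A_r, A_i, A_z = correct(example_extinction_r)
        print(correct.__name__, A_r)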
def correct_magnitudes(data, magnitudes, corrections, suffix):
""" Correct the values of magntidues given a correction set.
Parameters
----------
data : DataFrame
The DataFrame containing the magnitudes.
magnitudes : array
The column names of the magnitudes.
corrections : array
The set of correction values in the same order as `magnitudes`.
"""
for mag, cor in zip(magnitudes, corrections):
data[mag + suffix] = data[mag] - cor
def compute_colours(data, colours, suffix):
""" Compute specified combinations of colours.
Parameters
----------
data : DataFrame
The DataFrame containing the magnitudes.
colours : array
The list of colour combinations to be computed.
    suffix : str
A suffix is added to the colour name to distinguish between correction sets.
"""
for colour in colours:
prefix = 'psf' if colour[0].startswith('psf') else 'petro'
colour_name = prefix + colour[0][-2:] + colour[1][-2:]
data[colour_name + suffix] = data[colour[0] + suffix] - data[colour[1] + suffix]
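# Illustrative sketch (not part of the original module, and not invoked here):
# chaining the helpers above on a tiny hypothetical DataFrame. The magnitude
# and extinction values are invented; column names follow the SDSS convention
# used elsewhere in this file.
def _demo_magnitude_pipeline():
    import pandas as pd
    data = pd.DataFrame({'psfMag_u': [19.2], 'psfMag_g': [18.1],
                         'extinction_r': [0.12]})
    A_u, A_g, A_r, A_i, A_z = reddening_correction_w14(data['extinction_r'])
    correct_magnitudes(data, ['psfMag_u', 'psfMag_g'], [A_u, A_g], '_w14')
    compute_colours(data, [('psfMag_u', 'psfMag_g')], '_w14')
    return data[['psfMag_u_w14', 'psfMag_g_w14', 'psf_u_g_w14']]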
def fetch_sloan_data(sql, output, url=None, fmt='csv', verbose=True):
""" Run an SQL query on the Sloan Sky Server.
Parameters
----------
sql : str
The sql query.
output : str
The path where the queried data will be stored.
url : str
The url that will be used for fetching.
fmt : str
The format of the output, one of 'csv', 'xml', 'html'.
"""
assert fmt in ['csv','xml','html'], "Wrong format!"
if not url:
url = 'http://skyserver.sdss.org/dr10/en/tools/search/x_sql.aspx'
# filter out the comments in the sql query
fsql = ''
for line in sql.split('\n'):
fsql += line.split('--')[0] + ' ' + os.linesep
# make the sql query
if verbose:
print('Connecting to the server...')
params = urlencode({'cmd': fsql, 'format': fmt})
query = urlopen(url + '?{}'.format(params))
# ignore the first line (the name of table)
query.readline()
if verbose:
print('Writing to file...')
with open(output, 'wb') as f:
f.write(query.read())
if verbose:
print('Success!')
def fetch_filter(filter, download_url, filter_dir=''):
""" Get a filter from the internet.
Parameters
----------
filter : char
Name of the filters. Must be one of u, g, r, i, and z.
download_url : str
The URL where the filter can be downloaded.
Returns
-------
data : array
The downloaded filter data.
"""
assert filter in 'ugriz'
url = download_url % filter
if not os.path.exists(filter_dir):
os.makedirs(filter_dir)
loc = os.path.join(filter_dir, '%s.dat' % filter)
if not os.path.exists(loc):
filter_file = urlopen(url)
with open(loc, 'wb') as f:
f.write(filter_file.read())
with open(loc, 'rb') as f:
data = np.loadtxt(f)
return data
def fetch_spectrum(spectrum_url, spectra_dir=''):
""" Get a spectrum from the internet.
Parameters
----------
spectrum_url : str
The URL where the spectrum can be downloaded.
Returns
-------
data : array
The downloaded spectrum data.
"""
if not os.path.exists(spectra_dir):
os.makedirs(spectra_dir)
refspec_file = os.path.join(spectra_dir, spectrum_url.split('/')[-1])
if not os.path.exists(refspec_file):
spectrum_file = urlopen(spectrum_url)
with open(refspec_file, 'wb') as f:
f.write(spectrum_file.read())
with open(refspec_file, 'rb') as f:
data = np.loadtxt(f)
return data
def clean_up_subclasses(classes, subclasses):
""" Clean up the names of the subclasses in the SDSS dataset.
Parameters
----------
classes : array
The array containing the classes. This will be prepended to the sublcasses.
subclasses : array
The array containing the subclasses.
"""
# remove null references
subclasses.replace('null', '', inplace=True)
# remove HD catalog number (stored in brackets)
subclasses.replace(r'\s*\(\d+\)\s*', '', regex=True, inplace=True)
# captialise only the first leter of some subclasses
subclasses.replace('BROADLINE', 'Broadline', inplace=True)
subclasses.replace('STARFORMING', 'Starforming', inplace=True)
subclasses.replace('STARBURST', 'Starburst', inplace=True)
subclasses.replace('STARBURST BROADLINE', 'Starburst Broadline', inplace=True)
subclasses.replace('AGN BROADLINE', 'AGN Broadline', inplace=True)
subclasses.replace('STARFORMING BROADLINE', 'Starforming Broadline', inplace=True)
# remove other brackets
subclasses.replace('F8V (G_243-63)', 'F8V', inplace=True)
subclasses.replace('K5 (G_19-24)', 'K5', inplace=True)
subclasses.replace('sd:F0 (G_84-29)', 'sd:F0', inplace=True)
subclasses.replace('G0 (G_101-29)', 'G0', inplace=True)
subclasses.replace('A4 (G_165-39)', 'A4', inplace=True)
subclasses.replace('A4p (G_37-26)', 'A4p', inplace=True)
not_empty = subclasses != ''
subclasses.loc[not_empty] = classes[not_empty] + ' ' + subclasses[not_empty]
def optimise_sdss_features(sdss, scaler_path):
""" Apply the W14 reddening correction and compute key colours in the SDSS dataset.
Parameters
----------
sdss : DataFrame
The DataFrame containing photometric features.
"""
# compute the three sets of reddening correction
A_u_w14, A_g_w14, A_r_w14, A_i_w14, A_z_w14 = reddening_correction_w14(sdss['extinction_r'])
# useful variables
psf_magnitudes = ['psfMag_u', 'psfMag_g', 'psfMag_r', 'psfMag_i', 'psfMag_z']
petro_magnitudes = ['petroMag_u', 'petroMag_g', 'petroMag_r', 'petroMag_i', 'petroMag_z']
w14_corrections = [A_u_w14, A_g_w14, A_r_w14, A_i_w14, A_z_w14]
colours = [('psfMag_u', 'psfMag_g'), ('psfMag_g', 'psfMag_r'), ('psfMag_r', 'psfMag_i'), ('psfMag_i', 'psfMag_z'),
('petroMag_u', 'petroMag_g'), ('petroMag_g', 'petroMag_r'), ('petroMag_r', 'petroMag_i'), ('petroMag_i', 'petroMag_z')]
# calculate the corrected magnitudes
correct_magnitudes(sdss, psf_magnitudes, w14_corrections, '_w14')
correct_magnitudes(sdss, petro_magnitudes, w14_corrections, '_w14')
# calculate the corrected magnitudes
compute_colours(sdss, colours, '_w14')
# scale features
w14_feature_cols = ['psfMag_r_w14', 'psf_u_g_w14', 'psf_g_r_w14', 'psf_r_i_w14',
'psf_i_z_w14', 'petroMag_r_w14', 'petro_u_g_w14', 'petro_g_r_w14',
'petro_r_i_w14', 'petro_i_z_w14', 'petroRad_r']
scaler = load_results(scaler_path)
sdss[w14_feature_cols] = scaler.transform(sdss[w14_feature_cols])
|
bsd-3-clause
| -5,552,300,325,919,544,000 | 27.882521 | 134 | 0.580613 | false | 3.369776 | false | false | false |
weka511/bioinformatics
|
nwck.py
|
1
|
1881
|
# Copyright (C) 2017 Greenweaves Software Pty Ltd
# This is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>
import newick
tokenizer = newick.Tokenizer()
parser = newick.Parser(tokenizer)
def get_path_to_root(clade,path=[]):
if (len(path)==0):
path=[(clade.id,clade.length)]
ancestor = clade.parent
if ancestor == None:
return path
pp=path[:]
pp.append((ancestor.id,ancestor.length))
return get_path_to_root(ancestor,pp)
def get_path(clade):
return [clade.id]+get_path_to_root(clade)
def diff(path1,path2):
def length(path):
return sum(l for (_,l) in path)
if len(path1)>len(path2):
return diff(path2,path1)
i=0
while i<len(path1) and path1[i][0]==path2[i][0]:
i+=1
return length(path1[i:]) +length(path2[i:])
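# Illustrative check (not in the original script): diff() trims the shared
# root-first prefix of two ancestor paths and sums the remaining branch
# lengths. The (id, length) pairs below are invented for demonstration.
_demo_a = [('root', 0), ('x', 2.0), ('a', 1.0)]
_demo_b = [('root', 0), ('x', 2.0), ('b', 3.5)]
assert diff(_demo_a, _demo_b) == 4.5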
with open (r'C:\Users\Weka\Downloads\rosalind_nkew.txt') as f:
diffs=[]
i = 0
tree = None
lookup = None
for line in f:
if i%3==0:
print (line.strip())
tree,lookup=parser.parse(line.strip())
elif i%3==1:
clades = line.strip().split()
print (clades)
ancestors=[get_path_to_root(lookup[clade])[::-1] for clade in clades]
diffs.append(diff(ancestors[0],ancestors[1]))
i+=1
print (diffs)
|
gpl-3.0
| 8,854,605,071,501,078,000 | 29.852459 | 81 | 0.643275 | false | 3.389189 | false | false | false |
anderson1008/NOCulator
|
hring/src/Script/sim_batch.py
|
1
|
1964
|
#!/usr/bin/python
import sys
import os
import re
import fnmatch
import string
workload_dir = "/Users/xiyuexiang/GoogleDrive/NOCulator/hring/src/bin/"
workload = "mix_app"
insns_count = 1000000
ipc_alone = [2.16, 2.75, 2.08, 1.91, 2.16, 2.75, 2.08, 1.91, 2.16, 2.75, 2.08, 1.91, 2.16, 2.75, 2.08, 1.91]
ipc_share = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
out_dir = "/Users/xiyuexiang/GoogleDrive/NOCulator/hring/src/bin/sweep_batch/"
filename = '../bin/sweep_batch_period.txt'
filename_out = str(filename)
if os.path.exists(filename_out) == True:
os.remove(filename_out)
fo_out = open(filename_out, "a")
fo_out.write('\n\n' + 'sweep packet batching period (epoch = 100000)' + '\n\n')
fo_out.write('period'.ljust(15) + 'w_speedup'.ljust(15) + 'h_speedup'.ljust(15))
fo_out.write('\n')
for sim_index in range(100, 5100, 100):
out_file = "sim_" + str(sim_index) + ".out"
command_line = "mono ../bin/sim.exe -config " + workload_dir + "config_0.txt -output " + out_dir + out_file + " -workload " + workload_dir + workload + ' 3 ' + "-STC_batchPeriod " + str(sim_index)
os.system (command_line)
# collect result
result_file = open (out_dir + out_file, 'r')
result = result_file.read()
result_file.close()
searchObj = re.search(r'(?:"active_cycles":\[(.*?)])',result)
splitObj = re.split('\W+',searchObj.group(1))
active_cycles = splitObj
weighted_speedup = 0
temp0 = 0
for i in range (0, 16, 1):
ipc_share [i] = float(insns_count) / int(active_cycles[i])
weighted_speedup = ipc_share[i] / ipc_alone[i] + weighted_speedup
temp0 = ipc_alone[i] / ipc_share[i] + temp0
harmonic_speedup = 16 / temp0
print str(sim_index) + " " + str("%.2f" % weighted_speedup) + " " + str("%.2f" % harmonic_speedup)
fo_out.write('\n')
fo_out.write(str(sim_index).ljust(15) + str(weighted_speedup).ljust(15) + str(harmonic_speedup).ljust(15))
fo_out.write('\n')
fo_out.close()
|
mit
| 6,003,752,314,014,478,000 | 32.45614 | 197 | 0.623727 | false | 2.451935 | false | false | false |
zhuangjun1981/retinotopic_mapping
|
retinotopic_mapping/examples/analysis_retinotopicmapping/batch_MarkPatches.py
|
1
|
1417
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 30 14:46:38 2014
@author: junz
"""
import os
import matplotlib.pyplot as plt
import corticalmapping.core.FileTools as ft
import corticalmapping.RetinotopicMapping as rm
trialName = '160208_M193206_Trial1.pkl'
names = [
['patch01', 'V1'],
['patch02', 'RL'],
['patch03', 'LM'],
['patch04', 'AL'],
['patch05', 'AM'],
['patch06', 'PM'],
['patch07', 'MMA'],
['patch08', 'MMP'],
['patch09', 'LLA'],
# ['patch10', 'AM'],
# ['patch11', 'LLA'],
# ['patch12', 'MMP'],
# ['patch13', 'MMP']
# ['patch14', 'MMP']
]
currFolder = os.path.dirname(os.path.realpath(__file__))
os.chdir(currFolder)
trialPath = os.path.join(currFolder,trialName)
trialDict = ft.loadFile(trialPath)
finalPatches = dict(trialDict['finalPatches'])
for i, namePair in enumerate(names):
currPatch = finalPatches.pop(namePair[0])
newPatchDict = {namePair[1]:currPatch}
finalPatches.update(newPatchDict)
trialDict.update({'finalPatchesMarked':finalPatches})
ft.saveFile(trialPath,trialDict)
trial, _ = rm.loadTrial(trialPath)
f = plt.figure(figsize=(10,10))
ax = f.add_subplot(111)
trial.plotFinalPatchBorders2(plotAxis = ax,borderWidth=2)
plt.show()
f.savefig(trialName[0:-4]+'_borders.pdf',dpi=600)
f.savefig(trialName[0:-4]+'_borders.png',dpi=300)
|
gpl-3.0
| 4,602,845,023,401,263,600 | 24.321429 | 57 | 0.617502 | false | 2.927686 | false | false | false |
zmarvel/slowboy
|
test_roms/scripts/gentilemap2.py
|
1
|
2654
|
import string
import sys
TWIDTH = 8
THEIGHT = 8
TSWIDTH = 128
TSHEIGHT = 128
TSWIDTH_TILES = TSWIDTH // TWIDTH
TSHEIGHT_TILES = TSHEIGHT // THEIGHT
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 144
SWIDTH_TILES = SCREEN_WIDTH // TWIDTH
SHEIGHT_TILES = SCREEN_HEIGHT // THEIGHT
BACKGROUND_WIDTH = 256
BACKGROUND_HEIGHT = 256
BGWIDTH_TILES = BACKGROUND_WIDTH // TWIDTH
BGHEIGHT_TILES = BACKGROUND_HEIGHT // THEIGHT
def s8(u):
return ((u ^ 0xff) + 1) & 0xff
def sub(a, b):
return (a + s8(b)) & 0xff
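# Illustrative check (not in the original script): s8() is the two's-complement
# negation of a byte, so sub(a, b) computes (a - b) mod 256.
assert sub(0x10, 0x20) == 0xf0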
def strtotilemap(s, offset, width, left, right, pad):
# width in tiles
# left and right are tileid for left and right border
# only support one case for now
s = s.lower()
out = [left]
col = 1
for i in range(len(s)):
if col == width-1:
out.append(right)
out.extend([pad for _ in range(BGWIDTH_TILES-width)])
out.append(left)
col = 1
if s[i] == ' ':
out.append(pad)
elif s[i] not in string.ascii_lowercase:
            raise ValueError('only ascii letters are supported: {}'.format(s[i]))
else:
out.append(offset + (ord(s[i]) - 97))
col += 1
print(len(out))
if col <= width:
# pad
out.extend([pad for _ in range(width-col-1)])
out.append(right)
out.extend([pad for _ in range(BGWIDTH_TILES-width)])
print(len(out))
print(out)
return out
TOPLEFT_CORNER = 64+43
TOPRIGHT_CORNER = 64+44
BOTTOMLEFT_CORNER = 64+50
BOTTOMRIGHT_CORNER = 64+49
TOP_EDGE = 64+46
LEFT_EDGE = 64+45
RIGHT_EDGE = 64+47
BOTTOM_EDGE = 64+48
SPACE = 64+51
HEART = 64+6
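# Illustrative sketch (not in the original script, and never called): the call
# shape for strtotilemap() mirrors the one used in the writer loop below, i.e.
# a bordered 20-tile-wide text window padded out to the 32-tile background.
def _demo_strtotilemap():
    return strtotilemap("hi", 64 + 17, 20, LEFT_EDGE, RIGHT_EDGE, HEART)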
fname = sys.argv[1]
with open(fname, 'wb+') as f:
# bg tilemap: 0x9800-0x9bff = 0x400
f.write(bytes(x % 64 for x in range(0, 0x400)))
# fg tilemap: 0x0xc00-0x9fff = 0x400
top_row = bytes([TOPLEFT_CORNER] + [TOP_EDGE for _ in range(18)] \
+ [TOPRIGHT_CORNER] + [SPACE for _ in range(BGWIDTH_TILES-20)])
f.write(top_row)
encoded = strtotilemap("hello world", 64+17, 20, LEFT_EDGE, RIGHT_EDGE, HEART)
blank_rows = []
for i in range(3):
blank_rows.extend([LEFT_EDGE] + [SPACE for _ in range(18)] + [RIGHT_EDGE])
blank_rows.extend(HEART for _ in range(BGWIDTH_TILES-SWIDTH_TILES))
bottom_row = [BOTTOMLEFT_CORNER] + [BOTTOM_EDGE for _ in range(18)] \
+ [BOTTOMRIGHT_CORNER]
bottom_row.extend(HEART for _ in range(BGWIDTH_TILES-SWIDTH_TILES))
l = 0x400 - len(top_row) - len(encoded) - len(blank_rows) - len(bottom_row)
f.write(bytes(encoded))
f.write(bytes(blank_rows))
f.write(bytes(bottom_row))
f.write(bytes(0 for _ in range(l)))
|
mit
| -4,803,830,658,140,106,000 | 28.164835 | 83 | 0.609646 | false | 2.838503 | false | false | false |
dmlloyd/openjdk-modules
|
hotspot/.mx.jvmci/mx_jvmci.py
|
1
|
31696
|
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, shutil, zipfile, re, time, sys, datetime, platform
from os.path import join, exists, dirname, isdir
from argparse import ArgumentParser, REMAINDER
import StringIO
import xml.dom.minidom
import subprocess
import mx
import mx_gate
import mx_unittest
from mx_gate import Task
from mx_unittest import unittest
_suite = mx.suite('jvmci')
JVMCI_VERSION = 9
"""
Top level directory of the JDK source workspace.
"""
_jdkSourceRoot = dirname(_suite.dir)
_JVMCI_JDK_TAG = 'jvmci'
_minVersion = mx.VersionSpec('1.9')
# max version (first _unsupported_ version)
_untilVersion = None
_jvmciModes = {
'hosted' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI'],
'jit' : ['-XX:+UnlockExperimentalVMOptions', '-XX:+EnableJVMCI', '-XX:+UseJVMCICompiler'],
'disabled' : []
}
# TODO: can optimized be built without overriding release build?
_jdkDebugLevels = ['release', 'fastdebug', 'slowdebug']
# TODO: add client once/if it can be built on 64-bit platforms
_jdkJvmVariants = ['server', 'client']
"""
Translation table from mx_jvmci:8 --vmbuild values to mx_jvmci:9 --jdk-debug-level values.
"""
_legacyVmbuilds = {
'product' : 'release',
'debug' : 'slowdebug'
}
"""
Translates a mx_jvmci:8 --vmbuild value to a mx_jvmci:9 --jdk-debug-level value.
"""
def _translateLegacyDebugLevel(debugLevel):
return _legacyVmbuilds.get(debugLevel, debugLevel)
"""
Translation table from mx_jvmci:8 --vm values to mx_jvmci:9 (--jdk-jvm-variant, --jvmci-mode) tuples.
"""
_legacyVms = {
'jvmci' : ('server', 'jit')
}
"""
A VM configuration composed of a JDK debug level, JVM variant and a JVMCI mode.
This is also a context manager that can be used with the 'with' statement to set/change
a VM configuration within a dynamic scope. For example:
with ConfiguredJDK(debugLevel='fastdebug'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
self.update(jvmVariant, debugLevel, jvmciMode)
def update(self, jvmVariant=None, debugLevel=None, jvmciMode=None):
if jvmVariant in _legacyVms:
# Backwards compatibility for mx_jvmci:8 API
jvmVariant, newJvmciMode = _legacyVms[jvmVariant]
if jvmciMode is not None and jvmciMode != newJvmciMode:
mx.abort('JVM variant "' + jvmVariant + '" implies JVMCI mode "' + newJvmciMode +
'" which conflicts with explicitly specified JVMCI mode of "' + jvmciMode + '"')
jvmciMode = newJvmciMode
debugLevel = _translateLegacyDebugLevel(debugLevel)
assert jvmVariant is None or jvmVariant in _jdkJvmVariants, jvmVariant
assert debugLevel is None or debugLevel in _jdkDebugLevels, debugLevel
assert jvmciMode is None or jvmciMode in _jvmciModes, jvmciMode
self.jvmVariant = jvmVariant or _vm.jvmVariant
self.debugLevel = debugLevel or _vm.debugLevel
self.jvmciMode = jvmciMode or _vm.jvmciMode
def __enter__(self):
global _vm
self.previousVm = _vm
_vm = self
def __exit__(self, exc_type, exc_value, traceback):
global _vm
_vm = self.previousVm
_vm = VM(jvmVariant=_jdkJvmVariants[0], debugLevel=_jdkDebugLevels[0], jvmciMode='hosted')
def get_vm():
"""
Gets the configured VM.
"""
return _vm
def relativeVmLibDirInJdk():
mxos = mx.get_os()
if mxos == 'darwin':
return join('lib')
if mxos == 'windows' or mxos == 'cygwin':
return join('bin')
return join('lib', mx.get_arch())
def isJVMCIEnabled(vm):
assert vm in _jdkJvmVariants
return True
def _makehelp():
return subprocess.check_output([mx.gmake_cmd(), 'help'], cwd=_jdkSourceRoot)
def _runmake(args):
"""run the JDK make process
To build hotspot and import it into the JDK: "mx make hotspot import-hotspot"
{0}"""
jdkBuildDir = _get_jdk_build_dir()
if not exists(jdkBuildDir):
# JDK9 must be bootstrapped with a JDK8
compliance = mx.JavaCompliance('8')
jdk8 = mx.get_jdk(compliance.exactMatch, versionDescription=compliance.value)
        cmd = ['sh', 'configure', '--with-debug-level=' + _vm.debugLevel, '--with-native-debug-symbols=external', '--disable-precompiled-headers', '--with-jvm-features=graal',
               '--with-jvm-variants=' + _vm.jvmVariant, '--disable-warnings-as-errors', '--with-boot-jdk=' + jdk8.home]
mx.run(cmd, cwd=_jdkSourceRoot)
cmd = [mx.gmake_cmd(), 'CONF=' + _vm.debugLevel]
if mx.get_opts().verbose:
cmd.append('LOG=debug')
cmd.extend(args)
if mx.get_opts().use_jdk_image and 'images' not in args:
cmd.append('images')
if not mx.get_opts().verbose:
mx.log('--------------- make execution ----------------------')
mx.log('Working directory: ' + _jdkSourceRoot)
mx.log('Command line: ' + ' '.join(cmd))
mx.log('-----------------------------------------------------')
mx.run(cmd, cwd=_jdkSourceRoot)
def _runmultimake(args):
"""run the JDK make process for one or more configurations"""
jvmVariantsDefault = ','.join(_jdkJvmVariants)
debugLevelsDefault = ','.join(_jdkDebugLevels)
parser = ArgumentParser(prog='mx multimake')
parser.add_argument('--jdk-jvm-variants', '--vms', help='a comma separated list of VMs to build (default: ' + jvmVariantsDefault + ')', metavar='<args>', default=jvmVariantsDefault)
parser.add_argument('--jdk-debug-levels', '--builds', help='a comma separated list of JDK debug levels (default: ' + debugLevelsDefault + ')', metavar='<args>', default=debugLevelsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
select = parser.add_mutually_exclusive_group()
select.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log files')
select.add_argument('-d', '--output-dir', help='directory for log files instead of current working directory', default=os.getcwd(), metavar='<dir>')
args = parser.parse_args(args)
jvmVariants = args.jdk_jvm_variants.split(',')
debugLevels = [_translateLegacyDebugLevel(dl) for dl in args.jdk_debug_levels.split(',')]
allStart = time.time()
for jvmVariant in jvmVariants:
for debugLevel in debugLevels:
if not args.console:
logFile = join(mx.ensure_dir_exists(args.output_dir), jvmVariant + '-' + debugLevel + '.log')
log = open(logFile, 'wb')
start = time.time()
mx.log('BEGIN: ' + jvmVariant + '-' + debugLevel + '\t(see: ' + logFile + ')')
verbose = ['-v'] if mx.get_opts().verbose else []
# Run as subprocess so that output can be directed to a file
cmd = [sys.executable, '-u', mx.__file__] + verbose + ['--jdk-jvm-variant=' + jvmVariant, '--jdk-debug-level=' + debugLevel, 'make']
mx.logv("executing command: " + str(cmd))
subprocess.check_call(cmd, cwd=_suite.dir, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + jvmVariant + '-' + debugLevel + '\t[' + str(duration) + ']')
else:
with VM(jvmVariant=jvmVariant, debugLevel=debugLevel):
_runmake([])
if not args.no_check:
with VM(jvmciMode='jit'):
run_vm(['-XX:-BootstrapJVMCI', '-version'])
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
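# Illustrative invocation (a sketch, not part of the command's own docs; the
# variant/level values below are just examples):
#
#   mx multimake --jdk-jvm-variants server --jdk-debug-levels release,fastdebug
#
# builds the server VM once per debug level, writing one log file per
# configuration into the current directory unless --console or --output-dir
# is given, and runs "java -version" against each build unless -n is passed.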
class HotSpotProject(mx.NativeProject):
"""
Defines a NativeProject representing the HotSpot binaries built via make.
"""
def __init__(self, suite, name, deps, workingSets, **args):
assert name == 'hotspot'
mx.NativeProject.__init__(self, suite, name, "", [], deps, workingSets, None, None, join(suite.mxDir, name))
def eclipse_config_up_to_date(self, configZip):
# Assume that any change to this module might imply changes to the generated IDE files
if configZip.isOlderThan(__file__):
return False
for _, source in self._get_eclipse_settings_sources().iteritems():
if configZip.isOlderThan(source):
return False
return True
def _get_eclipse_settings_sources(self):
"""
Gets a dictionary from the name of an Eclipse settings file to
the file providing its generated content.
"""
if not hasattr(self, '_eclipse_settings'):
esdict = {}
templateSettingsDir = join(self.dir, 'templates', 'eclipse', 'settings')
if exists(templateSettingsDir):
for name in os.listdir(templateSettingsDir):
source = join(templateSettingsDir, name)
esdict[name] = source
self._eclipse_settings = esdict
return self._eclipse_settings
def _eclipseinit(self, files=None, libFiles=None):
"""
Generates an Eclipse project for each HotSpot build configuration.
"""
roots = [
'ASSEMBLY_EXCEPTION',
'LICENSE',
'README',
'THIRD_PARTY_README',
'agent',
'make',
'src',
'test'
]
for jvmVariant in _jdkJvmVariants:
for debugLevel in _jdkDebugLevels:
name = jvmVariant + '-' + debugLevel
eclProjectDir = join(self.dir, 'eclipse', name)
mx.ensure_dir_exists(eclProjectDir)
out = mx.XMLDoc()
out.open('projectDescription')
out.element('name', data='hotspot:' + name)
out.element('comment', data='')
out.element('projects', data='')
out.open('buildSpec')
out.open('buildCommand')
out.element('name', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigBuilder')
out.element('triggers', data='full,incremental')
out.element('arguments', data='')
out.close('buildCommand')
out.close('buildSpec')
out.open('natures')
out.element('nature', data='org.eclipse.cdt.core.cnature')
out.element('nature', data='org.eclipse.cdt.core.ccnature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.managedBuildNature')
out.element('nature', data='org.eclipse.cdt.managedbuilder.core.ScannerConfigNature')
out.close('natures')
if roots:
out.open('linkedResources')
for r in roots:
f = join(_suite.dir, r)
out.open('link')
out.element('name', data=r)
out.element('type', data='2' if isdir(f) else '1')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(f, eclProjectDir))
out.close('link')
out.open('link')
out.element('name', data='generated')
out.element('type', data='2')
generated = join(_get_hotspot_build_dir(jvmVariant, debugLevel), 'generated')
out.element('locationURI', data=mx.get_eclipse_project_rel_locationURI(generated, eclProjectDir))
out.close('link')
out.close('linkedResources')
out.close('projectDescription')
projectFile = join(eclProjectDir, '.project')
mx.update_file(projectFile, out.xml(indent='\t', newl='\n'))
if files:
files.append(projectFile)
cprojectTemplate = join(self.dir, 'templates', 'eclipse', 'cproject')
cprojectFile = join(eclProjectDir, '.cproject')
with open(cprojectTemplate) as f:
content = f.read()
mx.update_file(cprojectFile, content)
if files:
files.append(cprojectFile)
settingsDir = join(eclProjectDir, ".settings")
mx.ensure_dir_exists(settingsDir)
for name, source in self._get_eclipse_settings_sources().iteritems():
out = StringIO.StringIO()
print >> out, '# GENERATED -- DO NOT EDIT'
print >> out, '# Source:', source
with open(source) as f:
print >> out, f.read()
content = out.getvalue()
mx.update_file(join(settingsDir, name), content)
if files:
files.append(join(settingsDir, name))
def getBuildTask(self, args):
return JDKBuildTask(self, args, _vm.debugLevel, _vm.jvmVariant)
class JDKBuildTask(mx.NativeBuildTask):
def __init__(self, project, args, debugLevel, jvmVariant):
mx.NativeBuildTask.__init__(self, args, project)
self.jvmVariant = jvmVariant
self.debugLevel = debugLevel
def __str__(self):
return 'Building JDK[{}, {}]'.format(self.debugLevel, self.jvmVariant)
def build(self):
if mx.get_opts().use_jdk_image:
_runmake(['images'])
else:
_runmake([])
self._newestOutput = None
def clean(self, forBuild=False):
if forBuild: # Let make handle incremental builds
return
if exists(_get_jdk_build_dir(self.debugLevel)):
_runmake(['clean'])
self._newestOutput = None
# Backwards compatibility for mx_jvmci:8 API
def buildvms(args):
_runmultimake(args)
def run_vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, debugLevel=None, vmbuild=None):
"""run a Java program by executing the java executable in a JVMCI JDK"""
jdkTag = mx.get_jdk_option().tag
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.abort('The "--jdk" option must have the tag "' + _JVMCI_JDK_TAG + '" when running a command requiring a JVMCI VM')
jdk = get_jvmci_jdk(debugLevel=debugLevel or _translateLegacyDebugLevel(vmbuild))
return jdk.run_java(args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _unittest_vm_launcher(vmArgs, mainClass, mainClassArgs):
run_vm(vmArgs + [mainClass] + mainClassArgs)
mx_unittest.set_vm_launcher('JVMCI VM launcher', _unittest_vm_launcher)
def _jvmci_gate_runner(args, tasks):
# Build release server VM now so we can run the unit tests
with Task('BuildHotSpotJVMCIHosted: release', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'release'])
# Run unit tests in hosted mode
with VM(jvmVariant='server', debugLevel='release', jvmciMode='hosted'):
with Task('JVMCI UnitTests: hosted-release', tasks) as t:
if t: unittest(['--suite', 'jvmci', '--enable-timing', '--verbose', '--fail-fast'])
# Build the other VM flavors
with Task('BuildHotSpotJVMCIOthers: fastdebug', tasks) as t:
if t: _runmultimake(['--jdk-jvm-variants', 'server', '--jdk-debug-levels', 'fastdebug'])
with Task('CleanAndBuildIdealGraphVisualizer', tasks, disableJacoco=True) as t:
if t and platform.processor() != 'sparc':
buildxml = mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'))
mx.run(['ant', '-f', buildxml, '-q', 'clean', 'build'], env=_igvBuildEnv())
mx_gate.add_gate_runner(_suite, _jvmci_gate_runner)
mx_gate.add_gate_argument('-g', '--only-build-jvmci', action='store_false', dest='buildNonJVMCI', help='only build the JVMCI VM')
def _igvJdk():
v8u20 = mx.VersionSpec("1.8.0_20")
v8u40 = mx.VersionSpec("1.8.0_40")
v8 = mx.VersionSpec("1.8")
def _igvJdkVersionCheck(version):
return version >= v8 and (version < v8u20 or version >= v8u40)
return mx.get_jdk(_igvJdkVersionCheck, versionDescription='>= 1.8 and < 1.8.0u20 or >= 1.8.0u40', purpose="building & running IGV").home
def _igvBuildEnv():
# When the http_proxy environment variable is set, convert it to the proxy settings that ant needs
env = dict(os.environ)
proxy = os.environ.get('http_proxy')
if not (proxy is None) and len(proxy) > 0:
if '://' in proxy:
# Remove the http:// prefix (or any other protocol prefix)
proxy = proxy.split('://', 1)[1]
# Separate proxy server name and port number
proxyName, proxyPort = proxy.split(':', 1)
proxyEnv = '-DproxyHost="' + proxyName + '" -DproxyPort=' + proxyPort
env['ANT_OPTS'] = proxyEnv
env['JAVA_HOME'] = _igvJdk()
return env
def igv(args):
"""run the Ideal Graph Visualizer"""
logFile = '.ideal_graph_visualizer.log'
with open(join(_suite.dir, logFile), 'w') as fp:
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
nbplatform = join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')
# Remove NetBeans platform if it is earlier than the current supported version
if exists(nbplatform):
updateTrackingFile = join(nbplatform, 'platform', 'update_tracking', 'org-netbeans-core.xml')
if not exists(updateTrackingFile):
mx.log('Could not find \'' + updateTrackingFile + '\', removing NetBeans platform')
shutil.rmtree(nbplatform)
else:
dom = xml.dom.minidom.parse(updateTrackingFile)
currentVersion = mx.VersionSpec(dom.getElementsByTagName('module_version')[0].getAttribute('specification_version'))
supportedVersion = mx.VersionSpec('3.43.1')
if currentVersion < supportedVersion:
mx.log('Replacing NetBeans platform version ' + str(currentVersion) + ' with version ' + str(supportedVersion))
shutil.rmtree(nbplatform)
elif supportedVersion < currentVersion:
mx.log('Supported NetBeans version in igv command should be updated to ' + str(currentVersion))
if not exists(nbplatform):
mx.logv('[This execution may take a while as the NetBeans platform needs to be downloaded]')
env = _igvBuildEnv()
# make the jar for Batik 1.7 available.
env['IGV_BATIK_JAR'] = mx.library('BATIK').get_path(True)
if mx.run(['ant', '-f', mx._cygpathU2W(join(_suite.dir, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml')), '-l', mx._cygpathU2W(fp.name), 'run'], env=env, nonZeroIsFatal=False):
mx.abort("IGV ant build & launch failed. Check '" + logFile + "'. You can also try to delete 'src/share/tools/IdealGraphVisualizer/nbplatform'.")
def c1visualizer(args):
"""run the Cl Compiler Visualizer"""
libpath = join(_suite.dir, 'lib')
if mx.get_os() == 'windows':
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer.exe')
else:
executable = join(libpath, 'c1visualizer', 'bin', 'c1visualizer')
    # Check whether the current C1Visualizer installation is up to date
if exists(executable) and not exists(mx.library('C1VISUALIZER_DIST').get_path(resolve=False)):
mx.log('Updating C1Visualizer')
shutil.rmtree(join(libpath, 'c1visualizer'))
archive = mx.library('C1VISUALIZER_DIST').get_path(resolve=True)
if not exists(executable):
zf = zipfile.ZipFile(archive, 'r')
zf.extractall(libpath)
if not exists(executable):
mx.abort('C1Visualizer binary does not exist: ' + executable)
if mx.get_os() != 'windows':
        # Make sure that execution is allowed. The zip file does not always specify that correctly
os.chmod(executable, 0777)
mx.run([executable])
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
    By default it downloads the Intel syntax version; use the 'att' argument to install the AT&T syntax version."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
if mx.get_arch() == "sparcv9":
flavor = "sparcv9"
lib = mx.add_lib_suffix('hsdis-' + mx.get_arch())
path = join(_suite.dir, 'lib', lib)
sha1s = {
'att/hsdis-amd64.dll' : 'bcbd535a9568b5075ab41e96205e26a2bac64f72',
'att/hsdis-amd64.so' : '58919ba085d4ef7a513f25bae75e7e54ee73c049',
'intel/hsdis-amd64.dll' : '6a388372cdd5fe905c1a26ced614334e405d1f30',
'intel/hsdis-amd64.so' : '844ed9ffed64fe9599638f29a8450c50140e3192',
'intel/hsdis-amd64.dylib' : 'fdb13ef0d7d23d93dacaae9c98837bea0d4fc5a2',
'sparcv9/hsdis-sparcv9.so': '970640a9af0bd63641f9063c11275b371a59ee60',
}
flavoredLib = flavor + "/" + lib
if flavoredLib not in sha1s:
mx.logv("hsdis not supported on this plattform or architecture")
return
if not exists(path):
sha1 = sha1s[flavoredLib]
sha1path = path + '.sha1'
mx.download_file_with_sha1('hsdis', path, ['https://lafo.ssw.uni-linz.ac.at/pub/hsdis/' + flavoredLib], sha1, sha1path, True, True, sources=False)
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
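# Hypothetical example, for illustration only: "mx hsdis att" downloads the
# AT&T-syntax hsdis library for the current architecture into <suite>/lib,
# provided the flavor/arch pair appears in the sha1s table above; otherwise
# the command logs that the platform is unsupported and returns.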
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = mx.library('HCFDIS').get_path(resolve=True)
mx.run_java(['-cp', path, 'com.oracle.max.hcfdis.HexCodeFileDis'] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
def jol(args):
"""Java Object Layout"""
joljar = mx.library('JOL_INTERNALS').get_path(resolve=True)
candidates = mx.findclass(args, logToConsole=False, matcher=lambda s, classname: s == classname or classname.endswith('.' + s) or classname.endswith('$' + s))
if len(candidates) > 0:
candidates = mx.select_items(sorted(candidates))
else:
# mx.findclass can be mistaken, don't give up yet
candidates = args
run_vm(['-javaagent:' + joljar, '-cp', os.pathsep.join([mx.classpath(), joljar]), "org.openjdk.jol.MainObjectInternals"] + candidates)
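# Hypothetical example, for illustration only: "mx jol HashMap" first tries to
# resolve the name against the suite's own classes via mx.findclass, falls
# back to passing the argument through verbatim if nothing matches, and then
# prints the object layout using the bundled JOL agent.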
def _get_openjdk_os():
# See: common/autoconf/platform.m4
os = mx.get_os()
if 'darwin' in os:
os = 'macosx'
elif 'linux' in os:
os = 'linux'
elif 'solaris' in os:
os = 'solaris'
elif 'cygwin' in os or 'mingw' in os:
os = 'windows'
return os
def _get_openjdk_cpu():
cpu = mx.get_arch()
if cpu == 'amd64':
cpu = 'x86_64'
elif cpu == 'sparcv9':
cpu = 'sparcv9'
return cpu
def _get_openjdk_os_cpu():
return _get_openjdk_os() + '-' + _get_openjdk_cpu()
def _get_jdk_build_dir(debugLevel=None):
"""
Gets the directory into which the JDK is built. This directory contains
the exploded JDK under jdk/ and the JDK image under images/jdk/.
"""
if debugLevel is None:
debugLevel = _vm.debugLevel
name = '{}-{}-{}-{}'.format(_get_openjdk_os_cpu(), 'normal', _vm.jvmVariant, debugLevel)
return join(dirname(_suite.dir), 'build', name)
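# For example (illustrative only), on 64-bit Linux with the default VM
# settings this resolves to <jdk-source-root>/build/linux-x86_64-normal-server-release.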
_jvmci_bootclasspath_prepends = []
def _get_hotspot_build_dir(jvmVariant=None, debugLevel=None):
"""
Gets the directory in which a particular HotSpot configuration is built
(e.g., <JDK_REPO_ROOT>/build/macosx-x86_64-normal-server-release/hotspot/bsd_amd64_compiler2)
"""
if jvmVariant is None:
jvmVariant = _vm.jvmVariant
os = mx.get_os()
if os == 'darwin':
os = 'bsd'
arch = mx.get_arch()
buildname = {'client': 'compiler1', 'server': 'compiler2'}.get(jvmVariant, jvmVariant)
name = '{}_{}_{}'.format(os, arch, buildname)
return join(_get_jdk_build_dir(debugLevel=debugLevel), 'hotspot', name)
class JVMCI9JDKConfig(mx.JDKConfig):
def __init__(self, debugLevel):
self.debugLevel = debugLevel
jdkBuildDir = _get_jdk_build_dir(debugLevel)
jdkDir = join(jdkBuildDir, 'images', 'jdk') if mx.get_opts().use_jdk_image else join(jdkBuildDir, 'jdk')
mx.JDKConfig.__init__(self, jdkDir, tag=_JVMCI_JDK_TAG)
def parseVmArgs(self, args, addDefaultArgs=True):
args = mx.expand_project_in_args(args, insitu=False)
jacocoArgs = mx_gate.get_jacoco_agent_args()
if jacocoArgs:
args = jacocoArgs + args
args = ['-Xbootclasspath/p:' + dep.classpath_repr() for dep in _jvmci_bootclasspath_prepends] + args
# Remove JVMCI jars from class path. They are only necessary when
# compiling with a javac from JDK8 or earlier.
cpIndex, cp = mx.find_classpath_arg(args)
if cp:
excluded = frozenset([dist.path for dist in _suite.dists])
cp = os.pathsep.join([e for e in cp.split(os.pathsep) if e not in excluded])
args[cpIndex] = cp
if '-version' in args:
ignoredArgs = args[args.index('-version') + 1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
return self.processArgs(args, addDefaultArgs=addDefaultArgs)
# Overrides JDKConfig
def run_java(self, args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, env=None, addDefaultArgs=True):
if vm is None:
vm = 'server'
args = self.parseVmArgs(args, addDefaultArgs=addDefaultArgs)
jvmciModeArgs = _jvmciModes[_vm.jvmciMode]
cmd = [self.java] + ['-' + vm] + jvmciModeArgs + args
return mx.run(cmd, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd)
"""
The dict of JVMCI JDKs indexed by debug-level names.
"""
_jvmci_jdks = {}
def get_jvmci_jdk(debugLevel=None):
"""
Gets the JVMCI JDK corresponding to 'debugLevel'.
"""
if not debugLevel:
debugLevel = _vm.debugLevel
jdk = _jvmci_jdks.get(debugLevel)
if jdk is None:
try:
jdk = JVMCI9JDKConfig(debugLevel)
except mx.JDKConfigException as e:
jdkBuildDir = _get_jdk_build_dir(debugLevel)
msg = 'Error with the JDK built into {}:\n{}\nTry (re)building it with: mx --jdk-debug-level={} make'
if mx.get_opts().use_jdk_image:
msg += ' images'
mx.abort(msg.format(jdkBuildDir, e.message, debugLevel))
_jvmci_jdks[debugLevel] = jdk
return jdk
class JVMCI9JDKFactory(mx.JDKFactory):
def getJDKConfig(self):
jdk = get_jvmci_jdk(_vm.debugLevel)
return jdk
def description(self):
return "JVMCI JDK"
mx.update_commands(_suite, {
'make': [_runmake, '[args...]', _makehelp],
'multimake': [_runmultimake, '[options]'],
'c1visualizer' : [c1visualizer, ''],
'hsdis': [hsdis, '[att]'],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jol' : [jol, ''],
'vm': [run_vm, '[-options] class [args...]'],
})
mx.add_argument('-M', '--jvmci-mode', action='store', choices=sorted(_jvmciModes.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmciMode + ')')
mx.add_argument('--jdk-jvm-variant', '--vm', action='store', choices=_jdkJvmVariants + sorted(_legacyVms.viewkeys()), help='the JVM variant type to build/run (default: ' + _vm.jvmVariant + ')')
mx.add_argument('--jdk-debug-level', '--vmbuild', action='store', choices=_jdkDebugLevels + sorted(_legacyVmbuilds.viewkeys()), help='the JDK debug level to build/run (default: ' + _vm.debugLevel + ')')
mx.add_argument('-I', '--use-jdk-image', action='store_true', help='build/run JDK image instead of exploded JDK')
mx.addJDKFactory(_JVMCI_JDK_TAG, mx.JavaCompliance('9'), JVMCI9JDKFactory())
def mx_post_parse_cmd_line(opts):
mx.set_java_command_default_jdk_tag(_JVMCI_JDK_TAG)
jdkTag = mx.get_jdk_option().tag
jvmVariant = None
debugLevel = None
jvmciMode = None
if opts.jdk_jvm_variant is not None:
jvmVariant = opts.jdk_jvm_variant
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-jvm-variant" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jdk_debug_level is not None:
debugLevel = _translateLegacyDebugLevel(opts.jdk_debug_level)
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jdk-debug-level" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
if opts.jvmci_mode is not None:
jvmciMode = opts.jvmci_mode
if jdkTag and jdkTag != _JVMCI_JDK_TAG:
mx.warn('Ignoring "--jvmci-mode" option as "--jdk" tag is not "' + _JVMCI_JDK_TAG + '"')
_vm.update(jvmVariant, debugLevel, jvmciMode)
|
gpl-2.0
| 4,158,647,016,580,874,000 | 41.20506 | 202 | 0.608941 | false | 3.494983 | true | false | false |
guykisel/inline-plz
|
inlineplz/linters/shellcheck.py
|
1
|
1627
|
# -*- coding: utf-8 -*-
import dirtyjson as json
from ..decorators import linter
from ..parsers.base import ParserBase
@linter(
name="shellcheck",
install=[
["cabal", "update"],
["cabal", "install", "shellcheck"],
["apt-get", "install", "shellcheck"],
["dnf", "install", "shellcheck"],
["brew", "install", "shellcheck"],
["port", "install", "shellcheck"],
["zypper", "in", "ShellCheck"],
],
help_cmd=["shellcheck", "-V"],
run=["shellcheck", "-x", "-f", "json", "-e", "SC2086"],
rundefault=["shellcheck", "-x", "-f", "json", "-e", "SC2086"],
dotfiles=[],
language="shell",
autorun=True,
run_per_file=True,
)
class ShellcheckParser(ParserBase):
"""Parse json shellcheck output."""
def parse(self, lint_data):
messages = set()
for file_path, output in lint_data:
if file_path.strip() and output.strip():
filedata = json.loads(output)
if filedata:
for msgdata in filedata:
try:
path = file_path
line = msgdata["line"]
msgbody = msgdata["message"]
messages.add((path, line, msgbody))
except (ValueError, KeyError, TypeError):
print(
"({0}) Invalid message: {1}".format(
type(self).__name__, msgdata
)
)
return messages
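# Minimal usage sketch (the file name and message are assumptions made up for
# illustration only):
#
#   parser = ShellcheckParser()
#   sample = '[{"line": 3, "column": 1, "level": "warning", "code": 2034, "message": "foo appears unused."}]'
#   parser.parse([("script.sh", sample)])
#   # -> {("script.sh", 3, "foo appears unused.")}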
|
isc
| -6,519,408,518,040,228,000 | 32.204082 | 68 | 0.454825 | false | 4.36193 | false | false | false |
Vijaysai005/KProject
|
vijay/DBSCAN/main_3.py
|
1
|
2729
|
# usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 12 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
import Cluster as cl
from pymongo import MongoClient
import numpy as np
from datetime import datetime
def centroid(lati, longi):
x = sum(lati) / len(lati)
y = sum(longi) / len(longi)
return x,y
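# Quick sanity check (illustrative values only):
#   centroid([10.0, 12.0], [76.0, 78.0]) -> (11.0, 77.0), i.e. the arithmetic
#   mean latitude and longitude of the points in a cluster.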
def mongoCluster(get_col, set_col, year, month, startday, endday):
for day in range(startday,endday+1):
for hr in range(24):
for mins in range(59):
items = get_col.find({"$and" :[{"packettimestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"packettimestamp":{"$lte":datetime(year,month,day,hr,mins+1,0)}}]},{"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("packettimestamp", -1)])
data = [] ; item_id = []
for item in items:
if item["unit_id"] not in item_id:
item_id.append(item["unit_id"])
data.append(item)
try:
listOflist = cl.DictToList(data)
data = cl.loadData(listOflist, "unit_id", "latitude", "longitude", start_column=1)
main_dict,n_cluster = cl.cluster(data[0], data[1], 0.045, 2)
for i in range(len(main_dict)):
try:
for j in range(len(main_dict[i])):
set_col.insert([{"cluster_number": i, "unit_id": int(main_dict[i][j][0]), "latitude": main_dict[i][j][1],"longitude": main_dict[i][j][2], "timestamp":datetime(year,month,day,hr,mins)}])
except Exception:
for k in range(len(main_dict["outlier"])):
set_col.insert([{"cluster_number": "outlier", "unit_id": int(main_dict["outlier"][k][0]), "latitude": main_dict["outlier"][k][1],"longitude": main_dict["outlier"][k][2], "timestamp":datetime(year,month,day,hr,mins)}])
print (day,hr,mins)
if n_cluster == 0:
lat_cen = [] ; long_cen = []
for i in range(len(main_dict["outlier"])):
lat_cen.append(main_dict["outlier"][i][1])
long_cen.append(main_dict["outlier"][i][2])
cent_x,cent_y = centroid(lat_cen,long_cen)
else:
cent_x = [] ; cent_y = []
for i in range(n_cluster):
lat_cen = [] ; long_cen = []
							for j in range(len(main_dict[i])):
lat_cen.append(main_dict[i][j][1])
long_cen.append(main_dict[i][j][2])
_x,_y = centroid(lat_cen,long_cen)
cent_x.append(_x)
cent_y.append(_y)
#print (cent_x,cent_y)
except KeyError:
pass
return main_dict, n_cluster, cent_x, cent_y
if __name__ == "__main__":
client = MongoClient('localhost', 27017)
db = client.maximus_db
get_coll = db.device_data
set_coll = db.clus
startday = 25 ; endday = 26
year = 2017 ; month = 3
main_dict, n_cluster, cent_x, cent_y = mongoCluster(get_coll, set_coll, year, month, startday, endday)
|
gpl-3.0
| 5,728,580,436,739,454,000 | 31.488095 | 248 | 0.599487 | false | 2.729 | false | false | false |
vitan/django-guardian
|
guardian/shortcuts.py
|
1
|
27452
|
"""
Convenient shortcuts to manage or check object permissions.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Count, Q
from django.shortcuts import _get_queryset
from itertools import groupby
from guardian.compat import get_user_model
from guardian.compat import basestring
from guardian.core import ObjectPermissionChecker
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import WrongAppError
from guardian.utils import get_anonymous_user
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
import warnings
def assign_perm(perm, user_or_group, obj=None):
"""
Assigns permission to user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
We can assign permission for ``Model`` instance for specific user:
>>> from django.contrib.sites.models import Site
>>> from guardian.models import User
>>> from guardian.shortcuts import assign_perm
>>> site = Site.objects.get_current()
>>> user = User.objects.create(username='joe')
>>> assign_perm("change_site", user, site)
<UserObjectPermission: example.com | joe | change_site>
>>> user.has_perm("change_site", site)
True
... or we can assign permission for group:
>>> group = Group.objects.create(name='joe-group')
>>> user.groups.add(group)
>>> assign_perm("delete_site", group, site)
<GroupObjectPermission: example.com | joe-group | delete_site>
>>> user.has_perm("delete_site", site)
True
**Global permissions**
This function may also be used to assign standard, *global* permissions if
``obj`` parameter is omitted. Added Permission would be returned in that
case:
>>> assign_perm("sites.change_site", user)
<Permission: sites | site | Can change site>
"""
user, group = get_identity(user_or_group)
# If obj is None we try to operate on global permissions
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.add(perm)
return perm
if group:
group.permissions.add(perm)
return perm
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(obj)
return model.objects.assign_perm(perm, user, obj)
if group:
model = get_group_obj_perms_model(obj)
return model.objects.assign_perm(perm, group, obj)
def assign(perm, user_or_group, obj=None):
""" Depreciated function name left in for compatibility"""
warnings.warn("Shortcut function 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 2.0 version.", DeprecationWarning)
return assign_perm(perm, user_or_group, obj)
def bulk_assign_perm(perm, users_or_groups, objs=None):
"""
    Bulk assigns permission to users/groups and objects.
:param perm: proper permission for given ``objs``, as string (in format:
``app_label.codename`` or ``codename``). If ``objs`` is not given, must
be in format ``app_label.codename``.
:param users_or_groups: instances of ``User``, ``AnonymousUser`` or ``Group``;
passing any other objects would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param objs: persisted Django's ``Model`` instances or ``None`` if assigning
global permission. Default is ``None``.
We can assign permission for ``Model`` instances for specific users:
>>> from django.contrib.sites.models import Site
>>> from guardian.compat import get_user_model
>>> from guardian.shortcuts import bulk_assign_perm
    >>> User = get_user_model()
>>> Site.objects.bulk_create([
... Site(domain='d_vitan', name='vitan.com'),
... Site(domain='d_elain', name='elain.com')])
>>> User.objects.bulk_create([
... User(username='vitan'),
... User(username='elain')])
>>> site_qs = Site.objects.all()
>>> user_qs = User.objects.all()
>>> bulk_assign_perm("change_site", user_qs, site_qs)
[<UserObjectPermission: vitan.com | vitan | change_site>, ...]
>>> for user in user_qs:
... for site in site_qs:
... user.has_perm("change_site", site)
True
True
True
True
... or we can assign permission for groups:
>>> group = Group.objects.create(name='joe-group')
>>> for user in user_qs:
... user.groups.add(group)
>>> bulk_assign_perm("delete_site", [group], site_qs)
[<GroupObjectPermission: vitan.com | joe-group | delete_site>, ...]
>>> for user in user_qs:
... for site in site_qs:
... user.has_perm("delete_site", site)
True
True
True
True
**Global permissions**
This function may also be used to assign standard, *global* permissions if
``objs`` parameter is omitted. Added Permission would be returned in that
case:
>>> bulk_assign_perm("sites.change_site", user_qs)
<Permission: sites | site | Can change site>
"""
user, group = get_identity(users_or_groups[0])
# If objs is None we try to operate on global permissions
if objs is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
perm.user_set.add(*users_or_groups)
return perm
if group:
perm.group_set.add(*users_or_groups)
return perm
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(objs[0])
if group:
model = get_group_obj_perms_model(objs[0])
return model.objects.bulk_assign_perm(perm, users_or_groups, objs)
def remove_perm(perm, user_or_group=None, obj=None):
"""
Removes permission from user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
"""
user, group = get_identity(user_or_group)
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.remove(perm)
return
elif group:
group.permissions.remove(perm)
return
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(obj)
model.objects.remove_perm(perm, user, obj)
if group:
model = get_group_obj_perms_model(obj)
model.objects.remove_perm(perm, group, obj)
def bulk_remove_perm(perm, users_or_groups=None, objs=None):
"""
    Removes permission from users/groups and objects.
:param perm: proper permission for given ``objs``, as string (in format:
``app_label.codename`` or ``codename``). If ``objs`` is not given, must
be in format ``app_label.codename``.
:param users_or_groups: instances of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param objs: persisted Django's ``Model`` instances or ``None`` if assigning
global permission. Default is ``None``.
"""
user, group = get_identity(users_or_groups[0])
if objs is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
perm.user_set.remove(*users_or_groups)
return
elif group:
perm.group_set.remove(*users_or_groups)
return
perm = perm.split('.')[-1]
if user:
model = get_user_obj_perms_model(objs[0])
if group:
model = get_group_obj_perms_model(objs[0])
model.objects.bulk_remove_perm(perm, users_or_groups, objs)
def get_perms(user_or_group, obj):
"""
Returns permissions for given user/group and object pair, as list of
strings.
"""
check = ObjectPermissionChecker(user_or_group)
return check.get_perms(obj)
def get_perms_for_model(cls):
"""
Returns queryset of all Permission objects for the given class. It is
possible to pass Model as class or instance.
"""
if isinstance(cls, basestring):
app_label, model_name = cls.split('.')
model = models.get_model(app_label, model_name)
else:
model = cls
ctype = ContentType.objects.get_for_model(model)
return Permission.objects.filter(content_type=ctype)
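# Illustrative sketch (the app/model used here is only an example):
#
#   >>> get_perms_for_model('sites.Site')
#   [<Permission: sites | site | Can add site>,
#    <Permission: sites | site | Can change site>,
#    <Permission: sites | site | Can delete site>]
#
# Passing the model class itself or an instance yields the same queryset.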
def get_users_with_perms(obj, attach_perms=False, with_superusers=False,
with_group_users=True):
"""
Returns queryset of all ``User`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``User`` instances with permissions' codenames list as
values. This would fetch users eagerly!
:param with_superusers: Default: ``False``. If set to ``True`` result would
contain all superusers.
:param with_group_users: Default: ``True``. If set to ``False`` result would
**not** contain those users who have only group permissions for given
``obj``.
Example::
>>> from django.contrib.flatpages.models import FlatPage
>>> from django.contrib.auth.models import User
>>> from guardian.shortcuts import assign_perm, get_users_with_perms
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> joe = User.objects.create_user('joe', '[email protected]', 'joesecret')
>>> assign_perm('change_flatpage', joe, page)
>>>
>>> get_users_with_perms(page)
[<User: joe>]
>>>
>>> get_users_with_perms(page, attach_perms=True)
{<User: joe>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
user_model = get_user_obj_perms_model(obj)
related_name = user_model.user.field.related_query_name()
if user_model.objects.is_generic():
user_filters = {
'%s__content_type' % related_name: ctype,
'%s__object_pk' % related_name: obj.pk,
}
else:
user_filters = {'%s__content_object' % related_name: obj}
qset = Q(**user_filters)
if with_group_users:
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.group.field.related_query_name()
if group_model.objects.is_generic():
group_filters = {
'groups__%s__content_type' % group_rel_name: ctype,
'groups__%s__object_pk' % group_rel_name: obj.pk,
}
else:
group_filters = {
'groups__%s__content_object' % group_rel_name: obj,
}
qset = qset | Q(**group_filters)
if with_superusers:
qset = qset | Q(is_superuser=True)
return get_user_model().objects.filter(qset).distinct()
else:
# TODO: Do not hit db for each user!
users = {}
for user in get_users_with_perms(obj,
with_group_users=with_group_users):
users[user] = sorted(get_perms(user, obj))
return users
def get_groups_with_perms(obj, attach_perms=False):
"""
Returns queryset of all ``Group`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``Group`` instances with permissions' codenames list as
values. This would fetch groups eagerly!
Example::
>>> from django.contrib.flatpages.models import FlatPage
>>> from guardian.shortcuts import assign_perm, get_groups_with_perms
>>> from guardian.models import Group
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> admins = Group.objects.create(name='Admins')
>>> assign_perm('change_flatpage', admins, page)
>>>
>>> get_groups_with_perms(page)
[<Group: admins>]
>>>
>>> get_groups_with_perms(page, attach_perms=True)
{<Group: admins>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
group_model = get_group_obj_perms_model(obj)
group_rel_name = group_model.group.field.related_query_name()
if group_model.objects.is_generic():
group_filters = {
'%s__content_type' % group_rel_name: ctype,
'%s__object_pk' % group_rel_name: obj.pk,
}
else:
group_filters = {'%s__content_object' % group_rel_name: obj}
groups = Group.objects.filter(**group_filters).distinct()
return groups
else:
# TODO: Do not hit db for each group!
groups = {}
for group in get_groups_with_perms(obj):
            if group not in groups:
groups[group] = sorted(get_perms(group, obj))
return groups
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False,
with_superuser=True):
"""
Returns queryset of objects for which a given ``user`` has *all*
permissions present at ``perms``.
:param user: ``User`` or ``AnonymousUser`` instance for which objects would
be returned.
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param use_groups: if ``False``, wouldn't check user's groups object
permissions. Default is ``True``.
:param any_perm: if True, any of permission in sequence is accepted
:param with_superuser: if ``True`` returns the entire queryset if not it will
only return objects the user has explicit permissions.
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example::
>>> from django.contrib.auth.models import User
>>> from guardian.shortcuts import get_objects_for_user
>>> joe = User.objects.get(username='joe')
>>> get_objects_for_user(joe, 'auth.change_group')
[]
>>> from guardian.shortcuts import assign_perm
>>> group = Group.objects.create('some group')
>>> assign_perm('auth.change_group', joe, group)
>>> get_objects_for_user(joe, 'auth.change_group')
[<Group some group>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[]
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'], any_perm=True)
[<Group some group>]
>>> assign_perm('auth.delete_group', joe, group)
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[<Group some group>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# First check if user is superuser and if so, return queryset immediately
if with_superuser and user.is_superuser:
return queryset
# Check if the user is anonymous. The
# django.contrib.auth.models.AnonymousUser object doesn't work for queries
# and it's nice to be able to pass in request.user blindly.
if user.is_anonymous():
user = get_anonymous_user()
# Now we should extract list of pk values for which we would filter queryset
user_model = get_user_obj_perms_model(queryset.model)
user_obj_perms_queryset = (user_model.objects
.filter(user=user)
.filter(permission__content_type=ctype)
.filter(permission__codename__in=codenames))
if user_model.objects.is_generic():
fields = ['object_pk', 'permission__codename']
else:
fields = ['content_object__pk', 'permission__codename']
if use_groups:
group_model = get_group_obj_perms_model(queryset.model)
group_filters = {
'permission__content_type': ctype,
'permission__codename__in': codenames,
'group__%s' % get_user_model().groups.field.related_query_name(): user,
}
groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
if group_model.objects.is_generic():
fields = ['object_pk', 'permission__codename']
else:
fields = ['content_object__pk', 'permission__codename']
if not any_perm:
user_obj_perms = user_obj_perms_queryset.values_list(*fields)
groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
data = list(user_obj_perms) + list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
if not any_perm and len(codenames) > 1:
counts = user_obj_perms_queryset.values(fields[0]).annotate(object_pk_count=Count(fields[0]))
user_obj_perms_queryset = counts.filter(object_pk_count__gte=len(codenames))
values = user_obj_perms_queryset.values_list(fields[0], flat=True)
if user_model.objects.is_generic():
values = [int(v) for v in values]
objects = queryset.filter(pk__in=values)
if use_groups:
values = groups_obj_perms_queryset.values_list(fields[0], flat=True)
if group_model.objects.is_generic():
values = [int(v) for v in values]
objects |= queryset.filter(pk__in=values)
return objects
def get_objects_for_group(group, perms, klass=None, any_perm=False):
"""
Returns queryset of objects for which a given ``group`` has *all*
permissions present at ``perms``.
:param group: ``Group`` instance for which objects would be returned.
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param any_perm: if True, any of permission in sequence is accepted
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example:
Let's assume we have a ``Task`` model belonging to the ``tasker`` app with
the default add_task, change_task and delete_task permissions provided
by Django::
>>> from guardian.shortcuts import get_objects_for_group
>>> from tasker import Task
>>> group = Group.objects.create('some group')
>>> task = Task.objects.create('some task')
>>> get_objects_for_group(group, 'tasker.add_task')
[]
>>> from guardian.shortcuts import assign_perm
>>> assign_perm('tasker.add_task', group, task)
>>> get_objects_for_group(group, 'tasker.add_task')
[<Task some task>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[]
>>> assign_perm('tasker.delete_task', group, task)
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[<Task some task>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# Now we should extract list of pk values for which we would filter queryset
group_model = get_group_obj_perms_model(queryset.model)
groups_obj_perms_queryset = (group_model.objects
.filter(group=group)
.filter(permission__content_type=ctype)
.filter(permission__codename__in=codenames))
if group_model.objects.is_generic():
fields = ['object_pk', 'permission__codename']
else:
fields = ['content_object__pk', 'permission__codename']
groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
data = list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if any_perm or codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
|
bsd-2-clause
| -398,339,219,573,541,500 | 39.075912 | 179 | 0.623998 | false | 3.973368 | false | false | false |
abramhindle/UnnaturalCodeFork
|
python/testdata/launchpad/lib/lp/testing/layers.py
|
1
|
66650
|
# Copyright 2009-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Layers used by Launchpad tests.
Layers are the mechanism used by the Zope3 test runner to efficiently
provide environments for tests and are documented in the lib/zope/testing.
Note that every Layer should define all of setUp, tearDown, testSetUp
and testTearDown. If you don't do this, a base class' method will be called
instead, probably breaking something.
Preferred style is to not use the 'cls' argument to Layer class methods,
as this is unambiguous.
TODO: Make the Zope3 test runner handle multiple layers per test instead
of one, forcing us to attempt to make some sort of layer tree.
-- StuartBishop 20060619
"""
__metaclass__ = type
__all__ = [
'AppServerLayer',
'AuditorLayer',
'BaseLayer',
'DatabaseFunctionalLayer',
'DatabaseLayer',
'FunctionalLayer',
'GoogleLaunchpadFunctionalLayer',
'GoogleServiceLayer',
'LaunchpadFunctionalLayer',
'LaunchpadLayer',
'LaunchpadScriptLayer',
'LaunchpadTestSetup',
'LaunchpadZopelessLayer',
'LayerInvariantError',
'LayerIsolationError',
'LibrarianLayer',
'PageTestLayer',
'RabbitMQLayer',
'SwiftLayer',
'TwistedAppServerLayer',
'TwistedLaunchpadZopelessLayer',
'TwistedLayer',
'YUITestLayer',
'YUIAppServerLayer',
'ZopelessAppServerLayer',
'ZopelessDatabaseLayer',
'ZopelessLayer',
'disconnect_stores',
'reconnect_stores',
'wsgi_application',
]
from cProfile import Profile
import datetime
import errno
import gc
import logging
import os
import signal
import socket
import subprocess
import sys
import tempfile
from textwrap import dedent
import threading
import time
from unittest import (
TestCase,
TestResult,
)
from urllib import urlopen
from fixtures import (
Fixture,
MonkeyPatch,
)
import psycopg2
from storm.zope.interfaces import IZStorm
import transaction
import wsgi_intercept
from wsgi_intercept import httplib2_intercept
from zope.app.publication.httpfactory import chooseClasses
import zope.app.testing.functional
from zope.app.testing.functional import (
FunctionalTestSetup,
ZopePublication,
)
from zope.component import (
getUtility,
globalregistry,
provideUtility,
)
from zope.component.interfaces import ComponentLookupError
import zope.publisher.publish
from zope.security.management import (
endInteraction,
getSecurityPolicy,
)
from zope.server.logger.pythonlogger import PythonLogger
from lp.services import pidfile
from lp.services.auditor.server import AuditorServer
from lp.services.config import (
config,
dbconfig,
LaunchpadConfig,
)
from lp.services.config.fixture import (
ConfigFixture,
ConfigUseFixture,
)
from lp.services.database.interfaces import IStore
from lp.services.database.sqlbase import session_store
from lp.services.googlesearch.tests.googleserviceharness import (
GoogleServiceTestSetup,
)
from lp.services.job.tests import celeryd
from lp.services.librarian.model import LibraryFileAlias
from lp.services.librarianserver.testing.server import LibrarianServerFixture
from lp.services.mail.mailbox import (
IMailBox,
TestMailBox,
)
from lp.services.mail.sendmail import set_immediate_mail_delivery
import lp.services.mail.stub
from lp.services.memcache.client import memcache_client_factory
from lp.services.osutils import kill_by_pidfile
from lp.services.rabbit.server import RabbitServer
from lp.services.scripts import execute_zcml_for_scripts
from lp.services.testing.profiled import profiled
from lp.services.timeout import (
get_default_timeout_function,
set_default_timeout_function,
)
from lp.services.webapp.authorization import LaunchpadPermissiveSecurityPolicy
from lp.services.webapp.interfaces import IOpenLaunchBag
from lp.services.webapp.servers import (
LaunchpadAccessLogger,
register_launchpad_request_publication_factories,
)
import lp.services.webapp.session
from lp.testing import (
ANONYMOUS,
login,
logout,
reset_logging,
)
from lp.testing.pgsql import PgTestSetup
from lp.testing.swift.fixture import SwiftFixture
from lp.testing.smtpd import SMTPController
orig__call__ = zope.app.testing.functional.HTTPCaller.__call__
COMMA = ','
WAIT_INTERVAL = datetime.timedelta(seconds=180)
def set_up_functional_test():
return FunctionalTestSetup('zcml/ftesting.zcml')
class LayerError(Exception):
pass
class LayerInvariantError(LayerError):
"""Layer self checks have detected a fault. Invariant has been violated.
This indicates the Layer infrastructure has messed up. The test run
should be aborted.
"""
pass
class LayerIsolationError(LayerError):
"""Test isolation has been broken, probably by the test we just ran.
This generally indicates a test has screwed up by not resetting
something correctly to the default state.
The test suite should abort if it cannot clean up the mess as further
test failures may well be spurious.
"""
def is_ca_available():
"""Returns true if the component architecture has been loaded"""
try:
getUtility(IOpenLaunchBag)
except ComponentLookupError:
return False
else:
return True
def disconnect_stores():
"""Disconnect Storm stores."""
zstorm = getUtility(IZStorm)
stores = [
store for name, store in zstorm.iterstores() if name != 'session']
# If we have any stores, abort the transaction and close them.
if stores:
for store in stores:
zstorm.remove(store)
transaction.abort()
for store in stores:
store.close()
def reconnect_stores(reset=False):
"""Reconnect Storm stores, resetting the dbconfig to its defaults.
After reconnecting, the database revision will be checked to make
sure the right data is available.
"""
disconnect_stores()
if reset:
dbconfig.reset()
main_store = IStore(LibraryFileAlias)
assert main_store is not None, 'Failed to reconnect'
# Confirm that SQLOS is again talking to the database (it connects
    # as soon as SQLBase._connection is accessed)
r = main_store.execute('SELECT count(*) FROM LaunchpadDatabaseRevision')
assert r.get_one()[0] > 0, 'Storm is not talking to the database'
assert session_store() is not None, 'Failed to reconnect'
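# Illustrative sketch only, not called anywhere in this module: the typical
# pairing of reconnect_stores()/disconnect_stores() that the database layers
# below perform around each test.
def _example_store_cycle():
    reconnect_stores(reset=True)
    try:
        # ... exercise code that uses IStore(...) against the test database ...
        pass
    finally:
        # Leave nothing attached before the database gets reset.
        disconnect_stores()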
def wait_children(seconds=120):
"""Wait for all children to exit.
:param seconds: Maximum number of seconds to wait. If None, wait
forever.
"""
now = datetime.datetime.now
if seconds is None:
until = None
else:
until = now() + datetime.timedelta(seconds=seconds)
while True:
try:
os.waitpid(-1, os.WNOHANG)
except OSError as error:
if error.errno != errno.ECHILD:
raise
break
if until is not None and now() > until:
break
class MockRootFolder:
"""Implement the minimum functionality required by Z3 ZODB dependencies
Installed as part of FunctionalLayer.testSetUp() to allow the http()
method (zope.app.testing.functional.HTTPCaller) to work.
"""
@property
def _p_jar(self):
return self
def sync(self):
pass
class BaseLayer:
"""Base layer.
All our layers should subclass Base, as this is where we will put
    test isolation checks to ensure that tests do not leave global
resources in a mess.
XXX: StuartBishop 2006-07-12: Unit tests (tests with no layer) will not
get these checks. The Z3 test runner should be updated so that a layer
can be specified to use for unit tests.
"""
# Set to True when we are running tests in this layer.
isSetUp = False
# The name of this test - this is the same output that the testrunner
# displays. It is probably unique, but not guaranteed to be so.
test_name = None
# A flag to disable a check for threads still running after test
# completion. This is hopefully a temporary measure; see the comment
# in tearTestDown.
disable_thread_check = False
    # A flag to make services like Librarian and Memcached persist
# between test runs. This flag is set in setUp() by looking at the
# LP_PERSISTENT_TEST_SERVICES environment variable.
persist_test_services = False
# Things we need to cleanup.
fixture = None
# ConfigFixtures for the configs generated for this layer. Set to None
# if the layer is not setUp, or if persistent tests services are in use.
config_fixture = None
appserver_config_fixture = None
# The config names that are generated for this layer. Set to None when
# the layer is not setUp.
config_name = None
appserver_config_name = None
@classmethod
def make_config(cls, config_name, clone_from, attr_name):
"""Create a temporary config and link it into the layer cleanup."""
cfg_fixture = ConfigFixture(config_name, clone_from)
cls.fixture.addCleanup(cfg_fixture.cleanUp)
cfg_fixture.setUp()
cls.fixture.addCleanup(setattr, cls, attr_name, None)
setattr(cls, attr_name, cfg_fixture)
@classmethod
@profiled
def setUp(cls):
# Set the default appserver config instance name.
# May be changed as required eg when running parallel tests.
cls.appserver_config_name = 'testrunner-appserver'
BaseLayer.isSetUp = True
cls.fixture = Fixture()
cls.fixture.setUp()
cls.fixture.addCleanup(setattr, cls, 'fixture', None)
BaseLayer.persist_test_services = (
os.environ.get('LP_PERSISTENT_TEST_SERVICES') is not None)
# We can only do unique test allocation and parallelisation if
# LP_PERSISTENT_TEST_SERVICES is off.
if not BaseLayer.persist_test_services:
test_instance = str(os.getpid())
os.environ['LP_TEST_INSTANCE'] = test_instance
cls.fixture.addCleanup(os.environ.pop, 'LP_TEST_INSTANCE', '')
# Kill any Memcached or Librarian left running from a previous
# test run, or from the parent test process if the current
# layer is being run in a subprocess. No need to be polite
# about killing memcached - just do it quickly.
kill_by_pidfile(MemcachedLayer.getPidFile(), num_polls=0)
config_name = 'testrunner_%s' % test_instance
cls.make_config(config_name, 'testrunner', 'config_fixture')
app_config_name = 'testrunner-appserver_%s' % test_instance
cls.make_config(
app_config_name, 'testrunner-appserver',
'appserver_config_fixture')
cls.appserver_config_name = app_config_name
else:
config_name = 'testrunner'
app_config_name = 'testrunner-appserver'
cls.config_name = config_name
cls.fixture.addCleanup(setattr, cls, 'config_name', None)
cls.appserver_config_name = app_config_name
cls.fixture.addCleanup(setattr, cls, 'appserver_config_name', None)
use_fixture = ConfigUseFixture(config_name)
cls.fixture.addCleanup(use_fixture.cleanUp)
use_fixture.setUp()
# Kill any database left lying around from a previous test run.
db_fixture = LaunchpadTestSetup()
try:
db_fixture.connect().close()
except psycopg2.Error:
# We assume this means 'no test database exists.'
pass
else:
db_fixture.dropDb()
@classmethod
@profiled
def tearDown(cls):
cls.fixture.cleanUp()
BaseLayer.isSetUp = False
@classmethod
@profiled
def testSetUp(cls):
# Store currently running threads so we can detect if a test
# leaves new threads running.
BaseLayer._threads = threading.enumerate()
BaseLayer.check()
BaseLayer.original_working_directory = os.getcwd()
        # Tests and test infrastructure sometimes need to know the test
# name. The testrunner doesn't provide this, so we have to do
# some snooping.
import inspect
frame = inspect.currentframe()
try:
while frame.f_code.co_name != 'startTest':
frame = frame.f_back
BaseLayer.test_name = str(frame.f_locals['test'])
finally:
del frame # As per no-leak stack inspection in Python reference.
@classmethod
@profiled
def testTearDown(cls):
# Get our current working directory, handling the case where it no
# longer exists (!).
try:
cwd = os.getcwd()
except OSError:
cwd = None
# Handle a changed working directory. If the test succeeded,
# add an error. Then restore the working directory so the test
# run can continue.
if cwd != BaseLayer.original_working_directory:
BaseLayer.flagTestIsolationFailure(
"Test failed to restore working directory.")
os.chdir(BaseLayer.original_working_directory)
BaseLayer.original_working_directory = None
reset_logging()
del lp.services.mail.stub.test_emails[:]
BaseLayer.test_name = None
BaseLayer.check()
def new_live_threads():
return [
thread for thread in threading.enumerate()
if thread not in BaseLayer._threads and thread.isAlive()]
if BaseLayer.disable_thread_check:
new_threads = None
else:
for loop in range(0, 100):
# Check for tests that leave live threads around early.
# A live thread may be the cause of other failures, such as
# uncollectable garbage.
new_threads = new_live_threads()
has_live_threads = False
for new_thread in new_threads:
new_thread.join(0.1)
if new_thread.isAlive():
has_live_threads = True
if has_live_threads:
# Trigger full garbage collection that might be
# blocking threads from exiting.
gc.collect()
else:
break
new_threads = new_live_threads()
if new_threads:
# BaseLayer.disable_thread_check is a mechanism to stop
# tests that leave threads behind from failing. Its use
# should only ever be temporary.
if BaseLayer.disable_thread_check:
print (
"ERROR DISABLED: "
"Test left new live threads: %s") % repr(new_threads)
else:
BaseLayer.flagTestIsolationFailure(
"Test left new live threads: %s" % repr(new_threads))
BaseLayer.disable_thread_check = False
del BaseLayer._threads
if signal.getsignal(signal.SIGCHLD) != signal.SIG_DFL:
BaseLayer.flagTestIsolationFailure(
"Test left SIGCHLD handler.")
        # Objects with __del__ methods cannot participate in reference cycles.
# Fail tests with memory leaks now rather than when Launchpad crashes
# due to a leak because someone ignored the warnings.
if gc.garbage:
del gc.garbage[:]
gc.collect() # Expensive, so only do if there might be garbage.
if gc.garbage:
BaseLayer.flagTestIsolationFailure(
"Test left uncollectable garbage\n"
"%s (referenced from %s)"
% (gc.garbage, gc.get_referrers(*gc.garbage)))
@classmethod
@profiled
def check(cls):
"""Check that the environment is working as expected.
We check here so we can detect tests that, for example,
initialize the Zopeless or Functional environments and
are using the incorrect layer.
"""
if FunctionalLayer.isSetUp and ZopelessLayer.isSetUp:
raise LayerInvariantError(
"Both Zopefull and Zopeless CA environments setup")
# Detect a test that causes the component architecture to be loaded.
# This breaks test isolation, as it cannot be torn down.
if (is_ca_available()
and not FunctionalLayer.isSetUp
and not ZopelessLayer.isSetUp):
raise LayerIsolationError(
"Component architecture should not be loaded by tests. "
"This should only be loaded by the Layer.")
# Detect a test that forgot to reset the default socket timeout.
# This safety belt is cheap and protects us from very nasty
# intermittent test failures: see bug #140068 for an example.
if socket.getdefaulttimeout() is not None:
raise LayerIsolationError(
"Test didn't reset the socket default timeout.")
@classmethod
def flagTestIsolationFailure(cls, message):
"""Handle a breakdown in test isolation.
If the test that broke isolation thinks it succeeded,
add an error. If the test failed, don't add a notification
as the isolation breakdown is probably just fallout.
The layer that detected the isolation failure still needs to
repair the damage, or in the worst case abort the test run.
"""
test_result = BaseLayer.getCurrentTestResult()
if test_result.wasSuccessful():
test_case = BaseLayer.getCurrentTestCase()
try:
raise LayerIsolationError(message)
except LayerIsolationError:
test_result.addError(test_case, sys.exc_info())
@classmethod
def getCurrentTestResult(cls):
"""Return the TestResult currently in play."""
import inspect
frame = inspect.currentframe()
try:
while True:
f_self = frame.f_locals.get('self', None)
if isinstance(f_self, TestResult):
return frame.f_locals['self']
frame = frame.f_back
finally:
del frame # As per no-leak stack inspection in Python reference.
@classmethod
def getCurrentTestCase(cls):
"""Return the test currently in play."""
import inspect
frame = inspect.currentframe()
try:
while True:
f_self = frame.f_locals.get('self', None)
if isinstance(f_self, TestCase):
return f_self
f_test = frame.f_locals.get('test', None)
if isinstance(f_test, TestCase):
return f_test
frame = frame.f_back
return frame.f_locals['test']
finally:
del frame # As per no-leak stack inspection in Python reference.
@classmethod
def appserver_config(cls):
"""Return a config suitable for AppServer tests."""
return LaunchpadConfig(cls.appserver_config_name)
@classmethod
def appserver_root_url(cls, facet='mainsite', ensureSlash=False):
"""Return the correct app server root url for the given facet."""
return cls.appserver_config().appserver_root_url(
facet, ensureSlash)
class MemcachedLayer(BaseLayer):
"""Provides tests access to a memcached.
Most tests needing memcache access will actually need to use
    ZopelessLayer, FunctionalLayer or a sublayer as they will be accessing
memcached using a utility.
"""
# A memcache.Client instance.
client = None
# A subprocess.Popen instance if this process spawned the test
# memcached.
_memcached_process = None
_is_setup = False
@classmethod
@profiled
def setUp(cls):
cls._is_setup = True
# Create a client
MemcachedLayer.client = memcache_client_factory()
if (BaseLayer.persist_test_services and
os.path.exists(MemcachedLayer.getPidFile())):
return
# First, check to see if there is a memcached already running.
# This happens when new layers are run as a subprocess.
test_key = "MemcachedLayer__live_test"
if MemcachedLayer.client.set(test_key, "live"):
return
cmd = [
'memcached',
'-m', str(config.memcached.memory_size),
'-l', str(config.memcached.address),
'-p', str(config.memcached.port),
'-U', str(config.memcached.port),
]
if config.memcached.verbose:
cmd.append('-vv')
stdout = sys.stdout
stderr = sys.stderr
else:
stdout = tempfile.NamedTemporaryFile()
stderr = tempfile.NamedTemporaryFile()
MemcachedLayer._memcached_process = subprocess.Popen(
cmd, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr)
MemcachedLayer._memcached_process.stdin.close()
# Wait for the memcached to become operational.
while not MemcachedLayer.client.set(test_key, "live"):
if MemcachedLayer._memcached_process.returncode is not None:
raise LayerInvariantError(
"memcached never started or has died.",
MemcachedLayer._memcached_process.stdout.read())
MemcachedLayer.client.forget_dead_hosts()
time.sleep(0.1)
# Store the pidfile for other processes to kill.
pid_file = MemcachedLayer.getPidFile()
open(pid_file, 'w').write(str(MemcachedLayer._memcached_process.pid))
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls._is_setup = False
MemcachedLayer.client.disconnect_all()
MemcachedLayer.client = None
if not BaseLayer.persist_test_services:
# Kill our memcached, and there is no reason to be nice about it.
kill_by_pidfile(MemcachedLayer.getPidFile())
MemcachedLayer._memcached_process = None
@classmethod
@profiled
def testSetUp(cls):
MemcachedLayer.client.forget_dead_hosts()
MemcachedLayer.client.flush_all()
@classmethod
@profiled
def testTearDown(cls):
pass
@classmethod
def getPidFile(cls):
return os.path.join(config.root, '.memcache.pid')
@classmethod
def purge(cls):
"Purge everything from our memcached."
MemcachedLayer.client.flush_all() # Only do this in tests!
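# Illustrative sketch only, not called anywhere in this module: tests running
# on MemcachedLayer (or any sublayer) can talk to the shared client directly.
def _example_memcached_roundtrip():
    MemcachedLayer.client.set('example-key', 'example-value')
    return MemcachedLayer.client.get('example-key')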
class RabbitMQLayer(BaseLayer):
"""Provides tests access to a rabbitMQ instance."""
rabbit = RabbitServer()
_is_setup = False
@classmethod
@profiled
def setUp(cls):
cls.rabbit.setUp()
cls.config_fixture.add_section(
cls.rabbit.config.service_config)
cls.appserver_config_fixture.add_section(
cls.rabbit.config.service_config)
cls._is_setup = True
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls.rabbit.cleanUp()
cls._is_setup = False
# Can't pop the config above, so bail here and let the test runner
# start a sub-process.
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
pass
# We store a reference to the DB-API connect method here when we
# put a proxy in its place.
_org_connect = None
class DatabaseLayer(BaseLayer):
"""Provides tests access to the Launchpad sample database."""
_is_setup = False
_db_fixture = None
# For parallel testing, we allocate a temporary template to prevent worker
# contention.
_db_template_fixture = None
@classmethod
@profiled
def setUp(cls):
cls._is_setup = True
# Read the sequences we'll need from the test template database.
reset_sequences_sql = LaunchpadTestSetup(
dbname='launchpad_ftest_template').generateResetSequencesSQL()
# Allocate a template for this test instance
if os.environ.get('LP_TEST_INSTANCE'):
template_name = '_'.join([LaunchpadTestSetup.template,
os.environ.get('LP_TEST_INSTANCE')])
cls._db_template_fixture = LaunchpadTestSetup(
dbname=template_name, reset_sequences_sql=reset_sequences_sql)
cls._db_template_fixture.setUp()
else:
template_name = LaunchpadTestSetup.template
cls._db_fixture = LaunchpadTestSetup(template=template_name,
reset_sequences_sql=reset_sequences_sql)
cls.force_dirty_database()
# Nuke any existing DB (for persistent-test-services) [though they
# prevent this !?]
cls._db_fixture.tearDown()
# Force a db creation for unique db names - needed at layer init
# because appserver using layers run things at layer setup, not
# test setup.
cls._db_fixture.setUp()
# And take it 'down' again to be in the right state for testSetUp
# - note that this conflicts in principle with layers whose setUp
# needs the db working, but this is a conceptually cleaner starting
# point for addressing that mismatch.
cls._db_fixture.tearDown()
# Bring up the db, so that it is available for other layers.
cls._ensure_db()
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls._is_setup = False
# Don't leave the DB lying around or it might break tests
# that depend on it not being there on startup, such as found
# in test_layers.py
cls.force_dirty_database()
cls._db_fixture.tearDown()
cls._db_fixture = None
if os.environ.get('LP_TEST_INSTANCE'):
cls._db_template_fixture.tearDown()
cls._db_template_fixture = None
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
def _ensure_db(cls):
cls._db_fixture.setUp()
# Ensure that the database is connectable. Because we might have
        # just created it, keep trying for a few seconds in case PostgreSQL
# is taking its time getting its house in order.
attempts = 60
for count in range(0, attempts):
try:
cls.connect().close()
except psycopg2.Error:
if count == attempts - 1:
raise
time.sleep(0.5)
else:
break
@classmethod
@profiled
def testTearDown(cls):
# Ensure that the database is connectable
cls.connect().close()
cls._db_fixture.tearDown()
# Fail tests that forget to uninstall their database policies.
from lp.services.webapp.adapter import StoreSelector
while StoreSelector.get_current() is not None:
BaseLayer.flagTestIsolationFailure(
"Database policy %s still installed"
% repr(StoreSelector.pop()))
# Reset/bring up the db - makes it available for either the next test,
# or a subordinate layer which builds on the db. This wastes one setup
        # per db layer teardown per run, but that's tolerable.
cls._ensure_db()
@classmethod
@profiled
def force_dirty_database(cls):
cls._db_fixture.force_dirty_database()
@classmethod
@profiled
def connect(cls):
return cls._db_fixture.connect()
@classmethod
@profiled
def _dropDb(cls):
return cls._db_fixture.dropDb()
class SwiftLayer(BaseLayer):
@classmethod
@profiled
def setUp(cls):
cls.swift_fixture = SwiftFixture()
cls.swift_fixture.setUp()
@classmethod
@profiled
def tearDown(cls):
swift = cls.swift_fixture
if swift is not None:
cls.swift_fixture = None
swift.cleanUp()
class LibrarianLayer(DatabaseLayer):
"""Provides tests access to a Librarian instance.
Calls to the Librarian will fail unless there is also a Launchpad
database available.
"""
librarian_fixture = None
@classmethod
@profiled
def setUp(cls):
cls.librarian_fixture = LibrarianServerFixture(
BaseLayer.config_fixture)
cls.librarian_fixture.setUp()
cls._check_and_reset()
# Make sure things using the appserver config know the
# correct Librarian port numbers.
cls.appserver_config_fixture.add_section(
cls.librarian_fixture.service_config)
@classmethod
@profiled
def tearDown(cls):
# Permit multiple teardowns while we sort out the layering
# responsibilities : not desirable though.
if cls.librarian_fixture is None:
return
try:
cls._check_and_reset()
finally:
librarian = cls.librarian_fixture
cls.librarian_fixture = None
librarian.cleanUp()
@classmethod
@profiled
def _check_and_reset(cls):
"""Raise an exception if the Librarian has been killed, else reset."""
try:
f = urlopen(config.librarian.download_url)
f.read()
except Exception as e:
raise LayerIsolationError(
"Librarian has been killed or has hung."
"Tests should use LibrarianLayer.hide() and "
"LibrarianLayer.reveal() where possible, and ensure "
"the Librarian is restarted if it absolutely must be "
"shutdown: " + str(e))
else:
cls.librarian_fixture.reset()
@classmethod
@profiled
def testSetUp(cls):
cls._check_and_reset()
@classmethod
@profiled
def testTearDown(cls):
if cls._hidden:
cls.reveal()
cls._check_and_reset()
# Flag maintaining state of hide()/reveal() calls
_hidden = False
# Fake upload socket used when the librarian is hidden
_fake_upload_socket = None
@classmethod
@profiled
def hide(cls):
"""Hide the Librarian so nothing can find it. We don't want to
actually shut it down because starting it up again is expensive.
We do this by altering the configuration so the Librarian client
looks for the Librarian server on the wrong port.
"""
cls._hidden = True
if cls._fake_upload_socket is None:
# Bind to a socket, but don't listen to it. This way we
# guarantee that connections to the given port will fail.
cls._fake_upload_socket = socket.socket(
socket.AF_INET, socket.SOCK_STREAM)
assert config.librarian.upload_host == 'localhost', (
'Can only hide librarian if it is running locally')
cls._fake_upload_socket.bind(('127.0.0.1', 0))
host, port = cls._fake_upload_socket.getsockname()
librarian_data = dedent("""
[librarian]
upload_port: %s
""" % port)
config.push('hide_librarian', librarian_data)
@classmethod
@profiled
def reveal(cls):
"""Reveal a hidden Librarian.
This just involves restoring the config to the original value.
"""
cls._hidden = False
config.pop('hide_librarian')
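# Illustrative sketch only, not called anywhere in this module: simulating a
# Librarian outage from a test on LibrarianLayer (or a sublayer), as suggested
# by the error message in _check_and_reset() above.
def _example_librarian_outage():
    LibrarianLayer.hide()
    try:
        # ... exercise code paths that must cope with failing uploads ...
        pass
    finally:
        LibrarianLayer.reveal()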
def test_default_timeout():
"""Don't timeout by default in tests."""
return None
class LaunchpadLayer(LibrarianLayer, MemcachedLayer, RabbitMQLayer):
"""Provides access to the Launchpad database and daemons.
We need to ensure that the database setup runs before the daemon
setup, or the database setup will fail because the daemons are
already connected to the database.
This layer is mainly used by tests that call initZopeless() themselves.
Most tests will use a sublayer such as LaunchpadFunctionalLayer that
provides access to the Component Architecture.
"""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
# By default, don't make external service tests timeout.
if get_default_timeout_function() is not None:
raise LayerIsolationError(
"Global default timeout function should be None.")
set_default_timeout_function(test_default_timeout)
@classmethod
@profiled
def testTearDown(cls):
if get_default_timeout_function() is not test_default_timeout:
raise LayerIsolationError(
"Test didn't reset default timeout function.")
set_default_timeout_function(None)
# A database connection to the session database, created by the first
# call to resetSessionDb.
_raw_sessiondb_connection = None
@classmethod
@profiled
def resetSessionDb(cls):
"""Reset the session database.
Layers that need session database isolation call this explicitly
in the testSetUp().
"""
if LaunchpadLayer._raw_sessiondb_connection is None:
from storm.uri import URI
from lp.services.webapp.adapter import (
LaunchpadSessionDatabase)
launchpad_session_database = LaunchpadSessionDatabase(
URI('launchpad-session:'))
LaunchpadLayer._raw_sessiondb_connection = (
launchpad_session_database.raw_connect())
LaunchpadLayer._raw_sessiondb_connection.cursor().execute(
"DELETE FROM SessionData")
def wsgi_application(environ, start_response):
"""This is a wsgi application for Zope functional testing.
We use it with wsgi_intercept, which is itself mostly interesting
for our webservice (lazr.restful) tests.
"""
# Committing work done up to now is a convenience that the Zope
# zope.app.testing.functional.HTTPCaller does. We're replacing that bit,
# so it is easiest to follow that lead, even if it feels a little loose.
transaction.commit()
# Let's support post-mortem debugging.
if environ.pop('HTTP_X_ZOPE_HANDLE_ERRORS', 'True') == 'False':
environ['wsgi.handleErrors'] = False
handle_errors = environ.get('wsgi.handleErrors', True)
# Make sure the request method is something Launchpad will
# recognize. httplib2 usually takes care of this, but we've
# bypassed that code in our test environment.
environ['REQUEST_METHOD'] = environ['REQUEST_METHOD'].upper()
# Now we do the proper dance to get the desired request. This is an
    # amalgam of code from zope.app.testing.functional.HTTPCaller and
# zope.publisher.paste.Application.
request_cls, publication_cls = chooseClasses(
environ['REQUEST_METHOD'], environ)
publication = publication_cls(set_up_functional_test().db)
request = request_cls(environ['wsgi.input'], environ)
request.setPublication(publication)
# The rest of this function is an amalgam of
# zope.publisher.paste.Application.__call__ and van.testing.layers.
request = zope.publisher.publish.publish(
request, handle_errors=handle_errors)
response = request.response
# We sort these, and then put the status first, because
# zope.testbrowser.testing does--and because it makes it easier to write
# reliable tests.
headers = sorted(response.getHeaders())
status = response.getStatusString()
headers.insert(0, ('Status', status))
# Start the WSGI server response.
start_response(status, headers)
# Return the result body iterable.
return response.consumeBodyIter()
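# Illustrative sketch only, not called anywhere in this module: once
# FunctionalLayer.setUp() below has installed the wsgi_intercept hooks, an
# httplib2 client reaches wsgi_application without opening a real socket.
def _example_intercepted_request():
    import httplib2
    response, body = httplib2.Http().request('http://localhost/', 'GET')
    return response.status, body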
class FunctionalLayer(BaseLayer):
"""Loads the Zope3 component architecture in appserver mode."""
# Set to True if tests using the Functional layer are currently being run.
isSetUp = False
@classmethod
@profiled
def setUp(cls):
FunctionalLayer.isSetUp = True
set_up_functional_test().setUp()
# Assert that set_up_functional_test did what it says it does
if not is_ca_available():
raise LayerInvariantError("Component architecture failed to load")
# Access the cookie manager's secret to get the cache populated.
# If we don't, it may issue extra queries depending on test order.
lp.services.webapp.session.idmanager.secret
# If our request publication factories were defined using ZCML,
# they'd be set up by set_up_functional_test().setUp(). Since
# they're defined by Python code, we need to call that code
# here.
register_launchpad_request_publication_factories()
wsgi_intercept.add_wsgi_intercept(
'localhost', 80, lambda: wsgi_application)
wsgi_intercept.add_wsgi_intercept(
'api.launchpad.dev', 80, lambda: wsgi_application)
httplib2_intercept.install()
@classmethod
@profiled
def tearDown(cls):
FunctionalLayer.isSetUp = False
wsgi_intercept.remove_wsgi_intercept('localhost', 80)
wsgi_intercept.remove_wsgi_intercept('api.launchpad.dev', 80)
httplib2_intercept.uninstall()
# Signal Layer cannot be torn down fully
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
transaction.abort()
transaction.begin()
# Fake a root folder to keep Z3 ZODB dependencies happy.
fs = set_up_functional_test()
if not fs.connection:
fs.connection = fs.db.open()
root = fs.connection.root()
root[ZopePublication.root_name] = MockRootFolder()
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
@classmethod
@profiled
def testTearDown(cls):
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
transaction.abort()
class ZopelessLayer(BaseLayer):
"""Layer for tests that need the Zopeless component architecture
loaded using execute_zcml_for_scripts().
"""
# Set to True if tests in the Zopeless layer are currently being run.
isSetUp = False
@classmethod
@profiled
def setUp(cls):
ZopelessLayer.isSetUp = True
execute_zcml_for_scripts()
# Assert that execute_zcml_for_scripts did what it says it does.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded by "
"execute_zcml_for_scripts")
# If our request publication factories were defined using
# ZCML, they'd be set up by execute_zcml_for_scripts(). Since
# they're defined by Python code, we need to call that code
# here.
register_launchpad_request_publication_factories()
@classmethod
@profiled
def tearDown(cls):
ZopelessLayer.isSetUp = False
# Signal Layer cannot be torn down fully
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
# This should not happen here, it should be caught by the
# testTearDown() method. If it does, something very nasty
# happened.
if getSecurityPolicy() != LaunchpadPermissiveSecurityPolicy:
raise LayerInvariantError(
"Previous test removed the LaunchpadPermissiveSecurityPolicy."
)
# execute_zcml_for_scripts() sets up an interaction for the
# anonymous user. A previous script may have changed or removed
# the interaction, so set it up again
login(ANONYMOUS)
@classmethod
@profiled
def testTearDown(cls):
# Should be impossible, as the CA cannot be unloaded. Something
# mighty nasty has happened if this is triggered.
if not is_ca_available():
raise LayerInvariantError(
"Component architecture not loaded or totally screwed")
# Make sure that a test that changed the security policy, reset it
# back to its default value.
if getSecurityPolicy() != LaunchpadPermissiveSecurityPolicy:
raise LayerInvariantError(
"This test removed the LaunchpadPermissiveSecurityPolicy and "
"didn't restore it.")
logout()
class TwistedLayer(BaseLayer):
"""A layer for cleaning up the Twisted thread pool."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
def _save_signals(cls):
"""Save the current signal handlers."""
TwistedLayer._original_sigint = signal.getsignal(signal.SIGINT)
TwistedLayer._original_sigterm = signal.getsignal(signal.SIGTERM)
TwistedLayer._original_sigchld = signal.getsignal(signal.SIGCHLD)
# XXX MichaelHudson, 2009-07-14, bug=399118: If a test case in this
# layer launches a process with spawnProcess, there should really be a
# SIGCHLD handler installed to avoid PotentialZombieWarnings. But
# some tests in this layer use tachandler and it is fragile when a
# SIGCHLD handler is installed. tachandler needs to be fixed.
# from twisted.internet import reactor
# signal.signal(signal.SIGCHLD, reactor._handleSigchld)
@classmethod
def _restore_signals(cls):
"""Restore the signal handlers."""
signal.signal(signal.SIGINT, TwistedLayer._original_sigint)
signal.signal(signal.SIGTERM, TwistedLayer._original_sigterm)
signal.signal(signal.SIGCHLD, TwistedLayer._original_sigchld)
@classmethod
@profiled
def testSetUp(cls):
TwistedLayer._save_signals()
from twisted.internet import interfaces, reactor
from twisted.python import threadpool
# zope.exception demands more of frame objects than
# twisted.python.failure provides in its fake frames. This is enough
# to make it work with them as of 2009-09-16. See
# https://bugs.launchpad.net/bugs/425113.
cls._patch = MonkeyPatch(
'twisted.python.failure._Frame.f_locals',
property(lambda self: {}))
cls._patch.setUp()
if interfaces.IReactorThreads.providedBy(reactor):
pool = getattr(reactor, 'threadpool', None)
# If the Twisted threadpool has been obliterated (probably by
# testTearDown), then re-build it using the values that Twisted
# uses.
if pool is None:
reactor.threadpool = threadpool.ThreadPool(0, 10)
reactor.threadpool.start()
@classmethod
@profiled
def testTearDown(cls):
# Shutdown and obliterate the Twisted threadpool, to plug up leaking
# threads.
from twisted.internet import interfaces, reactor
if interfaces.IReactorThreads.providedBy(reactor):
reactor.suggestThreadPoolSize(0)
pool = getattr(reactor, 'threadpool', None)
if pool is not None:
reactor.threadpool.stop()
reactor.threadpool = None
cls._patch.cleanUp()
TwistedLayer._restore_signals()
class GoogleServiceLayer(BaseLayer):
"""Tests for Google web service integration."""
@classmethod
def setUp(cls):
google = GoogleServiceTestSetup()
google.setUp()
@classmethod
def tearDown(cls):
GoogleServiceTestSetup().tearDown()
@classmethod
def testSetUp(self):
# We need to override BaseLayer.testSetUp(), or else we will
# get a LayerIsolationError.
pass
@classmethod
def testTearDown(self):
# We need to override BaseLayer.testTearDown(), or else we will
# get a LayerIsolationError.
pass
class DatabaseFunctionalLayer(DatabaseLayer, FunctionalLayer):
"""Provides the database and the Zope3 application server environment."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
# Connect Storm
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
getUtility(IOpenLaunchBag).clear()
endInteraction()
# Disconnect Storm so it doesn't get in the way of database resets
disconnect_stores()
class LaunchpadFunctionalLayer(LaunchpadLayer, FunctionalLayer):
"""Provides the Launchpad Zope3 application server environment."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
# Reset any statistics
from lp.services.webapp.opstats import OpStats
OpStats.resetStats()
# Connect Storm
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
getUtility(IOpenLaunchBag).clear()
endInteraction()
# Reset any statistics
from lp.services.webapp.opstats import OpStats
OpStats.resetStats()
# Disconnect Storm so it doesn't get in the way of database resets
disconnect_stores()
class AuditorLayer(LaunchpadFunctionalLayer):
auditor = AuditorServer()
_is_setup = False
@classmethod
@profiled
def setUp(cls):
cls.auditor.setUp()
cls.config_fixture.add_section(cls.auditor.service_config)
cls.appserver_config_fixture.add_section(cls.auditor.service_config)
cls._is_setup = True
@classmethod
@profiled
def tearDown(cls):
if not cls._is_setup:
return
cls.auditor.cleanUp()
cls._is_setup = False
# Can't pop the config above, so bail here and let the test runner
# start a sub-process.
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
pass
class GoogleLaunchpadFunctionalLayer(LaunchpadFunctionalLayer,
GoogleServiceLayer):
"""Provides Google service in addition to LaunchpadFunctionalLayer."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
pass
class ZopelessDatabaseLayer(ZopelessLayer, DatabaseLayer):
"""Testing layer for unit tests with no need for librarian.
Can be used wherever you're accustomed to using LaunchpadZopeless
or LaunchpadScript layers, but there is no need for librarian.
"""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
# Signal Layer cannot be torn down fully
raise NotImplementedError
@classmethod
@profiled
def testSetUp(cls):
# LaunchpadZopelessLayer takes care of reconnecting the stores
if not LaunchpadZopelessLayer.isSetUp:
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
disconnect_stores()
class LaunchpadScriptLayer(ZopelessLayer, LaunchpadLayer):
"""Testing layer for scripts using the main Launchpad database adapter"""
@classmethod
@profiled
def setUp(cls):
# Make a TestMailBox available
# This is registered via ZCML in the LaunchpadFunctionalLayer
# XXX flacoste 2006-10-25 bug=68189: This should be configured from
        # ZCML but execute_zcml_for_scripts() cannot support a
# different testing configuration.
cls._mailbox = TestMailBox()
provideUtility(cls._mailbox, IMailBox)
@classmethod
@profiled
def tearDown(cls):
if not globalregistry.base.unregisterUtility(cls._mailbox):
raise NotImplementedError('failed to unregister mailbox')
@classmethod
@profiled
def testSetUp(cls):
# LaunchpadZopelessLayer takes care of reconnecting the stores
if not LaunchpadZopelessLayer.isSetUp:
reconnect_stores(reset=True)
@classmethod
@profiled
def testTearDown(cls):
disconnect_stores()
class LaunchpadTestSetup(PgTestSetup):
template = 'launchpad_ftest_template'
dbuser = 'launchpad'
host = 'localhost'
class LaunchpadZopelessLayer(LaunchpadScriptLayer):
"""Full Zopeless environment including Component Architecture and
database connections initialized.
"""
isSetUp = False
txn = transaction
@classmethod
@profiled
def setUp(cls):
LaunchpadZopelessLayer.isSetUp = True
@classmethod
@profiled
def tearDown(cls):
LaunchpadZopelessLayer.isSetUp = False
@classmethod
@profiled
def testSetUp(cls):
dbconfig.override(isolation_level='read_committed')
# XXX wgrant 2011-09-24 bug=29744: initZopeless used to do this.
# Tests that still need it should eventually set this directly,
# so the whole layer is not polluted.
set_immediate_mail_delivery(True)
# Connect Storm
reconnect_stores()
@classmethod
@profiled
def testTearDown(cls):
dbconfig.reset()
# LaunchpadScriptLayer will disconnect the stores for us.
# XXX wgrant 2011-09-24 bug=29744: uninstall used to do this.
# Tests that still need immediate delivery should eventually do
# this directly.
set_immediate_mail_delivery(False)
@classmethod
@profiled
def commit(cls):
transaction.commit()
@classmethod
@profiled
def abort(cls):
transaction.abort()
class MockHTTPTask:
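    """Minimal stand-in for a zope.server HTTP task.
    PageTestLayer below builds one of these per response so that the
    LaunchpadAccessLogger can log page-test requests.
    """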
class MockHTTPRequestParser:
headers = None
first_line = None
class MockHTTPServerChannel:
# This is not important to us, so we can hardcode it here.
addr = ['127.0.0.88', 80]
request_data = MockHTTPRequestParser()
channel = MockHTTPServerChannel()
def __init__(self, response, first_line):
self.request = response._request
# We have no way of knowing when the task started, so we use
# the current time here. That shouldn't be a problem since we don't
# care about that for our tests anyway.
self.start_time = time.time()
self.status = response.getStatus()
# When streaming files (see lib/zope/publisher/httpresults.txt)
# the 'Content-Length' header is missing. When it happens we set
# 'bytes_written' to an obviously invalid value. This variable is
# used for logging purposes, see webapp/servers.py.
content_length = response.getHeader('Content-Length')
if content_length is not None:
self.bytes_written = int(content_length)
else:
self.bytes_written = -1
self.request_data.headers = self.request.headers
self.request_data.first_line = first_line
def getCGIEnvironment(self):
return self.request._orig_env
class PageTestLayer(LaunchpadFunctionalLayer, GoogleServiceLayer):
"""Environment for page tests.
"""
@classmethod
@profiled
def setUp(cls):
if os.environ.get('PROFILE_PAGETESTS_REQUESTS'):
PageTestLayer.profiler = Profile()
else:
PageTestLayer.profiler = None
file_handler = logging.FileHandler('logs/pagetests-access.log', 'w')
file_handler.setFormatter(logging.Formatter())
logger = PythonLogger('pagetests-access')
logger.logger.addHandler(file_handler)
logger.logger.setLevel(logging.INFO)
access_logger = LaunchpadAccessLogger(logger)
def my__call__(obj, request_string, handle_errors=True, form=None):
"""Call HTTPCaller.__call__ and log the page hit."""
if PageTestLayer.profiler:
response = PageTestLayer.profiler.runcall(
orig__call__, obj, request_string,
handle_errors=handle_errors, form=form)
else:
response = orig__call__(
obj, request_string, handle_errors=handle_errors,
form=form)
first_line = request_string.strip().splitlines()[0]
access_logger.log(MockHTTPTask(response._response, first_line))
return response
PageTestLayer.orig__call__ = (
zope.app.testing.functional.HTTPCaller.__call__)
zope.app.testing.functional.HTTPCaller.__call__ = my__call__
@classmethod
@profiled
def tearDown(cls):
zope.app.testing.functional.HTTPCaller.__call__ = (
PageTestLayer.orig__call__)
if PageTestLayer.profiler:
PageTestLayer.profiler.dump_stats(
os.environ.get('PROFILE_PAGETESTS_REQUESTS'))
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
pass
class TwistedLaunchpadZopelessLayer(TwistedLayer, LaunchpadZopelessLayer):
"""A layer for cleaning up the Twisted thread pool."""
@classmethod
@profiled
def setUp(cls):
pass
@classmethod
@profiled
def tearDown(cls):
pass
@classmethod
@profiled
def testSetUp(cls):
pass
@classmethod
@profiled
def testTearDown(cls):
# XXX 2008-06-11 jamesh bug=239086:
# Due to bugs in the transaction module's thread local
# storage, transactions may be reused by new threads in future
# tests. Therefore we do some cleanup before the pool is
# destroyed by TwistedLayer.testTearDown().
from twisted.internet import interfaces, reactor
if interfaces.IReactorThreads.providedBy(reactor):
pool = getattr(reactor, 'threadpool', None)
if pool is not None and pool.workers > 0:
def cleanup_thread_stores(event):
disconnect_stores()
# Don't exit until the event fires. This ensures
# that our thread doesn't get added to
# pool.waiters until all threads are processed.
event.wait()
event = threading.Event()
# Ensure that the pool doesn't grow, and issue one
# cleanup job for each thread in the pool.
pool.adjustPoolsize(0, pool.workers)
for i in range(pool.workers):
pool.callInThread(cleanup_thread_stores, event)
event.set()
class LayerProcessController:
"""Controller for starting and stopping subprocesses.
Layers which need to start and stop a child process appserver or smtp
server should call the methods in this class, but should NOT inherit from
this class.
"""
# Holds the Popen instance of the spawned app server.
appserver = None
# The config used by the spawned app server.
appserver_config = None
# The SMTP server for layer tests. See
# configs/testrunner-appserver/mail-configure.zcml
smtp_controller = None
@classmethod
def setConfig(cls):
"""Stash a config for use."""
cls.appserver_config = LaunchpadConfig(
BaseLayer.appserver_config_name, 'runlaunchpad')
@classmethod
def setUp(cls):
cls.setConfig()
cls.startSMTPServer()
cls.startAppServer()
@classmethod
@profiled
def startSMTPServer(cls):
"""Start the SMTP server if it hasn't already been started."""
if cls.smtp_controller is not None:
raise LayerInvariantError('SMTP server already running')
# Ensure that the SMTP server does proper logging.
log = logging.getLogger('lazr.smtptest')
log_file = os.path.join(config.mailman.build_var_dir, 'logs', 'smtpd')
handler = logging.FileHandler(log_file)
formatter = logging.Formatter(
fmt='%(asctime)s (%(process)d) %(message)s',
datefmt='%b %d %H:%M:%S %Y')
handler.setFormatter(formatter)
log.setLevel(logging.DEBUG)
log.addHandler(handler)
log.propagate = False
cls.smtp_controller = SMTPController('localhost', 9025)
cls.smtp_controller.start()
@classmethod
@profiled
def startAppServer(cls, run_name='run'):
"""Start the app server if it hasn't already been started."""
if cls.appserver is not None:
raise LayerInvariantError('App server already running')
cls._cleanUpStaleAppServer()
cls._runAppServer(run_name)
cls._waitUntilAppServerIsReady()
@classmethod
@profiled
def stopSMTPServer(cls):
"""Kill the SMTP server and wait until it's exited."""
if cls.smtp_controller is not None:
cls.smtp_controller.reset()
cls.smtp_controller.stop()
cls.smtp_controller = None
@classmethod
def _kill(cls, sig):
"""Kill the appserver with `sig`.
:param sig: the signal to kill with
:type sig: int
:return: True if the signal was delivered, otherwise False.
:rtype: bool
"""
try:
os.kill(cls.appserver.pid, sig)
except OSError as error:
if error.errno == errno.ESRCH:
# The child process doesn't exist. Maybe it went away by the
# time we got here.
cls.appserver = None
return False
else:
# Something else went wrong.
raise
else:
return True
@classmethod
@profiled
def stopAppServer(cls):
"""Kill the appserver and wait until it's exited."""
if cls.appserver is not None:
# Unfortunately, Popen.wait() does not support a timeout, so poll
# for a little while, then SIGKILL the process if it refuses to
# exit. test_on_merge.py will barf if we hang here for too long.
until = datetime.datetime.now() + WAIT_INTERVAL
last_chance = False
if not cls._kill(signal.SIGTERM):
# The process is already gone.
return
while True:
# Sleep and poll for process exit.
if cls.appserver.poll() is not None:
break
time.sleep(0.5)
# If we slept long enough, send a harder kill and wait again.
# If we already had our last chance, raise an exception.
if datetime.datetime.now() > until:
if last_chance:
raise RuntimeError("The appserver just wouldn't die")
last_chance = True
if not cls._kill(signal.SIGKILL):
# The process is already gone.
return
until = datetime.datetime.now() + WAIT_INTERVAL
cls.appserver = None
@classmethod
@profiled
def postTestInvariants(cls):
"""Enforce some invariants after each test.
Must be called in your layer class's `testTearDown()`.
"""
if cls.appserver.poll() is not None:
raise LayerIsolationError(
"App server died in this test (status=%s):\n%s" % (
cls.appserver.returncode, cls.appserver.stdout.read()))
DatabaseLayer.force_dirty_database()
@classmethod
def _cleanUpStaleAppServer(cls):
"""Kill any stale app server or pid file."""
pid = pidfile.get_pid('launchpad', cls.appserver_config)
if pid is not None:
# Don't worry if the process no longer exists.
try:
os.kill(pid, signal.SIGTERM)
except OSError as error:
if error.errno != errno.ESRCH:
raise
pidfile.remove_pidfile('launchpad', cls.appserver_config)
@classmethod
def _runAppServer(cls, run_name):
"""Start the app server using runlaunchpad.py"""
_config = cls.appserver_config
cmd = [
os.path.join(_config.root, 'bin', run_name),
'-C', 'configs/%s/launchpad.conf' % _config.instance_name]
environ = dict(os.environ)
environ['LPCONFIG'] = _config.instance_name
cls.appserver = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=environ, cwd=_config.root)
@classmethod
def appserver_root_url(cls):
return cls.appserver_config.vhost.mainsite.rooturl
@classmethod
def _waitUntilAppServerIsReady(cls):
"""Wait until the app server accepts connection."""
assert cls.appserver is not None, "App server isn't started."
root_url = cls.appserver_root_url()
until = datetime.datetime.now() + WAIT_INTERVAL
while until > datetime.datetime.now():
try:
connection = urlopen(root_url)
connection.read()
except IOError as error:
# We are interested in a wrapped socket.error.
# urlopen() really sucks here.
if len(error.args) <= 1:
raise
if not isinstance(error.args[1], socket.error):
raise
if error.args[1].args[0] != errno.ECONNREFUSED:
raise
returncode = cls.appserver.poll()
if returncode is not None:
raise RuntimeError(
'App server failed to start (status=%d):\n%s' % (
returncode, cls.appserver.stdout.read()))
time.sleep(0.5)
else:
connection.close()
break
else:
os.kill(cls.appserver.pid, signal.SIGTERM)
cls.appserver = None
# Go no further.
raise AssertionError('App server startup timed out.')
class AppServerLayer(LaunchpadFunctionalLayer):
"""Layer for tests that run in the webapp environment with an app server.
"""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setUp()
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
LayerProcessController.stopSMTPServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
LayerProcessController.postTestInvariants()
class CeleryJobLayer(AppServerLayer):
"""Layer for tests that run jobs via Celery."""
celeryd = None
@classmethod
@profiled
def setUp(cls):
cls.celeryd = celeryd('launchpad_job')
cls.celeryd.__enter__()
@classmethod
@profiled
def tearDown(cls):
cls.celeryd.__exit__(None, None, None)
cls.celeryd = None
class CeleryBzrsyncdJobLayer(AppServerLayer):
"""Layer for tests that run jobs that read from branches via Celery."""
celeryd = None
@classmethod
@profiled
def setUp(cls):
cls.celeryd = celeryd('bzrsyncd_job')
cls.celeryd.__enter__()
@classmethod
@profiled
def tearDown(cls):
cls.celeryd.__exit__(None, None, None)
cls.celeryd = None
class CeleryBranchWriteJobLayer(AppServerLayer):
"""Layer for tests that run jobs which write to branches via Celery."""
celeryd = None
@classmethod
@profiled
def setUp(cls):
cls.celeryd = celeryd('branch_write_job')
cls.celeryd.__enter__()
@classmethod
@profiled
def tearDown(cls):
cls.celeryd.__exit__(None, None, None)
cls.celeryd = None
class ZopelessAppServerLayer(LaunchpadZopelessLayer):
"""Layer for tests that run in the zopeless environment with an appserver.
"""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setUp()
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
LayerProcessController.stopSMTPServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
LayerProcessController.postTestInvariants()
class TwistedAppServerLayer(TwistedLaunchpadZopelessLayer):
"""Layer for twisted-using zopeless tests that need a running app server.
"""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setUp()
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
LayerProcessController.stopSMTPServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
@classmethod
@profiled
def testTearDown(cls):
LayerProcessController.postTestInvariants()
class YUITestLayer(FunctionalLayer):
"""The layer for all YUITests cases."""
class YUIAppServerLayer(MemcachedLayer):
"""The layer for all YUIAppServer test cases."""
@classmethod
@profiled
def setUp(cls):
LayerProcessController.setConfig()
LayerProcessController.startAppServer('run-testapp')
@classmethod
@profiled
def tearDown(cls):
LayerProcessController.stopAppServer()
@classmethod
@profiled
def testSetUp(cls):
LaunchpadLayer.resetSessionDb()
|
agpl-3.0
| 8,982,281,586,918,893,000 | 31.655561 | 78 | 0.633593 | false | 4.327641 | true | false | false |
UASLab/ImageAnalysis
|
scripts/archive/4b-simple-matches-reset.py
|
1
|
2626
|
#!/usr/bin/python
import sys
sys.path.insert(0, "/usr/local/opencv3/lib/python2.7/site-packages/")
import argparse
import commands
import cPickle as pickle
import cv2
import fnmatch
import math
import numpy as np
import os.path
from progress.bar import Bar
import scipy.spatial
sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr
import SRTM
# Reset all match point locations to their original direct
# georeferenced locations based on estimated camera pose and
# projection onto DEM earth surface
parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')
args = parser.parse_args()
proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.undistort_keypoints()
proj.load_match_pairs()
# setup SRTM ground interpolator
ref = proj.ned_reference_lla
sss = SRTM.NEDGround( ref, 2000, 2000, 30 )
# compute keypoint usage map
proj.compute_kp_usage()
# fast way:
# 1. make a grid (i.e. 8x8) of uv coordinates covering the whole image
# 2. undistort these uv coordinates
# 3. project them into vectors
# 4. intersect them with the srtm terrain to get ned coordinates
# 5. use linearndinterpolator ... g = scipy.interpolate.LinearNDInterpolator([[0,0],[1,0],[0,1],[1,1]], [[0,4,8],[1,3,2],[2,2,-4],[4,1,0]])
# with origin uv vs. 3d location to build a table
# 6. interpolate original uv coordinates to 3d locations
proj.fastProjectKeypointsTo3d(sss)
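# Illustrative sketch only, never called here: the grid/interpolation idea in
# steps 1-6 above, assuming hypothetical helpers undistort_uv() and
# intersect_vector_with_srtm(); the real work happens inside
# proj.fastProjectKeypointsTo3d(sss).
def _example_uv_to_ned_interpolator(width, height, undistort_uv,
                                    intersect_vector_with_srtm, steps=8):
    import scipy.interpolate
    uv_grid = []
    ned_grid = []
    for u in np.linspace(0, width, steps):
        for v in np.linspace(0, height, steps):
            uv_grid.append([u, v])
            ned_grid.append(intersect_vector_with_srtm(undistort_uv([u, v])))
    # interpolate any original keypoint uv coordinate to a 3d ned location
    return scipy.interpolate.LinearNDInterpolator(np.array(uv_grid),
                                                  np.array(ned_grid))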
# build a list of all keypoints, but only consider pairwise
# matches and don't try to find single matches that span 3 or more
# images.
print "Constructing unified match structure..."
matches_direct = []
for i, i1 in enumerate(proj.image_list):
# print i1.name
for j, matches in enumerate(i1.match_list):
# print proj.image_list[j].name
if j > i:
for pair in matches:
ned1 = proj.image_list[i].coord_list[pair[0]]
ned2 = proj.image_list[j].coord_list[pair[1]]
ned = (ned1 + ned2) / 2
#print ned1, ned2, ned
match = [ ned, [i, pair[0]], [j, pair[1]] ]
matches_direct.append( match )
print "total features in image set = %d" % len(matches_direct)
print "2 images per feature, no redundancy removal."
print "Writing match file ..."
pickle.dump(matches_direct, open(args.project + "/matches_direct", "wb"))
print "temp: writing matches_direct ascii version..."
f = open(args.project + "/matches_direct.ascii", "wb")
for match in matches_direct:
f.write( str(match) + '\n' )
|
mit
| 6,323,647,680,881,667,000 | 31.02439 | 139 | 0.690023 | false | 3.307305 | false | false | false |
undoware/neutron-drive
|
google_appengine/google/appengine/api/search/search_util.py
|
1
|
4254
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides utility methods used by modules in the FTS API stub."""
import datetime
import re
from google.appengine.datastore import document_pb
from google.appengine.api.search import QueryParser
DEFAULT_MAX_SNIPPET_LENGTH = 160
TEXT_DOCUMENT_FIELD_TYPES = [
document_pb.FieldValue.ATOM,
document_pb.FieldValue.TEXT,
document_pb.FieldValue.HTML,
]
TEXT_QUERY_TYPES = [
QueryParser.NAME,
QueryParser.PHRASE,
QueryParser.TEXT,
]
NUMBER_DOCUMENT_FIELD_TYPES = [
document_pb.FieldValue.NUMBER,
]
NUMBER_QUERY_TYPES = [
QueryParser.FLOAT,
QueryParser.INT,
QueryParser.NUMBER,
]
BASE_DATE = datetime.datetime(1970, 1, 1, tzinfo=None)
class UnsupportedOnDevError(Exception):
"""Indicates attempt to perform an action unsupported on the dev server."""
def GetFieldInDocument(document, field_name):
"""Find and return the field with the provided name in the document."""
for f in document.field_list():
if f.name() == field_name:
return f
return None
def AddFieldsToDocumentPb(doc_id, fields, document):
"""Add the id and fields to document.
Args:
doc_id: The document id.
fields: List of tuples of field name, value and optionally type.
document: The document to add the fields to.
"""
if doc_id is not None:
document.set_id(doc_id)
for field_tuple in fields:
name = field_tuple[0]
value = field_tuple[1]
field = document.add_field()
field.set_name(name)
field_value = field.mutable_value()
field_value.set_string_value(value)
if len(field_tuple) > 2:
field_value.set_type(field_tuple[2])
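def _ExampleAddFields(document):
  """Illustrative sketch only, not used by the stub: the shape of the `fields`
  argument AddFieldsToDocumentPb expects - (name, value) pairs, optionally
  with a third element giving an explicit document_pb.FieldValue type.
  """
  AddFieldsToDocumentPb('doc-1', [
      ('title', 'hello world'),
      ('body', '<p>hello</p>', document_pb.FieldValue.HTML),
      ], document)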
def GetFieldCountInDocument(document, field_name):
count = 0
for field in document.field_list():
if field.name() == field_name:
count += 1
return count
def GetFieldValue(field):
"""Returns the value of a field as the correct type."""
value = field.value().string_value()
value_type = field.value().type()
if value_type in TEXT_DOCUMENT_FIELD_TYPES:
return value
if value_type is document_pb.FieldValue.DATE:
return DeserializeDate(value)
if value_type is document_pb.FieldValue.NUMBER:
return float(value)
raise TypeError('No conversion defined for type %s' % value_type)
def EpochTime(date):
"""Returns millisecond epoch time for a date or datetime."""
if isinstance(date, datetime.datetime):
td = date - BASE_DATE
else:
td = date - BASE_DATE.date()
milliseconds_since_epoch = long(
(td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**3)
return milliseconds_since_epoch
def SerializeDate(date):
return str(EpochTime(date))
def DeserializeDate(date_str):
if re.match(r'^\d+\-\d+\-\d+$', date_str):
return datetime.datetime.strptime(date_str, '%Y-%m-%d')
else:
dt = BASE_DATE + datetime.timedelta(milliseconds=long(date_str))
return dt
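def _ExampleDateRoundTrip():
  """Illustrative sketch only, not used by the stub: a naive datetime survives
  a round trip through SerializeDate/DeserializeDate, since both directions
  use milliseconds since BASE_DATE.
  """
  original = datetime.datetime(2012, 1, 2, 3, 4, 5)
  return DeserializeDate(SerializeDate(original)) == original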
def Repr(class_instance, ordered_dictionary):
"""Generates an unambiguous representation for instance and ordered dict."""
return 'search.%s(%s)' % (class_instance.__class__.__name__, ', '.join(
["%s='%s'" % (key, value)
for (key, value) in ordered_dictionary if value]))
def TreeRepr(tree, depth=0):
"""Generate a string representation of an ANTLR parse tree for debugging."""
def _NodeRepr(node):
text = str(node.getType())
if node.getText():
text = '%s: %s' % (text, node.getText())
return text
children = ''
if tree.children:
children = '\n' + '\n'.join([TreeRepr(child, depth=depth+1)
for child in tree.children if child])
return depth * ' ' + _NodeRepr(tree) + children
|
bsd-3-clause
| -5,943,355,562,431,053,000 | 25.5875 | 78 | 0.681711 | false | 3.571788 | false | false | false |
Vicaris/ModPro
|
moviepy/video/fx/resize.py
|
1
|
4949
|
resize_possible = True
try:
# TRY USING OpenCV AS RESIZER
#raise ImportError #debugging
import cv2
import numpy as np
def resizer (pic, newsize):
lx, ly = int(newsize[0]), int(newsize[1])
if lx > pic.shape[1] or ly > pic.shape[0]:
# For upsizing use linear for good quality & decent speed
interpolation = cv2.INTER_LINEAR
else:
            # For downsizing use area to prevent aliasing
interpolation = cv2.INTER_AREA
return cv2.resize(+pic.astype('uint8'), (lx, ly),
interpolation=interpolation)
resizer.origin = "cv2"
except ImportError:
try:
# TRY USING PIL/PILLOW AS RESIZER
from PIL import Image
import numpy as np
def resizer(pic, newsize):
newsize = list(map(int, newsize))[::-1]
shape = pic.shape
if len(shape)==3:
newshape = (newsize[0],newsize[1], shape[2] )
else:
newshape = (newsize[0],newsize[1])
pilim = Image.fromarray(pic)
resized_pil = pilim.resize(newsize[::-1], Image.ANTIALIAS)
#arr = np.fromstring(resized_pil.tostring(), dtype='uint8')
#arr.reshape(newshape)
return np.array(resized_pil)
resizer.origin = "PIL"
except ImportError:
# TRY USING SCIPY AS RESIZER
try:
from scipy.misc import imresize
resizer = lambda pic, newsize : imresize(pic,
map(int, newsize[::-1]))
resizer.origin = "Scipy"
except ImportError:
resize_possible = False
from moviepy.decorators import apply_to_mask
def resize(clip, newsize=None, height=None, width=None, apply_to_mask=True):
"""
Returns a video clip that is a resized version of the clip.
Parameters
------------
newsize:
Can be either
        - ``(width,height)`` in pixels or a float representing
- A scaling factor, like 0.5
- A function of time returning one of these.
width:
width of the new clip in pixel. The height is then computed so
that the width/height ratio is conserved.
height:
height of the new clip in pixel. The width is then computed so
that the width/height ratio is conserved.
Examples
----------
>>> myClip.resize( (460,720) ) # New resolution: (460,720)
    >>> myClip.resize(0.6) # width and height multiplied by 0.6
>>> myClip.resize(width=800) # height computed automatically.
>>> myClip.resize(lambda t : 1+0.02*t) # slow swelling of the clip
"""
w, h = clip.tamano
if newsize is not None:
def trans_newsize(ns):
if isinstance(ns, (int, float)):
return [ns * w, ns * h]
else:
return ns
if hasattr(newsize, "__call__"):
newsize2 = lambda t : trans_newsize(newsize(t))
if clip.ismask:
fun = lambda gf,t: (1.0*resizer((255 * gf(t)).astype('uint8'),
newsize2(t))/255)
else:
fun = lambda gf,t: resizer(gf(t).astype('uint8'),
newsize2(t))
return clip.fl(fun, keep_duration=True,
apply_to= (["mask"] if apply_to_mask else []))
else:
newsize = trans_newsize(newsize)
elif height is not None:
if hasattr(height, "__call__"):
fun = lambda t : 1.0*int(height(t))/h
return resize(clip, fun)
else:
newsize = [w * height / h, height]
elif width is not None:
if hasattr(width, "__call__"):
fun = lambda t : 1.0*width(t)/w
return resize(clip, fun)
newsize = [width, h * width / w]
# From here, the resizing is constant (not a function of time), tamano=newsize
if clip.ismask:
fl = lambda pic: 1.0*resizer((255 * pic).astype('uint8'), newsize)/255.0
else:
fl = lambda pic: resizer(pic.astype('uint8'), newsize)
newclip = clip.fl_image(fl)
if apply_to_mask and clip.mask is not None:
newclip.mask = resize(clip.mask, newsize, apply_to_mask=False)
return newclip
if not resize_possible:
doc = resize.__doc__
def resize(clip, newsize=None, height=None, width=None):
raise ImportError("fx resize needs OpenCV or Scipy or PIL")
resize.__doc__ = doc
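# Illustrative usage (a sketch, not part of the module; note that this fork
# exposes the clip dimensions as `clip.tamano` rather than `clip.size`):
#
#   from moviepy.editor import VideoFileClip
#   clip = VideoFileClip("video.mp4")            # hypothetical input file
#   half = resize(clip, 0.5)                     # width and height halved
#   wide = resize(clip, width=800)               # height keeps the aspect ratio
#   swell = resize(clip, lambda t: 1 + 0.02 * t) # size as a function of time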
|
mit
| -1,627,818,066,893,388,500 | 28.993939 | 82 | 0.503536 | false | 4.103648 | false | false | false |
vitan/blaze
|
blaze/expr/broadcast.py
|
1
|
5871
|
from __future__ import absolute_import, division, print_function
from datashape.predicates import iscollection, isscalar, isnumeric
from toolz import partial, unique, first
import datashape
from datashape import dshape, DataShape, Record, Var, Option, Unit
from .expressions import ElemWise, Label, Expr, Symbol, Field
from .core import eval_str
from .arithmetic import (Eq, Ne, Lt, Le, Gt, Ge, Add, Mult, Div, Sub, Pow, Mod,
Or, And, USub, Not, FloorDiv)
from . import math
__all__ = ['broadcast', 'Broadcast']
def _expr_child(col):
""" Expr and child of field
Examples
--------
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> _expr_child(accounts.name)
(name, accounts)
Helper function for ``broadcast``
"""
if isinstance(col, (Broadcast, Field)):
return col._expr, col._child
elif isinstance(col, Label):
return _expr_child(col._child)
else:
return col, None
def broadcast(op, *column_inputs):
""" Broadcast scalar operation across multiple fields
Parameters
----------
op : Scalar Operation like Add, Mult, Sin, Exp
column_inputs : either Column, Broadcast or constant (like 1, 1.0, '1')
Examples
--------
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> broadcast(Add, accounts.amount, 100)
accounts.amount + 100
Fuses operations down into ScalarExpr level
>>> broadcast(Mult, 2, (accounts.amount + 100))
2 * (accounts.amount + 100)
"""
expr_inputs = []
children = set()
for col in column_inputs:
expr, child = _expr_child(col)
expr_inputs.append(expr)
if child:
children.add(child)
if not len(children) == 1:
raise ValueError("All inputs must be from same Table.\n"
"Saw the following tables: %s"
% ', '.join(map(str, children)))
if hasattr(op, 'op'):
expr = op.op(*expr_inputs)
else:
expr = op(*expr_inputs)
return Broadcast(first(children), expr)
class Broadcast(ElemWise):
""" Apply Scalar Expression onto columns of data
Parameters
----------
child : TableExpr
expr : ScalarExpr
    The names of the variables within the scalar expr must match the columns
of the child. Use ``Column.scalar_variable`` to generate the
appropriate scalar Symbol
Examples
--------
>>> from blaze.expr import Symbol, Add
>>> accounts = Symbol('accounts',
... 'var * {name: string, amount: int, id: int}')
>>> expr = Add(accounts.amount._expr, 100)
>>> Broadcast(accounts, expr)
accounts.amount + 100
See Also
--------
blaze.expr.broadcast.broadcast
"""
__slots__ = '_child', '_expr'
@property
def _name(self):
names = [x._name for x in self._expr._traverse()
if isinstance(x, Symbol)]
if len(names) == 1 and not isinstance(self._expr.dshape[0], Record):
return names[0]
@property
def dshape(self):
return DataShape(*(self._child.shape + (self._expr.dshape.measure,)))
def __str__(self):
columns = self.active_columns()
newcol = lambda c: "%s.%s" % (self._child, c)
return eval_str(self._expr._subs(dict(zip(columns,
map(newcol, columns)))))
def active_columns(self):
return sorted(unique(x._name for x in self._traverse()
if isinstance(x, Symbol) and isscalar(x.dshape)))
def _eq(self, other):
if (isscalar(self.dshape.measure) and
(not isinstance(other, Expr)
or isscalar(other.dshape.measure))):
return broadcast(Eq, self, other)
else:
return self.isidentical(other)
def _ne(a, b):
return broadcast(Ne, a, b)
def _lt(a, b):
return broadcast(Lt, a, b)
def _le(a, b):
return broadcast(Le, a, b)
def _gt(a, b):
return broadcast(Gt, a, b)
def _ge(a, b):
return broadcast(Ge, a, b)
def _add(a, b):
return broadcast(Add, a, b)
def _radd(a, b):
return broadcast(Add, b, a)
def _mul(a, b):
return broadcast(Mult, a, b)
def _rmul(a, b):
return broadcast(Mult, b, a)
def _div(a, b):
return broadcast(Div, a, b)
def _rdiv(a, b):
return broadcast(Div, b, a)
def _floordiv(a, b):
return broadcast(FloorDiv, a, b)
def _rfloordiv(a, b):
return broadcast(FloorDiv, b, a)
def _sub(a, b):
return broadcast(Sub, a, b)
def _rsub(a, b):
return broadcast(Sub, b, a)
def _pow(a, b):
return broadcast(Pow, a, b)
def _rpow(a, b):
return broadcast(Pow, b, a)
def _mod(a, b):
return broadcast(Mod, a, b)
def _rmod(a, b):
return broadcast(Mod, b, a)
def _or(a, b):
return broadcast(Or, a, b)
def _ror(a, b):
return broadcast(Or, b, a)
def _and(a, b):
return broadcast(And, a, b)
def _rand(a, b):
return broadcast(And, b, a)
def _neg(a):
return broadcast(USub, a)
def _invert(a):
return broadcast(Not, a)
def isnan(expr):
return broadcast(math.isnan, expr)
from .expressions import dshape_method_list
def isreal(ds):
if isinstance(ds, DataShape) and len(ds) == 1:
ds = ds[0]
if isinstance(ds, Option):
ds = ds.ty
return isinstance(ds, Unit) and 'float' in str(ds)
dshape_method_list.extend([
(lambda ds: iscollection(ds) and isscalar(ds.measure),
set([_eq, _ne, _lt, _le, _gt, _ge, _add, _radd, _mul,
_rmul, _div, _rdiv, _floordiv, _rfloordiv, _sub, _rsub, _pow,
_rpow, _mod, _rmod, _or, _ror, _and, _rand, _neg, _invert])),
(lambda ds: iscollection(ds) and isreal(ds.measure),
set([isnan]))
])
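# Minimal sketch of how the registered operators come together (mirrors the
# docstring examples above; not part of the module itself):
#
#   accounts = Symbol('accounts', 'var * {name: string, amount: int, id: int}')
#   expr = accounts.amount * 2 + 100   # fuses into a single Broadcast node
#   expr.active_columns()              # fields referenced by the fused scalar expr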
|
bsd-3-clause
| 5,816,400,818,536,924,000 | 23.982979 | 79 | 0.577755 | false | 3.366399 | false | false | false |
boppreh/bayesian
|
bayesian/tests.py
|
1
|
7654
|
import sys
sys.path.append('../')
import unittest
from bayesian import Bayes, classify, classify_normal
class TestBayes(unittest.TestCase):
def test_empty_constructor(self):
with self.assertRaises(ValueError):
b = Bayes()
def test_list_constructor(self):
self.assertEqual(Bayes([]), [])
self.assertEqual(Bayes(()), [])
self.assertEqual(Bayes(range(5)), [0, 1, 2, 3, 4])
self.assertEqual(Bayes({'a': 10, 'b': 50}), [10, 50])
self.assertEqual(Bayes([10, 10, 20]), [10, 10, 20])
self.assertEqual(Bayes([('a', 10), ('b', 50)]), [10, 50])
with self.assertRaises(ValueError):
b = Bayes([('a', 10), ('b', 50), ('a', 15)])
def test_get_odds(self):
b = Bayes({'a': 10, 'b': 50})
self.assertEqual(b['a'], 10)
self.assertEqual(b['b'], 50)
self.assertEqual(b[0], 10)
self.assertEqual(b[1], 50)
with self.assertRaises(IndexError):
b[2]
with self.assertRaises(ValueError):
b['c']
def test_set_odds(self):
b = Bayes((10, 20, 30))
b[0] = 50
b[1] = 40
b[2] = 30
self.assertEqual(b, [50, 40, 30])
def test_opposite(self):
b = Bayes([0.2, 0.8])
opposite = b.opposite()
self.assertEqual(opposite[0] / opposite[1], b[1] / b[0])
b = Bayes([0.2, 0.4, 0.4])
opposite = b.opposite()
self.assertEqual(opposite[0] / opposite[1], b[1] / b[0])
self.assertEqual(opposite[1] / opposite[2], b[2] / b[1])
self.assertEqual(opposite[0] / opposite[2], b[2] / b[0])
def test_normalized(self):
self.assertEqual(Bayes([]).normalized(), [])
self.assertEqual(Bayes([2]).normalized(), [1])
self.assertEqual(Bayes([9, 1]).normalized(), [0.9, 0.1])
self.assertEqual(Bayes([2, 4, 4]).normalized(), [0.2, 0.4, 0.4])
self.assertEqual(Bayes([2, 0]).normalized(), [1.0, 0])
self.assertEqual(Bayes([0, 0]).normalized(), [0.0, 0])
def test_operators(self):
b = Bayes([5, 2, 3])
b *= (2, 2, 1)
b /= (2, 2, 1)
self.assertEqual(b, [5, 2, 3])
self.assertEqual(Bayes([.5, .5]) * (.9, .1), [0.45, 0.05])
self.assertEqual(Bayes([.5, .5]) / (.9, .1), [5 / 9, 5])
self.assertEqual(Bayes([.5, .5]) * {'0': 0.9, '1': 0.1}, [0.45, 0.05])
self.assertEqual(Bayes([.5, .5]) * [('0', 0.9), ('1', 0.1)], [0.45, 0.05])
def test_equality(self):
b1 = Bayes([0.5, 0.2, 0.3])
b2 = Bayes([5, 2, 3])
b3 = Bayes([5, 2, 5])
self.assertEqual(b1, b2)
self.assertNotEqual(b1, b3)
self.assertNotEqual(b2, b3)
def test_update(self):
b = Bayes([1, 2])
b.update((2, 1))
self.assertEqual(b, [1, 1])
b.update((2, 1))
self.assertEqual(b, [2, 1])
b.update((2, 0))
self.assertEqual(b, [1, 0])
def test_update_from_events(self):
b = Bayes([1, 1])
b.update_from_events(['a', 'a', 'a'], {'a': (0.5, 2)})
self.assertEqual(b, [0.5 ** 3, 2 ** 3])
def test_update_from_tests(self):
b = Bayes([1, 1])
b.update_from_tests([True], [0.9, 0.1])
self.assertEqual(b, [0.45, 0.05])
b = Bayes([1, 1])
b.update_from_tests([True, True, True, False], [0.5, 2])
self.assertEqual(b, [0.5 ** 2, 2 ** 2])
def test_most_likely(self):
b = Bayes({'a': 9, 'b': 1})
self.assertEqual(b.most_likely(), 'a')
self.assertEqual(b.most_likely(0), 'a')
self.assertEqual(b.most_likely(0.89), 'a')
self.assertIsNone(b.most_likely(0.91))
def test_is_likely(self):
b = Bayes({'a': 9, 'b': 1})
self.assertTrue(b.is_likely('a'))
self.assertTrue(b.is_likely('a', 0.89))
self.assertFalse(b.is_likely('a', 0.91))
def test_conversions(self):
b = Bayes({'a': 9, 'b': 1, 'c': 0})
self.assertEqual(b, b.normalized())
self.assertEqual(b.normalized()['a'], 0.9)
self.assertEqual(b.opposite().opposite(), b)
def test_extract_events_odds(self):
instances = {'spam': ["buy viagra", "buy cialis"] * 100 + ["meeting love"],
'genuine': ["meeting tomorrow", "buy milk"] * 100}
odds = Bayes.extract_events_odds(instances)
b = Bayes({'spam': 0.9, 'genuine': 0.1})
b.update_from_events('buy coffee for meeting'.split(), odds)
self.assertEqual(b.most_likely(0.8), 'genuine')
class TestClassify(unittest.TestCase):
def test_single(self):
self.assertEqual(classify('a', {'A': []}), 'A')
self.assertEqual(classify('a', {'A': ['a']}), 'A')
self.assertEqual(classify('a', {'A': ['a', 'a']}), 'A')
self.assertEqual(classify('a', {'A': ['a', 'b']}), 'A')
def test_basic(self):
self.assertEqual(classify('a', {'A': ['a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a a a', {'A': ['a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a a b', {'A': ['a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a a b', {'A': ['a', 'a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a b b', {'A': ['a', 'a'], 'B': ['b']}), 'B')
self.assertEqual(classify('b b b', {'A': ['a', 'a'], 'B': ['b']}), 'B')
def test_with_extraction(self):
self.assertEqual(classify('a', {'A': ['a a a'], 'B': ['b']}), 'A')
self.assertEqual(classify('a', {'A': ['a', 'a'], 'B': ['b b b']}), 'A')
def test_sample(self):
spams = ["buy viagra", "dear recipient", "meet sexy singles"]
genuines = ["let's meet tomorrow", "remember to buy milk"]
message = "remember the meeting tomorrow"
instances = {'spam': spams, 'genuine': genuines}
self.assertEqual(classify(message, instances), 'genuine')
# Classify File and Classify Folder require too much of a test harness for now.
class TestClassifyNormal(unittest.TestCase):
def test_single(self):
self.assertEqual(classify_normal({'a': 100}, {'A': [{'a': 100}]}), 'A')
self.assertEqual(classify_normal({'a': 100, 'b': 0},
{'A': [{'a': 100, 'b': 0}]}), 'A')
self.assertEqual(classify_normal({'a': 100, 'b': 0},
{'A': [{'a': 100, 'b': 10}],
'B': [{'a': 50, 'b': 100}]}), None)
def test_basic(self):
self.assertEqual(classify_normal({'a': 100, 'b': 0},
{'A': [{'a': 100, 'b': 10},
{'a': 99, 'b': -10}],
'B': [{'a': 50, 'b': 100},
{'a': 70, 'b':90}]}), 'A')
def test_sample(self):
instance = {'height': 6, 'weight': 130, 'foot size': 8}
training = {'male': [{'height': 6, 'weight': 180, 'foot size': 12},
{'height': 5.92, 'weight': 190, 'foot size': 11},
{'height': 5.58, 'weight': 170, 'foot size': 12},
{'height': 5.92, 'weight': 165, 'foot size': 10}],
'female': [{'height': 5, 'weight': 100, 'foot size': 6},
{'height': 5.5, 'weight': 150, 'foot size': 8},
{'height': 5.42, 'weight': 130, 'foot size': 7},
{'height': 5.75, 'weight': 150, 'foot size': 9}]}
self.assertEqual(classify_normal(instance, training), 'female')
if __name__ == '__main__':
unittest.main()
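# The suite can be run directly (python tests.py) or through unittest
# discovery (python -m unittest tests); the sys.path tweak at the top assumes
# the bayesian package sits one directory above this file.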
|
mit
| 2,979,042,622,111,361,000 | 38.864583 | 83 | 0.482362 | false | 3.111382 | true | false | false |
danyill/rdb-tool
|
rdb_section_extract.py
|
1
|
13782
|
#!/usr/bin/env python3
"""
This tool extracts a pile of settings based on the hierarchy of Quickset
"""
import collections
import os
import re
import olefile
import sel_logic_count
LINE_INFO = ['Lines Used (w/ comment lines)', 'Lines Used (w/o comment lines)']
LOGIC_INFO = [ 'PSV', 'PMV', 'PLT', 'PCT', 'PST', 'PCN',
'ASV', 'AMV', 'ALT', 'AST', 'ACN']
TOTAL_SEL_PROTECTION_LINES = 250
TOTAL_SEL_AUTOMATION_LINES = 1000
# this probably needs to be expanded
SEL_FILES_TO_GROUP = {
'G': ['SET_G1'],
'G1': ['SET_S1.TXT', 'SET_L1.TXT', 'SET_1.TXT'], # Groups
'G2': ['SET_S2.TXT', 'SET_L2.TXT', 'SET_2.TXT'],
'G3': ['SET_S3.TXT', 'SET_L3.TXT', 'SET_3.TXT'],
'G4': ['SET_S4.TXT', 'SET_L4.TXT', 'SET_4.TXT'],
'G5': ['SET_S5.TXT', 'SET_L5.TXT', 'SET_5.TXT'],
'G6': ['SET_S6.TXT', 'SET_L6.TXT', 'SET_6.TXT'],
'P1': ['SET_P1.TXT'], # Ports
'P2': ['SET_P2.TXT'],
'P3': ['SET_P3.TXT'],
'P5': ['SET_P5.TXT'],
'PF': ['SET_PF.TXT'], # Front Port
'P87': ['SET_P87.TXT'], # Differential Port Settings
'A1': ['SET_A1.TXT'], # Automation
'A2': ['SET_A2.TXT'],
'A3': ['SET_A3.TXT'],
'A4': ['SET_A4.TXT'],
'A5': ['SET_A5.TXT'],
'A6': ['SET_A6.TXT'],
'A7': ['SET_A7.TXT'],
'A8': ['SET_A8.TXT'],
'A9': ['SET_A9.TXT'],
'A10': ['SET_A10.TXT'],
'L1': ['SET_L1.TXT'], # Protection Logic
'L2': ['SET_L2.TXT'],
'L3': ['SET_L3.TXT'],
'L4': ['SET_L4.TXT'],
'L5': ['SET_L5.TXT'],
'L6': ['SET_L6.TXT'],
'L7': ['SET_L7.TXT'],
'L8': ['SET_L8.TXT'],
'L9': ['SET_L9.TXT'],
'B1': ['SET_B1.TXT'], # Bay Control information
'D1': ['SET_D1.TXT'], # DNP
'D2': ['SET_D2.TXT'],
'D3': ['SET_D3.TXT'],
'D4': ['SET_D4.TXT'],
'D5': ['SET_D5.TXT'],
'F1': ['SET_F1.TXT'], # Front Panel
'M1': ['SET_M1.TXT'], # CB Monitoring
'N1': ['SET_N1.TXT'], # Notes
'O1': ['SET_O1.TXT'], # Outputs
'R1': ['SET_R1.TXT'], # SER
'T1': ['SET_R1.TXT'], # Aliases
}
def process_file(filepath, args, settingsName=None):
rdb_info = get_ole_data(filepath, settingsName=settingsName)
return extract_parameters(filepath, rdb_info, args)
def get_ole_data(filepath,settingsName=None):
data = []
listdir = []
try:
ole = olefile.OleFileIO(filepath)
listdir = ole.listdir()
if settingsName:
listdir = [l for l in listdir if l[1]==settingsName]
for direntry in listdir:
data.append([direntry, ole.openstream(direntry).getvalue()])
    except Exception:
print('Failed to read streams in file: ' + filepath)
return data
def extract_parameters(filepath, rdb_info, txtfile):
fn = os.path.basename(filepath)
parameter_info=[]
for stream in rdb_info:
settings_name = str(stream[0][1])
stream_name = str(stream[0][-1]).upper()
if stream_name in SEL_FILES_TO_GROUP[txtfile]:
return [settings_name, stream[1].decode('utf-8')]
def get_sel_setting(text):
setting_expression = re.compile(r'^([A-Z0-9_]+),\"(.*)\"(?:\r\n|\x1c\r\n)', flags=re.MULTILINE)
return re.findall(setting_expression, text)
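# Example of what the regex above yields (illustrative, using a made-up
# settings fragment in the SEL text format):
#
#   get_sel_setting('OUT101,"TRIP"\x1c\r\nOUT102,"CLOSE"\x1c\r\n')
#   # -> [('OUT101', 'TRIP'), ('OUT102', 'CLOSE')]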
def format_logic(d):
# get logic report
if isinstance(d, str):
raw_results = collections.OrderedDict()
for k, v in d.items():
raw_results[k] = sel_logic_count.calc_usage_raw(v)
return raw_results
else:
return d
def make_table_data(raw_results):
table_data = []
for row_name in LINE_INFO + LOGIC_INFO:
table_row = [row_name]
for k, v in raw_results.items():
if row_name in v:
table_row.append(v[row_name])
table_data.append(table_row)
return table_data
def sum_logic_usage_multiple_groups(d, group_title='Group', settings_name=None, automation=None, total=None):
"""
d is a dictionary with the group number as the key
    and the protection logic as the values.
    This is processed and an AsciiDoc table is produced.
"""
columns = 3*len(d) + 1
# get logic report
table_data = make_table_data(format_logic(d))
no_groups = len(d)
info = []
# Anchor
info.append('[#overall_logic_usage]')
# Title
if settings_name:
keys = ', '.join([str(ky)[1:2] for ky in d.keys()])
info.append('.`{}` Logic Usage in Setting Groups {}'.format(settings_name.upper(), keys))
# Column Definitions
info.append('[cols="1*<.^,{}"]'.format(','.join(['1*>.^,1*^.^,1*>.^'] * no_groups)))
info.append('|===')
# Group Title
info.append('h|')
for group in d.keys():
info.append('3+^.^h| '.format(no_groups) +
'{} {}'.format(group_title, group[1:]))
info.append('')
info.append(str(columns)+'+^.^h| Protection Usage')
info.append('')
# Overall line information
for k in table_data:
if k[0] in LINE_INFO:
pr = ('h| {}').format(k[0]).ljust(50)
for gd in k[1:]:
pr += '3+^.^| {} / {} '.format(gd, TOTAL_SEL_PROTECTION_LINES).ljust(20)
info.append(pr)
# Capacity free from relay STA S command
sta_s_info = ['Free protection settings capacity (%)', 'Free protection execution capacity (%)']
for s in sta_s_info:
pr = ('h| {} ').format(s).ljust(50)
for gd in range(no_groups):
pr += '3+^.^| #??# '.ljust(20)
info.append(pr)
info.append('')
if d and not total:
info.append(str(columns)+'+^.^h| Variable Usage for Protection Logic')
elif total and automation:
info.append(str(columns)+'+^.^h| Variable Usage for Protection and Automation Logic')
info.append('')
info.append('h| Variable ' +
' '.join(['h| Used h| Free % h| Available']*no_groups))
info.append('')
if total:
table_data = make_table_data(format_logic(total))
for k in table_data:
if k[0] in LOGIC_INFO:
pr = ('h| `{}`'.format(k[0])).ljust(13)
for gd in k[1:]:
fstr = '| {:>12} | {:<7.0%} | {:<30}'
pr += fstr.format('{} / {}'.format(gd['qty'], gd['total']),
gd['free_pu'],
'[small]#{}#'.format(gd['available_detail']))
info.append(pr)
if automation:
info.append('')
info.append(str(columns)+'+^.^h| Automation Usage')
info.append('')
# Group Title
info.append('h|')
for group in d.keys():
info.append('3+^.^h| '.format(no_groups) +
'{} {}'.format(group_title, group[1:]))
questions = ['3+^.^| #??# '] * no_groups
info.append('{:<50} {}'.format('h| Free automation settings storage capacity (%)', ''.join(questions)))
info.append('{:<50} {}'.format('h| Free automation execution availability (%)', ''.join(questions)))
info.append('{:<50} {}'.format('h| Automation peak execution cycle time (ms)', ''.join(questions)))
info.append('{:<50} {}'.format('h| Automation average execution cycle time (ms)', ''.join(questions)))
table_data = make_table_data(format_logic(automation))
# Overall line information
for k in table_data:
if k[0] in LINE_INFO:
pr = ('h| {} ').format(k[0]).ljust(51)
for gd in k[1:]:
pr += str(no_groups * 3) + '+^.^| {} / {} '.format(gd, TOTAL_SEL_AUTOMATION_LINES).ljust(20)
info.append(pr)
info.append('|===')
return('\n'.join(info))
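# Illustrative call (a sketch; the logic text values are whatever get_logic()
# extracts from the RDB streams, and the variable names here are made up):
#
#   groups = {'L1': logic_text_group1, 'L2': logic_text_group2}
#   print(sum_logic_usage_multiple_groups(groups, 'Group', 'TYP123'))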
def get_logic(filepath, *names, settingsName=None):
logics = {}
for name in names:
[settings_name, output] = process_file(filepath, name, settingsName)
lines = get_sel_setting(output)
result = []
for settings in lines:
result.append(settings[1])
logic_text = "\n".join(result)
logics[name] = logic_text
return logics
def get_logic_total(path, groups, includeAutomation=True, settings_name=None):
# get logic for number of protection
groups_new = ['L' + str(g) for g in groups]
protection = get_logic(path, *groups_new, settingsName=settings_name)
automation_arr = []
if includeAutomation:
for block in range(1,10+1):
#print(get_logic(path, 'A' + str(block)))
automation_arr.append(get_logic(path, 'A' + str(block), settingsName=settings_name)['A' + str(block)])
automation = '\n'.join(automation_arr)
return [protection, automation]
return [protection]
def plogic_used(filepath, group_prefix, settings_name, *nums):
logics = get_logic(filepath, *nums)
if len(nums) == 1:
return sel_logic_count.calc_logic_usage(logics[nums[0]])
else:
return sum_logic_usage_multiple_groups(logics, group_prefix, settings_name)
def pa_logic_used(filepath, group_prefix, settings_name, *nums):
logics = get_logic_total(filepath, nums, includeAutomation=True, settings_name=settings_name)
LINES = ['Lines Used (w/ comment lines)', 'Lines Used (w/o comment lines)']
automation = sel_logic_count.calc_usage_raw(logics[1])
automation = {k:v for (k,v) in automation.items() if k in LINES}
automation = {'A': automation}
protection = {}
total = {}
for group in nums:
# print(group)
pg = sel_logic_count.calc_usage_raw(logics[0]['L' + str(group)])
protection['L' + str(group)] = {k:v for (k,v) in pg.items() if k in LINES}
tg = sel_logic_count.calc_usage_raw(logics[0]['L' + str(group)] + '\n' + logics[1])
total['L' + str(group)] = {k:v for (k,v) in tg.items() if k not in LINES}
#print('p',protection, 'a', automation, 't', total)
print(sum_logic_usage_multiple_groups(protection, group_prefix, settings_name, automation, total))
"""
if len(nums) == 1:
return sel_logic_count.calc_logic_usage(logics[nums[0]])
else:
return sum_logic_usage_multiple_groups(logics, group_prefix, settings_name)
"""
if __name__ == '__main__':
"""
path = r'F:\standard-designs\transformer-protection\SEL487E-3_Transformer_Protection_Settings\settings\SEL-487E-3.rdb'
output = process_file(path, 'F1')
k = get_sel_setting(output)
result = []
for item in k:
val = item[1]
cnt = sel_logic_count.countElementsUsed(val)
result.append(('{: >3}').format(str(cnt)) + ' | ' + item[0] + ' ::= ' + val)
result = sorted(result, key=lambda x: int((x.split('|'))[0].strip()))
print(result)
for k in result:
# print('x', k)
print(int((k.split('|'))[0].strip()), k)
"""
"""output = process_file('/media/mulhollandd/KINGSTON/standard-designs/transformer-protection/SEL487E-3_Transformer_Protection_Settings/settings/SEL-487E-3.rdb', 'L1')
#k = get_stream_parameter('',output)
k = get_sel_setting(output)
result = []
for val in k:
result.append(val[1])
logic_text = "\n".join(result)
print(sel_logic_count.calc_logic_usage(logic_text))"""
#plogic_used('/home/mulhollandd/Downloads/SEL487E-3_Transformer_Protection_Settings_v14Aug2017.000.002/settings/SEL-487E-3.rdb', 1)
#path = '/media/mulhollandd/KINGSTON/standard-designs/transformer-protection/SEL487E-3_Transformer_Protection_Settings/settings/SEL-487E-3.rdb'
#path = r'G:\standard-designs\transformer-protection\SEL487E-3_Transformer_Protection_Settings\settings\SEL-487E-3.rdb'
path = r'F:\standard-designs\transformer-protection\SEL487E-3_Transformer_Protection_Settings\settings\SEL-487E-3.rdb'
path = r'/media/mulhollandd/KINGSTON/standard-designs/capacitor-protection/SEL487E-3_Capacitor_Protection_Settings/settings/SEL-487E-3.rdb'
#path = '/home/mulhollandd/Downloads/junk/SEL-487E-3.rdb'
#print(plogic_used(path, 'Application', 1, 2))
#print(get_logic_total(path, [1,2]))
#print(pa_logic_used(path, 'Application', 1, 2))
#print(plogic_used(path, 'Application', 'Blah', 'L1', 'L2'))
pa_logic_used(path, 'Application', 'TYP123_DStarNE', '1')
#output = process_file(path, 'R1')
#print(output)
#print(output)
"""
ser_points_and_aliases = {}
for counter in range(1, 250+1):
num = str(counter)
#SITM70,"TRIPT"\x1c\r\nSNAME70,"TRIPT"\x1c\r\nSSET70,"Asserted"\x1c\r\nSCLR70,"Deasserted"\x1c\r\nSHMI70,"N"
match = re.compile(r'SITM' + num + r',"([A-Z0-9_]*)"\x1c\r\nSNAME' + num + r',"([A-Za-z0-9_]+)*"\x1c\r\nSSET' + num + ',"(.*)"\x1c\r\nSCLR'+ num + ',"(.*)"\x1c\r\nSHMI' + num + r',"([A-Z0-9_]+)*"', flags=re.MULTILINE)
result = match.findall('\n'.join(output))
rwb = result[0][0]
aliases = result[0][1]
alias_set = result[0][2]
alias_clear = result[0][3]
hmi_alarm = result[0][4]
ser_points_and_aliases[rwb] = [aliases, alias_set, alias_clear, hmi_alarm]
print(rwb, [aliases, alias_set, alias_clear, hmi_alarm])
output = process_file(path, 'P1')
protection = ['P1', 'P2', 'P3', 'P4', 'P5', 'P6']
automation = ['A1', 'A2', 'A3', 'A4', 'A5', 'A6', 'A7', 'A8', 'A9', 'A10']
for logic in protection + automation
output = process_file(path, 'P1')
output = process_file(path, 'P1')
output = process_file(path, 'P1')
output = process_file(path, 'P1')
output = process_file(path, 'P1')
"""
#for k in output:
# print(k)
# SITM248,"PST07Q" SNAME248,"PST07Q" SSET248,"Asserted" SCLR248,"Deasserted" SHMI248,"N"
#
# tool to remove protection and automation aliases which are unused.
|
gpl-3.0
| -2,976,571,120,079,492,000 | 31.658768 | 225 | 0.567117 | false | 3.007857 | false | false | false |
vortex-ape/scikit-learn
|
sklearn/tree/export.py
|
4
|
17978
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Trevor Stephens <[email protected]>
# Li Li <[email protected]>
# License: BSD 3 clause
from numbers import Integral
import numpy as np
from ..externals import six
from ..utils.validation import check_is_fitted
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
class Sentinel(object):
def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=None, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False, precision=3):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree regressor or classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default=None)
Handle or name of the output file. If ``None``, the result is
returned as a string.
.. versionchanged:: 0.20
Default of out_file changed from "tree.dot" to None.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
precision : int, optional (default=3)
Number of digits of precision for floating point in the values of
impurity, threshold and value attributes of each node.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] -
sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
            characters = ['&#35;', '<SUB>', '</SUB>', '&le;', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id],
precision),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], precision)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, precision)
elif proportion:
# Classification
value_text = np.around(value, precision)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, precision)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif (tree.n_classes[0] == 1 and
len(np.unique(tree.value)) != 1):
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
check_is_fitted(decision_tree, 'tree_')
own_file = False
return_string = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
if isinstance(precision, Integral):
if precision < 0:
raise ValueError("'precision' should be greater or equal to 0."
" Got {} instead.".format(precision))
else:
raise ValueError("'precision' should be an integer. Got {}"
" instead.".format(type(precision)))
# Check length of feature_names before getting into the tree node
# Raise error if length of feature_names does not match
# n_features_ in the decision_tree
if feature_names is not None:
if len(feature_names) != decision_tree.n_features_:
raise ValueError("Length of feature_names, %d "
"does not match number of features, %d"
% (len(feature_names),
decision_tree.n_features_))
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
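# Illustrative usage sketch (not part of the library; assumes scikit-learn is
# installed and, optionally, the third-party `graphviz` package for rendering):
#
#     from sklearn.datasets import load_iris
#     from sklearn.tree import DecisionTreeClassifier, export_graphviz
#
#     iris = load_iris()
#     clf = DecisionTreeClassifier(max_depth=3).fit(iris.data, iris.target)
#     dot_data = export_graphviz(clf, out_file=None,
#                                feature_names=iris.feature_names,
#                                class_names=iris.target_names,
#                                filled=True, rounded=True)
#     # import graphviz; graphviz.Source(dot_data).render("iris_tree")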
|
bsd-3-clause
| -4,032,309,513,680,093,700 | 37.008457 | 79 | 0.518356 | false | 4.174135 | false | false | false |
rdezavalia/ansible
|
lib/ansible/cli/galaxy.py
|
1
|
30060
|
########################################################################
#
# (C) 2013, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
import sys
import yaml
import time
from collections import defaultdict
from jinja2 import Environment
import ansible.constants as C
from ansible.cli import CLI
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.galaxy import Galaxy
from ansible.galaxy.api import GalaxyAPI
from ansible.galaxy.role import GalaxyRole
from ansible.galaxy.login import GalaxyLogin
from ansible.galaxy.token import GalaxyToken
from ansible.playbook.role.requirement import RoleRequirement
from ansible.utils.unicode import to_unicode
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyCLI(CLI):
SKIP_INFO_KEYS = ("name", "description", "readme_html", "related", "summary_fields", "average_aw_composite", "average_aw_score", "url" )
VALID_ACTIONS = ("delete", "import", "info", "init", "install", "list", "login", "remove", "search", "setup")
def __init__(self, args):
self.api = None
self.galaxy = None
super(GalaxyCLI, self).__init__(args)
def parse(self):
''' create an options parser for bin/ansible '''
self.parser = CLI.base_parser(
usage = "usage: %%prog [%s] [--help] [options] ..." % "|".join(self.VALID_ACTIONS),
epilog = "\nSee '%s <command> --help' for more information on a specific command.\n\n" % os.path.basename(sys.argv[0])
)
self.set_action()
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
# specific to actions
if self.action == "delete":
self.parser.set_usage("usage: %prog delete [options] github_user github_repo")
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
self.parser.set_usage("usage: %prog remove role1 role2 ...")
elif self.action == "list":
self.parser.set_usage("usage: %prog list [role_name]")
elif self.action == "login":
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
if self.action in ['init', 'info']:
self.parser.add_option( '--offline', dest='offline', default=False, action='store_true', help="Don't query the galaxy API when creating roles")
if not self.action in ("delete","import","init","login","setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH,
help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
self.options, self.args =self.parser.parse_args()
display.verbosity = self.options.verbosity
self.galaxy = Galaxy(self.options)
return True
def run(self):
super(GalaxyCLI, self).run()
self.api = GalaxyAPI(self.galaxy)
self.execute()
def exit_without_ignore(self, rc=1):
"""
Exits with the specified return code unless the
option --ignore-errors was specified
"""
if not self.get_opt("ignore_errors", False):
raise AnsibleError('- you can use --ignore-errors to skip failed roles and finish processing the list.')
def _display_role_info(self, role_info):
text = [u"", u"Role: %s" % to_unicode(role_info['name'])]
text.append(u"\tdescription: %s" % role_info.get('description', ''))
for k in sorted(role_info.keys()):
if k in self.SKIP_INFO_KEYS:
continue
if isinstance(role_info[k], dict):
text.append(u"\t%s:" % (k))
for key in sorted(role_info[k].keys()):
if key in self.SKIP_INFO_KEYS:
continue
text.append(u"\t\t%s: %s" % (key, role_info[k][key]))
else:
text.append(u"\t%s: %s" % (k, role_info[k]))
return u'\n'.join(text)
############################
# execute actions
############################
def execute_init(self):
"""
Executes the init action, which creates the skeleton framework
of a role that complies with the galaxy metadata format.
"""
init_path = self.get_opt('init_path', './')
force = self.get_opt('force', False)
offline = self.get_opt('offline', False)
role_name = self.args.pop(0).strip() if self.args else None
if not role_name:
raise AnsibleOptionsError("- no role name specified for init")
role_path = os.path.join(init_path, role_name)
if os.path.exists(role_path):
if os.path.isfile(role_path):
raise AnsibleError("- the path %s already exists, but is a file - aborting" % role_path)
elif not force:
                raise AnsibleError("- the directory %s already exists.\n"
"you can use --force to re-initialize this directory,\n"
"however it will reset any main.yml files that may have\n"
"been modified there already." % role_path)
# create default README.md
if not os.path.exists(role_path):
os.makedirs(role_path)
readme_path = os.path.join(role_path, "README.md")
f = open(readme_path, "wb")
f.write(self.galaxy.default_readme)
f.close()
# create default .travis.yml
travis = Environment().from_string(self.galaxy.default_travis).render()
f = open(os.path.join(role_path, '.travis.yml'), 'w')
f.write(travis)
f.close()
for dir in GalaxyRole.ROLE_DIRS:
dir_path = os.path.join(init_path, role_name, dir)
main_yml_path = os.path.join(dir_path, 'main.yml')
# create the directory if it doesn't exist already
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# now create the main.yml file for that directory
if dir == "meta":
# create a skeleton meta/main.yml with a valid galaxy_info
# datastructure in place, plus with all of the available
# platforms included (but commented out), the galaxy_tags
# list, and the dependencies section
platforms = []
if not offline:
platforms = self.api.get_list("platforms") or []
# group the list of platforms from the api based
# on their names, with the release field being
# appended to a list of versions
platform_groups = defaultdict(list)
for platform in platforms:
platform_groups[platform['name']].append(platform['release'])
platform_groups[platform['name']].sort()
inject = dict(
author = 'your name',
description = 'your description',
company = 'your company (optional)',
license = 'license (GPLv2, CC-BY, etc)',
issue_tracker_url = 'http://example.com/issue/tracker',
min_ansible_version = '1.2',
platforms = platform_groups,
)
rendered_meta = Environment().from_string(self.galaxy.default_meta).render(inject)
f = open(main_yml_path, 'w')
f.write(rendered_meta)
f.close()
pass
elif dir == "tests":
# create tests/test.yml
inject = dict(
role_name = role_name
)
playbook = Environment().from_string(self.galaxy.default_test).render(inject)
f = open(os.path.join(dir_path, 'test.yml'), 'w')
f.write(playbook)
f.close()
# create tests/inventory
f = open(os.path.join(dir_path, 'inventory'), 'w')
f.write('localhost')
f.close()
elif dir not in ('files','templates'):
# just write a (mostly) empty YAML file for main.yml
f = open(main_yml_path, 'w')
f.write('---\n# %s file for %s\n' % (dir,role_name))
f.close()
display.display("- %s was created successfully" % role_name)
def execute_info(self):
"""
Executes the info action. This action prints out detailed
information about an installed role as well as info available
from the galaxy API.
"""
if len(self.args) == 0:
# the user needs to specify a role
raise AnsibleOptionsError("- you must specify a user/role name")
roles_path = self.get_opt("roles_path")
data = ''
for role in self.args:
role_info = {'path': roles_path}
gr = GalaxyRole(self.galaxy, role)
install_info = gr.install_info
if install_info:
if 'version' in install_info:
                    install_info['installed_version'] = install_info['version']
del install_info['version']
role_info.update(install_info)
remote_data = False
if not self.options.offline:
remote_data = self.api.lookup_role_by_name(role, False)
if remote_data:
role_info.update(remote_data)
if gr.metadata:
role_info.update(gr.metadata)
req = RoleRequirement()
role_spec= req.role_yaml_parse({'role': role})
if role_spec:
role_info.update(role_spec)
data = self._display_role_info(role_info)
### FIXME: This is broken in both 1.9 and 2.0 as
# _display_role_info() always returns something
if not data:
data = u"\n- the role %s was not found" % role
self.pager(data)
def execute_install(self):
"""
Executes the installation action. The args list contains the
        roles to be installed, unless -r was specified. The list of roles
can be a name (which will be downloaded via the galaxy API and github),
or it can be a local .tar.gz file.
"""
role_file = self.get_opt("role_file", None)
if len(self.args) == 0 and role_file is None:
# the user needs to specify one of either --role-file
# or specify a single user/role name
raise AnsibleOptionsError("- you must specify a user/role name or a roles file")
elif len(self.args) == 1 and role_file is not None:
# using a role file is mutually exclusive of specifying
# the role name on the command line
raise AnsibleOptionsError("- please specify a user/role name, or a roles file, but not both")
no_deps = self.get_opt("no_deps", False)
force = self.get_opt('force', False)
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if role_file.endswith('.yaml') or role_file.endswith('.yml'):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError("Unable to load data from the requirements file: %s" % role_file)
if required_roles is None:
raise AnsibleError("No roles found in file: %s" % role_file)
for role in required_roles:
role = RoleRequirement.role_yaml_parse(role)
display.vvv('found role %s in yaml file' % str(role))
if 'name' not in role and 'scm' not in role:
raise AnsibleError("Must specify name or src for role")
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
display.deprecated("going forward only the yaml format will be supported")
# roles listed in a file, one per line
for rline in f.readlines():
if rline.startswith("#") or rline.strip() == '':
continue
display.debug('found role %s in text file' % str(rline))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
display.error('Unable to open %s: %s' % (role_file, str(e)))
else:
# roles were specified directly, so we'll just go out grab them
# (and their dependencies, unless the user doesn't want us to).
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
display.vvv('Installing role %s ' % role.name)
# query the galaxy API for the role data
if role.install_info is not None and not force:
display.display('- %s is already installed, skipping.' % role.name)
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning("- %s was NOT installed successfully: %s " % (role.name, str(e)))
self.exit_without_ignore()
continue
# install dependencies, if we want them
if not no_deps and installed:
role_dependencies = role.metadata.get('dependencies') or []
for dep in role_dependencies:
display.debug('Installing dep %s' % dep)
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if '.' not in dep_role.name and '.' not in dep_role.src and dep_role.scm is None:
# we know we can skip this, as it's not going to
# be found on galaxy.ansible.com
continue
if dep_role.install_info is None or force:
if dep_role not in roles_left:
display.display('- adding dependency: %s' % dep_role.name)
roles_left.append(dep_role)
else:
display.display('- dependency %s already pending installation.' % dep_role.name)
else:
display.display('- dependency %s is already installed, skipping.' % dep_role.name)
if not installed:
display.warning("- %s was NOT installed successfully." % role.name)
self.exit_without_ignore()
return 0
def execute_remove(self):
"""
Executes the remove action. The args list contains the list
of roles to be removed. This list can contain more than one role.
"""
if len(self.args) == 0:
raise AnsibleOptionsError('- you must specify at least one role to remove.')
for role_name in self.args:
role = GalaxyRole(self.galaxy, role_name)
try:
if role.remove():
display.display('- successfully removed %s' % role_name)
else:
display.display('- %s is not installed, skipping.' % role_name)
except Exception as e:
raise AnsibleError("Failed to remove role %s: %s" % (role_name, str(e)))
return 0
def execute_list(self):
"""
Executes the list action. The args list can contain zero
or one role. If one is specified, only that role will be
shown, otherwise all roles in the specified directory will
be shown.
"""
if len(self.args) > 1:
raise AnsibleOptionsError("- please specify only one role to list, or specify no roles to see a full list")
if len(self.args) == 1:
# show only the request role, if it exists
name = self.args.pop()
gr = GalaxyRole(self.galaxy, name)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
# show some more info about single roles here
display.display("- %s, %s" % (name, version))
else:
display.display("- the role %s was not found" % name)
else:
# show all valid roles in the roles_path directory
roles_path = self.get_opt('roles_path')
for path in roles_path:
role_path = os.path.expanduser(path)
if not os.path.exists(role_path):
raise AnsibleOptionsError("- the path %s does not exist. Please specify a valid path with --roles-path" % role_path)
elif not os.path.isdir(role_path):
raise AnsibleOptionsError("- %s exists, but it is not a directory. Please specify a valid path with --roles-path" % role_path)
path_files = os.listdir(role_path)
for path_file in path_files:
gr = GalaxyRole(self.galaxy, path_file)
if gr.metadata:
install_info = gr.install_info
version = None
if install_info:
version = install_info.get("version", None)
if not version:
version = "(unknown version)"
display.display("- %s, %s" % (path_file, version))
return 0
def execute_search(self):
page_size = 1000
search = None
if len(self.args):
terms = []
for i in range(len(self.args)):
terms.append(self.args.pop())
search = '+'.join(terms[::-1])
if not search and not self.options.platforms and not self.options.tags and not self.options.author:
raise AnsibleError("Invalid query. At least one search term, platform, galaxy tag or author must be provided.")
response = self.api.search_roles(search, platforms=self.options.platforms,
tags=self.options.tags, author=self.options.author, page_size=page_size)
if response['count'] == 0:
display.display("No roles match your search.", color=C.COLOR_ERROR)
return True
data = [u'']
if response['count'] > page_size:
data.append(u"Found %d roles matching your search. Showing first %s." % (response['count'], page_size))
else:
data.append(u"Found %d roles matching your search:" % response['count'])
max_len = []
for role in response['results']:
max_len.append(len(role['username'] + '.' + role['name']))
name_len = max(max_len)
format_str = u" %%-%ds %%s" % name_len
data.append(u'')
data.append(format_str % (u"Name", u"Description"))
data.append(format_str % (u"----", u"-----------"))
for role in response['results']:
data.append(format_str % (u'%s.%s' % (role['username'], role['name']), role['description']))
data = u'\n'.join(data)
self.pager(data)
return True
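# Example (assumed CLI usage; flag names may differ between Ansible versions):
#   ansible-galaxy search elasticsearch --author someuser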
def execute_login(self):
"""
Verify the user's identity via GitHub and retrieve an auth token from Galaxy.
"""
# Authenticate with github and retrieve a token
if self.options.token is None:
login = GalaxyLogin(self.galaxy)
github_token = login.create_github_token()
else:
github_token = self.options.token
galaxy_response = self.api.authenticate(github_token)
if self.options.token is None:
# Remove the token we created
login.remove_github_token()
# Store the Galaxy token
token = GalaxyToken()
token.set(galaxy_response['token'])
display.display("Succesfully logged into Galaxy as %s" % galaxy_response['username'])
return 0
def execute_import(self):
"""
Import a role into Galaxy
"""
colors = {
'INFO': 'normal',
'WARNING': C.COLOR_WARN,
'ERROR': C.COLOR_ERROR,
'SUCCESS': C.COLOR_OK,
'FAILED': C.COLOR_ERROR,
}
if len(self.args) < 2:
raise AnsibleError("Expected a github_username and github_repository. Use --help.")
github_repo = self.args.pop()
github_user = self.args.pop()
if self.options.check_status:
task = self.api.get_import_task(github_user=github_user, github_repo=github_repo)
else:
# Submit an import request
task = self.api.create_import_task(github_user, github_repo, reference=self.options.reference)
if len(task) > 1:
# found multiple roles associated with github_user/github_repo
display.display("WARNING: More than one Galaxy role associated with Github repo %s/%s." % (github_user,github_repo),
color='yellow')
display.display("The following Galaxy roles are being updated:" + u'\n', color=C.COLOR_CHANGED)
for t in task:
display.display('%s.%s' % (t['summary_fields']['role']['namespace'],t['summary_fields']['role']['name']), color=C.COLOR_CHANGED)
display.display(u'\n' + "To properly namespace this role, remove each of the above and re-import %s/%s from scratch" % (github_user,github_repo), color=C.COLOR_CHANGED)
return 0
# found a single role as expected
display.display("Successfully submitted import request %d" % task[0]['id'])
if not self.options.wait:
display.display("Role name: %s" % task[0]['summary_fields']['role']['name'])
display.display("Repo: %s/%s" % (task[0]['github_user'],task[0]['github_repo']))
if self.options.check_status or self.options.wait:
# Get the status of the import
msg_list = []
finished = False
while not finished:
task = self.api.get_import_task(task_id=task[0]['id'])
for msg in task[0]['summary_fields']['task_messages']:
if msg['id'] not in msg_list:
display.display(msg['message_text'], color=colors[msg['message_type']])
msg_list.append(msg['id'])
if task[0]['state'] in ['SUCCESS', 'FAILED']:
finished = True
else:
time.sleep(10)
return 0
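# Example (assumed CLI usage):  ansible-galaxy import github_user github_repo
# The polling loop above only runs when the wait/check-status options are set.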
def execute_setup(self):
"""
Set up an integration from GitHub or Travis
"""
if self.options.setup_list:
# List existing integration secrets
secrets = self.api.list_secrets()
if len(secrets) == 0:
# None found
display.display("No integrations found.")
return 0
display.display(u'\n' + "ID Source Repo", color=C.COLOR_OK)
display.display("---------- ---------- ----------", color=C.COLOR_OK)
for secret in secrets:
display.display("%-10s %-10s %s/%s" % (secret['id'], secret['source'], secret['github_user'],
secret['github_repo']),color=C.COLOR_OK)
return 0
if self.options.remove_id:
# Remove a secret
self.api.remove_secret(self.options.remove_id)
display.display("Secret removed. Integrations using this secret will not longer work.", color=C.COLOR_OK)
return 0
if len(self.args) < 4:
raise AnsibleError("Missing one or more arguments. Expecting: source github_user github_repo secret")
return 0
secret = self.args.pop()
github_repo = self.args.pop()
github_user = self.args.pop()
source = self.args.pop()
resp = self.api.add_secret(source, github_user, github_repo, secret)
display.display("Added integration for %s %s/%s" % (resp['source'], resp['github_user'], resp['github_repo']))
return 0
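# Example (assumed CLI usage; note the positional arguments are popped in reverse above):
#   ansible-galaxy setup travis github_user github_repo some-secret-token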
def execute_delete(self):
"""
Delete a role from galaxy.ansible.com
"""
if len(self.args) < 2:
raise AnsibleError("Missing one or more arguments. Expected: github_user github_repo")
github_repo = self.args.pop()
github_user = self.args.pop()
resp = self.api.delete_role(github_user, github_repo)
if len(resp['deleted_roles']) > 1:
display.display("Deleted the following roles:")
display.display("ID User Name")
display.display("------ --------------- ----------")
for role in resp['deleted_roles']:
display.display("%-8s %-15s %s" % (role.id,role.namespace,role.name))
display.display(resp['status'])
return True
|
gpl-3.0
| -1,375,117,498,538,847,500 | 43.865672 | 197 | 0.559015 | false | 4.262621 | false | false | false |
jwodder/javaproperties
|
docs/conf.py
|
1
|
1147
|
from javaproperties import __version__
project = "javaproperties"
author = "John T. Wodder II"
copyright = "2016-2020 John T. Wodder II"
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx_copybutton",
]
autodoc_default_options = {
"members": True,
"undoc-members": True,
}
# NOTE: Do not set 'inherited-members', as it will cause all of the
# MutableMapping methods to be listed under `Properties`.
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
}
exclude_patterns = ["_build"]
source_suffix = ".rst"
source_encoding = "utf-8-sig"
master_doc = "index"
version = __version__
release = __version__
today_fmt = "%Y %b %d"
default_role = "py:obj"
pygments_style = "sphinx"
todo_include_todos = True
html_theme = "sphinx_rtd_theme"
html_theme_options = {
"collapse_navigation": False,
"prev_next_buttons_location": "both",
}
html_last_updated_fmt = "%Y %b %d"
html_show_sourcelink = True
html_show_sphinx = True
html_show_copyright = True
copybutton_prompt_text = r">>> |\.\.\. |\$ "
copybutton_prompt_is_regexp = True
|
mit
| 7,820,110,847,402,805,000 | 22.895833 | 67 | 0.668701 | false | 3.010499 | false | true | false |
goldsborough/euler
|
13.py
|
1
|
6006
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Work out the first ten digits of the sum of the
following one-hundred 50-digit numbers.
"""
n = """37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690"""
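# Two approaches are compared in main(): easy() simply sums the integers and slices the
# first ten digits, while last_ten() splits each number into 10-digit chunks and
# hand-propagates the carry from the second chunk into the first before summing it.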
def add(values, last_carry=0):
carry = 0
for d in range(len(values[0]) -1, -1, -1):
i = int(values[0][d])
for j in values[1:]:
i += int(j[d])
if i + last_carry >= 10:
carry += 1
i %= 10
last_carry = carry
carry = 0
return last_carry
def last_ten(n):
digits = []
for d in n.split():
digits.append([])
for i in range(0, len(d), 10):
digits[-1].append(d[i:i+10])
digits = [[d[i] for d in digits] for i in range(len(digits[0]))]
carry = 0
for x in digits[1::-1]:
carry = add(x, carry)
result = carry
for i in digits[0]:
result += int(i)
return str(result)[:10]
def easy(n):
return str(sum(int(d) for d in n.split()))[:10]
def main():
print(easy(n))
print(last_ten(n))
if __name__ == '__main__':
main()
|
mit
| -4,324,106,769,971,052,500 | 39.863946 | 65 | 0.919414 | false | 2.301149 | false | false | false |
lierwing/yecrypto
|
yecrypto.py
|
1
|
1966
|
import random
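# Toy substitution scheme: every character's code point is shifted by one random five-digit
# key and written space-separated to <name>.txt; decryption subtracts the same key.
# Hypothetical round-trip (method names taken from the class below):
#   Yecrypto.ascii_encryption("secret", "hello")   # prints the key, writes secret.txt
#   Yecrypto.ascii_decryption("secret", 12345)     # 12345 stands in for the printed key
# Not cryptographically secure.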
class Yecrypto:
@staticmethod
def ascii_encryption(new_file, normal_string):
num = 0
ns_dic = {}
for i in normal_string:
num = num + 1
ns_dic[num] = i
ns_dic_keys = []
for i in ns_dic.keys():
ns_dic_keys.append(i)
random_key = random.randrange(10000, 100000)
crypt_f = open(new_file + ".txt", "w", encoding="utf-8")
for i in ns_dic_keys:
ns_dic_value = ns_dic[i]
ns_crypt_ascii = ord(ns_dic_value)
crypt_f.write("%s " % (ns_crypt_ascii + random_key))
crypt_f.close()
print("Key:", random_key)
print("Encryption Done!")
@staticmethod
def ascii_decryption(find_file, decrypt_key):
crypt_f = open(find_file + ".txt", "r", encoding="utf-8")
read_crypt_f = crypt_f.read().strip()
crypt_f.close()
decrypt_str = []
for i in read_crypt_f.split(" "):
decrypt_ascii = int(i) - decrypt_key
decrypt_ascii = chr(decrypt_ascii)
decrypt_str.append(decrypt_ascii)
decrypt_f = open(find_file + ".txt", "w", encoding="utf-8")
for i in decrypt_str:
decrypt_f.write(i)
print("Decryption Done!")
decrypt_f.close()
if __name__ == "__main__":
while True:
try:
menu = int(input("\nChoose the number( 1: Crypt, 2: Decrypt, 3: Exit ): "))
if menu == 1:
file_name = input("File Name: ")
content = input("Content: ")
Yecrypto().ascii_encryption(file_name, content)
elif menu == 2:
file_name = input("File Name: ")
crypt_key = int(input("Crypt Key: "))
Yecrypto().ascii_decryption(file_name, crypt_key)
elif menu == 3:
break
except ValueError:
print("1: Crypt, 2: Decrypt, 3: Exit")
|
mit
| 3,042,574,561,103,298,600 | 30.709677 | 87 | 0.503561 | false | 3.640741 | false | false | false |
Code4SA/odac-ford-housing
|
msg_handler/admin.py
|
1
|
7116
|
from flask import Flask, url_for, redirect, render_template, request
from wtforms import form, fields, validators
from wtforms.fields import SelectField, TextAreaField
from flask.ext import admin, login
from flask.ext.admin.contrib import sqla
from flask.ext.admin import helpers, expose
from flask.ext.admin.model.template import macro
from flask.ext.admin.form import rules
from flask.ext.login import current_user
from msg_handler import app, db, logger
from msg_handler.models import *
from vumi_go import VumiMessage
import json
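# Flask-Admin dashboard for the housing SMS service: flask-login backed authentication views
# plus model views for users, incoming queries and outgoing updates (broadcast via Vumi).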
# Define login and registration forms (for flask-login)
class LoginForm(form.Form):
email = fields.TextField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
user = self.get_user()
if user is None:
raise validators.ValidationError('Invalid user')
if user.password != hash(self.password.data):
raise validators.ValidationError('Invalid password')
def get_user(self):
return db.session.query(User).filter_by(email=self.email.data).first()
class RegistrationForm(form.Form):
email = fields.TextField(validators=[validators.required()])
password = fields.PasswordField(validators=[validators.required()])
def validate_login(self, field):
if db.session.query(User).filter_by(email=self.email.data).count() > 0:
raise validators.ValidationError('Duplicate users')
# Initialize flask-login
def init_login():
login_manager = login.LoginManager()
login_manager.init_app(app)
# Create user loader function
@login_manager.user_loader
def load_user(user_id):
return db.session.query(User).get(user_id)
# Create customized model view class
class MyModelView(sqla.ModelView):
def is_accessible(self):
return login.current_user.is_authenticated()
# Create customized index view class that handles login & registration
class MyAdminIndexView(admin.AdminIndexView):
@expose('/')
def index(self):
if not login.current_user.is_authenticated():
return redirect(url_for('.login_view'))
return super(MyAdminIndexView, self).index()
@expose('/login/', methods=('GET', 'POST'))
def login_view(self):
# handle user login
form = LoginForm(request.form)
if helpers.validate_form_on_submit(form):
user = form.get_user()
login.login_user(user)
if login.current_user.is_authenticated():
return redirect(url_for('.index'))
link = '<p>Don\'t have an account? <a href="' + url_for('.register_view') + '">Click here to register.</a></p>'
self._template_args['form'] = form
self._template_args['link'] = link
return super(MyAdminIndexView, self).index()
@expose('/register/', methods=('GET', 'POST'))
def register_view(self):
form = RegistrationForm(request.form)
if helpers.validate_form_on_submit(form):
user = User()
# hash password, before populating User object
form.password.data = hash(form.password.data)
form.populate_obj(user)
db.session.add(user)
db.session.commit()
login.login_user(user)
return redirect(url_for('.index'))
link = '<p>Already have an account? <a href="' + url_for('.login_view') + '">Click here to log in.</a></p>'
self._template_args['form'] = form
self._template_args['link'] = link
return super(MyAdminIndexView, self).index()
@expose('/logout/')
def logout_view(self):
login.logout_user()
return redirect(url_for('.index'))
class QueryView(MyModelView):
# disable manual editing / deletion of messages
can_create = False
can_edit = False
can_delete = False
column_list = (
'starred',
'datetime',
'from_addr',
'status',
'content',
'notes',
'responses'
)
column_labels = dict(
datetime='Date',
from_addr='From',
content='Message'
)
column_formatters = dict(
starred=macro('render_star'),
datetime=macro('render_date'),
status=macro('render_status'),
content=macro('render_content'),
notes=macro('render_notes'),
responses=macro('render_responses')
)
column_sortable_list = ('starred', 'datetime', 'from_addr', 'status')
column_searchable_list = ('content', Response.content)
column_default_sort = ('datetime', True)
list_template = 'query_list_template.html'
form_overrides = dict(
content=TextAreaField,
)
form_args = dict(
status=dict(
choices=[
('pending', 'pending'),
('in_progress', 'in progress'),
('finished', 'finished')
]
)
)
inline_models = [(Response, dict(form_label='Reply', ))]
class UserView(MyModelView):
can_create = False
column_list = (
'email',
'first_name',
'last_name'
)
class UpdateView(MyModelView):
can_delete = False
can_edit = False
list_template = 'update_list_template.html'
column_list = (
'datetime',
'user',
'content',
'notes'
)
column_labels = dict(
datetime='Date',
user='User',
content='Message',
notes='Notes'
)
column_default_sort = ('datetime', True)
column_formatters = dict(
datetime=macro('render_date'),
user=macro('render_user'),
)
form_overrides = dict(
content=TextAreaField,
)
form_create_rules = [
rules.Field('content'),
]
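# Before an Update is saved, broadcast its content by SMS to every number listed in the
# instance folder's notification_list.json and record in `notes` how many were reached.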
def on_model_change(self, form, model, is_created):
# send SMS notifications before saving message to database
msg = VumiMessage({"content": model.content})
count_tot = 0
model.user = current_user
try:
with app.open_instance_resource('notification_list.json', mode='r') as f:
try:
notification_list = json.loads(f.read())
except ValueError:
# start with clean list, if the file does not yet contain a list
notification_list = []
pass
for number in notification_list:
logger.debug("sending update to: " + number)
msg.send(number)
count_tot += 1
model.notes = "Update sent to " + str(count_tot) + " user(s)."
except Exception:
tmp = "Error sending update broadcast via SMS."
logger.exception(tmp)
model.notes = tmp
return
# Initialize flask-login
init_login()
# Create admin
admin = admin.Admin(app, 'Ford Housing', index_view=MyAdminIndexView(), base_template='my_master.html')
# Add views
admin.add_view(UserView(User, db.session))
admin.add_view(QueryView(Query, db.session))
admin.add_view(UpdateView(Update, db.session))
|
apache-2.0
| -4,220,745,175,959,475,000 | 29.676724 | 119 | 0.607926 | false | 4.031728 | false | false | false |
olgadoronina/LES_ABC
|
abc_code/utils.py
|
1
|
8359
|
import logging
import abc_code.global_var as g
import numpy as np
import scipy as sp
import scipy.stats
from numpy.fft import fftfreq, fftn, ifftn
from time import time
from abc_code.sobol_seq import i4_sobol_generate
from fast_histogram import histogram1d
import abc_code.distance as dist
import itertools
def timer(start, end, label):
hours, rem = divmod(end - start, 3600)
minutes, seconds = divmod(rem, 60)
logging.info("{:0>1}:{:0>2}:{:05.2f} \t {}".format(int(hours), int(minutes), seconds, label))
def rand_ind(random):
ind = np.random.randint(0, 256 ** 3, size=random)
ind = np.unique(ind)
while len(ind) < 0.99*random:
ind_add = np.random.randint(0, 256 ** 3, size=(random - len(ind)))
ind = np.unique(np.append(ind, ind_add))
return ind
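# Histogram helpers below: each returns a PDF over a fixed range; pdf_from_array uses
# fast_histogram.histogram1d for speed and divides the raw counts by the mean count per bin.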
def pdf_from_array_with_x(array, bins, range):
pdf, edges = np.histogram(array, bins=bins, range=range, normed=1)
x = (edges[1:] + edges[:-1]) / 2
return x, pdf
def pdf_from_array_improved(array, bins, domain, N_each):
pdf = np.empty((N_each, bins))
for i in range(N_each):
pdf[i, :] = np.histogram(array[i, :], bins=bins, range=domain, normed=1)[0]
return pdf
def pdf_from_array_np(array, bins, range):
pdf, _ = np.histogram(array, bins=bins, range=range, normed=1)
return pdf
def pdf_from_array(array, bins, range):
pdf = histogram1d(array.flatten(), bins=bins, range=range)
norm = np.sum(pdf)/bins
return pdf/norm
def baseconvert(x, newbase, number_digits):
"""Converts given number x, from base 10 to base 'newbase'
x -- the number in base 10
newbase -- base to convert
number_digits -- number of digits in new base (add zero in the beginning)
"""
assert(x >= 0)
r = []
while x > 0:
r = [x % newbase] + r
x //= newbase
for i in range(number_digits-len(r)):
r = [0] + r
return r
def uniform_grid(C_limits, N_each):
C_tmp = np.linspace(C_limits[0], C_limits[1], N_each + 1)
C_tmp = C_tmp[:-1] + (C_tmp[1] - C_tmp[0]) / 2
return C_tmp
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, s = np.mean(a), np.std(a)
h = s / np.sqrt(n) * sp.stats.t._ppf((1 + confidence) / 2., n - 1)
return m, h
def take_safe_log(x):
"""Takes natural logarithm and put g.TINY number where x = 0"""
log_fill = np.empty_like(x)
log_fill.fill(g.TINY_log)
log = np.log(x, out=log_fill, where=x > g.TINY)
return log
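# Recursive sample-covariance update in the style of Haario et al.'s adaptive Metropolis:
# given the previous mean/covariance and a new sample x at step t, return the updated pair
# scaled by s_d. Hypothetical usage (scaling choice assumed, not taken from this file):
#   cov, mean = covariance_recursive(x, t, cov, mean, s_d=2.4**2 / N_params)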
def covariance_recursive(x, t, cov_prev, mean_prev, s_d):
mean_new = t / (t + 1) * mean_prev + 1 / (t + 1) * x
cov = (t - 1) / t * cov_prev + \
s_d / t * (t * np.outer(mean_prev, mean_prev) - (t + 1) * np.outer(mean_new, mean_new) + np.outer(x, x))
return cov, mean_new
def tophat_kernel(k, limit):
"""Create 3D array of Tophat filter.
k - array of wave numbers;
limit - cutoff wavenumber."""
a = np.zeros((len(k[0]), len(k[1]), len(k[2])), dtype=np.float32)
for indx, kx in enumerate(k[0]):
for indy, ky in enumerate(k[1]):
for indz, kz in enumerate(k[2]):
a[indx, indy, indz] = np.sqrt(kx ** 2 + ky ** 2 + kz ** 2)
kernel = np.piecewise(a, [a <= limit, a > limit], [1, 0])
return kernel
def filter3d(data, scale_k, dx, N_points, filename=None):
""" Tophat filter in Fourier space for dictionary of 3D arrays.
data - dictionary of numpy arrays;
scale_k - wave number, which define size of filter."""
# FFT
start = time()
FFT = dict()
for key, value in data.items():
FFT[key] = fftn(value)
k = [fftfreq(N_points[0], dx[0]), fftfreq(N_points[1], dx[1]), fftfreq(N_points[2], dx[2])]
end = time()
timer(start, end, 'Time for FFT')
# Filtering
start = time()
kernel = tophat_kernel(k, scale_k)
end = time()
timer(start, end, 'Time for creating filter kernel')
start = time()
result = dict()
fft_filtered = dict()
for key, value in FFT.items():
fft_filtered[key] = np.multiply(value, kernel)
end = time()
timer(start, end, 'Time for filtering')
FFT.clear()
start = time()
for key, value in fft_filtered.items():
result[key] = ifftn(value).real
end = time()
timer(start, end, 'Time for iFFT')
fft_filtered.clear()
if filename:
logging.info('\nWrite file in ./data/' + filename + '.npz')
file = './data/' + filename + '.npz'
np.savez(file, **result)
return result
# def filter3d_array(array, scale_k):
#
# fft_array = fftn(array)
# k = [fftfreq(N_points[0], dx[0]), fftfreq(N_points[1], dx[1]), fftfreq(N_points[2], dx[2])]
# kernel = tophat_kernel(k, scale_k)
# fft_filtered = np.multiply(fft_array, kernel)
# result = ifftn(fft_filtered).real
#
# return result
#
# def filter3d_array_inFspace(array, scale_k):
# logging.info(array.shape)
# k = [fftfreq(N_points[0], dx[0]), fftfreq(N_points[1], dx[1]), fftfreq(N_points[2], dx[2])]
# kernel = tophat_kernel(k, scale_k)
# fft_filtered = np.multiply(array, kernel)
#
# return fft_filtered
########################################################################################################################
## Sampling functions
########################################################################################################################
def sampling_initial_for_MCMC(N_proc, C_limits, eps):
""" Find starting points for MCMC. (Sample randomly and save if distance < eps)
:return: list of lists of parameters
"""
C_array = []
while len(C_array) <= N_proc:
c = np.random.uniform(C_limits[:, 0], C_limits[:, 1])
d = dist.calc_dist(c)
if d <= eps:
C_array.append(c)
logging.info('C_start = {}'.format(c))
return C_array
def sampling_initial_for_gaussian_mixture(N_proc, N_gaussians, C_limits, eps):
""" Find starting points for Gaussian Mixture. (Sample randomly and save if distance < eps)
:return: list of lists of parameters
"""
C_array = []
start = time()
from tqdm import tqdm
with tqdm(total=N_proc*N_gaussians) as pbar:
for i in range(N_proc):
c_array = []
while len(c_array) < N_gaussians:
c = np.random.uniform(C_limits[:, 0], C_limits[:, 1])
d = dist.calc_dist(c)
if d <= eps:
c_array.append(c)
pbar.update()
C_array.append(np.array(c_array))
pbar.close()
end = time()
timer(start, end, 'Time for sampling')
return C_array
def sampling_sobol(N_total, C_limits):
""" Generate Sobol' sequense of parameters. (low-discrepency quasi-random sampling)
:return: list of lists of sampled parameters
"""
N_params = len(C_limits)
C_array = i4_sobol_generate(N_params, N_total)
for i in range(N_params):
C_array[:, i] = C_array[:, i] * (C_limits[i, 1] - C_limits[i, 0]) + C_limits[i, 0]
C_array = C_array.tolist()
return C_array
def sampling_random(N_total, C_limits):
"""
"""
N_params = len(C_limits)
C_array = np.random.random(size=(N_total, N_params))
for i in range(g.N.params):
C_array[:, i] = C_array[:, i] * (C_limits[i, 1] - C_limits[i, 0]) + C_limits[i, 0]
C_array = C_array.tolist()
return C_array
def sampling_uniform_grid(N_each, N_params_in_task, C_limits):
""" Create list of lists of N parameters manually (make grid) uniformly distributed on given interval
:return: list of lists of sampled parameters
"""
N_params = len(C_limits)
if N_params == 1:
# C1 = np.linspace(C_limits[0, 0], C_limits[0, 1], N_each)
C1 = uniform_grid(C_limits[0], N_each)
C_array = []
for i in C1:
C_array.append([i])
else:
C = np.empty((N_params - N_params_in_task, N_each))
for i in range(N_params - N_params_in_task):
# C[i, :] = np.linspace(C_limits[i, 0], C_limits[i, 1], N_each)
C[i, :] = uniform_grid(C_limits[i], N_each)
permutation = itertools.product(*C)
C_array = list(map(list, permutation))
logging.debug('Form C_array as uniform grid: {} samples\n'.format(len(C_array)))
return C_array
|
gpl-3.0
| 2,585,686,329,948,227,000 | 30.78327 | 120 | 0.569685 | false | 3.068649 | false | false | false |
icists/ams2
|
django/registration/migrations/0001_initial.py
|
1
|
4035
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 01:44
from __future__ import unicode_literals
from decimal import Decimal
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('policy', '0006_auto_20171025_0144'),
]
operations = [
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('stage', models.CharField(choices=[('E', 'Early'), ('R', 'Regular'), ('L', 'Late')], max_length=1)),
('screening_result', models.CharField(choices=[('A', 'Accepted'), ('R', 'Rejected'), ('P', 'Pending')], default='P', max_length=1)),
('disclose_result', models.BooleanField(default=False)),
('essay_text', models.TextField(blank=True)),
('visa_letter', models.BooleanField(default=False)),
('financial_aid', models.BooleanField(default=False)),
('previous_participation', models.BooleanField(default=False)),
('last_update', models.DateTimeField(auto_now=True)),
('essay_topic', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='policy.EssayTopic')),
],
),
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, unique=True)),
],
options={
'verbose_name': 'applicant group',
},
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('preferred_currency', models.CharField(choices=[('KRW', 'KRW'), ('USD', 'USD')], default='KRW', max_length=3)),
('paid_amount_currency', djmoney.models.fields.CurrencyField(choices=[('KRW', 'KRW'), ('USD', 'USD')], default='KRW', editable=False, max_length=3)),
('paid_amount', djmoney.models.fields.MoneyField(decimal_places=0, default=Decimal('0'), default_currency='KRW', max_digits=7)),
('dietary_preferences', models.CharField(blank=True, max_length=100, null=True)),
('breakfast_option', models.BooleanField(default=False)),
('pre_option', models.BooleanField(default=False, verbose_name='pre-conference banquet')),
('post_option', models.BooleanField(default=False, verbose_name='post-conference tour')),
('accommodation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='policy.AccommodationOption')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='application',
name='group',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='registration.Group'),
),
migrations.AddField(
model_name='application',
name='topic_preference',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to='policy.ProjectTopic'),
),
migrations.AddField(
model_name='application',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='applications', to=settings.AUTH_USER_MODEL),
),
]
|
mit
| 3,841,872,372,975,416,000 | 51.402597 | 165 | 0.602974 | false | 4.117347 | false | false | false |
ijzer/cwbot-ndy
|
kol/request/UseSkillRequest.py
|
1
|
1026
|
from GenericRequest import GenericRequest
from kol.database import SkillDatabase
from kol.manager import PatternManager
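# Builds a GET request to runskillz.php that casts the given skill `numTimes` times,
# targeting `targetPlayer` when supplied and the session owner otherwise.
# Hypothetical usage (request API assumed from GenericRequest):
#   UseSkillRequest(session, skillId, numTimes=2).doRequest()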
class UseSkillRequest(GenericRequest):
def __init__(self, session, skillId, numTimes=1, targetPlayer=None):
super(UseSkillRequest, self).__init__(session)
self.get = True
self.url = session.serverURL + "runskillz.php"
self.requestData["pwd"] = session.pwd
self.requestData["action"] = "Skillz"
self.requestData["whichskill"] = skillId
self.requestData["ajax"] = 1
self.requestData["quantity"] = numTimes
if targetPlayer != None:
self.requestData["targetplayer"] = targetPlayer
else:
self.requestData["targetplayer"] = session.userId
def parseResponse(self):
resultsPattern = PatternManager.getOrCompilePattern('results')
match = resultsPattern.search(self.responseText)
if match:
results = match.group(1)
self.responseData["results"] = results
|
bsd-3-clause
| 4,853,984,224,395,065,000 | 40.04 | 72 | 0.662768 | false | 4.120482 | false | false | false |
simokivimaki/gtk
|
gtk/compose-parse.py
|
1
|
34024
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# compose-parse.py, version 1.3
#
# multifunction script that helps manage the compose sequence table in GTK+ (gtk/gtkimcontextsimple.c)
# the script produces statistics and information about the whole process, run with --help for more.
#
# You may need to switch your python installation to utf-8, if you get 'ascii' codec errors.
#
# Complain to Simos Xenitellis ([email protected], http://simos.info/blog) for this craft.
from re import findall, match, split, sub
from string import atoi
from unicodedata import normalize
from urllib import urlretrieve
from os.path import isfile, getsize
from copy import copy
import sys
import getopt
# We grab files off the web, left and right.
URL_COMPOSE = 'http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre'
URL_KEYSYMSTXT = "http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt"
URL_GDKKEYSYMSH = "http://git.gnome.org/browse/gtk%2B/plain/gdk/gdkkeysyms.h"
URL_UNICODEDATATXT = 'http://www.unicode.org/Public/5.2.0/ucd/UnicodeData.txt'
FILENAME_COMPOSE_SUPPLEMENTARY = 'gtk-compose-lookaside.txt'
# We currently support keysyms of size 2; once upstream xorg gets sorted,
# we might produce some tables with size 2 and some with size 4.
SIZEOFINT = 2
# Current max compose sequence length; in case it gets increased.
WIDTHOFCOMPOSETABLE = 5
keysymdatabase = {}
keysymunicodedatabase = {}
unicodedatabase = {}
headerfile_start = """/* GTK - The GIMP Tool Kit
* Copyright (C) 2007, 2008 GNOME Foundation
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*/
/*
* File auto-generated from script found at http://bugzilla.gnome.org/show_bug.cgi?id=321896
* using the input files
* Input : http://gitweb.freedesktop.org/?p=xorg/lib/libX11.git;a=blob_plain;f=nls/en_US.UTF-8/Compose.pre
* Input : http://www.cl.cam.ac.uk/~mgk25/ucs/keysyms.txt
* Input : http://www.unicode.org/Public/UNIDATA/UnicodeData.txt
*
* This table is optimised for space and requires special handling to access the content.
* This table is used solely by http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimple.c
*
* The resulting file is placed at http://svn.gnome.org/viewcvs/gtk%2B/trunk/gtk/gtkimcontextsimpleseqs.h
* This file is described in bug report http://bugzilla.gnome.org/show_bug.cgi?id=321896
*/
/*
* Modified by the GTK+ Team and others 2007, 2008. See the AUTHORS
* file for a list of people on the GTK+ Team. See the ChangeLog
* files for a list of changes. These files are distributed with
* GTK+ at ftp://ftp.gtk.org/pub/gtk/.
*/
#ifndef __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
#define __GTK_IM_CONTEXT_SIMPLE_SEQS_H__
/* === These are the original comments of the file; we keep for historical purposes ===
*
* The following table was generated from the X compose tables included with
* XFree86 4.0 using a set of Perl scripts. Contact Owen Taylor <[email protected]>
* to obtain the relevant perl scripts.
*
* The following compose letter letter sequences conflicted
* Dstroke/dstroke and ETH/eth; resolved to Dstroke (Croation, Vietnamese, Lappish), over
* ETH (Icelandic, Faroese, old English, IPA) [ D- -D d- -d ]
* Amacron/amacron and ordfeminine; resolved to ordfeminine [ _A A_ a_ _a ]
* Amacron/amacron and Atilde/atilde; resolved to atilde [ -A A- a- -a ]
* Omacron/Omacron and masculine; resolved to masculine [ _O O_ o_ _o ]
* Omacron/omacron and Otilde/atilde; resolved to otilde [ -O O- o- -o ]
*
* [ Amacron and Omacron are in Latin-4 (Baltic). ordfeminine and masculine are used for
* spanish. atilde and otilde are used at least for Portuguese ]
*
* at and Aring; resolved to Aring [ AA ]
* guillemotleft and caron; resolved to guillemotleft [ << ]
* ogonek and cedilla; resolved to cedilla [ ,, ]
*
* This probably should be resolved by first checking an additional set of compose tables
* that depend on the locale or selected input method.
*/
static const guint16 gtk_compose_seqs_compact[] = {"""
headerfile_end = """};
#endif /* __GTK_IM_CONTEXT_SIMPLE_SEQS_H__ */
"""
def stringtohex(str): return atoi(str, 16)
def factorial(n):
if n <= 1:
return 1
else:
return n * factorial(n-1)
def uniq(*args) :
""" Performs a uniq operation on a list or lists """
theInputList = []
for theList in args:
theInputList += theList
theFinalList = []
for elem in theInputList:
if elem not in theFinalList:
theFinalList.append(elem)
return theFinalList
def all_permutations(seq):
""" Borrowed from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252178 """
""" Produces all permutations of the items of a list """
if len(seq) <=1:
yield seq
else:
for perm in all_permutations(seq[1:]):
for i in range(len(perm)+1):
#nb str[0:1] works in both string and list contexts
yield perm[:i] + seq[0:1] + perm[i:]
def usage():
print """compose-parse available parameters:
-h, --help this craft
-s, --statistics show overall statistics (both algorithmic, non-algorithmic)
-a, --algorithmic show sequences saved with algorithmic optimisation
-g, --gtk show entries that go to GTK+
-u, --unicodedatatxt show compose sequences derived from UnicodeData.txt (from unicode.org)
-v, --verbose show verbose output
-p, --plane1 show plane1 compose sequences
-n, --numeric when used with --gtk, create file with numeric values only
-e, --gtk-expanded when used with --gtk, create file that repeats first column; not usable in GTK+
Default is to show statistics.
"""
try:
opts, args = getopt.getopt(sys.argv[1:], "pvgashune", ["help", "algorithmic", "statistics", "unicodedatatxt",
"stats", "gtk", "verbose", "plane1", "numeric", "gtk-expanded"])
except:
usage()
sys.exit(2)
opt_statistics = False
opt_algorithmic = False
opt_gtk = False
opt_unicodedatatxt = False
opt_verbose = False
opt_plane1 = False
opt_numeric = False
opt_gtkexpanded = False
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-s", "--statistics"):
opt_statistics = True
if o in ("-a", "--algorithmic"):
opt_algorithmic = True
if o in ("-g", "--gtk"):
opt_gtk = True
if o in ("-u", "--unicodedatatxt"):
opt_unicodedatatxt = True
if o in ("-v", "--verbose"):
opt_verbose = True
if o in ("-p", "--plane1"):
opt_plane1 = True
if o in ("-n", "--numeric"):
opt_numeric = True
if o in ("-e", "--gtk-expanded"):
opt_gtkexpanded = True
if not opt_algorithmic and not opt_gtk and not opt_unicodedatatxt:
opt_statistics = True
def download_hook(blocks_transferred, block_size, file_size):
""" A download hook to provide some feedback when downloading """
if blocks_transferred == 0:
if file_size > 0:
if opt_verbose:
print "Downloading", file_size, "bytes: ",
else:
if opt_verbose:
print "Downloading: ",
sys.stdout.write('#')
sys.stdout.flush()
def download_file(url):
""" Downloads a file provided a URL. Returns the filename. """
""" Borks on failure """
localfilename = url.split('/')[-1]
if not isfile(localfilename) or getsize(localfilename) <= 0:
if opt_verbose:
print "Downloading ", url, "..."
try:
urlretrieve(url, localfilename, download_hook)
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
print " done."
else:
if opt_verbose:
print "Using cached file for ", url
return localfilename
def process_gdkkeysymsh():
""" Opens the gdkkeysyms.h file from GTK+/gdk/gdkkeysyms.h """
""" Fills up keysymdb with contents """
filename_gdkkeysymsh = download_file(URL_GDKKEYSYMSH)
try:
gdkkeysymsh = open(filename_gdkkeysymsh, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the gdkkeysyms.h file and place contents in keysymdb """
linenum_gdkkeysymsh = 0
keysymdb = {}
for line in gdkkeysymsh.readlines():
linenum_gdkkeysymsh += 1
line = line.strip()
if line == "" or not match('^#define GDK_KEY_', line):
continue
components = split('\s+', line)
if len(components) < 3:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting 3 items in the line"
sys.exit(-1)
if not match('^GDK_KEY_', components[1]):
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a keysym starting with GDK_KEY_"
sys.exit(-1)
if match('^0x[0-9a-fA-F]+$', components[2]):
unival = long(components[2][2:], 16)
if unival == 0:
continue
keysymdb[components[1][4:]] = unival
else:
print "Invalid line %(linenum)d in %(filename)s: %(line)s"\
% {'linenum': linenum_gdkkeysymsh, 'filename': filename_gdkkeysymsh, 'line': line}
print "Was expecting a hexadecimal number at the end of the line"
sys.exit(-1)
gdkkeysymsh.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is^Wwas preferential treatment for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is^was preferential treatment for Greek """
#keysymdb['combining_tilde'] = 0x342
""" Fixing VoidSymbol """
keysymdb['VoidSymbol'] = 0xFFFF
return keysymdb
def process_keysymstxt():
""" Grabs and opens the keysyms.txt file that Markus Kuhn maintains """
""" This file keeps a record between keysyms <-> unicode chars """
filename_keysymstxt = download_file(URL_KEYSYMSTXT)
try:
keysymstxt = open(filename_keysymstxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the keysyms.txt file and place content in keysymdb """
linenum_keysymstxt = 0
keysymdb = {}
for line in keysymstxt.readlines():
linenum_keysymstxt += 1
line = line.strip()
if line == "" or match('^#', line):
continue
components = split('\s+', line)
if len(components) < 5:
print "Invalid line %(linenum)d in %(filename)s: %(line)s'"\
% {'linenum': linenum_keysymstxt, 'filename': filename_keysymstxt, 'line': line}
print "Was expecting 5 items in the line"
sys.exit(-1)
if match('^U[0-9a-fA-F]+$', components[1]):
unival = long(components[1][1:], 16)
if unival == 0:
continue
keysymdb[components[4]] = unival
keysymstxt.close()
""" Patch up the keysymdb with some of our own stuff """
""" This is for a missing keysym from the currently upstream file """
###keysymdb['dead_belowring'] = 0x323
###keysymdb['dead_belowmacron'] = 0x331
###keysymdb['dead_belowcircumflex'] = 0x32d
###keysymdb['dead_belowtilde'] = 0x330
###keysymdb['dead_belowbreve'] = 0x32e
###keysymdb['dead_belowdiaeresis'] = 0x324
""" This is preferential treatment for Greek """
""" => we get more savings if used for Greek """
# keysymdb['dead_tilde'] = 0x342
""" This is preferential treatment for Greek """
# keysymdb['combining_tilde'] = 0x342
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['dead_stroke'] = 0x338
""" This is for a missing keysym from Markus Kuhn's db """
keysymdb['Oslash'] = 0x0d8
""" This is for a missing (recently added) keysym """
keysymdb['dead_psili'] = 0x313
""" This is for a missing (recently added) keysym """
keysymdb['dead_dasia'] = 0x314
""" Allows to import Multi_key sequences """
keysymdb['Multi_key'] = 0xff20
keysymdb['zerosubscript'] = 0x2080
keysymdb['onesubscript'] = 0x2081
keysymdb['twosubscript'] = 0x2082
keysymdb['threesubscript'] = 0x2083
keysymdb['foursubscript'] = 0x2084
keysymdb['fivesubscript'] = 0x2085
keysymdb['sixsubscript'] = 0x2086
keysymdb['sevensubscript'] = 0x2087
keysymdb['eightsubscript'] = 0x2088
keysymdb['ninesubscript'] = 0x2089
keysymdb['dead_doublegrave'] = 0x030F
keysymdb['dead_invertedbreve'] = 0x0311
return keysymdb
def keysymvalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymdatabase.has_key(keysym):
return keysymdatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymvalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
#return -1
sys.exit(-1)
def keysymunicodevalue(keysym, file = "n/a", linenum = 0):
""" Extracts a value from the keysym """
""" Find the value of keysym, using the data from keysyms """
""" Use file and linenum to when reporting errors """
if keysym == "":
return 0
if keysymunicodedatabase.has_key(keysym):
return keysymunicodedatabase[keysym]
elif keysym[0] == 'U' and match('[0-9a-fA-F]+$', keysym[1:]):
return atoi(keysym[1:], 16)
elif keysym[:2] == '0x' and match('[0-9a-fA-F]+$', keysym[2:]):
return atoi(keysym[2:], 16)
else:
print 'keysymunicodevalue: UNKNOWN{%(keysym)s}' % { "keysym": keysym }
sys.exit(-1)
def rename_combining(seq):
filtered_sequence = []
for ks in seq:
if findall('^combining_', ks):
ks = sub('^combining_', 'dead_', ks)
if ks == 'dead_double_grave':
ks = 'dead_doublegrave'
if ks == 'dead_inverted_breve':
ks = 'dead_invertedbreve'
filtered_sequence.append(ks)
return filtered_sequence
keysymunicodedatabase = process_keysymstxt()
keysymdatabase = process_gdkkeysymsh()
""" Grab and open the compose file from upstream """
filename_compose = download_file(URL_COMPOSE)
try:
composefile = open(filename_compose, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Look if there is a lookaside (supplementary) compose file in the current
directory, and if so, open, then merge with upstream Compose file.
"""
xorg_compose_sequences_raw = []
for seq in composefile.readlines():
xorg_compose_sequences_raw.append(seq)
try:
composefile_lookaside = open(FILENAME_COMPOSE_SUPPLEMENTARY, 'r')
for seq in composefile_lookaside.readlines():
xorg_compose_sequences_raw.append(seq)
except IOError, (errno, strerror):
if opt_verbose:
print "I/O error(%s): %s" % (errno, strerror)
print "Did not find lookaside compose file. Continuing..."
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
""" Parse the compose file in xorg_compose_sequences"""
xorg_compose_sequences = []
xorg_compose_sequences_algorithmic = []
linenum_compose = 0
comment_nest_depth = 0
for line in xorg_compose_sequences_raw:
linenum_compose += 1
line = line.strip()
if match("^XCOMM", line) or match("^#", line):
continue
line = sub(r"\/\*([^\*]*|[\*][^/])\*\/", "", line)
comment_start = line.find("/*")
if comment_start >= 0:
if comment_nest_depth == 0:
line = line[:comment_start]
else:
line = ""
comment_nest_depth += 1
else:
comment_end = line.find("*/")
if comment_end >= 0:
comment_nest_depth -= 1
if comment_nest_depth < 0:
print "Invalid comment %(linenum_compose)d in %(filename)s: \
Closing '*/' without opening '/*'" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
if comment_nest_depth > 0:
line = ""
else:
line = line[comment_end + 2:]
if line == "":
continue
#line = line[:-1]
components = split(':', line)
if len(components) != 2:
print "Invalid line %(linenum_compose)d in %(filename)s: No sequence\
/value pair found" % { "linenum_compose": linenum_compose, "filename": filename_compose }
exit(-1)
(seq, val ) = split(':', line)
seq = seq.strip()
val = val.strip()
raw_sequence = findall('\w+', seq)
values = split('\s+', val)
unichar_temp = split('"', values[0])
unichar = unichar_temp[1]
if len(values) == 1:
continue
codepointstr = values[1]
if values[1] == '#':
# No codepoints that are >1 characters yet.
continue
if raw_sequence[0][0] == 'U' and match('[0-9a-fA-F]+$', raw_sequence[0][1:]):
raw_sequence[0] = '0x' + raw_sequence[0][1:]
if match('^U[0-9a-fA-F]+$', codepointstr):
codepoint = long(codepointstr[1:], 16)
elif keysymunicodedatabase.has_key(codepointstr):
#if keysymdatabase[codepointstr] != keysymunicodedatabase[codepointstr]:
#print "DIFFERENCE: 0x%(a)X 0x%(b)X" % { "a": keysymdatabase[codepointstr], "b": keysymunicodedatabase[codepointstr]},
#print raw_sequence, codepointstr
codepoint = keysymunicodedatabase[codepointstr]
else:
print
print "Invalid codepoint at line %(linenum_compose)d in %(filename)s:\
%(line)s" % { "linenum_compose": linenum_compose, "filename": filename_compose, "line": line }
exit(-1)
sequence = rename_combining(raw_sequence)
reject_this = False
for i in sequence:
if keysymvalue(i) > 0xFFFF:
reject_this = True
if opt_plane1:
print sequence
break
if keysymvalue(i) < 0:
reject_this = True
break
if reject_this:
continue
if "U0342" in sequence or \
"U0313" in sequence or \
"U0314" in sequence or \
"0x0313" in sequence or \
"0x0342" in sequence or \
"0x0314" in sequence:
continue
if "dead_belowring" in sequence or\
"dead_currency" in sequence or\
"dead_belowcomma" in sequence or\
"dead_belowmacron" in sequence or\
"dead_belowtilde" in sequence or\
"dead_belowbreve" in sequence or\
"dead_belowdiaeresis" in sequence or\
"dead_belowcircumflex" in sequence:
continue
#for i in range(len(sequence)):
# if sequence[i] == "0x0342":
# sequence[i] = "dead_tilde"
if "Multi_key" not in sequence:
""" Ignore for now >0xFFFF keysyms """
if codepoint < 0xFFFF:
original_sequence = copy(sequence)
stats_sequence = copy(sequence)
base = sequence.pop()
basechar = keysymvalue(base, filename_compose, linenum_compose)
if basechar < 0xFFFF:
counter = 1
unisequence = []
not_normalised = True
skipping_this = False
for i in range(0, len(sequence)):
""" If the sequence has dead_tilde and is for Greek, we don't do algorithmically
because of lack of dead_perispomeni (i.e. conflict)
"""
bc = basechar
"""if sequence[-1] == "dead_tilde" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_horn" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_ogonek" and (bc >= 0x370 and bc <= 0x3ff) or (bc >= 0x1f00 and bc <= 0x1fff):
skipping_this = True
break
if sequence[-1] == "dead_psili":
sequence[i] = "dead_horn"
if sequence[-1] == "dead_dasia":
sequence[-1] = "dead_ogonek"
"""
unisequence.append(unichr(keysymunicodevalue(sequence.pop(), filename_compose, linenum_compose)))
if skipping_this:
unisequence = []
for perm in all_permutations(unisequence):
# print counter, original_sequence, unichr(basechar) + "".join(perm)
# print counter, map(unichr, perm)
normalized = normalize('NFC', unichr(basechar) + "".join(perm))
if len(normalized) == 1:
# print 'Base: %(base)s [%(basechar)s], produces [%(unichar)s] (0x%(codepoint)04X)' \
# % { "base": base, "basechar": unichr(basechar), "unichar": unichar, "codepoint": codepoint },
# print "Normalized: [%(normalized)s] SUCCESS %(c)d" % { "normalized": normalized, "c": counter }
stats_sequence_data = map(keysymunicodevalue, stats_sequence)
stats_sequence_data.append(normalized)
xorg_compose_sequences_algorithmic.append(stats_sequence_data)
not_normalised = False
break;
counter += 1
if not_normalised:
original_sequence.append(codepoint)
xorg_compose_sequences.append(original_sequence)
""" print xorg_compose_sequences[-1] """
else:
print "Error in base char !?!"
exit(-2)
else:
print "OVER", sequence
exit(-1)
else:
sequence.append(codepoint)
xorg_compose_sequences.append(sequence)
""" print xorg_compose_sequences[-1] """
def sequence_cmp(x, y):
if keysymvalue(x[0]) > keysymvalue(y[0]):
return 1
elif keysymvalue(x[0]) < keysymvalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymvalue(x[1]) > keysymvalue(y[1]):
return 1
elif keysymvalue(x[1]) < keysymvalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymvalue(x[2]) > keysymvalue(y[2]):
return 1
elif keysymvalue(x[2]) < keysymvalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymvalue(x[3]) > keysymvalue(y[3]):
return 1
elif keysymvalue(x[3]) < keysymvalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymvalue(x[4]) > keysymvalue(y[4]):
return 1
elif keysymvalue(x[4]) < keysymvalue(y[4]):
return -1
else:
return 0
def sequence_unicode_cmp(x, y):
if keysymunicodevalue(x[0]) > keysymunicodevalue(y[0]):
return 1
elif keysymunicodevalue(x[0]) < keysymunicodevalue(y[0]):
return -1
elif len(x) > len(y):
return 1
elif len(x) < len(y):
return -1
elif keysymunicodevalue(x[1]) > keysymunicodevalue(y[1]):
return 1
elif keysymunicodevalue(x[1]) < keysymunicodevalue(y[1]):
return -1
elif len(x) < 4:
return 0
elif keysymunicodevalue(x[2]) > keysymunicodevalue(y[2]):
return 1
elif keysymunicodevalue(x[2]) < keysymunicodevalue(y[2]):
return -1
elif len(x) < 5:
return 0
elif keysymunicodevalue(x[3]) > keysymunicodevalue(y[3]):
return 1
elif keysymunicodevalue(x[3]) < keysymunicodevalue(y[3]):
return -1
elif len(x) < 6:
return 0
elif keysymunicodevalue(x[4]) > keysymunicodevalue(y[4]):
return 1
elif keysymunicodevalue(x[4]) < keysymunicodevalue(y[4]):
return -1
else:
return 0
def sequence_algorithmic_cmp(x, y):
if len(x) < len(y):
return -1
elif len(x) > len(y):
return 1
else:
for i in range(len(x)):
if x[i] < y[i]:
return -1
elif x[i] > y[i]:
return 1
return 0
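# Sort the parsed sequences, collapse duplicates that map to the same Unicode sequence, and
# gather the counters (Multi_key entries, algorithmic Greek coverage) used by the
# --statistics output.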
xorg_compose_sequences.sort(sequence_cmp)
xorg_compose_sequences_uniqued = []
first_time = True
item = None
for next_item in xorg_compose_sequences:
if first_time:
first_time = False
item = next_item
if sequence_unicode_cmp(item, next_item) != 0:
xorg_compose_sequences_uniqued.append(item)
item = next_item
xorg_compose_sequences = copy(xorg_compose_sequences_uniqued)
counter_multikey = 0
for item in xorg_compose_sequences:
if findall('Multi_key', "".join(item[:-1])) != []:
counter_multikey += 1
xorg_compose_sequences_algorithmic.sort(sequence_algorithmic_cmp)
xorg_compose_sequences_algorithmic_uniqued = uniq(xorg_compose_sequences_algorithmic)
firstitem = ""
num_first_keysyms = 0
zeroes = 0
num_entries = 0
num_algorithmic_greek = 0
for sequence in xorg_compose_sequences:
if keysymvalue(firstitem) != keysymvalue(sequence[0]):
firstitem = sequence[0]
num_first_keysyms += 1
zeroes += 6 - len(sequence) + 1
num_entries += 1
for sequence in xorg_compose_sequences_algorithmic_uniqued:
ch = ord(sequence[-1:][0])
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
num_algorithmic_greek += 1
if opt_algorithmic:
for sequence in xorg_compose_sequences_algorithmic_uniqued:
letter = "".join(sequence[-1:])
print '0x%(cp)04X, %(uni)c, seq: [ <0x%(base)04X>,' % { 'cp': ord(unicode(letter)), 'uni': letter, 'base': sequence[-2] },
for elem in sequence[:-2]:
print "<0x%(keysym)04X>," % { 'keysym': elem },
""" Yeah, verified... We just want to keep the output similar to -u, so we can compare/sort easily """
print "], recomposed as", letter, "verified"
def num_of_keysyms(seq):
return len(seq) - 1
def convert_UnotationToHex(arg):
if isinstance(arg, str):
if match('^U[0-9A-F][0-9A-F][0-9A-F][0-9A-F]$', arg):
return sub('^U', '0x', arg)
return arg
def addprefix_GDK(arg):
if match('^0x', arg):
return '%(arg)s, ' % { 'arg': arg }
else:
return 'GDK_KEY_%(arg)s, ' % { 'arg': arg }
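# --gtk output: first emit a compact index block (one row per distinct first keysym with
# running offsets per sequence length), then the sequence/codepoint data those offsets
# point into, wrapped in headerfile_start / headerfile_end.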
if opt_gtk:
first_keysym = ""
sequence = []
compose_table = []
ct_second_part = []
ct_sequence_width = 2
start_offset = num_first_keysyms * (WIDTHOFCOMPOSETABLE+1)
we_finished = False
counter = 0
sequence_iterator = iter(xorg_compose_sequences)
sequence = sequence_iterator.next()
while True:
first_keysym = sequence[0] # Set the first keysym
compose_table.append([first_keysym, 0, 0, 0, 0, 0])
while sequence[0] == first_keysym:
compose_table[counter][num_of_keysyms(sequence)-1] += 1
try:
sequence = sequence_iterator.next()
except StopIteration:
we_finished = True
break
if we_finished:
break
counter += 1
ct_index = start_offset
for line_num in range(len(compose_table)):
for i in range(WIDTHOFCOMPOSETABLE):
occurences = compose_table[line_num][i+1]
compose_table[line_num][i+1] = ct_index
ct_index += occurences * (i+2)
for sequence in xorg_compose_sequences:
ct_second_part.append(map(convert_UnotationToHex, sequence))
print headerfile_start
for i in compose_table:
if opt_gtkexpanded:
print "0x%(ks)04X," % { "ks": keysymvalue(i[0]) },
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i[1:])) }
elif not match('^0x', i[0]):
print 'GDK_KEY_%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
else:
print '%(str)s' % { 'str': "".join(map(lambda x : str(x) + ", ", i)) }
for i in ct_second_part:
if opt_numeric:
for ks in i[1:][:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
for ks in i[:-1]:
print '0x%(seq)04X, ' % { 'seq': keysymvalue(ks) },
print '0x%(cp)04X, ' % { 'cp':i[-1] }
"""
elif opt_gtkexpanded:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1])), 'cp':i[-1] }
else:
print '%(seq)s0x%(cp)04X, ' % { 'seq': "".join(map(addprefix_GDK, i[:-1][1:])), 'cp':i[-1] }
print headerfile_end
def redecompose(codepoint):
(name, decomposition, combiningclass) = unicodedatabase[codepoint]
if decomposition[0] == '' or decomposition[0] == '0':
return [codepoint]
if match('<\w+>', decomposition[0]):
numdecomposition = map(stringtohex, decomposition[1:])
return map(redecompose, numdecomposition)
numdecomposition = map(stringtohex, decomposition)
return map(redecompose, numdecomposition)
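# Sketch of the recursion (codepoints assumed from UnicodeData.txt): a
# precomposed letter such as U+1E09 (c with cedilla and acute) redecomposes via
# U+00E7 down to the base letter 0x0063 plus the combining marks 0x0327 and
# 0x0301; the nested lists returned here are flattened into decomposedsequence
# by process_unicodedata_file() below.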
def process_unicodedata_file(verbose = False):
""" Grab from wget http://www.unicode.org/Public/UNIDATA/UnicodeData.txt """
filename_unicodedatatxt = download_file(URL_UNICODEDATATXT)
try:
unicodedatatxt = open(filename_unicodedatatxt, 'r')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
sys.exit(-1)
except:
print "Unexpected error: ", sys.exc_info()[0]
sys.exit(-1)
for line in unicodedatatxt.readlines():
if line[0] == "" or line[0] == '#':
continue
line = line[:-1]
uniproperties = split(';', line)
codepoint = stringtohex(uniproperties[0])
""" We don't do Plane 1 or CJK blocks. The latter require reading additional files. """
if codepoint > 0xFFFF or (codepoint >= 0x4E00 and codepoint <= 0x9FFF) or (codepoint >= 0xF900 and codepoint <= 0xFAFF):
continue
name = uniproperties[1]
category = uniproperties[2]
combiningclass = uniproperties[3]
decomposition = uniproperties[5]
unicodedatabase[codepoint] = [name, split('\s+', decomposition), combiningclass]
counter_combinations = 0
counter_combinations_greek = 0
counter_entries = 0
counter_entries_greek = 0
for item in unicodedatabase.keys():
(name, decomposition, combiningclass) = unicodedatabase[item]
if decomposition[0] == '':
continue
print name, "is empty"
elif match('<\w+>', decomposition[0]):
continue
print name, "has weird", decomposition[0]
else:
sequence = map(stringtohex, decomposition)
chrsequence = map(unichr, sequence)
normalized = normalize('NFC', "".join(chrsequence))
""" print name, sequence, "Combining: ", "".join(chrsequence), normalized, len(normalized), """
decomposedsequence = []
for subseq in map(redecompose, sequence):
for seqitem in subseq:
if isinstance(seqitem, list):
for i in seqitem:
if isinstance(i, list):
for j in i:
decomposedsequence.append(j)
else:
decomposedsequence.append(i)
else:
decomposedsequence.append(seqitem)
recomposedchar = normalize('NFC', "".join(map(unichr, decomposedsequence)))
if len(recomposedchar) == 1 and len(decomposedsequence) > 1:
counter_entries += 1
counter_combinations += factorial(len(decomposedsequence)-1)
ch = item
if ch >= 0x370 and ch <= 0x3ff or ch >= 0x1f00 and ch <= 0x1fff:
counter_entries_greek += 1
counter_combinations_greek += factorial(len(decomposedsequence)-1)
if verbose:
print "0x%(cp)04X, %(uni)c, seq:" % { 'cp':item, 'uni':unichr(item) },
print "[",
for elem in decomposedsequence:
print '<0x%(hex)04X>,' % { 'hex': elem },
print "], recomposed as", recomposedchar,
if unichr(item) == recomposedchar:
print "verified"
if verbose == False:
print "Unicode statistics from UnicodeData.txt"
print "Number of entries that can be algorithmically produced :", counter_entries
print " of which are for Greek :", counter_entries_greek
print "Number of compose sequence combinations requiring :", counter_combinations
print " of which are for Greek :", counter_combinations_greek
print "Note: We do not include partial compositions, "
print "thus the slight discrepancy in the figures"
print
if opt_unicodedatatxt:
process_unicodedata_file(True)
if opt_statistics:
print
print "Total number of compose sequences (from file) :", len(xorg_compose_sequences) + len(xorg_compose_sequences_algorithmic)
print " of which can be expressed algorithmically :", len(xorg_compose_sequences_algorithmic)
print " of which cannot be expressed algorithmically :", len(xorg_compose_sequences)
print " of which have Multi_key :", counter_multikey
print
print "Algorithmic (stats for Xorg Compose file)"
print "Number of sequences off due to algo from file (len(array)) :", len(xorg_compose_sequences_algorithmic)
print "Number of sequences off due to algo (uniq(sort(array))) :", len(xorg_compose_sequences_algorithmic_uniqued)
print " of which are for Greek :", num_algorithmic_greek
print
process_unicodedata_file()
print "Not algorithmic (stats from Xorg Compose file)"
print "Number of sequences :", len(xorg_compose_sequences)
print "Flat array looks like :", len(xorg_compose_sequences), "rows of 6 integers (2 bytes per int, or 12 bytes per row)"
print "Flat array would have taken up (in bytes) :", num_entries * 2 * 6, "bytes from the GTK+ library"
print "Number of items in flat array :", len(xorg_compose_sequences) * 6
print " of which are zeroes :", zeroes, "or ", (100 * zeroes) / (len(xorg_compose_sequences) * 6), " per cent"
print "Number of different first items :", num_first_keysyms
print "Number of max bytes (if using flat array) :", num_entries * 2 * 6
print "Number of savings :", zeroes * 2 - num_first_keysyms * 2 * 5
print
print "Memory needs if both algorithmic+optimised table in latest Xorg compose file"
print " :", num_entries * 2 * 6 - zeroes * 2 + num_first_keysyms * 2 * 5
print
print "Existing (old) implementation in GTK+"
print "Number of sequences in old gtkimcontextsimple.c :", 691
print "The existing (old) implementation in GTK+ takes up :", 691 * 2 * 12, "bytes"
|
lgpl-2.1
| 2,910,440,404,792,069,000 | 34.258031 | 159 | 0.649688 | false | 2.999295 | false | false | false |
wujuguang/motor
|
test/tornado_tests/test_motor_transaction.py
|
1
|
18586
|
# Copyright 2018-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import collections
import os
import re
from bson import json_util
from bson.json_util import JSONOptions
from pymongo.read_concern import ReadConcern
from pymongo.results import (BulkWriteResult,
InsertManyResult,
InsertOneResult,
UpdateResult, DeleteResult)
from motor.motor_tornado import (MotorCommandCursor,
MotorCursor,
MotorLatentCommandCursor)
from test.utils import TestListener
from test.version import Version
"""Test Motor, an asynchronous driver for MongoDB and Tornado."""
import unittest
from pymongo import (client_session,
operations,
read_preferences,
ReadPreference,
WriteConcern)
from tornado import gen
from pymongo.errors import OperationFailure, PyMongoError
from tornado.testing import gen_test
from test import SkipTest
from test.test_environment import env
from test.tornado_tests import MotorTest
# Location of JSON test specifications.
_TEST_PATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '../json/transactions')
_TXN_TESTS_DEBUG = os.environ.get('TRANSACTION_TESTS_DEBUG')
def camel_to_snake(camel):
# Regex to convert CamelCase to snake_case.
snake = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', camel)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', snake).lower()
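# Illustrative only (argument names assumed): camel_to_snake("readPreference")
# returns "read_preference" and camel_to_snake("maxStalenessSeconds") returns
# "max_staleness_seconds".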
def camel_to_upper_camel(camel):
return camel[0].upper() + camel[1:]
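# Illustrative only: camel_to_upper_camel("replaceOne") returns "ReplaceOne",
# which is how bulk request names are looked up on pymongo.operations in
# run_operation() below.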
def camel_to_snake_args(arguments):
for arg_name in list(arguments):
c2s = camel_to_snake(arg_name)
arguments[c2s] = arguments.pop(arg_name)
return arguments
def parse_read_preference(pref):
# Make first letter lowercase to match read_pref's modes.
mode_string = pref.get('mode', 'primary')
mode_string = mode_string[:1].lower() + mode_string[1:]
mode = read_preferences.read_pref_mode_from_name(mode_string)
max_staleness = pref.get('maxStalenessSeconds', -1)
tag_sets = pref.get('tag_sets')
return read_preferences.make_read_preference(
mode, tag_sets=tag_sets, max_staleness=max_staleness)
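# Rough sketch (example document assumed, not from a spec test):
# parse_read_preference({'mode': 'secondaryPreferred'}) should build a
# SecondaryPreferred read preference with no tag sets and the default
# maxStalenessSeconds of -1.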
def parse_opts(opts):
parsed = {}
if 'readPreference' in opts:
parsed['read_preference'] = parse_read_preference(
opts.pop('readPreference'))
if 'writeConcern' in opts:
parsed['write_concern'] = WriteConcern(**opts.pop('writeConcern'))
if 'readConcern' in opts:
parsed['read_concern'] = ReadConcern(**opts.pop('readConcern'))
return parsed
def parse_args(args, sessions):
parsed = parse_opts(args)
if 'session' in args:
assert sessions is not None
parsed['session'] = sessions[args.pop('session')]
return parsed
class MotorTransactionTest(MotorTest):
@classmethod
def setUpClass(cls):
super(MotorTransactionTest, cls).setUpClass()
if not env.sessions_enabled:
raise SkipTest("Sessions not supported")
if not env.is_replica_set:
raise SkipTest("Requires a replica set")
if env.version < Version(3, 7):
raise SkipTest("Requires MongoDB 3.7+")
def transaction_test_debug(self, msg):
if _TXN_TESTS_DEBUG:
print(msg)
def check_result(self, expected_result, result):
write_results = (BulkWriteResult,
DeleteResult,
InsertOneResult,
InsertManyResult,
UpdateResult)
if isinstance(result, write_results):
for res in expected_result:
prop = camel_to_snake(res)
# SPEC-869: Only BulkWriteResult has upserted_count.
if (prop == "upserted_count"
and not isinstance(result, BulkWriteResult)):
if result.upserted_id is not None:
upserted_count = 1
else:
upserted_count = 0
self.assertEqual(upserted_count, expected_result[res], prop)
elif prop == "inserted_ids":
# BulkWriteResult does not have inserted_ids.
if isinstance(result, BulkWriteResult):
self.assertEqual(len(expected_result[res]),
result.inserted_count)
else:
# InsertManyResult may be compared to [id1] from the
# crud spec or {"0": id1} from the retryable write spec.
ids = expected_result[res]
if isinstance(ids, dict):
ids = [ids[str(i)] for i in range(len(ids))]
self.assertEqual(ids, result.inserted_ids, prop)
elif prop == "upserted_ids":
# Convert indexes from strings to integers.
ids = expected_result[res]
expected_ids = {}
for str_index in ids:
expected_ids[int(str_index)] = ids[str_index]
self.assertEqual(expected_ids, result.upserted_ids, prop)
else:
self.assertEqual(
getattr(result, prop), expected_result[res], prop)
return True
elif isinstance(result, dict):
for k, v in expected_result.items():
self.assertEqual(v, result[k])
else:
self.assertEqual(expected_result, result)
@gen.coroutine
def run_operation(self, sessions, collection, operation):
name = camel_to_snake(operation['name'])
if name == 'run_command':
name = 'command'
self.transaction_test_debug(name)
collection_opts = operation.get('collectionOptions')
if collection_opts:
collection = collection.with_options(**parse_opts(collection_opts))
obj = {
'collection': collection,
'database': collection.database,
'session0': sessions['session0'],
'session1': sessions['session1'],
}[operation['object']]
# Combine arguments with options and handle special cases.
arguments = operation['arguments']
arguments.update(arguments.pop("options", {}))
kwargs = parse_args(arguments, sessions)
for arg_name, arg_value in arguments.items():
c2s = camel_to_snake(arg_name)
if arg_name == "sort":
assert len(arg_value) == 1, 'test can only have 1 sort key'
kwargs[arg_name] = list(arg_value.items())
# Named "key" instead not fieldName.
elif arg_name == "fieldName":
kwargs["key"] = arg_value
# Aggregate uses "batchSize", while find uses batch_size.
elif arg_name == "batchSize" and name == "aggregate":
kwargs["batchSize"] = arg_value
# Requires boolean returnDocument.
elif arg_name == "returnDocument":
kwargs[c2s] = (arg_value == "After")
elif c2s == "requests":
# Parse each request into a bulk write model.
requests = []
for request in arg_value:
bulk_model = camel_to_upper_camel(request["name"])
bulk_class = getattr(operations, bulk_model)
bulk_arguments = camel_to_snake_args(request["arguments"])
requests.append(bulk_class(**bulk_arguments))
kwargs["requests"] = requests
else:
kwargs[c2s] = arg_value
cmd = getattr(obj, name)
result = cmd(**kwargs)
try:
result = gen.convert_yielded(result)
except gen.BadYieldError:
# Not an async method.
pass
else:
result = yield result
cursor_types = MotorCursor, MotorCommandCursor, MotorLatentCommandCursor
if isinstance(result, cursor_types):
result = yield result.to_list(length=None)
raise gen.Return(result)
def check_events(self, test, listener, session_ids):
res = listener.results
if not len(test['expectations']):
return
self.assertEqual(len(res['started']), len(test['expectations']))
for i, expectation in enumerate(test['expectations']):
event_type = next(iter(expectation))
event = res['started'][i]
# The tests substitute 42 for any number other than 0.
if (event.command_name == 'getMore'
and event.command['getMore']):
event.command['getMore'] = 42
elif event.command_name == 'killCursors':
event.command['cursors'] = [42]
# Replace afterClusterTime: 42 with actual afterClusterTime.
expected_cmd = expectation[event_type]['command']
expected_read_concern = expected_cmd.get('readConcern')
if expected_read_concern is not None:
time = expected_read_concern.get('afterClusterTime')
if time == 42:
actual_time = event.command.get(
'readConcern', {}).get('afterClusterTime')
if actual_time is not None:
expected_read_concern['afterClusterTime'] = actual_time
# Replace lsid with a name like "session0" to match test.
if 'lsid' in event.command:
for name, lsid in session_ids.items():
if event.command['lsid'] == lsid:
event.command['lsid'] = name
break
for attr, expected in expectation[event_type].items():
actual = getattr(event, attr)
if isinstance(expected, dict):
for key, val in expected.items():
if val is None:
if key in actual:
self.fail("Unexpected key [%s] in %r" % (
key, actual))
elif key not in actual:
self.fail("Expected key [%s] in %r" % (
key, actual))
else:
self.assertEqual(val, actual[key],
"Key [%s] in %s" % (key, actual))
else:
self.assertEqual(actual, expected)
def expect_error(expected_result):
if isinstance(expected_result, dict):
return set(expected_result.keys()).intersection((
'errorContains', 'errorCodeName', 'errorLabelsContain',
'errorLabelsOmit'))
return False
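# Sketch with assumed inputs: expect_error({'errorCodeName': 'WriteConflict'})
# is truthy (the operation is expected to fail), while a plain result document
# such as {'n': 1} leaves it falsy.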
def end_sessions(sessions):
for s in sessions.values():
# Aborts the transaction if it's open.
s.end_session()
def create_test(scenario_def, test):
@gen_test
def run_scenario(self):
listener = TestListener()
# New client, to avoid interference from pooled sessions.
client = self.motor_rsc(event_listeners=[listener],
**test['clientOptions'])
try:
yield client.admin.command('killAllSessions', [])
except OperationFailure:
# "operation was interrupted" by killing the command's own session.
pass
if test['failPoint']:
yield client.admin.command(test['failPoint'])
database_name = scenario_def['database_name']
collection_name = scenario_def['collection_name']
write_concern_db = client.get_database(
database_name, write_concern=WriteConcern(w='majority'))
write_concern_coll = write_concern_db[collection_name]
yield write_concern_coll.drop()
yield write_concern_db.create_collection(collection_name)
if scenario_def['data']:
# Load data.
yield write_concern_coll.insert_many(scenario_def['data'])
# Create session0 and session1.
sessions = {}
session_ids = {}
for i in range(2):
session_name = 'session%d' % i
opts = camel_to_snake_args(test['sessionOptions'][session_name])
if 'default_transaction_options' in opts:
txn_opts = opts['default_transaction_options']
if 'readConcern' in txn_opts:
read_concern = ReadConcern(**txn_opts['readConcern'])
else:
read_concern = None
if 'writeConcern' in txn_opts:
write_concern = WriteConcern(**txn_opts['writeConcern'])
else:
write_concern = None
if 'readPreference' in txn_opts:
read_pref = parse_read_preference(
txn_opts['readPreference'])
else:
read_pref = None
txn_opts = client_session.TransactionOptions(
read_concern=read_concern,
write_concern=write_concern,
read_preference=read_pref,
)
opts['default_transaction_options'] = txn_opts
s = yield client.start_session(**opts)
sessions[session_name] = s
# Store lsid so we can access it after end_session, in check_events.
session_ids[session_name] = s.session_id
self.addCleanup(end_sessions, sessions)
listener.results.clear()
collection = client[database_name][collection_name]
for op in test['operations']:
expected_result = op.get('result')
if expect_error(expected_result):
with self.assertRaises(PyMongoError,
msg=op.get('name')) as context:
yield self.run_operation(sessions, collection, op.copy())
err = context.exception
if expected_result['errorContains']:
self.assertIn(expected_result['errorContains'].lower(),
str(err).lower())
if expected_result['errorCodeName']:
self.assertEqual(expected_result['errorCodeName'],
err.details.get('codeName'))
for label in expected_result.get('errorLabelsContain', []):
self.assertTrue(
err.has_error_label(label),
"%r should have errorLabel %s" % (err, label))
for label in expected_result.get('errorLabelsOmit', []):
self.assertFalse(
err.has_error_label(label),
"%r should NOT have errorLabel %s" % (err, label))
else:
result = yield self.run_operation(
sessions, collection, op.copy())
if 'result' in op:
self.check_result(expected_result, result)
for s in sessions.values():
yield s.end_session()
self.check_events(test, listener, session_ids)
# Assert final state is expected.
expected = test['outcome'].get('collection')
if expected is not None:
# Read from the primary to ensure causal consistency.
primary_coll = collection.with_options(
read_preference=ReadPreference.PRIMARY)
docs = yield primary_coll.find().to_list(length=None)
self.assertEqual(expected['data'], docs)
return run_scenario
class ScenarioDict(collections.OrderedDict):
"""Dict that returns {} for any unknown key, recursively."""
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
# Unlike a defaultdict, don't set the key, just return a dict.
return ScenarioDict({})
def copy(self):
return ScenarioDict(self)
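# Sketch: ScenarioDict()['outcome']['collection'] quietly yields {} instead of
# raising KeyError, which is what lets create_test() index optional sections of
# the scenario JSON without guarding every lookup.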
def create_tests():
assert os.path.isdir(_TEST_PATH)
for dirpath, _, filenames in os.walk(_TEST_PATH):
dirname = os.path.split(dirpath)[-1]
for filename in filenames:
test_type, ext = os.path.splitext(filename)
if ext != '.json':
continue
with open(os.path.join(dirpath, filename)) as scenario_stream:
opts = JSONOptions(document_class=ScenarioDict)
scenario_def = json_util.loads(
scenario_stream.read(), json_options=opts)
# Construct test from scenario.
for test in scenario_def['tests']:
test_name = 'test_%s_%s_%s' % (
dirname,
test_type.replace("-", "_"),
str(test['description'].replace(" ", "_")))
new_test = create_test(scenario_def, test)
new_test = env.require(
lambda: not test.get('skipReason'),
test.get('skipReason'),
new_test)
if test_type == 'reads' and test['description'] == 'count':
new_test = env.require(
lambda: False,
"Motor has removed the 'count' helper",
new_test)
if 'secondary' in test_name:
new_test = env.require(
lambda: env.secondaries,
'No secondaries',
new_test)
                # In Python 2, cast test_name from unicode to str.
new_test.__name__ = str(test_name)
setattr(MotorTransactionTest, new_test.__name__, new_test)
create_tests()
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| 9,086,862,932,157,388,000 | 36.930612 | 80 | 0.546648 | false | 4.51226 | true | false | false |
SEL-Columbia/commcare-hq
|
corehq/apps/cleanup/management/commands/check_case_integrity.py
|
1
|
5062
|
from collections import defaultdict
from optparse import make_option
from django.core.management.base import BaseCommand
from casexml.apps.case.cleanup import rebuild_case
from casexml.apps.case.models import CommCareCase
from corehq.elastic import stream_es_query, ES_URLS, ADD_TO_ES_FILTER
import dateutil.parser as dparser
import csv
import logging
from dimagi.utils.chunked import chunked
logger = logging.getLogger(__name__)
def forms_with_cases(domain=None, since=None, chunksize=500):
q = {"filter": {"and": [{"bool": {
"must_not": {
"missing": {
"field": "__retrieved_case_ids",
"existence": True,
"null_value": True}}}}]}}
q["sort"] = [{"domain.exact" : {"order": "asc"}}]
params={"domain.exact": domain} if domain else {}
if since:
q["filter"]["and"][0]["bool"]["must"] = {
"range": {
"received_on": {"from": since.strftime("%Y-%m-%d")}}}
q["filter"]["and"].extend(ADD_TO_ES_FILTER["forms"][:])
return stream_es_query(params=params, q=q, es_url=ES_URLS["forms"],
fields=["__retrieved_case_ids", "domain", "received_on"], chunksize=chunksize)
def case_ids_by_xform_id(xform_ids):
ret = defaultdict(list)
for res in CommCareCase.get_db().view('case/by_xform_id', keys=xform_ids, reduce=False):
ret[res["key"]].append(res["id"])
return dict(ret)
def iter_forms_with_cases(domain, since, chunksize=500):
for form_list in chunked(forms_with_cases(domain, since), chunksize):
case_id_mapping = case_ids_by_xform_id([f["_id"] for f in form_list])
for form in form_list:
form_id, f_case_ids, f_domain = form["_id"], form["fields"]["__retrieved_case_ids"], form["fields"]["domain"]
received_on = form["fields"]["received_on"]
for case_id in f_case_ids:
yield form_id, received_on, case_id, case_id in case_id_mapping.get(form_id, []), f_domain
def handle_problematic_data(datalist_tup, csv_writer, verbose=False, rebuild=False):
case_data = CommCareCase.get_db().view('_all_docs', keys=[d[1] for d in datalist_tup])
cases = set([c["id"] for c in case_data if 'id' in c])
for domain, case_id, form_id, received_on in datalist_tup:
error = "action_missing" if case_id in cases else "nonexistent_case"
csv_writer.writerow([domain, case_id, form_id, received_on, error])
if verbose and error == "nonexistent_case":
logger.info("Case (%s) from form (%s) does not exist" % (case_id, form_id))
elif verbose and error == "action_missing":
logger.info("Case (%s) missing action for form (%s)" % (case_id, form_id))
if rebuild:
if verbose:
logger.info("rebuilding case (%s) from scratch" % case_id)
try:
rebuild_case(case_id)
except Exception as e:
logger.info("Case Rebuild Failure: %s" % e)
class Command(BaseCommand):
args = '<domain>'
help = ('Checks all forms in a domain to make sure their cases were properly updated.')
option_list = BaseCommand.option_list + (
make_option('-s', '--since',
help="Begin check at this date."),
make_option('-f', '--filename',
help="Save output to this file."),
make_option('-r', '--rebuild', action="store_true",
help="Rebuild cases that were found to be corrupt"),
make_option('-c', '--chunk',
help="Set the chunk size"),
make_option('--verbose', action="store_true",
help="Verbose"),
)
def handle(self, *args, **options):
domain = args[0] if len(args) == 1 else None
since = dparser.parse(options["since"], fuzzy=True) if options.get("since") else None
filename = options.get("filename") or ("case_integrity" + ("_%s" % domain if domain else ""))
chunksize = options.get("chunk") or 500
if not filename.endswith(".csv"):
filename = "%s.csv" % filename
rebuild, verbose = options.get("rebuild"), options.get("verbose")
logger.info("writing to file: %s" % filename)
with open(filename, 'wb+') as csvfile:
csv_writer = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
            csv_writer.writerow(['Domain', 'Case ID', 'Form ID', 'Form Received On', 'Error'])
problematic = []
for form_id, received_on, case_id, action_exists, f_domain in iter_forms_with_cases(domain, since, chunksize):
if not action_exists:
problematic.append((f_domain, case_id, form_id, received_on))
if len(problematic) > chunksize:
handle_problematic_data(problematic, csv_writer, verbose=verbose, rebuild=rebuild)
problematic = []
handle_problematic_data(problematic, csv_writer, verbose=verbose, rebuild=rebuild)
|
bsd-3-clause
| -8,207,043,014,911,200,000 | 47.209524 | 122 | 0.590676 | false | 3.730287 | false | false | false |
toruta39/blender-datablock-translator
|
translate_datablock_names.py
|
1
|
7692
|
bl_info = {
"name": "Translate Datablock Names",
"author": "Joshua Zhang",
"version": (1, 0),
"blender": (2, 69, 0),
"location": "Search > (rename)",
"description": "A blender addon/plugin that helps to translate datablock \
names to English.",
"wiki_url": "",
"tracker_url": "",
"category": "Object"
}
import urllib.request
import urllib.parse
import json
import time
import re
import xml.etree.ElementTree as ET
import bpy
class MSTranslator():
"""A Class to communicate with Microsoft Translator API"""
def __init__(self):
self.access_token = ""
self.access_token_expires_at = time.time()
self.get_access_token()
def get_access_token(self):
"""Get access token from Azure Marketplace.
        If there is no existing access token, or the current one has expired, it will try to request a new one.
Returns: string
"""
if (
not bool(self.access_token) or
time.time() > self.access_token_expires_at
):
self.access_token = self.req_access_token()
return self.access_token
def req_access_token(self):
"""Request a new access token from Azure Marketplace
Returns: string
"""
url = "https://datamarket.accesscontrol.windows.net/v2/OAuth2-13"
data = {
"client_id": "blender-assets-translator",
"client_secret": "5TITh8SzOtQIefUJ/vKW10yk4/oNbGbgI+GquUdtgHo=",
"scope": "http://api.microsofttranslator.com",
"grant_type": "client_credentials"
}
data = urllib.parse.urlencode(data)
data = bytes(data, "utf-8")
req = urllib.request.Request(url=url, data=data)
result = urllib.request.urlopen(req).read()
result = str(result, "utf-8")
result = json.loads(result)
self.access_token_expires_at = time.time() + int(result["expires_in"])
return result["access_token"]
def translate(self, text, to_lang="en", from_lang=""):
"""Translate text to the target language
Keyword arguments:
text -- text to translate
to_lang -- optional, the target language code
from_lang -- optional, the source language code
Returns: string
"""
url = "http://api.microsofttranslator.com/v2/Http.svc/Translate"
data = {
"text": text,
"to": to_lang,
"from": from_lang
}
data = urllib.parse.urlencode(data)
url += "?" + data
req = urllib.request.Request(url=url, method="GET")
req.add_header("Authorization", "Bearer " + self.get_access_token())
result = urllib.request.urlopen(req).read()
result = str(result, "utf-8")
result = ET.fromstring(result)
result = result.text
return result
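    # Rough usage sketch (requires network access and a valid token; the sample
    # text and result are assumed): MSTranslator().translate("立方体") would be
    # expected to return something like "Cube".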
class TranslateDatablockNames(bpy.types.Operator):
"""Translate Datablock Names"""
bl_idname = "object.translate_datablock_names"
bl_label = "Translate Datablock Names"
bl_options = {'REGISTER', 'UNDO'}
is_object_to_translate = bpy.props.BoolProperty(
name='Object',
default=True,
description='Translate Object Names')
is_material_to_translate = bpy.props.BoolProperty(
name='Material',
default=True,
description='Translate Material Names')
is_animation_to_translate = bpy.props.BoolProperty(
name='Animation',
default=True,
description='Translate Animation Names')
is_armature_to_translate = bpy.props.BoolProperty(
name='Armature',
default=True,
description='Translate Armature Names')
is_shapekey_to_translate = bpy.props.BoolProperty(
name='Shape Key',
default=True,
description='Translate Shape Key Names')
dialog_width = 200
def draw(self, context):
layout = self.layout
row = layout.row()
row.prop(self.properties, 'is_object_to_translate')
row.prop(self.properties, 'is_material_to_translate')
row = layout.row()
row.prop(self.properties, 'is_animation_to_translate')
row.prop(self.properties, 'is_armature_to_translate')
row = layout.row()
row.prop(self.properties, 'is_shapekey_to_translate')
def execute(self, context):
translate_datablock_name(
is_object_to_translate=self.is_object_to_translate,
is_material_to_translate=self.is_material_to_translate,
is_animation_to_translate=self.is_animation_to_translate,
is_armature_to_translate=self.is_armature_to_translate,
is_shapekey_to_translate=self.is_shapekey_to_translate
)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.invoke_props_dialog(self, self.dialog_width)
return {'RUNNING_MODAL'}
def menu_func(self, context):
self.layout.operator(TranslateDatablockNames.bl_idname)
def translate_datablock_name(
is_object_to_translate=False,
is_material_to_translate=False,
is_animation_to_translate=False,
is_armature_to_translate=False,
is_shapekey_to_translate=False
):
if is_object_to_translate:
for obj in bpy.data.objects:
if has_irregular_char(obj.name):
obj.name = hyphenize(ms_translator.translate(obj.name))
for mesh in bpy.data.meshes:
if has_irregular_char(mesh.name):
mesh.name = hyphenize(ms_translator.translate(mesh.name))
for group in bpy.data.groups:
if has_irregular_char(group.name):
group.name = hyphenize(ms_translator.translate(group.name))
if is_material_to_translate:
for material in bpy.data.materials:
if has_irregular_char(material.name):
material.name = hyphenize(
ms_translator.translate(material.name)
)
if is_animation_to_translate:
for action in bpy.data.actions:
if has_irregular_char(action.name):
action.name = hyphenize(ms_translator.translate(action.name))
if is_armature_to_translate:
for armature in bpy.data.armatures:
if has_irregular_char(armature.name):
armature.name = hyphenize(
ms_translator.translate(armature.name)
)
for bone in armature.bones:
bone.name = hyphenize(
ms_translator.translate(bone.name)
)
if is_shapekey_to_translate:
for shapekey in bpy.data.shape_keys:
if has_irregular_char(shapekey.name):
shapekey.name = hyphenize(
ms_translator.translate(shapekey.name)
)
for keyblock in shapekey.key_blocks:
if has_irregular_char(keyblock.name):
keyblock.name = hyphenize(
ms_translator.translate(keyblock.name)
)
def hyphenize(string):
return '-'.join(string.split())
def has_irregular_char(string):
match = re.search(r"[^\x00-\x7F]", string)
if match:
return True
else:
return False
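# Illustrative only (sample names assumed): hyphenize("Suzanne head mesh")
# gives "Suzanne-head-mesh", while has_irregular_char() returns True for any
# name containing non-ASCII characters and False for plain ASCII names.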
def register():
global ms_translator
ms_translator = MSTranslator()
bpy.utils.register_class(TranslateDatablockNames)
bpy.types.OUTLINER_MT_search.append(menu_func)
def unregister():
global ms_translator
ms_translator = None
bpy.utils.unregister_class(TranslateDatablockNames)
bpy.types.OUTLINER_MT_search.remove(menu_func)
if __name__ == "__main__":
ms_translator = None
register()
|
mit
| 5,995,320,613,882,916,000 | 29.046875 | 78 | 0.604134 | false | 3.804154 | false | false | false |
miloszz/DIRAC
|
RequestManagementSystem/DB/RequestDB.py
|
1
|
33451
|
########################################################################
# $HeadURL $
# File: RequestDB.py
# Date: 2012/12/04 08:06:30
########################################################################
from types import ListType
""" :mod: RequestDB
=======================
.. module: RequestDB
:synopsis: db holding Requests
db holding Request, Operation and File
"""
__RCSID__ = "$Id $"
import random
import socket
import datetime
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gConfig, gLogger
from DIRAC.Core.Base.DB import DB
from DIRAC.RequestManagementSystem.Client.Request import Request
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm import relationship, backref, sessionmaker, joinedload_all, mapper
from sqlalchemy.sql import update
from sqlalchemy import create_engine, func, Table, Column, MetaData, ForeignKey,\
Integer, String, DateTime, Enum, BLOB, BigInteger, distinct
# Metadata instance that is used to bind the engine, Object and tables
metadata = MetaData()
# Description of the file table
fileTable = Table( 'File', metadata,
Column( 'FileID', Integer, primary_key = True ),
Column( 'OperationID', Integer,
ForeignKey( 'Operation.OperationID', ondelete = 'CASCADE' ),
nullable = False ),
Column( 'Status', Enum( 'Waiting', 'Done', 'Failed', 'Scheduled' ), server_default = 'Waiting' ),
Column( 'LFN', String( 255 ), index = True ),
Column( 'PFN', String( 255 ) ),
Column( 'ChecksumType', Enum( 'ADLER32', 'MD5', 'SHA1', '' ), server_default = '' ),
Column( 'Checksum', String( 255 ) ),
Column( 'GUID', String( 36 ) ),
Column( 'Size', BigInteger ),
Column( 'Attempt', Integer ),
Column( 'Error', String( 255 ) ),
mysql_engine = 'InnoDB'
)
# Map the File object to the fileTable, with a few special attributes
mapper( File, fileTable, properties = {
'_Status': fileTable.c.Status,
'_LFN': fileTable.c.LFN,
'_ChecksumType' : fileTable.c.ChecksumType,
'_GUID' : fileTable.c.GUID,
} )
# Description of the Operation table
operationTable = Table( 'Operation', metadata,
Column( 'TargetSE', String( 255 ) ),
Column( 'CreationTime', DateTime ),
Column( 'SourceSE', String( 255 ) ),
Column( 'Arguments', BLOB ),
Column( 'Error', String( 255 ) ),
Column( 'Type', String( 64 ), nullable = False ),
Column( 'Order', Integer, nullable = False ),
Column( 'Status', Enum( 'Waiting', 'Assigned', 'Queued', 'Done', 'Failed', 'Canceled', 'Scheduled' ), server_default = 'Queued' ),
Column( 'LastUpdate', DateTime ),
Column( 'SubmitTime', DateTime ),
Column( 'Catalog', String( 255 ) ),
Column( 'OperationID', Integer, primary_key = True ),
Column( 'RequestID', Integer,
ForeignKey( 'Request.RequestID', ondelete = 'CASCADE' ),
nullable = False ),
mysql_engine = 'InnoDB'
)
# Map the Operation object to the operationTable, with a few special attributes
mapper(Operation, operationTable, properties={
'_CreationTime': operationTable.c.CreationTime,
'_Order': operationTable.c.Order,
'_Status': operationTable.c.Status,
'_LastUpdate': operationTable.c.LastUpdate,
'_SubmitTime': operationTable.c.SubmitTime,
'_Catalog': operationTable.c.Catalog,
'__files__':relationship( File,
backref = backref( '_parent', lazy = 'immediate' ),
lazy = 'immediate',
passive_deletes = True,
cascade = "all, delete-orphan" )
})
# Description of the Request Table
requestTable = Table( 'Request', metadata,
Column( 'DIRACSetup', String( 32 ) ),
Column( 'CreationTime', DateTime ),
Column( 'JobID', Integer, server_default = '0' ),
Column( 'OwnerDN', String( 255 ) ),
Column( 'RequestName', String( 255 ), nullable = False ),
Column( 'Error', String( 255 ) ),
Column( 'Status', Enum( 'Waiting', 'Assigned', 'Done', 'Failed', 'Canceled', 'Scheduled' ), server_default = 'Waiting' ),
Column( 'LastUpdate', DateTime ),
Column( 'OwnerGroup', String( 32 ) ),
Column( 'SubmitTime', DateTime ),
Column( 'RequestID', Integer, primary_key = True ),
Column( 'SourceComponent', BLOB ),
Column( 'NotBefore', DateTime ),
mysql_engine = 'InnoDB'
)
# Map the Request object to the requestTable, with a few special attributes
mapper( Request, requestTable, properties = {
'_CreationTime': requestTable.c.CreationTime,
'_Status': requestTable.c.Status,
'_LastUpdate': requestTable.c.LastUpdate,
'_SubmitTime': requestTable.c.SubmitTime,
'_NotBefore': requestTable.c.NotBefore,
'__operations__' : relationship( Operation,
backref = backref( '_parent', lazy = 'immediate' ),
order_by = operationTable.c.Order,
lazy = 'immediate',
passive_deletes = True,
cascade = "all, delete-orphan"
)
} )
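# Note (statement of intent, not extra schema): the '__operations__' and
# '__files__' relationships declared above are what allow queries such as
#   session.query( Request ).options( joinedload_all( '__operations__.__files__' ) )
# in getRequest()/getBulkRequests() below to load a whole request tree eagerly.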
########################################################################
class RequestDB( object ):
"""
.. class:: RequestDB
db holding requests
"""
def __getDBConnectionInfo( self, fullname ):
""" Collect from the CS all the info needed to connect to the DB.
This should be in a base class eventually
"""
self.fullname = fullname
self.cs_path = getDatabaseSection( self.fullname )
self.dbHost = ''
result = gConfig.getOption( self.cs_path + '/Host' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: Host' )
self.dbHost = result['Value']
# Check if the host is the local one and then set it to 'localhost' to use
# a socket connection
if self.dbHost != 'localhost':
localHostName = socket.getfqdn()
if localHostName == self.dbHost:
self.dbHost = 'localhost'
self.dbPort = 3306
result = gConfig.getOption( self.cs_path + '/Port' )
if not result['OK']:
# No individual port number found, try at the common place
result = gConfig.getOption( '/Systems/Databases/Port' )
if result['OK']:
self.dbPort = int( result['Value'] )
else:
self.dbPort = int( result['Value'] )
self.dbUser = ''
result = gConfig.getOption( self.cs_path + '/User' )
if not result['OK']:
# No individual user name found, try at the common place
result = gConfig.getOption( '/Systems/Databases/User' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: User' )
self.dbUser = result['Value']
self.dbPass = ''
result = gConfig.getOption( self.cs_path + '/Password' )
if not result['OK']:
# No individual password found, try at the common place
result = gConfig.getOption( '/Systems/Databases/Password' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: Password' )
self.dbPass = result['Value']
self.dbName = ''
result = gConfig.getOption( self.cs_path + '/DBName' )
if not result['OK']:
raise RuntimeError( 'Failed to get the configuration parameters: DBName' )
self.dbName = result['Value']
def __init__( self, systemInstance = 'Default', maxQueueSize = 10 ):
"""c'tor
:param self: self reference
"""
self.log = gLogger.getSubLogger( 'RequestDB' )
# Initialize the connection info
self.__getDBConnectionInfo( 'RequestManagement/ReqDB' )
runDebug = ( gLogger.getLevel() == 'DEBUG' )
self.engine = create_engine( 'mysql://%s:%s@%s/%s' % ( self.dbUser, self.dbPass, self.dbHost, self.dbName ),
echo = runDebug )
metadata.bind = self.engine
self.DBSession = sessionmaker( bind = self.engine )
def createTables( self, toCreate = None, force = False ):
""" create tables """
try:
metadata.create_all( self.engine )
except Exception, e:
return S_ERROR( e )
return S_OK()
@staticmethod
def getTableMeta():
""" get db schema in a dict format """
return dict( [ ( classDef.__name__, None )
for classDef in ( Request, Operation, File ) ] )
def getTables(self):
""" Return the table names """
return S_OK( metadata.tables.keys() )
def cancelRequest( self, requestID ):
session = self.DBSession()
try:
updateRet = session.execute( update( Request )\
.where( Request.RequestID == requestID )\
.values( {Request._Status : 'Canceled',
Request._LastUpdate : datetime.datetime.utcnow()\
.strftime( Request._datetimeFormat )
}
)
)
session.commit()
# No row was changed
if not updateRet.rowcount:
return S_ERROR( "No such request %s" % requestID )
return S_OK()
except Exception, e:
session.rollback()
self.log.exception( "cancelRequest: unexpected exception", lException = e )
return S_ERROR( "cancelRequest: unexpected exception %s" % e )
finally:
session.close()
def putRequest( self, request ):
""" update or insert request into db
:param Request request: Request instance
"""
session = self.DBSession( expire_on_commit = False )
try:
try:
if hasattr( request, 'RequestID' ):
status = session.query( Request._Status )\
.filter( Request.RequestID == request.RequestID )\
.one()
if status[0] == 'Canceled':
self.log.info( "Request %s(%s) was canceled, don't put it back" % ( request.RequestID, request.RequestName ) )
return S_OK( request.RequestID )
except NoResultFound, e:
pass
# Since the object request is not attached to the session, we merge it to have an update
# instead of an insert with duplicate primary key
request = session.merge( request )
session.add( request )
session.commit()
session.expunge_all()
return S_OK( request.RequestID )
except Exception, e:
session.rollback()
self.log.exception( "putRequest: unexpected exception", lException = e )
return S_ERROR( "putRequest: unexpected exception %s" % e )
finally:
session.close()
def getScheduledRequest( self, operationID ):
session = self.DBSession()
try:
requestID = session.query( Request.RequestID )\
.join( Request.__operations__ )\
.filter( Operation.OperationID == operationID )\
.one()
return self.getRequest( requestID[0] )
except NoResultFound, e:
return S_OK()
finally:
session.close()
#
# def getRequestName( self, requestID ):
# """ get Request.RequestName for a given Request.RequestID """
#
# session = self.DBSession()
# try:
# requestName = session.query( Request.RequestName )\
# .filter( Request.RequestID == requestID )\
# .one()
# return S_OK( requestName[0] )
# except NoResultFound, e:
# return S_ERROR( "getRequestName: no request found for RequestID=%s" % requestID )
# finally:
# session.close()
def getRequest( self, reqID = 0, assigned = True ):
""" read request for execution
:param reqID: request's ID (default 0) If 0, take a pseudo random one
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession( expire_on_commit = False )
log = self.log.getSubLogger( 'getRequest' if assigned else 'peekRequest' )
requestID = None
try:
if reqID:
requestID = reqID
log.verbose( "selecting request '%s'%s" % ( reqID, ' (Assigned)' if assigned else '' ) )
status = None
try:
status = session.query( Request._Status )\
.filter( Request.RequestID == reqID )\
.one()
except NoResultFound, e:
return S_ERROR( "getRequest: request '%s' not exists" % reqID )
if status and status == "Assigned" and assigned:
return S_ERROR( "getRequest: status of request '%s' is 'Assigned', request cannot be selected" % reqID )
else:
now = datetime.datetime.utcnow().replace( microsecond = 0 )
reqIDs = set()
try:
reqAscIDs = session.query( Request.RequestID )\
.filter( Request._Status == 'Waiting' )\
.filter( Request._NotBefore < now )\
.order_by( Request._LastUpdate )\
.limit( 100 )\
.all()
reqIDs = set( [reqID[0] for reqID in reqAscIDs] )
reqDescIDs = session.query( Request.RequestID )\
.filter( Request._Status == 'Waiting' )\
.filter( Request._NotBefore < now )\
.order_by( Request._LastUpdate.desc() )\
.limit( 50 )\
.all()
reqIDs |= set( [reqID[0] for reqID in reqDescIDs] )
# No Waiting requests
except NoResultFound, e:
return S_OK()
if not reqIDs:
return S_OK()
reqIDs = list( reqIDs )
random.shuffle( reqIDs )
requestID = reqIDs[0]
# If we are here, the request MUST exist, so no try catch
# the joinedload_all is to force the non-lazy loading of all the attributes, especially _parent
request = session.query( Request )\
.options( joinedload_all( '__operations__.__files__' ) )\
.filter( Request.RequestID == requestID )\
.one()
if not reqID:
log.verbose( "selected request %s('%s')%s" % ( request.RequestID, request.RequestName, ' (Assigned)' if assigned else '' ) )
if assigned:
session.execute( update( Request )\
.where( Request.RequestID == requestID )\
.values( {Request._Status : 'Assigned',
Request._LastUpdate : datetime.datetime.utcnow()\
.strftime( Request._datetimeFormat )} )
)
session.commit()
session.expunge_all()
return S_OK( request )
except Exception, e:
session.rollback()
log.exception( "getRequest: unexpected exception", lException = e )
return S_ERROR( "getRequest: unexpected exception : %s" % e )
finally:
session.close()
def getBulkRequests( self, numberOfRequest = 10, assigned = True ):
""" read as many requests as requested for execution
:param int numberOfRequest: Number of Request we want (default 10)
:param bool assigned: if True, the status of the selected requests are set to assign
:returns a dictionary of Request objects indexed on the RequestID
"""
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession( expire_on_commit = False )
log = self.log.getSubLogger( 'getBulkRequest' if assigned else 'peekBulkRequest' )
requestDict = {}
try:
# If we are here, the request MUST exist, so no try catch
# the joinedload_all is to force the non-lazy loading of all the attributes, especially _parent
try:
requests = session.query( Request )\
.options( joinedload_all( '__operations__.__files__' ) )\
.filter( Request._Status == 'Waiting' )\
.order_by( Request._LastUpdate )\
.limit( numberOfRequest )\
.all()
requestDict = dict((req.RequestID, req) for req in requests)
# No Waiting requests
except NoResultFound, e:
pass
if assigned and requestDict:
session.execute( update( Request )\
.where( Request.RequestID.in_( requestDict.keys() ) )\
.values( {Request._Status : 'Assigned'} )
)
session.commit()
session.expunge_all()
except Exception, e:
session.rollback()
log.exception( "unexpected exception", lException = e )
return S_ERROR( "getBulkRequest: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( requestDict )
def peekRequest( self, requestID ):
""" get request (ro), no update on states
:param requestID: Request.RequestID
"""
return self.getRequest( requestID, False )
def getRequestIDsList( self, statusList = None, limit = None, since = None, until = None ):
""" select requests with status in :statusList: """
statusList = statusList if statusList else list( Request.FINAL_STATES )
limit = limit if limit else 100
session = self.DBSession()
requestIDs = []
try:
reqQuery = session.query( Request.RequestID, Request._Status, Request._LastUpdate )\
.filter( Request._Status.in_( statusList ) )
if since:
reqQuery = reqQuery.filter( Request._LastUpdate > since )
if until:
reqQuery = reqQuery.filter( Request._LastUpdate < until )
reqQuery = reqQuery.order_by( Request._LastUpdate )\
.limit( limit )
requestIDs = [reqIDTuple[0] for reqIDTuple in reqQuery.all()]
except Exception, e:
session.rollback()
self.log.exception( "getRequestIDsList: unexpected exception", lException = e )
return S_ERROR( "getRequestIDsList: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( requestIDs )
def deleteRequest( self, requestID ):
""" delete request given its ID
:param str requestID: request.RequestID
:param mixed connection: connection to use if any
"""
session = self.DBSession()
try:
session.query( Request ).filter( Request.RequestID == requestID ).delete()
session.commit()
except Exception, e:
session.rollback()
self.log.exception( "deleteRequest: unexpected exception", lException = e )
return S_ERROR( "deleteRequest: unexpected exception : %s" % e )
finally:
session.close()
return S_OK()
def getDBSummary( self ):
""" get db summary """
# # this will be returned
retDict = { "Request" : {}, "Operation" : {}, "File" : {} }
session = self.DBSession()
try:
requestQuery = session.query( Request._Status, func.count( Request.RequestID ) )\
.group_by( Request._Status )\
.all()
for status, count in requestQuery:
retDict["Request"][status] = count
operationQuery = session.query(Operation.Type, Operation._Status, func.count(Operation.OperationID))\
.group_by( Operation.Type, Operation._Status )\
.all()
for oType, status, count in operationQuery:
retDict['Operation'].setdefault( oType, {} )[status] = count
fileQuery = session.query( File._Status, func.count( File.FileID ) )\
.group_by( File._Status )\
.all()
for status, count in fileQuery:
retDict["File"][status] = count
except Exception, e:
self.log.exception( "getDBSummary: unexpected exception", lException = e )
return S_ERROR( "getDBSummary: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( retDict )
def getRequestSummaryWeb( self, selectDict, sortList, startItem, maxItems ):
""" Returns a list of Request for the web portal
    :param dict selectDict: parameters on which to restrict the query {key : Value}
key can be any of the Request columns, 'Type' (interpreted as Operation.Type)
and 'FromData' and 'ToData' are matched against the LastUpdate field
:param list sortList: [sorting column, ASC/DESC]
:param int startItem: start item (for pagination)
:param int maxItems: max items (for pagination)
"""
parameterList = [ 'RequestID', 'RequestName', 'JobID', 'OwnerDN', 'OwnerGroup',
'Status', "Error", "CreationTime", "LastUpdate"]
resultDict = {}
session = self.DBSession()
try:
summaryQuery = session.query( Request.RequestID, Request.RequestName,
Request.JobID, Request.OwnerDN, Request.OwnerGroup,
Request._Status, Request.Error,
Request._CreationTime, Request._LastUpdate )
for key, value in selectDict.items():
if key == 'ToDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate < value )
elif key == 'FromDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate > value )
else:
tableName = 'Request'
if key == 'Type':
summaryQuery = summaryQuery.join( Request.__operations__ )\
.group_by( Request.RequestID )
tableName = 'Operation'
elif key == 'Status':
key = '_Status'
if type( value ) == ListType:
summaryQuery = summaryQuery.filter( eval( '%s.%s.in_(%s)' % ( tableName, key, value ) ) )
else:
summaryQuery = summaryQuery.filter( eval( '%s.%s' % ( tableName, key ) ) == value )
if sortList:
summaryQuery = summaryQuery.order_by( eval( 'Request.%s.%s()' % ( sortList[0][0], sortList[0][1].lower() ) ) )
try:
requestLists = summaryQuery.all()
except NoResultFound, e:
resultDict['ParameterNames'] = parameterList
resultDict['Records'] = []
return S_OK( resultDict )
except Exception, e:
return S_ERROR( 'Error getting the webSummary %s' % e )
nRequests = len( requestLists )
if startItem <= len( requestLists ):
firstIndex = startItem
else:
return S_ERROR( 'getRequestSummaryWeb: Requested index out of range' )
if ( startItem + maxItems ) <= len( requestLists ):
secondIndex = startItem + maxItems
else:
secondIndex = len( requestLists )
records = []
for i in range( firstIndex, secondIndex ):
row = requestLists[i]
records.append( [ str( x ) for x in row] )
resultDict['ParameterNames'] = parameterList
resultDict['Records'] = records
resultDict['TotalRecords'] = nRequests
return S_OK( resultDict )
#
except Exception, e:
self.log.exception( "getRequestSummaryWeb: unexpected exception", lException = e )
return S_ERROR( "getRequestSummaryWeb: unexpected exception : %s" % e )
finally:
session.close()
def getRequestCountersWeb( self, groupingAttribute, selectDict ):
""" For the web portal.
Returns a dictionary {value : counts} for a given key.
The key can be any field from the RequestTable. or "Type",
which will be interpreted as 'Operation.Type'
"""
resultDict = {}
session = self.DBSession()
if groupingAttribute == 'Type':
groupingAttribute = 'Operation.Type'
elif groupingAttribute == 'Status':
groupingAttribute = 'Request._Status'
else:
groupingAttribute = 'Request.%s' % groupingAttribute
try:
summaryQuery = session.query( eval( groupingAttribute ), func.count( Request.RequestID ) )
for key, value in selectDict.items():
if key == 'ToDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate < value )
elif key == 'FromDate':
summaryQuery = summaryQuery.filter( Request._LastUpdate > value )
else:
objectType = 'Request'
if key == 'Type':
summaryQuery = summaryQuery.join( Request.__operations__ )
objectType = 'Operation'
elif key == 'Status':
key = '_Status'
if type( value ) == ListType:
summaryQuery = summaryQuery.filter( eval( '%s.%s.in_(%s)' % ( objectType, key, value ) ) )
else:
summaryQuery = summaryQuery.filter( eval( '%s.%s' % ( objectType, key ) ) == value )
summaryQuery = summaryQuery.group_by( groupingAttribute )
try:
requestLists = summaryQuery.all()
resultDict = dict( requestLists )
except NoResultFound, e:
pass
except Exception, e:
return S_ERROR( 'Error getting the webCounters %s' % e )
return S_OK( resultDict )
except Exception, e:
self.log.exception( "getRequestSummaryWeb: unexpected exception", lException = e )
return S_ERROR( "getRequestSummaryWeb: unexpected exception : %s" % e )
finally:
session.close()
def getDistinctValues( self, tableName, columnName ):
""" For a given table and a given field, return the list of of distinct values in the DB"""
session = self.DBSession()
distinctValues = []
try:
result = session.query( distinct( eval ( "%s.%s" % ( tableName, columnName ) ) ) ).all()
distinctValues = [dist[0] for dist in result]
except NoResultFound, e:
pass
except Exception, e:
self.log.exception( "getDistinctValues: unexpected exception", lException = e )
return S_ERROR( "getDistinctValues: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( distinctValues )
def getRequestIDsForJobs( self, jobIDs ):
""" read request ids for jobs given jobIDs
:param list jobIDs: list of jobIDs
"""
self.log.debug( "getRequestIDsForJobs: got %s jobIDs to check" % str( jobIDs ) )
if not jobIDs:
return S_ERROR( "Must provide jobID list as argument." )
if type( jobIDs ) in ( long, int ):
jobIDs = [ jobIDs ]
jobIDs = set( jobIDs )
reqDict = { "Successful": {}, "Failed": {} }
session = self.DBSession()
try:
ret = session.query( Request.JobID, Request.RequestID )\
.filter( Request.JobID.in_( jobIDs ) )\
.all()
reqDict['Successful'] = dict( ( jobId, reqID ) for jobId, reqID in ret )
reqDict['Failed'] = dict( (jobid, 'Request not found') for jobid in jobIDs - set(reqDict['Successful']))
except Exception, e:
self.log.exception( "getRequestIDsForJobs: unexpected exception", lException = e )
return S_ERROR( "getRequestIDsForJobs: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( reqDict )
def readRequestsForJobs( self, jobIDs = None ):
""" read request for jobs
:param list jobIDs: list of JobIDs
:return: S_OK( "Successful" : { jobID1 : Request, jobID2: Request, ... }
"Failed" : { jobID3: "error message", ... } )
"""
self.log.debug( "readRequestForJobs: got %s jobIDs to check" % str( jobIDs ) )
if not jobIDs:
return S_ERROR( "Must provide jobID list as argument." )
if type( jobIDs ) in ( long, int ):
jobIDs = [ jobIDs ]
jobIDs = set( jobIDs )
reqDict = { "Successful": {}, "Failed": {} }
# expire_on_commit is set to False so that we can still use the object after we close the session
session = self.DBSession( expire_on_commit = False )
try:
ret = session.query( Request.JobID, Request )\
.options( joinedload_all( '__operations__.__files__' ) )\
.filter( Request.JobID.in_( jobIDs ) ).all()
reqDict['Successful'] = dict( ( jobId, reqObj ) for jobId, reqObj in ret )
reqDict['Failed'] = dict( ( jobid, 'Request not found' ) for jobid in jobIDs - set( reqDict['Successful'] ) )
session.expunge_all()
except Exception, e:
self.log.exception( "readRequestsForJobs: unexpected exception", lException = e )
return S_ERROR( "readRequestsForJobs: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( reqDict )
def getRequestStatus( self, requestID ):
""" get request status for a given request ID """
self.log.debug( "getRequestStatus: checking status for '%s' request" % requestID )
session = self.DBSession()
try:
status = session.query( Request._Status ).filter( Request.RequestID == requestID ).one()
except NoResultFound, e:
return S_ERROR( "Request %s does not exist" % requestID )
finally:
session.close()
return S_OK( status[0] )
def getRequestFileStatus( self, requestID, lfnList ):
""" get status for files in request given its id
:param str requestID: Request.RequestID
:param list lfnList: list of LFNs
"""
session = self.DBSession()
try:
res = dict.fromkeys( lfnList, "UNKNOWN" )
requestRet = session.query( File._LFN, File._Status )\
.join( Request.__operations__ )\
.join( Operation.__files__ )\
.filter( Request.RequestID == requestID )\
.filter( File._LFN.in_( lfnList ) )\
.all()
for lfn, status in requestRet:
res[lfn] = status
return S_OK( res )
except Exception, e:
self.log.exception( "getRequestFileStatus: unexpected exception", lException = e )
return S_ERROR( "getRequestFileStatus: unexpected exception : %s" % e )
finally:
session.close()
def getRequestInfo( self, requestID ):
""" get request info given Request.RequestID """
session = self.DBSession()
try:
requestInfoQuery = session.query( Request.RequestID, Request._Status, Request.RequestName,
Request.JobID, Request.OwnerDN, Request.OwnerGroup,
Request.DIRACSetup, Request.SourceComponent, Request._CreationTime,
Request._SubmitTime, Request._LastUpdate )\
.filter( Request.RequestID == requestID )
try:
requestInfo = requestInfoQuery.one()
except NoResultFound, e:
return S_ERROR( 'No such request' )
return S_OK( tuple( requestInfo ) )
except Exception, e:
self.log.exception( "getRequestInfo: unexpected exception", lException = e )
return S_ERROR( "getRequestInfo: unexpected exception : %s" % e )
finally:
session.close()
def getDigest( self, requestID ):
""" get digest for request given its id
    :param str requestID: request id
"""
self.log.debug( "getDigest: will create digest for request '%s'" % requestID )
request = self.getRequest( requestID, False )
if not request["OK"]:
self.log.error( "getDigest: %s" % request["Message"] )
request = request["Value"]
if not isinstance( request, Request ):
self.log.info( "getDigest: request '%s' not found" )
return S_OK()
return request.getDigest()
def getRequestIDForName( self, requestName ):
""" read request id for given name
if the name is not unique, an error is returned
:param requestName : name of the request
"""
session = self.DBSession()
reqID = 0
try:
ret = session.query( Request.RequestID )\
.filter( Request.RequestName == requestName )\
.all()
if not ret:
return S_ERROR( 'No such request %s' % requestName )
elif len( ret ) > 1:
return S_ERROR( 'RequestName %s not unique (%s matches)' % ( requestName, len( ret ) ) )
reqID = ret[0][0]
except NoResultFound, e:
return S_ERROR( 'No such request' )
except Exception, e:
      self.log.exception( "getRequestIDForName: unexpected exception", lException = e )
      return S_ERROR( "getRequestIDForName: unexpected exception : %s" % e )
finally:
session.close()
return S_OK( reqID )
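# Illustrative sketch (editor's addition, not part of the original module).
# Every method above returns a DIRAC-style S_OK/S_ERROR dictionary. A caller is
# expected to check the 'OK' flag before touching 'Value'; `result` below is a
# hypothetical return value from any of the methods above.
def _unpackResultSketch( result ):
  """ Minimal sketch of consuming an S_OK/S_ERROR style return value. """
  if not result['OK']:
    # S_ERROR dictionaries carry a 'Message' instead of a 'Value'
    return None
  return result['Value']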
|
gpl-3.0
| -7,894,808,296,504,958,000 | 34.100735 | 154 | 0.572868 | false | 4.222012 | false | false | false |
cgimenop/Excel2Testlink
|
ExcelParser/lib/openpyxl/chart/tests/test_surface_chart.py
|
1
|
3879
|
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def SurfaceChart():
from ..surface_chart import SurfaceChart
return SurfaceChart
class TestSurfaceChart:
def test_ctor(self, SurfaceChart):
chart = SurfaceChart()
xml = tostring(chart.to_tree())
expected = """
<surfaceChart>
<axId val="10"></axId>
<axId val="100"></axId>
<axId val="1000"></axId>
</surfaceChart>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, SurfaceChart):
src = """
<surfaceChart>
<wireframe val="0"/>
<ser>
<idx val="0"/>
<order val="0"/>
</ser>
<ser>
<idx val="1"/>
<order val="1"/>
</ser>
<bandFmts/>
<axId val="2086876920"/>
<axId val="2078923400"/>
<axId val="2079274408"/>
</surfaceChart>
"""
node = fromstring(src)
chart = SurfaceChart.from_tree(node)
assert [a.val for a in chart.axId] == [10, 100, 1000]
@pytest.fixture
def SurfaceChart3D():
from ..surface_chart import SurfaceChart3D
return SurfaceChart3D
class TestSurfaceChart3D:
def test_ctor(self, SurfaceChart3D):
chart = SurfaceChart3D()
xml = tostring(chart.to_tree())
expected = """
<surface3DChart>
<axId val="10"></axId>
<axId val="100"></axId>
<axId val="1000"></axId>
</surface3DChart>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, SurfaceChart3D):
src = """
<surface3DChart>
<wireframe val="0"/>
<ser>
<idx val="0"/>
<order val="0"/>
<val>
<numRef>
<f>Blatt1!$A$1:$A$12</f>
</numRef>
</val>
</ser>
<ser>
<idx val="1"/>
<order val="1"/>
<val>
<numRef>
<f>Blatt1!$B$1:$B$12</f>
</numRef>
</val>
</ser>
<bandFmts/>
<axId val="2082935272"/>
<axId val="2082938248"/>
<axId val="2082941288"/>
</surface3DChart>
"""
node = fromstring(src)
chart = SurfaceChart3D.from_tree(node)
assert len(chart.ser) == 2
assert [a.val for a in chart.axId] == [10, 100, 1000]
@pytest.fixture
def BandFormat():
from ..surface_chart import BandFormat
return BandFormat
class TestBandFormat:
def test_ctor(self, BandFormat):
fmt = BandFormat()
xml = tostring(fmt.to_tree())
expected = """
<bandFmt>
<idx val="0" />
</bandFmt>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, BandFormat):
src = """
<bandFmt>
<idx val="4"></idx>
</bandFmt>
"""
node = fromstring(src)
fmt = BandFormat.from_tree(node)
assert fmt == BandFormat(idx=4)
@pytest.fixture
def BandFormatList():
from ..surface_chart import BandFormatList
return BandFormatList
class TestBandFormatList:
def test_ctor(self, BandFormatList):
fmt = BandFormatList()
xml = tostring(fmt.to_tree())
expected = """
<bandFmts />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, BandFormatList):
src = """
<bandFmts />
"""
node = fromstring(src)
fmt = BandFormatList.from_tree(node)
assert fmt == BandFormatList()
|
mit
| 3,734,306,017,059,204,600 | 22.509091 | 61 | 0.523331 | false | 3.615098 | true | false | false |
Mr-F/dotmailer
|
dotmailer/surveys.py
|
1
|
2805
|
from dotmailer import Base
from dotmailer.connection import connection
class Survey(Base):
    """
    Represents a DotMailer survey and wraps the account's /v2/surveys endpoints.
    """
end_point = '/v2/surveys'
def __init__(self, **kwargs):
self.required_fields = []
super(Survey, self).__init__(**kwargs)
@classmethod
def get_multiple(cls, assigned_to_address_book_only=True, select=1000,
skip=0):
if assigned_to_address_book_only:
assigned_to_address_book_only = 'true'
else:
assigned_to_address_book_only = 'false'
response = connection.get(
cls.end_point,
query_params={
'AssignedToAddressBookOnly': assigned_to_address_book_only,
'Select': select,
'Skip': skip
}
)
return [cls(**entry) for entry in response]
@classmethod
def get_all(cls, assigned_to_address_book_only=True):
"""
Gets a list of all surveys in the account
:param assigned_to_address_book_only: A boolean value to
            indicate whether we should only retrieve surveys that have been
assigned to an address book. The default value for this is
True
:return:
"""
select = 1000
skip = 0
all_surveys = []
surveys = cls.get_multiple(assigned_to_address_book_only, select, skip)
num_of_entries = len(surveys)
while num_of_entries > 0:
all_surveys.extend(surveys)
# If there weren't enough entries then there are no more to
# load so simply break out of the loop
if num_of_entries < select:
break
skip += select
surveys = cls.get_multiple(assigned_to_address_book_only, select,
skip)
num_of_entries = len(surveys)
return all_surveys
@classmethod
def get_by_id(cls, id):
"""
        Get a survey by its ID value
:param id: The DotMailer unique ID value for the survey
:return:
"""
# Cast the ID parameter to an integer
id = int(id)
# Check that the ID parameter is greater than zero, if not raise
# an exception.
if id < 1:
            raise Exception('Survey ID must be a positive integer')
response = connection.get(
'{}/{}'.format(cls.end_point, id)
)
return cls(**response)
@classmethod
def get_survey_fields(cls, id):
"""
Gets a list of survey pages, each containing a list of the
fields on that page
:param id:
:return:
"""
response = connection.get(
'{}/{}/fields'.format(cls.end_point, id)
)
return response
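# Illustrative usage sketch (editor's addition, not part of the original module).
# A minimal sketch of how the class above might be used, assuming the shared
# `connection` object has already been initialised with valid DotMailer
# credentials elsewhere in the application.
if __name__ == '__main__':
    # Fetch every survey assigned to an address book (paging is handled by get_all)
    surveys = Survey.get_all(assigned_to_address_book_only=True)
    print('Number of surveys retrieved: {}'.format(len(surveys)))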
|
mit
| -2,345,594,443,191,682,600 | 27.05 | 79 | 0.529768 | false | 4.262918 | false | false | false |
kernsuite-debian/lofar
|
SAS/ResourceAssignment/ResourceAssignmentEstimator/resource_estimators/observation.py
|
1
|
30414
|
# observation.py
#
# Copyright (C) 2016-2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
#
# $Id$
import logging
import pprint
from math import ceil
from .base_resource_estimator import BaseResourceEstimator
from lofar.stationmodel.antennasets_parser import AntennaSetsParser
logger = logging.getLogger(__name__)
DATAPRODUCTS = "Observation.DataProducts."
COBALT = "Observation.ObservationControl.OnlineControl.Cobalt."
class ObservationResourceEstimator(BaseResourceEstimator):
""" ResourceEstimator for LOFAR Observations
"""
def __init__(self):
logger.info("init ObservationResourceEstimator")
super(ObservationResourceEstimator, self).__init__(name='observation')
self.required_keys = ('Observation.sampleClock',
'Observation.startTime',
'Observation.stopTime',
'Observation.antennaSet',
'Observation.nrBeams',
'Observation.Beam[0].subbandList',
'Observation.nrBitsPerSample',
'Observation.VirtualInstrument.stationList',
COBALT + 'Correlator.nrChannelsPerSubband',
COBALT + 'Correlator.integrationTime',
COBALT + 'BeamFormer.flysEye',
COBALT + 'BeamFormer.CoherentStokes.timeIntegrationFactor',
COBALT + 'BeamFormer.IncoherentStokes.timeIntegrationFactor',
'Observation.VirtualInstrument.stationList',
DATAPRODUCTS + 'Output_Correlated.enabled',
DATAPRODUCTS + 'Output_Correlated.identifications',
DATAPRODUCTS + 'Output_Correlated.storageClusterName',
DATAPRODUCTS + 'Output_CoherentStokes.enabled',
DATAPRODUCTS + 'Output_CoherentStokes.identifications',
DATAPRODUCTS + 'Output_CoherentStokes.storageClusterName',
COBALT + 'BeamFormer.CoherentStokes.which',
DATAPRODUCTS + 'Output_IncoherentStokes.enabled',
DATAPRODUCTS + 'Output_IncoherentStokes.identifications',
DATAPRODUCTS + 'Output_IncoherentStokes.storageClusterName',
COBALT + 'BeamFormer.IncoherentStokes.which'
)
self.asp = AntennaSetsParser()
def _calculate(self, parset, predecessor_estimates=[]):
""" Calculate the resources needed by the different data product types that can be in a single observation.
The predecessor_estimates argument is just to implement the same interface as pipelines. Observations have no predecessor.
The following return value example is for an observation duration of 240.0 s and 3 data product types for 2 clusters.
NOTE: 'nr_of_XX_files' is for that SAP estimate. The total is thus times the 'resource_count'.
'nr_of_cs_parts' is for a full CS TAB (per stokes component) in that SAP; not per estimate, which may still describe one part.
See the calibration pipeline estimator for some explanation on why parts of this format are currently needed. It also has input_files.
{
'errors': [],
'estimates': [{
'resource_types': {'bandwidth': 35791395, 'storage': 1073741824}, # for each uv output data product (thus the total is times the resource_count value)
'resource_count': 20, 'root_resource_group': 'CEP4',
'output_files': {
'uv': [{'sap_nr': 0, 'identification': 'mom.G777955.B2.1.C.SAP000.uv.dps',
'properties': {'uv_file_size': 1073741824, 'nr_of_uv_files': 1, 'start_sb_nr': 0}}]
}
}, {'resource_types': {'bandwidth': 35791395, 'storage': 1073741824}, # idem
'resource_count': 60, 'root_resource_group': 'CEP4',
'output_files': {
'uv': [{'sap_nr': 1, 'identification': 'mom.G777955.B2.1.C.SAP001.uv.dps',
'properties': {'uv_file_size': 1073741824, 'nr_of_uv_files': 1, 'start_sb_nr': 20}}]
}
}, {'resource_types': {'bandwidth': 35791395, 'storage': 1073741824}, # idem
'resource_count': 20, 'root_resource_group': 'CEP4',
'output_files': {
'uv': [{'sap_nr': 2, 'identification': 'mom.G777955.B2.1.C.SAP002.uv.dps',
'properties': {'uv_file_size': 1073741824, 'nr_of_uv_files': 1, 'start_sb_nr': 80}}]
}
}, {'resource_types': {'bandwidth': 71582789, 'storage': 2147483648}, # for each quad (4 stokes) of cs output tab part (thus the total is times the resource_count value)
'resource_count': 34, 'root_resource_group': 'DRAGNET',
'output_files': {
'cs': [{'sap_nr': 0, 'identification': 'mom.G777955.B2.1.C.SAP000.cs.dps',
'properties': {'cs_file_size': 536870912, 'nr_of_cs_files': 4, 'nr_of_cs_stokes': 4,
'nr_of_cs_parts': 2}}] # parts per tab for this sap
}
}, {'resource_types': {'bandwidth': 71582789, 'storage': 2147483648}, # idem
'resource_count': 6, 'root_resource_group': 'DRAGNET',
'output_files': {
'cs': [{'sap_nr': 1, 'identification': 'mom.G777955.B2.1.C.SAP001.cs.dps',
'properties': {'cs_file_size': 536870912, 'nr_of_cs_files': 4, 'nr_of_cs_stokes': 4,
'nr_of_cs_parts': 1, 'is_tab_nr': 0}}] # parts per tab for this sap
}
}, {'resource_types': {'bandwidth': 17895698, 'storage': 536870912}, # for each 'is' output tab part (thus the total is times the resource_count value)
'resource_count': 1, 'root_resource_group': 'DRAGNET',
'output_files': {
'is': [{'sap_nr': 1, 'identification': 'mom.G777955.B2.1.C.SAP001.is.dps',
'properties': {'is_file_size': 536870912, 'nr_of_is_files': 1, 'nr_of_is_stokes': 1,
'is_tab_nr': 0}}] # IS can have >1 parts, but currently max 1 IS TAB per SAP
}
}]
}
"""
logger.info("start estimate '{}'".format(self.name))
logger.info('parset: %s ' % parset)
# NOTE: observation estimates appear quite accurate. Most of the difference comes from Observation.stopTime
# being planned instead of real stop time, because of Cobalt block size not being exactly 1.0 s.
duration = self._getDuration(parset.getString('Observation.startTime'),
parset.getString('Observation.stopTime'))
errors = []
estimates = []
try:
if parset.getBool('Observation.DataProducts.Output_Correlated.enabled'):
estimates.extend(self.correlated(parset, duration))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
try:
if parset.getBool('Observation.DataProducts.Output_CoherentStokes.enabled'):
estimates.extend(self.coherentstokes(parset, duration))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
try:
if parset.getBool('Observation.DataProducts.Output_IncoherentStokes.enabled'):
estimates.extend(self.incoherentstokes(parset, duration))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
if not estimates:
logger.error('no data product estimates in observation resource estimate list!')
errors.append('Produced observation resource estimate list has no data product estimates!')
try:
estimates.extend(self.stations(parset))
except ValueError as exc:
logger.error(exc)
errors.append(str(exc))
logger.debug('Observation resource estimates:\n' + pprint.pformat(estimates))
result = {'errors': errors, 'estimates': estimates}
return result
def correlated(self, parset, duration):
""" Estimate storage size and bandwidth needed for correlated ('uv')
data products. Also add SAP properties needed by the propagator.
The duration argument is a float in (fractional) seconds.
Return list of estimates, max 1 SAP per estimate (easier for assigner),
or raise ValueError on error.
"""
logger.info("calculating correlated data size")
storage_unit = 512 # all sizes in bytes
size_of_header = 512
size_of_overhead = 600000 # COBALT parset in MS HISTORY subtable + misc
size_of_short = 2
size_of_visib = 8 # a visibility is stored as a std::complex<float>
nr_polarizations = 2
channels_per_subband = parset.getInt(COBALT + 'Correlator.nrChannelsPerSubband', 64) # defaults as in COBALT
integration_time = parset.getFloat(COBALT + 'Correlator.integrationTime', 1)
nr_virtual_stations = self._virtual_stations(parset)
# Reflects MeasurementSets produced by the casacore LOFAR storage manager (LofarStMan)
        # The sub-expression '(x + val-1) / val' computes a positive integer division rounded up (a ceiling).
integrated_seconds = int(duration / integration_time)
nr_baselines = nr_virtual_stations * (nr_virtual_stations + 1) / 2
data_size = (nr_baselines * channels_per_subband * nr_polarizations * nr_polarizations * \
size_of_visib + storage_unit-1) / storage_unit * storage_unit
n_sample_size = (nr_baselines * channels_per_subband * size_of_short + storage_unit-1) / \
storage_unit * storage_unit
file_size = (data_size + n_sample_size + size_of_header) * integrated_seconds + size_of_overhead # bytes
bandwidth = int(ceil(8 * file_size / duration)) # bits/second
root_resource_group = parset.getString(DATAPRODUCTS + 'Output_Correlated.storageClusterName')
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError("Correlated data output enabled, but nrBeams < 1")
# Estimates may differ per SAP for CS/IS. Decided to always produce a separate estimate per SAP.
# Hence, need to annotate each SAP with the right identifications for pipeline predecessor input filtering.
identifications = parset.getStringVector(DATAPRODUCTS + 'Output_Correlated.identifications')
sap_idents = self._sap_identifications(identifications, nr_saps)
total_files = 0 # sum of all subbands in all digital beams
estimates = []
for sap_nr in range(nr_saps):
subbandList = parset.getStringVector('Observation.Beam[%d].subbandList' % sap_nr)
nr_subbands = len(subbandList)
if nr_subbands == 0:
# Replace here by 'continue' (+ check total_files > 0 at the end) once we support separate subband lists for UV, CS, IS
raise ValueError("Correlated data output enabled, but empty subband list for sap %d" % sap_nr)
est = {'resource_types': {'bandwidth': bandwidth, 'storage': file_size},
'resource_count': nr_subbands,
'root_resource_group': root_resource_group,
'output_files': {'uv': [{'sap_nr': sap_nr, 'identification': sap_idents[sap_nr],
'properties': {'uv_file_size': file_size, 'nr_of_uv_files': 1, # thus total nr_of_uv_files is resource_count times 1
'start_sb_nr': total_files}}]}}
total_files += nr_subbands
estimates.append(est)
logger.debug("Correlated data estimates:\n" + pprint.pformat(estimates))
return estimates
def coherentstokes(self, parset, duration):
""" Estimate storage size and bandwidth needed for Coherent Stokes ('cs')
data products. Also add SAP properties needed by the propagator.
The duration argument is a float in (fractional) seconds.
Return list of estimates, max 1 SAP per estimate (easier for assigner),
or raise ValueError on error.
"""
logger.info("calculate coherent stokes data size")
size_of_sample = 4 # single precision float
coherent_type = parset.getString(COBALT + 'BeamFormer.CoherentStokes.which')
subbands_per_file = parset.getInt(COBALT + 'BeamFormer.CoherentStokes.subbandsPerFile', 512)
if subbands_per_file < 0:
raise ValueError('BeamFormer.CoherentStokes.subbandsPerFile may not be negative, but is %d' % subbands_per_file)
if subbands_per_file == 0:
subbands_per_file = 512
samples_per_second = self._samples_per_second(parset)
time_integration_factor = parset.getInt(COBALT + 'BeamFormer.CoherentStokes.timeIntegrationFactor')
# Note that complex voltages (XXYY) cannot be meaningfully integrated (time_integration_factor 1)
size_per_subband = (samples_per_second * size_of_sample * duration) / time_integration_factor
nr_coherent = len(coherent_type) # 'I' or 'IQUV' or 'XXYY'
doFlysEye = parset.getBool(COBALT + 'BeamFormer.flysEye')
root_resource_group = parset.getString(DATAPRODUCTS + 'Output_CoherentStokes.storageClusterName')
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError("Coherent Stokes data output enabled, but nrBeams < 1")
# Estimates may differ per SAP for CS/IS. Decided to always produce a separate estimate per SAP.
# Hence, need to annotate each SAP with the right identifications for pipeline predecessor input filtering.
identifications = parset.getStringVector(DATAPRODUCTS + 'Output_CoherentStokes.identifications')
sap_idents = self._sap_identifications(identifications, nr_saps)
estimates = []
for sap_nr in range(nr_saps):
logger.info("checking SAP {}".format(sap_nr))
subbandList = parset.getStringVector('Observation.Beam[%d].subbandList' % sap_nr)
nr_subbands = len(subbandList)
if nr_subbands == 0:
raise ValueError("Coherent Stokes data output enabled, but empty subband list for sap %d" % sap_nr)
nr_subbands_per_file = min(subbands_per_file, nr_subbands)
nr_coherent_tabs = 0
is_tab_nr = None
nr_tabs = parset.getInt('Observation.Beam[%d].nrTiedArrayBeams' % sap_nr)
for tab_nr in range(nr_tabs):
if not parset.getBool("Observation.Beam[%d].TiedArrayBeam[%d].coherent" % (sap_nr, tab_nr)):
is_tab_nr = tab_nr
logger.info("coherentstokes: skipping incoherent tab")
continue
nr_coherent_tabs += 1
logger.info("added %d coherent tabs before considering tab rings and fly's eye tabs", nr_coherent_tabs)
nr_tab_rings = parset.getInt('Observation.Beam[%d].nrTabRings' % sap_nr)
if nr_tab_rings < 0:
raise ValueError("SAP %d: nr of tab rings is < 0: %d" % (sap_nr, nr_tab_rings))
elif nr_tab_rings > 0:
nr_tabs = (3 * nr_tab_rings * (nr_tab_rings + 1) + 1)
nr_coherent_tabs += nr_tabs
logger.info("added %d tabs from %d tab rings", nr_tabs, nr_tab_rings)
if doFlysEye:
nr_tabs = self._virtual_stations(parset)
nr_coherent_tabs += nr_tabs
logger.info("added %d fly's eye tabs", nr_tabs)
if nr_coherent_tabs == 0:
raise ValueError("Coherent Stokes data output enabled, but no coherent tabs for sap %d" % sap_nr)
# Keep XXYY/IQUV together (>1 parts still possible).
# Else translator to parset filenames cannot know which stokes (nr_of_XX_stokes property too coarse).
# Also for complex voltages (XXYY) only: pipeline needs all 4 XXYY accessible from the same node.
#
# NOTE: If a TAB is split into parts, then the last TAB part may contain fewer subbands.
# Simplify: compute a single (max) file size for all TABs or TAB parts.
file_size = int(nr_subbands_per_file * size_per_subband) # bytes
storage = file_size * nr_coherent # bytes
bandwidth = int(ceil(8 * storage / duration)) # bits/second
nr_parts_per_tab = int(ceil(nr_subbands / float(nr_subbands_per_file))) # thus per tab per stokes
est = {'resource_types': {'storage': storage, 'bandwidth': bandwidth},
'resource_count': nr_coherent_tabs * nr_parts_per_tab,
'root_resource_group': root_resource_group,
'output_files': {'cs': [{'sap_nr': sap_nr, 'identification': sap_idents[sap_nr],
'properties': {'cs_file_size': file_size, 'nr_of_cs_files': nr_coherent,
'nr_of_cs_stokes': nr_coherent, 'nr_of_cs_parts': nr_parts_per_tab}}]}}
if is_tab_nr is not None: # translator to filenames needs to know: it may not have all CS+IS info in one claim
est['output_files']['cs'][0]['properties']['is_tab_nr'] = is_tab_nr
estimates.append(est)
logger.debug("Coherent Stokes data estimates:\n" + pprint.pformat(estimates))
return estimates
def incoherentstokes(self, parset, duration):
""" Estimate storage size and bandwidth needed for Incoherent Stokes ('is')
data products. Also add SAP properties needed by the propagator.
The duration argument is a float in (fractional) seconds.
Return list of estimates, max 1 SAP per estimate (easier for assigner),
or raise ValueError on error.
"""
logger.info("calculate incoherent stokes data size")
size_of_sample = 4 # single precision float
incoherent_type = parset.getString(COBALT + 'BeamFormer.IncoherentStokes.which')
subbands_per_file = parset.getInt(COBALT + 'BeamFormer.IncoherentStokes.subbandsPerFile', 512)
if subbands_per_file < 0:
raise ValueError('BeamFormer.IncoherentStokes.subbandsPerFile may not be negative, but is %d' % subbands_per_file)
if subbands_per_file == 0:
subbands_per_file = 512
samples_per_second = self._samples_per_second(parset)
time_integration_factor = parset.getInt(COBALT + 'BeamFormer.IncoherentStokes.timeIntegrationFactor')
size_per_subband = (samples_per_second * size_of_sample * duration) / time_integration_factor
nr_incoherent = len(incoherent_type) # 'I' or 'IQUV' ('XXYY' only possible for coherent stokes)
root_resource_group = parset.getString(DATAPRODUCTS + 'Output_IncoherentStokes.storageClusterName')
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError("Incoherent Stokes data output enabled, but nrBeams < 1")
# Estimates may differ per SAP for CS/IS. Decided to always produce a separate estimate per SAP.
# Hence, need to annotate each SAP with the right identifications for pipeline predecessor input filtering.
identifications = parset.getStringVector(DATAPRODUCTS + 'Output_IncoherentStokes.identifications')
sap_idents = self._sap_identifications(identifications, nr_saps)
estimates = []
for sap_nr in range(nr_saps):
logger.info("checking SAP {}".format(sap_nr))
subbandList = parset.getStringVector('Observation.Beam[%d].subbandList' % sap_nr)
nr_subbands = len(subbandList)
if nr_subbands == 0:
raise ValueError("Incoherent Stokes data output enabled, but empty subband list for sap %d" % sap_nr)
nr_subbands_per_file = min(subbands_per_file, nr_subbands)
# Atm can have 1 IS TAB per SAP, because its pointing is equal to the SAP pointing.
# (When we support online coh dedisp and on multiple DMs, we can have >1 IS per SAP.)
nr_incoherent_tabs = 0
nr_tabs = parset.getInt('Observation.Beam[%d].nrTiedArrayBeams' % sap_nr)
for tab_nr in range(nr_tabs):
if parset.getBool("Observation.Beam[%d].TiedArrayBeam[%d].coherent" % (sap_nr, tab_nr)):
continue
if nr_incoherent_tabs > 0:
# Could get here to produce >1 IS TAB copies, maybe for some software test
raise ValueError("SAP %i: >1 incoherent TAB not supported: TAB nrs %i and %i" % (sap_nr, tab_nr, is_tab_nr))
is_tab_nr = tab_nr
nr_incoherent_tabs += 1
logger.info("added %d incoherent tab(s)", nr_incoherent_tabs)
if nr_incoherent_tabs == 0:
raise ValueError("Incoherent Stokes data output enabled, but no incoherent tabs for sap %d" % sap_nr)
# Keep IQUV together (>1 parts still possible).
# Else translator to parset filenames cannot know which stokes (nr_of_XX_stokes property too coarse).
#
# NOTE: If a TAB is split into parts, then the last TAB part may contain fewer subbands.
# Simplify: compute a single (max) file size for all TABs or TAB parts.
file_size = int(nr_subbands_per_file * size_per_subband) # bytes
storage = file_size * nr_incoherent # bytes
bandwidth = int(ceil(8 * storage / duration)) # bits/second
nr_parts_per_tab = int(ceil(nr_subbands / float(nr_subbands_per_file))) # thus per tab per stokes
est = {'resource_types': {'storage': storage, 'bandwidth': bandwidth},
'resource_count': nr_incoherent_tabs * nr_parts_per_tab,
'root_resource_group': root_resource_group,
'output_files': {'is': [{'sap_nr': sap_nr, 'identification': sap_idents[sap_nr],
'properties': {'is_file_size': file_size, 'nr_of_is_files': nr_incoherent,
'nr_of_is_stokes': nr_incoherent, 'is_tab_nr': is_tab_nr}}]}}
estimates.append(est)
logger.debug("Incoherent Stokes data estimates:\n" + pprint.pformat(estimates))
return estimates
    def _samples_per_second(self, parset):
        """ return the number of samples per second for the configured sample clock
"""
samples_160mhz = 155648
samples_200mhz = 196608
sample_clock = parset.getInt('Observation.sampleClock')
samples = samples_160mhz if 160 == sample_clock else samples_200mhz
logger.info("samples per second for {} MHz clock = {}".format(sample_clock, samples))
return samples
    def _virtual_stations(self, parset):
        """ calculate the virtual number of stations
"""
stationList = parset.getStringVector('Observation.VirtualInstrument.stationList')
nr_virtual_stations = 0
if parset.getString('Observation.antennaSet') in ('HBA_DUAL', 'HBA_DUAL_INNER'):
for station in stationList:
if 'CS' in station:
nr_virtual_stations += 2
else:
nr_virtual_stations += 1
else:
nr_virtual_stations = len(stationList)
logger.info("number of virtual stations = {}".format(nr_virtual_stations))
return nr_virtual_stations
def _extract_sap_nr(self, identification):
""" Return sap nr as int from identification or None if
no int xxx in '.SAPxxx.' in identification.
"""
for s in identification.split('.'): # Find the SAP number, if present
if 'SAP' not in s:
continue
try:
return int(s[3:])
except:
pass
return None
def _sap_identifications(self, identifications, nr_saps):
""" Return list with identifications' identification for sap i at index i,
or '' at index i if no such identification for sap i.
NOTE: identifications should not contain entries for multiple data product types,
otherwise we cannot return a single identification per sap nr.
For output, there must be exactly 1 (non-duplicate) identification string per
data product type (how can you otherwise refer to it unambiguously?),
and per sap (per sap for observations only, but always the case here).
"""
sap_idents = [''] * nr_saps
for ident in identifications:
sap_nr = self._extract_sap_nr(ident)
try:
ident_seen = sap_idents[sap_nr]
except Exception as e: # e.g. sap_nr is None or out of bounds
logger.error("Ignoring observation identification string with no or invalid sap nr: %s", str(e))
continue
if not ident_seen:
sap_idents[sap_nr] = ident
elif ident_seen != ident:
logger.error("Cannot have multiple observation identifications per sap. Dropping %s", ident) # see doc string
return sap_idents
def stations(self, parset):
""" Estimate required RSPs and RCUs per station.
One or two RSP boards are returned per station depending on antennaset.
            RCUs are encoded as a bitfield, to be able to tell which RCUs are actually needed.
Return list of estimates, or raise ValueError on error.
"""
estimates = []
antennaset = parset.getString('Observation.antennaSet')
stationset = parset.getStringVector('Observation.VirtualInstrument.stationList')
if not stationset:
raise ValueError("Observation.VirtualInstrument.stationList is empty")
rculists = self.asp.get_receiver_units_configuration_per_station(antennaset, stationset)
for station in stationset:
bitfield, count = self._rculist_to_bitfield(rculists[station])
rsps, channelbits = self._required_rsps(station, antennaset, parset)
est = {'resource_types': {'rcu': bitfield},
'resource_count': 1,
'station': station,
'root_resource_group': station}
estimates.append(est)
for rsp in rsps:
root_resource_group = station+rsp
est = {'resource_types': {},
'resource_count': 1,
'station': station,
'root_resource_group': root_resource_group}
est['resource_types']['bandwidth'] = 3000000000
est['resource_types']['rsp'] = channelbits
estimates.append(est)
return estimates
def _rculist_to_bitfield(self, rculist):
"""
Takes list of rcus as returned by Antennasets_parser ['LBL', 'LBH', None, ...] and encodes them as a bitfield.
Each bit represents one rcu, value is 1 if rcu is not None in input list (= is used), 0 otherwise.
Returns String representation of the bitfield and the number of used rcus.
"""
bitfield = ""
count = 0
for rcu in rculist:
if rcu is None:
bitfield = bitfield+"0"
else:
bitfield = bitfield+"1"
count = count + 1
return bitfield, count
def _required_rsps(self, station, antennaset, parset):
"""
        Takes a station name, the antenna set name, and the parset.
Returns list with one or both required rsps and number of channelbits,
or raises ValueError on error.
"""
if station.startswith('CS'):
required_rsps = ['RSP0'] # default
if antennaset == 'HBA_ONE':
required_rsps = ['RSP1']
if antennaset in ['HBA_DUAL', 'HBA_DUAL_INNER']:
required_rsps = ['RSP0', 'RSP1']
else:
required_rsps = ['RSP'] # default for non-core stations
nr_saps = parset.getInt('Observation.nrBeams')
if nr_saps < 1:
raise ValueError('Observation.nrBeams must be at least 1, but is %d' % nr_saps)
subBandList = []
for nr in range(nr_saps):
key = 'Observation.Beam['+str(nr)+'].subbandList'
sblist = parset.getStringVector(key)
if not sblist:
raise ValueError("%s is empty" % key)
subBandList.extend(sblist)
nrSubbands = len(subBandList)
nrBitsPerSample = parset.getInt('Observation.nrBitsPerSample')
if nrBitsPerSample != 16 and nrBitsPerSample != 8 and nrBitsPerSample != 4:
raise ValueError('Observation.nrBitsPerSample must be 16, 8, or 4, but is %d' % nrBitsPerSample)
channelbits = nrSubbands * nrBitsPerSample
return required_rsps, channelbits
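# Illustrative sketch (editor's addition, not part of the original estimator).
# Worked example of the LofarStMan MeasurementSet size arithmetic used in
# correlated() above, with hypothetical inputs: 50 virtual stations, 64 channels
# per subband, 1.0 s integration time and a 600 s observation. The constants and
# the Python 2 integer divisions mirror the ones in correlated().
def _uv_file_size_sketch(nr_virtual_stations=50, channels_per_subband=64,
                         integration_time=1.0, duration=600.0):
    storage_unit = 512     # all sizes in bytes
    size_of_header = 512
    size_of_overhead = 600000
    size_of_short = 2
    size_of_visib = 8
    nr_polarizations = 2
    integrated_seconds = int(duration / integration_time)
    nr_baselines = nr_virtual_stations * (nr_virtual_stations + 1) / 2
    data_size = (nr_baselines * channels_per_subband * nr_polarizations * nr_polarizations *
                 size_of_visib + storage_unit - 1) / storage_unit * storage_unit
    n_sample_size = (nr_baselines * channels_per_subband * size_of_short +
                     storage_unit - 1) / storage_unit * storage_unit
    # One uv file holds one subband for the full observation
    return (data_size + n_sample_size + size_of_header) * integrated_seconds + size_of_overhead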
|
gpl-3.0
| 3,918,938,717,688,254,500 | 53.407871 | 182 | 0.597718 | false | 3.822775 | false | false | false |
pnomolos/greatbigcrane
|
greatbigcrane/project/migrations/0007_auto__add_field_project_project_type__chg_field_project_description.py
|
1
|
2050
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Project.project_type'
db.add_column('project_project', 'project_type', self.gf('django.db.models.fields.CharField')(default='buildout', max_length=9), keep_default=False)
# Changing field 'Project.description'
db.alter_column('project_project', 'description', self.gf('django.db.models.fields.TextField')(blank=True))
def backwards(self, orm):
# Deleting field 'Project.project_type'
db.delete_column('project_project', 'project_type')
# Changing field 'Project.description'
db.alter_column('project_project', 'description', self.gf('django.db.models.fields.TextField')())
models = {
'project.project': {
'Meta': {'object_name': 'Project'},
'base_directory': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'favourite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'git_repo': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project_type': ('django.db.models.fields.CharField', [], {'max_length': '9'}),
'test_status': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['project']
|
apache-2.0
| 401,360,477,161,549,800 | 46.674419 | 156 | 0.596585 | false | 3.768382 | false | false | false |
jaaimino/dogebook
|
app.py
|
1
|
9782
|
from flask import Flask, session, redirect, url_for, escape, request, render_template
from flask.ext.mongoengine import MongoEngine
from pbkdf2 import crypt
from models import *
import logging, datetime, math, os
#SSL for future security maybe?
'''
import ssl
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain('yourserver.crt', 'yourserver.key')
'''
app = Flask(__name__)
app.debug = False
app.config['MONGODB_SETTINGS'] = {
'db': 'dogebook',
'host': '127.0.0.1',
'port': 27017
}
db = MongoEngine(app)
app.logger.setLevel(logging.INFO) # use the native logger of flask
# secret app key. keep this really secret:
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
#Index route
@app.route('/')
def index():
if 'userid' in session:
return redirect('/posts/0')
return render_template("index.html", data={})
#New post route
@app.route('/newpost', methods=['POST'])
def new_post():
if 'userid' in session and request.method == 'POST':
user = User.objects(id=session['userid']).first()
postContent = request.form['inputText']
post = Post(content=postContent, author=user).save()
return redirect(url_for('index'))
#Add friend by userid route
@app.route('/add_friend/<userid>')
def add_friend(userid=None):
if 'userid' in session:
user = User.objects(id=session['userid']).first()
#print user.friends.__class__.__name__
friend = User.objects(id=userid).first()
if friend not in user.friends:
user.friends.append(friend)
user.save()
return redirect('/profile/'+session["userid"])
return redirect('/profile/'+session["userid"])
#Remove friend by userid route
@app.route('/remove_friend/<userid>')
def remove_friend(userid=None):
if 'userid' in session:
user = User.objects(id=session['userid']).first()
#print user.friends.__class__.__name__
friend = User.objects(id=userid).first()
if friend in user.friends:
user.friends.remove(friend)
user.save()
return redirect(url_for('index'))
return redirect(url_for('index'))
#Friend search route
@app.route('/find_friends', methods=['GET', 'POST'])
def find_friends():
if 'userid' in session:
user = User.objects(id=session['userid']).first()
results = []
if request.method == 'POST':
somename = request.form['inputName']
results = User.objects(name__contains=somename)
return render_template("find_friends.html", data={"user":user, "results":results, "nresults":len(results)})
return redirect(url_for('index'))
#Get a page of posts for your current user
@app.route('/posts/<page>')
def posts_page(page=0):
if 'userid' in session:
page = int(page)
posts_per_page = 10
user = User.objects(id=session['userid']).first()
#print user.friends
#User.objects.get(id='55a51d434c149d1f60daec89') #lookup by id example
#print "Wat?"
current_post = page * posts_per_page
posts_full = Post.objects(db.Q(author__in = user.friends) | db.Q(author = user)).order_by('-datetime')
        page_count = int(math.ceil(posts_full.count() / float(posts_per_page)))
page_count = min(page_count,10)
posts = posts_full.skip(current_post).limit(10)
comment_counts = []
for post in posts:
comment_counts.append(len(post.comments))
next_page = page+1
if next_page > page_count:
next_page = page_count
prev_page = page-1
if prev_page < 0:
prev_page = 0
#print posts
return render_template("feed.html", data={"prev_page": prev_page, "currpage":page, "next_page":next_page, \
"page_count":page_count, "user":user, "posts":posts, "comment_counts":comment_counts})
return redirect(url_for('index'))
#Get a single post by id (And view comments)
@app.route('/post/<postid>')
def post_id(postid=None):
if 'userid' in session:
post = Post.objects(id=postid).first()
user = User.objects(id=session['userid']).first()
comments = post.comments
comments = sorted(comments, key=lambda r: r.datetime, reverse=True)[:15]
return render_template("single_post.html", data={"user":user, "post":post, "comments":comments})
return redirect(url_for('index'))
#Delete a user by id
@app.route('/delete_user/<userid>')
def delete_user_id(userid=None):
if 'userid' in session: #My userid
user = User.objects(id=session['userid']).first()
if user.username == "[email protected]":
user = User.objects(id=session['userid']).first()
targetUser = User.objects(id=userid).first()
posts = Post.objects(author=targetUser)
posts.delete()
targetUser.delete()
return redirect(url_for('index'))
return redirect(url_for('index'))
#Delete a post by id
@app.route('/post/<postid>/delete')
def delete_post_id(postid=None):
if 'userid' in session:
user = User.objects(id=session['userid']).first()
post = Post.objects(id=postid).first()
if(post.author == user): #Actually delete the post here
post.delete()
return redirect(url_for('index')) #Ultimately redirect
return redirect(url_for('index'))
#Add comment to post by id
@app.route('/post/<postid>/add_comment', methods=['POST'])
def comment_post_id(postid=None):
if request.method == 'POST':
if 'userid' in session:
user = User.objects(id=session['userid']).first()
post = Post.objects(id=postid).first()
if user in post.author.friends or post.author == user: #Actually add comment here
print "Adding comment"
comment = Comment(content=request.form['inputText'],author=user).save()
post.comments.append(comment)
post.save()
return redirect('/post/'+str(post.id)) #Ultimately redirect
return redirect(url_for('index'))
#Log in to the app
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
someusername = request.form['inputEmail']
alleged_password = request.form['inputPassword']
user = User.objects(username=someusername).first()
if user != None and user.password == crypt(alleged_password, user.password):
session['userid'] = str(user.id)
return redirect(url_for('index'))
return render_template('login.html', data={"message":"Wrong email or password"})
else:
if 'userid' in session:
return render_template('error.html', data={"error":"You're already logged in..."})
else:
return render_template('login.html', data={})
#Create an account
@app.route('/create_account', methods=['GET', 'POST'])
def create_account():
if request.method == 'POST':
name = request.form['inputName']
someemail = request.form['inputEmail']
pwhash = crypt(request.form['inputPassword'])
count = User.objects(username=someemail).count()
if(count == 0):
user = User(username=someemail, password=pwhash, name=name).save()
session['userid'] = str(user.id)
return redirect(url_for('index'))
else:
return render_template('create_account.html', data={"message":"Sorry, that email is already taken."})
else:
if 'userid' in session:
return render_template('error.html', data={"error":"You're already logged in. Please log out to create a new account."})
else:
return render_template('create_account.html')
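# Illustrative sketch (editor's addition, not part of the original app).
# The login and create_account routes above rely on the pbkdf2 crypt() pattern:
# hash with a fresh salt on signup, then re-hash the candidate password with the
# stored hash as salt on login and compare. A minimal sketch of that check:
def _password_check_sketch(stored_hash, candidate):
    # True when the candidate password matches the stored pbkdf2 hash
    return stored_hash == crypt(candidate, stored_hash)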
#Log out of app
@app.route('/logout')
def logout():
# remove the username from the session if it's there
session.pop('userid', None)
return redirect(url_for('index'))
#Redirect for supporting cool url scheme with convenience wrapped
@app.route('/profile')
def profile():
if 'userid' in session:
return redirect('/profile/'+session['userid'])
return redirect(url_for('index'))
#Go to profile by id
@app.route('/profile/<profileid>')
def profile_id(profileid=None):
if 'userid' in session:
user = User.objects(id=profileid).first()
currentuser = User.objects(id=session["userid"]).first()
userid = str(user.id)
return render_template("profile.html", data={"user":user, "friends":user.friends, "currentuser":currentuser, "userid":userid})
return redirect(url_for('index'))
#Edit profile by id. Only your own :)
@app.route('/profile/<profileid>/edit', methods=['GET', 'POST'])
def edit_profile_id(profileid=None):
if 'userid' in session:
if request.method == 'POST':
if session['userid'] == profileid:
user = User.objects(id=session['userid']).first()
user.update(name=request.form['inputName'], tagline=request.form['inputTagline'],city=request.form['inputCity'],state=request.form['inputState'],bio=request.form['inputBio'])
return redirect('/profile/'+profileid)
else:
print "Hackerrzzz"
else:
user = User.objects(id=session['userid']).first()
return render_template("edit_profile.html", data={"user":user})
else:
return redirect(url_for('index'))
#Handle some errors
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
#Handle some more errors
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 404
#Start the app
if (__name__ == '__main__'):
app.run(host=os.getenv('IP', '0.0.0.0'),port=int(os.getenv('PORT', 8080)))
#app.run(host="127.0.0.1", port=8080, ssl_context=context) #No way to do ssl yet
|
apache-2.0
| -2,140,761,096,780,513,500 | 37.821429 | 190 | 0.627275 | false | 3.655456 | false | false | false |
tmorrell/Molframe
|
inputs/selector.py
|
1
|
14370
|
import tkSimpleDialog
import tkMessageBox
#import p3d.protein
#import p3d.geo
from pymol.wizard import Wizard
from pymol import cmd, util
from pymol.controlling import mode_dict
class Bond(object):
def __init__(self,bond1,bond2,resid1,resid2):
if bond2 > bond1:
self.bond1=bond1
self.bond2=bond2
self.resid1=resid1
self.resid2=resid2
else:
self.bond1=bond2
self.bond2=bond1
self.resid1=resid2
self.resid2=resid1
self.indexes=[self.bond1,self.bond2]
class selector(Wizard):
def __init__(self,name,chain,resid,resid2,_self=cmd):
Wizard.__init__(self,_self)
self.resid = resid
self.resid2 = resid2
self.name = name
self.chain = chain
self.extend = 1
self.bonds=[]
self.resids=[]
self.indexes=[]
self.load=None
self.lead=0
def get_panel(self):
label = 'No Mutation'
return [
[ 1, 'Select Rotatable Bonds',''],
[ 1, 'for Residue '+ self.resid ,''],
[ 2, 'Pick Bond' , 'cmd.get_wizard().apply()'],
[ 2, 'Rotate View' , 'cmd.get_wizard().rotate()'],
[ 2, 'Show More Bonds' , 'cmd.get_wizard().show()'],
[ 2, 'Pick Rotatable Section' , 'cmd.get_wizard().srot()'],
[ 2, 'Write Bonds' , 'cmd.get_wizard().set_bonds()'],
[ 2, 'Reset Selected Bonds' , 'cmd.get_wizard().reset()'],
[ 2, 'Finished' , 'cmd.get_wizard().clear()'],
]
def srot(self):
cmd.deselect()
#self.pk2_st=None
self.load=1
self.get_prompt()
print "Testing", self.lead
cmd.config_mouse('three_button_editing')
def show(self):
left = str(int(self.resid)-self.extend)
right = str(int(self.resid)+self.extend)
cmd.show('lines','resid '+left+':'+right)
cmd.zoom('resid '+left+':'+right)
self.extend = self.extend+1
#def isbonded(self,bond0,bond1,stems):
# nextres = 0
# for stem in stems:
# if bond0==stem:
# nextres=bond1
# if bond1==stem:
# nextres=bond0
# return nextres
    def get_bonds(self,stems,allbonds,rot_bonds=[]):
        """ Return indexes of atoms bonded to any atom in stems, excluding atoms already in rot_bonds. """
        nextbonds = []
for stem in stems:
print "STEM", stem
for bond in allbonds:
#print bond.index
if stem in bond.index: #save next bond
print bond.index,"matched bond"
for n in bond.index:
if n != stem: #find next atom
if n not in rot_bonds: #if atom is new:
nextbonds.append(n)
#return indexes connected to stem
return nextbonds
def is_in_bonds(self,stem,bonds):
yes = 0
for bond in bonds:
if stem in bond.indexes:
yes = 1
return yes
def is_in_multiple_bonds(self,stem,bonds):
count = 0
for bond in bonds:
if stem in bond.indexes:
count = count + 1
if count == 2:
return True
else:
return False
#def reset_bond(self,known,bonds): #reset bond, if repeated index save repeat
# ret = []
# print "reset_bond"
# print known, "known"
# for rbon in bonds: #for each rot bond
# if known[0] in rbon.indexes:
# if known[1] not in rbon.indexes:
# ret = [known[1]]
# if known[1] in rbon.indexes:
# if known[0] not in rbon.indexes:
# ret = [known[0]]
# return ret
def set_bonds(self):
startingbond=[]
rangev = []
if self.lead==0:
print "Must select rotatable section first"
elif len(self.bonds)==0:
print "Must select at least one rotatable bonds"
else:
mres = min(self.resids)
xres = max(self.resids)
model = cmd.get_model('all') #('resid '+str(self.resid)+':'+str(self.resid2))
allbonds = model.bond
'''
Removed efficiency code to test end residue labeling - will be slow
if mres != xres: #multires case
mind = min(self.indexes)
xind = max(self.indexes)
irange = [mind,xind] #range of indexes we care about for bonding pattern
if self.lead < mind:
irange = [self.lead,xind]
if self.lead > xind:
irange = [mind,self.lead]
limitedset = []
we want to limit allbonds to a limited index range
for efficiency-may be problem if indexes are really screwed up
for b in allbonds:
if b.index[0] in range(irange[0],irange[1]) or \
b.index[1] in range(irange[0],irange[1]):
limitedset.append(b)
allbonds = limitedset
'''
#Remove dummy atom-for bonding only, will still be rotated
dummy = 'ZZ'
reduced = []
for b in allbonds:
d = False
if self.get_atom(b.index[0])[2] == dummy or self.get_atom(b.index[1])[2] == dummy:
d = True
if d == False:
reduced.append(b)
print self.get_atom(b.index[0]),self.get_atom(b.index[1])
#print "DONE"
allbonds = reduced
#start from rotatable selection point and find what atoms are always rotatable
rot_bonds = [self.lead]
print rot_bonds,"LEAD"
print self.bonds
for b in allbonds:
print b.index
stems = self.get_bonds(rot_bonds,allbonds,rot_bonds)
nextstep=[]
while len(stems) != 0: #while a bond remains
next_stem = set() #Internal
for s in stems: #check if at rotation
if self.is_in_bonds(s,self.bonds):
if len(nextstep) == 0:
nextstep.append(s) #don't move beyond rotation
rot_bonds.append(s)
next_stem.add(s)
#No else - We discard any other rotatable bonds - deal with later
else:
rot_bonds.append(s)
next_stem.add(s)
stems = self.get_bonds(next_stem,allbonds,rot_bonds)
outstring = "!Rotation of dye\n"
lenv = len(self.bonds)
outstring = outstring + '!NROT '+str(lenv)+'\n'
outstring = outstring + 'cons fix sele dbackbone .or. .not. '+\
'(resid @res .and. segid @chain) end\n\n'
#now we look along rest of chain
botbonds = []
count = 0
excluded = rot_bonds #We don't want to select rotatable bonds
stems = self.get_bonds(nextstep,allbonds,excluded)
bond=nextstep #This is a rotatable object
while len(stems) != 0:
excluded=excluded+stems#don't go to a stem two times
for stem in stems:
if self.is_in_bonds(stem,self.bonds): #only care about bonds
if len(bond)==0: #we have a new end of a bond
bond.append(stem)
elif stem != bond[0]:#We have second half of new bond
new_bond = stem
bond.append(new_bond)
count = count + 1
#We need to tease out other rotatable atoms from those in stems
for stem in stems:
if self.is_in_bonds(stem,self.bonds) == False:
#Just looking at other stems-none of these
# have rotatable elements
botbonds = botbonds+[stem]
nexts = list(set(self.get_bonds([stem],allbonds,excluded)))
while len(nexts) != 0:
botbonds = botbonds+nexts
excluded = excluded+nexts #don't go to stem two times
nexts = list(set(self.get_bonds(nexts,allbonds,excluded)))
#Now write output for rotation
outstring = outstring + 'label loop'+str(count)+'\n'
outstring = outstring + self.rotate_axis(bond[0],bond[1])
outstring = outstring + self.rotate_sel(120,botbonds)
outstring = outstring + 'incr '+str(count)+' by '+str(count)+'\n'
outstring = outstring + 'goto mini \n \n'
#We check if the new_bond atom is shared
#The old atom is discarded because we don't go backwards
if self.is_in_multiple_bonds(new_bond,self.bonds):
bond = [new_bond]
else:
bond = []
botbonds=botbonds+stems
stems = list(set(self.get_bonds(stems,allbonds,excluded)))
outfile = open('../../inputs/'+self.name+'_rot.str','w')
outfile.write(outstring)
#write .str file
stream = '!The atoms that are the end of the dye\n'
stream = stream + "define dyefix sele .NOT. ( "
for bindex in botbonds:
atom = self.get_atom(bindex)
stream = stream + " chain "+atom[0]+" .and. resi " + atom[1]+" .and. name "+atom[2]+ " .OR. "
stream = stream + ' ) end\n'
outfile = open('../../inputs/'+self.name+'.str','w')
outfile.write(stream)
print "All files written for ",self.name
def get_atom(self,index):
cmd.select("_p","index "+str(index+1))#convert from internal back to
#label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
return [str(self.pk_ac),str(self.pk_ar),str(self.pk_at)]
def rotate_axis(self,index1,index2):#print axis output
atom1=self.get_atom(index1)
atom2=self.get_atom(index2)
return "coor axis sele atom "+atom1[0]+' '+atom1[1]+' '+atom1[2]+\
" end sele atom "+atom2[0]+' '+atom2[1]+' '+atom2[2]+" end \n"
def rotate_sel(self,angle,flexbonds):#print selection output
outstring = 'coor rota axis PHI '+str(angle)+' sele dyefix '
atoms = []
print "rotate_sel", flexbonds
for index in flexbonds:
cmd.select("_p","index "+str(index+1))#convert from internal back
#to label numbering
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_at',""name)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ac',""chain)")
cmd.iterate("_p","setattr(cmd.get_wizard(),'pk_ar',""resi)")
atoms.append([str(self.pk_at),str(self.pk_ac),str(self.pk_ar)])
for atom in atoms: #set(atoms): #ensure every atom is only included once
outstring = outstring + ' .or. '
outstring = outstring+'atom '+atom[1]+' '+atom[2]+' '+atom[0]
return outstring+' end \n'
def do_select(self,selection):
cmd.deselect()
def rotate(self):
mode_dict['three_button_viewing'] = [ ('l','none','rota')]
cmd.config_mouse('three_button_viewing')
def reset(self):
#cmd.color("atomic")
#cmd.set_bond("line_color","atomic","all")
#util.cbag("all")
self.bonds=[]
cmd.set_bond("line_color","green","all")
def apply(self):
mode_dict['three_button_viewing'] = [ ('l','none','PkTB')]
cmd.config_mouse('three_button_viewing')
print "Apply"
def clear(self):
cmd.quit()
def get_prompt(self):
if self.load!=None:
return ["Please pick the atom in the direction of the section you want to rotate"]
if self.pk2_st!=None:
return ["You picked the bond between %s and %s"%(self.pk1_st, self.pk2_st)]
else:
return ["Please pick an atom or a bond..."]
def do_pick(self,picked_bond):
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
print "Picking Loop"
if picked_bond:
cmd.iterate("pk2","setattr(cmd.get_wizard(),'pk2_st',""'%s/%s/%s/%s/%s/%s'%(model,segi,chain,resi,name,index))")
cmd.set_bond("line_color","orange","pk1","pk2")
print [self.pk1_st,self.pk2_st],'bond'
self.resids.append(int(self.pk1_st.split('/')[3])-1)
self.resids.append(int(self.pk2_st.split('/')[3])-1)
self.indexes.append(int(self.pk1_st.split('/')[5])-1)
self.indexes.append(int(self.pk2_st.split('/')[5])-1)
self.bonds.append(Bond(int(self.pk1_st.split('/')[5])-1,int(self.pk2_st.split('/')[5])-1,int(self.pk1_st.split('/')[3])-1,int(self.pk2_st.split('/')[3])-1))
# -1 converts to 0 start index, which is used for bonds - This will be one off from labels in pymol
cmd.unpick()
else:
# for single atom, also get 3D coordinates (EXAMPLE)
print "Single Atom"
self.load=None
cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_r',""index)")
self.lead=self.pk1_r-1 #Converting to 0 start index, which is used for bonds
#This will be one off from labels in pymol
cmd.iterate_state(cmd.get_state(),"pk1","setattr(cmd.get_wizard(),'pk1_xyz',(x,y,z))")
#cmd.unpick()
cmd.refresh_wizard()
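# Illustrative sketch (editor's addition, not part of the original wizard).
# Bond stores its two atom indexes in ascending order regardless of pick order,
# which lets is_in_bonds()/is_in_multiple_bonds() treat picked bonds as unordered pairs.
def _bond_order_sketch():
    picked = Bond(7, 3, 12, 12)   # second atom of the pair was picked first
    return picked.indexes         # -> [3, 7]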
|
gpl-2.0
| -8,294,040,477,171,383,000 | 40.412104 | 168 | 0.499304 | false | 3.844302 | false | false | false |
iLoop2/ResInsight
|
ThirdParty/Ert/devel/python/python/ert/__init__.py
|
1
|
3998
|
# Copyright (C) 2011 Statoil ASA, Norway.
#
# The file '__init__.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
"""
ert - Ensemble Reservoir Tool - a package for reservoir modeling.
The ert package itself has no code, but contains several subpackages:
ert.ecl: Package for working with ECLIPSE files. By far the most mature
package in ert.
ert.job_queue:
ert.util:
The ert package is based on wrapping the libraries from the ERT C
code with ctypes; an essential part of ctypes approach is to load the
shared libraries with the ctypes.CDLL() function. The ctypes.CDLL()
function uses the standard methods of the operating system,
i.e. standard locations configured with ld.so.conf and the environment
variable LD_LIBRARY_PATH.
To avoid conflict with other application using the ert libraries the
Python code should be able to locate the shared libraries without
(necessarily) using the LD_LIBRARY_PATH variable. The default
behaviour is to try to load from the directory ../../lib64, but by using
the environment variable ERT_LIBRARY_PATH you can alter how ert looks
for shared libraries. This module will set the ert_lib_path of the
ert.cwrap.clib module; the actual loading will take place in that
module.
1. By default the code will try to load the shared libraries from
'../../lib64' relative to the location of this file.
2. Depending on the value of ERT_LIBRARY_PATH two different
behaviours can be imposed:
Existing path: the package will look in the path pointed to
by ERT_LIBRARY_PATH for shared libraries.
Arbitrary value: the package will use standard load order for
the operating system.
If loading from the fixed path (the default ../../lib64 or the
ERT_LIBRARY_PATH alternative) fails, the loader will try the default
load behaviour before giving up completely.
"""
import os.path
import cwrap.clib
import sys
import warnings
try:
import ert_site_init
except ImportError:
pass
required_version_hex = 0x02060000
# 1. Start by initializing the ert_lib_path variable to None
ert_lib_path = None
# 2. Try to load the __ert_lib_path module; this module has been
# configured by cmake during the build configuration process. The
# module should contain the variable lib_path pointing to the
# directory with shared object files.
try:
import __ert_lib_path
ert_lib_path = __ert_lib_path.lib_path
except ImportError:
pass
# 3. Using the environment variable ERT_LIBRARY_PATH it is possible to
# override the default algorithms. If the ERT_LIBRARY_PATH is set
# to a non existing directory a warning will go to stderr and the
# setting will be ignored.
env_lib_path = os.getenv("ERT_LIBRARY_PATH")
if env_lib_path:
if os.path.isdir( env_lib_path ):
ert_lib_path = os.getenv("ERT_LIBRARY_PATH")
else:
sys.stderr.write("Warning: Environment variable ERT_LIBRARY_PATH points to nonexisting directory:%s - ignored" % env_lib_path)
# Check that the final ert_lib_path setting corresponds to an existing
# directory.
if ert_lib_path:
if not os.path.exists( ert_lib_path ):
ert_lib_path = None
# Set the module variable ert_lib_path of the ert.cwrap.clib module;
# this is where the actual loading will be performed.
cwrap.clib.ert_lib_path = ert_lib_path
if sys.hexversion < required_version_hex:
raise Exception("ERT Python requires at least version 2.6 of Python")
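# Illustrative sketch (editor's addition, not part of the original package).
# The module docstring above describes how ERT_LIBRARY_PATH steers library
# loading; the helper below simply reports the decision taken by the logic above.
# Note that the override only takes effect if it points to an existing directory
# before this package is imported.
def _describe_library_path_sketch():
    """ Editor's sketch: report where the shared libraries will be loaded from. """
    if ert_lib_path:
        return "loading ert shared libraries from %s" % ert_lib_path
    return "falling back to the operating system default library search order"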
|
gpl-3.0
| -8,916,155,810,471,547,000 | 33.17094 | 134 | 0.731616 | false | 3.654479 | false | false | false |
pgmillon/ansible
|
lib/ansible/modules/cloud/docker/docker_secret.py
|
1
|
9011
|
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_secret
short_description: Manage docker secrets.
version_added: "2.4"
description:
- Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
  - Adds to the metadata of new secrets 'ansible_key', a hashed representation of the secret data, which is then used
    in future runs to test if a secret has changed. If 'ansible_key' is not present, then a secret will not be updated
unless the C(force) option is set.
- Updates to secrets are performed by removing the secret and creating it again.
options:
data:
description:
- The value of the secret. Required when state is C(present).
type: str
data_is_b64:
description:
- If set to C(true), the data is assumed to be Base64 encoded and will be
decoded before being used.
- To use binary C(data), it is better to keep it Base64 encoded and let it
be decoded by this option.
type: bool
default: no
version_added: "2.8"
labels:
description:
- "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
- If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
type: dict
force:
description:
- Use with state C(present) to always remove and recreate an existing secret.
- If I(true), an existing secret will be replaced, even if it has not changed.
type: bool
default: no
name:
description:
- The name of the secret.
type: str
required: yes
state:
description:
- Set to C(present), if the secret should exist, and C(absent), if it should not.
type: str
default: present
choices:
- absent
- present
extends_documentation_fragment:
- docker
- docker.docker_py_2_documentation
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
- "Docker API >= 1.25"
author:
- Chris Houseknecht (@chouseknecht)
'''
EXAMPLES = '''
- name: Create secret foo (from a file on the control machine)
docker_secret:
name: foo
# If the file is JSON or binary, Ansible might modify it (because
# it is first decoded and later re-encoded). Base64-encoding the
# file directly after reading it prevents this to happen.
data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
data_is_b64: true
state: present
- name: Change the secret data
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
state: present
- name: Add a new label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Adding a new label will cause a remove/create of the secret
two: '2'
state: present
- name: No change
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: baz
one: '1'
# Even though 'two' is missing, there is no change to the existing secret
state: present
- name: Update an existing label
docker_secret:
name: foo
data: Goodnight everyone!
labels:
bar: monkey # Changing a label will cause a remove/create of the secret
one: '1'
state: present
- name: Force the removal/creation of the secret
docker_secret:
name: foo
data: Goodnight everyone!
force: yes
state: present
- name: Remove secret foo
docker_secret:
name: foo
state: absent
'''
RETURN = '''
secret_id:
description:
- The ID assigned by Docker to the secret object.
returned: success and C(state == "present")
type: str
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
'''
import base64
import hashlib
import traceback
try:
from docker.errors import DockerException, APIError
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.docker.common import AnsibleDockerClient, DockerBaseClass, compare_generic
from ansible.module_utils._text import to_native, to_bytes
class SecretManager(DockerBaseClass):
def __init__(self, client, results):
super(SecretManager, self).__init__()
self.client = client
self.results = results
self.check_mode = self.client.check_mode
parameters = self.client.module.params
self.name = parameters.get('name')
self.state = parameters.get('state')
self.data = parameters.get('data')
if self.data is not None:
if parameters.get('data_is_b64'):
self.data = base64.b64decode(self.data)
else:
self.data = to_bytes(self.data)
self.labels = parameters.get('labels')
self.force = parameters.get('force')
self.data_key = None
def __call__(self):
if self.state == 'present':
self.data_key = hashlib.sha224(self.data).hexdigest()
self.present()
elif self.state == 'absent':
self.absent()
def get_secret(self):
''' Find an existing secret. '''
try:
secrets = self.client.secrets(filters={'name': self.name})
except APIError as exc:
self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
for secret in secrets:
if secret['Spec']['Name'] == self.name:
return secret
return None
def create_secret(self):
''' Create a new secret '''
secret_id = None
# We can't see the data after creation, so adding a label we can use for idempotency check
labels = {
'ansible_key': self.data_key
}
if self.labels:
labels.update(self.labels)
try:
if not self.check_mode:
secret_id = self.client.create_secret(self.name, self.data, labels=labels)
except APIError as exc:
self.client.fail("Error creating secret: %s" % to_native(exc))
if isinstance(secret_id, dict):
secret_id = secret_id['ID']
return secret_id
def present(self):
''' Handles state == 'present', creating or updating the secret '''
secret = self.get_secret()
if secret:
self.results['secret_id'] = secret['ID']
data_changed = False
attrs = secret.get('Spec', {})
if attrs.get('Labels', {}).get('ansible_key'):
if attrs['Labels']['ansible_key'] != self.data_key:
data_changed = True
labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
if data_changed or labels_changed or self.force:
# if something changed or force, delete and re-create the secret
self.absent()
secret_id = self.create_secret()
self.results['changed'] = True
self.results['secret_id'] = secret_id
else:
self.results['changed'] = True
self.results['secret_id'] = self.create_secret()
def absent(self):
''' Handles state == 'absent', removing the secret '''
secret = self.get_secret()
if secret:
try:
if not self.check_mode:
self.client.remove_secret(secret['ID'])
except APIError as exc:
self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
self.results['changed'] = True
def main():
argument_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['absent', 'present']),
data=dict(type='str', no_log=True),
data_is_b64=dict(type='bool', default=False),
labels=dict(type='dict'),
force=dict(type='bool', default=False)
)
required_if = [
('state', 'present', ['data'])
]
client = AnsibleDockerClient(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=required_if,
min_docker_version='2.1.0',
min_docker_api_version='1.25',
)
try:
results = dict(
changed=False,
secret_id=''
)
SecretManager(client, results)()
client.module.exit_json(**results)
except DockerException as e:
client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
gpl-3.0
| 1,511,846,390,148,133,000 | 29.238255 | 137 | 0.609255 | false | 3.934934 | false | false | false |
mrocklin/termpy
|
termpy/unification.py
|
1
|
2669
|
from functools import partial
from util import transitive_get as walk
from util import assoc
from variable import Var, var, isvar
import itertools as it
from ground import new, op, args, isleaf
################
# Reificiation #
################
def reify_generator(t, s):
return it.imap(partial(reify, s=s), t)
def reify_tuple(*args):
return tuple(reify_generator(*args))
def reify_list(*args):
return list(reify_generator(*args))
def reify_dict(d, s):
return dict((k, reify(v, s)) for k, v in d.items())
reify_dispatch = {
tuple: reify_tuple,
list: reify_list,
dict: reify_dict,
}
reify_isinstance_list = []
def reify(e, s):
""" Replace variables of expression with substitution
>>> from termpy.unification import reify, var
>>> x, y = var(), var()
>>> e = (1, x, (3, y))
>>> s = {x: 2, y: 4}
>>> reify(e, s)
(1, 2, (3, 4))
>>> e = {1: x, 3: (y, 5)}
>>> reify(e, s)
{1: 2, 3: (4, 5)}
"""
if isvar(e):
return reify(s[e], s) if e in s else e
elif type(e) in reify_dispatch:
return reify_dispatch[type(e)](e, s)
elif not isleaf(e):
new_op = reify(op(e), s)
new_args = reify(args(e), s)
return new(new_op, new_args)
else:
return e
###############
# Unification #
###############
def unify_seq(u, v, s):
if len(u) != len(v):
return False
for uu, vv in zip(u, v): # avoiding recursion
s = unify(uu, vv, s)
if s is False:
return False
return s
def unify_dict(u, v, s):
if len(u) != len(v):
return False
for key, uval in u.iteritems():
if key not in v:
return False
s = unify(uval, v[key], s)
if s is False:
return False
return s
unify_dispatch = {
(tuple, tuple): unify_seq,
(list, list): unify_seq,
(dict, dict): unify_dict,
}
def unify(u, v, s): # no check at the moment
""" Find substitution so that u == v while satisfying s
>>> from termpy.unification import unify, var
>>> x = var('x')
>>> unify((1, x), (1, 2), {})
{~x: 2}
"""
u = walk(u, s)
v = walk(v, s)
if u == v:
return s
elif isvar(u):
return assoc(s, u, v)
elif isvar(v):
return assoc(s, v, u)
types = (type(u), type(v))
if types in unify_dispatch:
return unify_dispatch[types](u, v, s)
elif not isleaf(u) and not isleaf(v):
s = unify(op(u), op(v), s)
if s is False:
return s
else:
return unify(args(u), args(v), s)
else:
return False
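# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how unify() and reify() compose; the variable names are arbitrary.
#   >>> x, y = var('x'), var('y')
#   >>> s = unify((x, 2), (1, y), {})   # -> {~x: 1, ~y: 2}
#   >>> reify((x, y), s)                # -> (1, 2)
# Note the "no check at the moment" remark above: there is no occurs-check, so
# unify(x, (1, x), {}) silently builds a cyclic substitution instead of failing.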
|
bsd-3-clause
| -958,728,437,775,557,600 | 22.619469 | 59 | 0.516298 | false | 3.012415 | false | false | false |
opennode/nodeconductor-assembly-waldur
|
src/waldur_core/monitoring/models.py
|
1
|
1936
|
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.translation import ugettext_lazy as _
from waldur_core.core.models import NameMixin
from waldur_core.monitoring.managers import (
ResourceItemManager,
ResourceSlaManager,
ResourceSlaStateTransitionManager,
)
class ScopeMixin(models.Model):
content_type = models.ForeignKey(on_delete=models.CASCADE, to=ContentType)
object_id = models.PositiveIntegerField()
scope = GenericForeignKey('content_type', 'object_id')
class Meta:
abstract = True
class ResourceItem(NameMixin, ScopeMixin):
value = models.FloatField()
objects = ResourceItemManager()
class Meta:
unique_together = ('name', 'content_type', 'object_id')
class ResourceSla(ScopeMixin):
period = models.CharField(max_length=10)
value = models.DecimalField(max_digits=11, decimal_places=4, null=True, blank=True)
agreed_value = models.DecimalField(
max_digits=11, decimal_places=4, null=True, blank=True
)
objects = ResourceSlaManager()
class Meta:
unique_together = ('period', 'content_type', 'object_id')
class ResourceSlaStateTransition(ScopeMixin):
period = models.CharField(max_length=10)
timestamp = models.IntegerField()
state = models.BooleanField(
default=False, help_text=_('If state is True resource became available')
)
objects = ResourceSlaStateTransitionManager()
class Meta:
unique_together = ('timestamp', 'period', 'content_type', 'object_id')
class MonitoringModelMixin(models.Model):
class Meta:
abstract = True
sla_items = GenericRelation('monitoring.ResourceSla')
monitoring_items = GenericRelation('monitoring.ResourceItem')
state_items = GenericRelation('monitoring.ResourceSlaStateTransition')
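# Hedged usage sketch (added for illustration; not part of the original module),
# assuming a hypothetical resource model in another app:
#
#   class Instance(MonitoringModelMixin, models.Model):
#       name = models.CharField(max_length=100)
#
# The mixin's GenericRelation fields then expose the monitoring rows as
# instance.sla_items, instance.monitoring_items and instance.state_items.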
|
mit
| -6,042,245,834,683,955,000 | 30.737705 | 87 | 0.728822 | false | 4.075789 | false | false | false |
LordFlashmeow/Collatz-Conjecture
|
Length_Highest_Manipulation.py
|
1
|
2253
|
def highest(start, stop):
begin = start
dict_max = {}
while begin <= stop:
current = set()
number = begin
if begin == 1:
number = 2
while number >= 1:
if number == 1:
max_num = int(max(current))
break
elif number % 2 == 0:
number /= 2
current.add(number)
else:
number = (number * 3) + 1
current.add(int(number))
if begin == 1:
dict_max[1] = 0
else:
dict_max[begin] = max_num
begin += 1
return dict_max
def longest(start, stop):
begin = start
dict_length = {1: 0}
while begin <= stop:
number = begin
numbers = set()
while number > 1:
if number % 2 == 0:
number /= 2
numbers.add(int(number))
else:
number = (number * 3) + 1
numbers.add(int(number))
dict_length[begin] = len(numbers)
begin += 1
return dict_length
def combined(start, stop,):
dict_length = longest(start, stop)
dict_max = highest(start, stop)
final_dict = {}
for key in (dict_length.keys() | dict_max.keys()):
if key in dict_length: final_dict.setdefault(key, []).append(dict_length[key])
if key in dict_max: final_dict.setdefault(key, []).append(dict_max[key])
return final_dict
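# Hedged example (added for illustration; not part of the original script):
# combined(1, 3) returns {1: [0, 0], 2: [1, 1], 3: [7, 16]}, i.e. each starting
# number maps to [number of steps recorded, highest value reached].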
start_num = int(input("Enter the number to start at "))
stop_num = int(input("Enter the number to end at "))
my_list = combined(start_num, stop_num)
export_yn = input("Do you want to export the set of values? (y/n) ")
if export_yn == "y":
filename = input("Enter the name of the file to save to: ") + ".csv"
# with open(filename, 'w') as file:
# [file.write('{0},{1}\n'.format(key, value)) for key, value in my_list.items()]
    with open(filename, "a") as file:
        for num in sorted(my_list):
            length, high = my_list[num]
            file.write("{0},{1},{2}\n".format(num, length, high))
|
gpl-3.0
| -3,609,880,505,250,593,300 | 26.814815 | 87 | 0.509987 | false | 3.761269 | false | false | false |
orashi/PaintsPytorch
|
dev_train.py
|
1
|
13516
|
import argparse
import os
import random
from math import log10
import scipy.stats as stats
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from torch.autograd import Variable, grad
from models.dev_model import *
from data.nvData import CreateDataLoader
parser = argparse.ArgumentParser()
parser.add_argument('--datarootC', required=True, help='path to colored dataset')
parser.add_argument('--datarootS', required=True, help='path to sketch dataset')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batchSize', type=int, default=16, help='input batch size')
parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network')
parser.add_argument('--cut', type=int, default=1, help='cut backup frequency')
parser.add_argument('--niter', type=int, default=700, help='number of epochs to train for')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--lrG', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--lrD', type=float, default=0.0001, help='learning rate, default=0.0001')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--optim', action='store_true', help='load optimizer\'s checkpoint')
parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')
parser.add_argument('--Diters', type=int, default=1, help='number of D iters per each G iter')
parser.add_argument('--manualSeed', type=int, default=2345, help='random seed to use. Default=1234')
parser.add_argument('--baseGeni', type=int, default=2500, help='start base of pure pair L1 loss')
parser.add_argument('--geni', type=int, default=0, help='continue gen image num')
parser.add_argument('--epoi', type=int, default=0, help='continue epoch num')
parser.add_argument('--env', type=str, default=None, help='tensorboard env')
parser.add_argument('--advW', type=float, default=0.0001, help='adversarial weight, default=0.0001')
parser.add_argument('--gpW', type=float, default=10, help='gradient penalty weight')
opt = parser.parse_args()
print(opt)
####### regular set up
if torch.cuda.is_available() and not opt.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
gen_iterations = opt.geni
try:
os.makedirs(opt.outf)
except OSError:
pass
# random seed setup # !!!!!
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
####### regular set up end
writer = SummaryWriter(log_dir=opt.env, comment='this is great')
dataloader = CreateDataLoader(opt)
netG = def_netG(ngf=opt.ngf)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
netD = def_netD(ndf=opt.ndf)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
netF = def_netF()
print(netF)
criterion_L1 = nn.L1Loss()
criterion_L2 = nn.MSELoss()
one = torch.FloatTensor([1])
mone = one * -1
fixed_sketch = torch.FloatTensor()
fixed_hint = torch.FloatTensor()
saber = torch.FloatTensor([0.485 - 0.5, 0.456 - 0.5, 0.406 - 0.5]).view(1, 3, 1, 1)
diver = torch.FloatTensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
if opt.cuda:
netD.cuda()
netG.cuda()
netF.cuda()
fixed_sketch, fixed_hint = fixed_sketch.cuda(), fixed_hint.cuda()
saber, diver = saber.cuda(), diver.cuda()
criterion_L1.cuda()
criterion_L2.cuda()
one, mone = one.cuda(), mone.cuda()
# setup optimizer
optimizerG = optim.Adam(netG.parameters(), lr=opt.lrG, betas=(opt.beta1, 0.9))
optimizerD = optim.Adam(netD.parameters(), lr=opt.lrD, betas=(opt.beta1, 0.9))
if opt.optim:
optimizerG.load_state_dict(torch.load('%s/optimG_checkpoint.pth' % opt.outf))
optimizerD.load_state_dict(torch.load('%s/optimD_checkpoint.pth' % opt.outf))
# schedulerG = lr_scheduler.ReduceLROnPlateau(optimizerG, mode='max', verbose=True, min_lr=0.0000005,
# patience=8) # 1.5*10^5 iter
# schedulerD = lr_scheduler.ReduceLROnPlateau(optimizerD, mode='max', verbose=True, min_lr=0.0000005,
# patience=8) # 1.5*10^5 iter
# schedulerG = lr_scheduler.MultiStepLR(optimizerG, milestones=[60, 120], gamma=0.1) # 1.5*10^5 iter
# schedulerD = lr_scheduler.MultiStepLR(optimizerD, milestones=[60, 120], gamma=0.1)
def calc_gradient_penalty(netD, real_data, fake_data):
# print "real_data: ", real_data.size(), fake_data.size()
alpha = torch.rand(opt.batchSize, 1, 1, 1)
# alpha = alpha.expand(opt.batchSize, real_data.nelement() / opt.batchSize).contiguous().view(opt.batchSize, 3, 64,
# 64)
alpha = alpha.cuda() if opt.cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if opt.cuda:
interpolates = interpolates.cuda()
interpolates = Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).cuda() if opt.cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * opt.gpW
return gradient_penalty
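# Hedged note (added for clarity; not in the original script): the function
# above computes the WGAN-GP term  gpW * E[(||grad_D(x_hat)||_2 - 1)^2], where
# x_hat is drawn uniformly on the segment between paired real and fake samples;
# in the training loop below it is added to the critic loss through its own
# backward() call before optimizerD.step().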
flag = 1
lower, upper = 0, 1
mu, sigma = 1, 0.01
maskS = opt.imageSize // 4
X = stats.truncnorm(
(lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
for epoch in range(opt.niter):
data_iter = iter(dataloader)
i = 0
while i < len(dataloader):
############################
# (1) Update D network
###########################
for p in netD.parameters(): # reset requires_grad
p.requires_grad = True # they are set to False below in netG update
for p in netG.parameters():
p.requires_grad = False # to avoid computation
# train the discriminator Diters times
Diters = opt.Diters
if gen_iterations < opt.baseGeni: # L1 stage
Diters = 0
j = 0
while j < Diters and i < len(dataloader):
j += 1
netD.zero_grad()
data = data_iter.next()
real_cim, real_vim, real_sim = data
i += 1
###############################
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],
0).cuda()
hint = torch.cat((real_vim * mask, mask), 1)
# train with fake
fake_cim = netG(Variable(real_sim, volatile=True), Variable(hint, volatile=True)).data
errD_fake = netD(Variable(torch.cat((fake_cim, real_sim), 1))).mean(0).view(1)
errD_fake.backward(one, retain_graph=True) # backward on score on real
errD_real = netD(Variable(torch.cat((real_cim, real_sim), 1))).mean(0).view(1)
errD_real.backward(mone, retain_graph=True) # backward on score on real
errD = errD_real - errD_fake
# gradient penalty
gradient_penalty = calc_gradient_penalty(netD, torch.cat([real_cim, real_sim], 1),
torch.cat([fake_cim, real_sim], 1))
gradient_penalty.backward()
optimizerD.step()
############################
# (2) Update G network
############################
if i < len(dataloader):
for p in netD.parameters():
p.requires_grad = False # to avoid computation
for p in netG.parameters():
p.requires_grad = True # to avoid computation
netG.zero_grad()
data = data_iter.next()
real_cim, real_vim, real_sim = data
i += 1
if opt.cuda:
real_cim, real_vim, real_sim = real_cim.cuda(), real_vim.cuda(), real_sim.cuda()
mask = torch.cat([torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(opt.batchSize)],
0).cuda()
hint = torch.cat((real_vim * mask, mask), 1)
if flag: # fix samples
writer.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=16))
writer.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=16))
writer.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=16))
vutils.save_image(real_cim.mul(0.5).add(0.5),
'%s/color_samples' % opt.outf + '.png')
vutils.save_image(real_sim.mul(0.5).add(0.5),
'%s/blur_samples' % opt.outf + '.png')
fixed_sketch.resize_as_(real_sim).copy_(real_sim)
fixed_hint.resize_as_(hint).copy_(hint)
flag -= 1
fake = netG(Variable(real_sim), Variable(hint))
if gen_iterations < opt.baseGeni:
contentLoss = criterion_L2(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),
netF(Variable((real_cim.mul(0.5) - saber) / diver)))
contentLoss.backward()
errG = contentLoss
# contentLoss = criterion_L1(fake, Variable(real_cim))
# contentLoss.backward()
# errG = contentLoss
else:
errG = netD(torch.cat((fake, Variable(real_sim)), 1)).mean(0).view(
1) * opt.advW # TODO: what if???
errG.backward(mone, retain_graph=True)
contentLoss = criterion_L2(netF((fake.mul(0.5) - Variable(saber)) / Variable(diver)),
netF(Variable((real_cim.mul(0.5) - saber) / diver)))
contentLoss.backward()
# contentLoss = criterion_L1(fake, Variable(real_cim))
# contentLoss.backward(retain_graph=True)
optimizerG.step()
############################
# (3) Report & 100 Batch checkpoint
############################
if gen_iterations < opt.baseGeni:
writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] content %f '
% (epoch, opt.niter, i, len(dataloader), gen_iterations, contentLoss.data[0]))
else:
writer.add_scalar('VGG MSE Loss', contentLoss.data[0], gen_iterations)
writer.add_scalar('wasserstein distance', errD.data[0], gen_iterations)
writer.add_scalar('errD_real', errD_real.data[0], gen_iterations)
writer.add_scalar('errD_fake', errD_fake.data[0], gen_iterations)
writer.add_scalar('Gnet loss toward real', errG.data[0], gen_iterations)
writer.add_scalar('gradient_penalty', gradient_penalty.data[0], gen_iterations)
print('[%d/%d][%d/%d][%d] errD: %f err_G: %f err_D_real: %f err_D_fake %f content loss %f'
% (epoch, opt.niter, i, len(dataloader), gen_iterations,
errD.data[0], errG.data[0], errD_real.data[0], errD_fake.data[0], contentLoss.data[0]))
if gen_iterations % 500 == 0:
fake = netG(Variable(fixed_sketch, volatile=True), Variable(fixed_hint, volatile=True))
writer.add_image('colorized imgs', vutils.make_grid(fake.data.mul(0.5).add(0.5), nrow=16),
gen_iterations)
if gen_iterations % 2000 == 0:
for name, param in netG.named_parameters():
writer.add_histogram('netG ' + name, param.clone().cpu().data.numpy(), gen_iterations)
for name, param in netD.named_parameters():
writer.add_histogram('netD ' + name, param.clone().cpu().data.numpy(), gen_iterations)
vutils.save_image(fake.data.mul(0.5).add(0.5),
'%s/fake_samples_gen_iter_%08d.png' % (opt.outf, gen_iterations))
gen_iterations += 1
# do checkpointing
if opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_only.pth' % opt.outf)
torch.save(netD.state_dict(), '%s/netD_epoch_only.pth' % opt.outf)
elif epoch % opt.cut == 0:
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
torch.save(optimizerG.state_dict(), '%s/optimG_checkpoint.pth' % opt.outf)
torch.save(optimizerD.state_dict(), '%s/optimD_checkpoint.pth' % opt.outf)
|
mit
| -7,141,511,444,784,469,000 | 44.204013 | 119 | 0.591299 | false | 3.328244 | false | false | false |
marcus-oscarsson/mxcube3
|
mxcube3/routes/ra.py
|
1
|
7501
|
# -*- coding: utf-8 -*-
import gevent
import logging
from flask import (
session,
jsonify,
Response,
request,
make_response,
copy_current_request_context,
)
from mxcube3 import socketio
from mxcube3 import mxcube
from mxcube3 import server
from mxcube3 import blcontrol
from mxcube3.core import loginutils
@server.route("/mxcube/api/v0.1/ra/request_control", methods=["POST"])
@server.restrict
def request_control():
"""
"""
@copy_current_request_context
def handle_timeout_gives_control(sid, timeout=30):
gevent.sleep(timeout)
if mxcube.TIMEOUT_GIVES_CONTROL:
user = loginutils.get_user_by_sid(sid)
# Pass control to user if still waiting
if user.get("requestsControl"):
toggle_operator(sid, "Timeout expired, you have control")
data = request.get_json()
remote_addr = loginutils.remote_addr()
# Is someone already asking for control
for observer in loginutils.get_observers():
if observer["requestsControl"] and observer["host"] != remote_addr:
msg = "Another user is already asking for control"
return make_response(msg, 409)
user = loginutils.get_user_by_sid(session.sid)
user["name"] = data["name"]
user["requestsControl"] = data["control"]
user["message"] = data["message"]
observers = loginutils.get_observers()
gevent.spawn(handle_timeout_gives_control, session.sid, timeout=10)
socketio.emit("observersChanged", observers, namespace="/hwr")
return make_response("", 200)
@server.route("/mxcube/api/v0.1/ra/take_control", methods=["POST"])
@server.restrict
def take_control():
"""
"""
# Already master do nothing
if loginutils.is_operator(session.sid):
return make_response("", 200)
# Not inhouse user so not allowed to take control by force,
# return error code
if not session["loginInfo"]["loginRes"]["Session"]["is_inhouse"]:
return make_response("", 409)
toggle_operator(session.sid, "You were given control")
return make_response("", 200)
@server.route("/mxcube/api/v0.1/ra/give_control", methods=["POST"])
@server.restrict
def give_control():
"""
"""
sid = request.get_json().get("sid")
toggle_operator(sid, "You were given control")
return make_response("", 200)
def toggle_operator(new_op_sid, message):
current_op = loginutils.get_operator()
new_op = loginutils.get_user_by_sid(new_op_sid)
loginutils.set_operator(new_op["sid"])
new_op["message"] = message
observers = loginutils.get_observers()
# Append the new data path so that it can be updated on the client
new_op["rootPath"] = blcontrol.beamline.session.get_base_image_directory()
# Current op might have logged out, while this is happening
if current_op:
current_op["rootPath"] = blcontrol.beamline.session.get_base_image_directory()
current_op["message"] = message
socketio.emit(
"setObserver", current_op, room=current_op["socketio_sid"], namespace="/hwr"
)
socketio.emit("observersChanged", observers, namespace="/hwr")
socketio.emit("setMaster", new_op, room=new_op["socketio_sid"], namespace="/hwr")
def remain_observer(observer_sid, message):
observer = loginutils.get_user_by_sid(observer_sid)
observer["message"] = message
socketio.emit(
"setObserver", observer, room=observer["socketio_sid"], namespace="/hwr"
)
@server.route("/mxcube/api/v0.1/ra", methods=["GET"])
@server.restrict
def observers():
"""
"""
data = {
"observers": loginutils.get_observers(),
"sid": session.sid,
"master": loginutils.is_operator(session.sid),
"observerName": loginutils.get_observer_name(),
"allowRemote": mxcube.ALLOW_REMOTE,
"timeoutGivesControl": mxcube.TIMEOUT_GIVES_CONTROL,
}
return jsonify(data=data)
@server.route("/mxcube/api/v0.1/ra/allow_remote", methods=["POST"])
@server.restrict
def allow_remote():
"""
"""
allow = request.get_json().get("allow")
if mxcube.ALLOW_REMOTE and allow == False:
socketio.emit("forceSignoutObservers", {}, namespace="/hwr")
mxcube.ALLOW_REMOTE = allow
return Response(status=200)
@server.route("/mxcube/api/v0.1/ra/timeout_gives_control", methods=["POST"])
@server.restrict
def timeout_gives_control():
"""
"""
control = request.get_json().get("timeoutGivesControl")
mxcube.TIMEOUT_GIVES_CONTROL = control
return Response(status=200)
def observer_requesting_control():
observer = None
for o in loginutils.get_observers():
if o["requestsControl"]:
observer = o
return observer
@server.route("/mxcube/api/v0.1/ra/request_control_response", methods=["POST"])
@server.restrict
def request_control_response():
"""
"""
data = request.get_json()
new_op = observer_requesting_control()
# Request was denied
if not data["giveControl"]:
remain_observer(new_op["sid"], data["message"])
else:
toggle_operator(new_op["sid"], data["message"])
new_op["requestsControl"] = False
return make_response("", 200)
@server.route("/mxcube/api/v0.1/ra/chat", methods=["POST"])
@server.restrict
def append_message():
message = request.get_json().get("message", "")
sid = request.get_json().get("sid", "")
if message and sid:
loginutils.append_message(message, sid)
return Response(status=200)
@server.route("/mxcube/api/v0.1/ra/chat", methods=["GET"])
@server.restrict
def get_all_mesages():
return jsonify({"messages": loginutils.get_all_messages()})
@socketio.on("connect", namespace="/hwr")
@server.ws_restrict
def connect():
user = loginutils.get_user_by_sid(session.sid)
# Make sure user is logged, session may have been closed i.e by timeout
if user:
user["socketio_sid"] = request.sid
# (Note: User is logged in if operator)
if loginutils.is_operator(session.sid):
loginutils.emit_pending_events()
if (
not blcontrol.beamline.queue_manager.is_executing()
and not loginutils.DISCONNECT_HANDLED
):
loginutils.DISCONNECT_HANDLED = True
socketio.emit("resumeQueueDialog", namespace="/hwr")
msg = "Client reconnected, Queue was previously stopped, asking "
msg += "client for action"
logging.getLogger("HWR").info(msg)
@socketio.on("disconnect", namespace="/hwr")
@server.ws_restrict
def disconnect():
if (
loginutils.is_operator(session.sid)
and blcontrol.beamline.queue_manager.is_executing()
):
loginutils.DISCONNECT_HANDLED = False
blcontrol.beamline.queue_manager.pause(True)
logging.getLogger("HWR").info("Client disconnected, pausing queue")
@socketio.on("setRaMaster", namespace="/hwr")
@server.ws_restrict
def set_master(data):
loginutils.emit_pending_events()
return session.sid
@socketio.on("setRaObserver", namespace="/hwr")
@server.ws_restrict
def set_observer(data):
name = data.get("name", "")
observers = loginutils.get_observers()
observer = loginutils.get_user_by_sid(session.sid)
if observer and name:
observer["name"] = name
socketio.emit("observerLogin", observer, include_self=False, namespace="/hwr")
socketio.emit("observersChanged", observers, namespace="/hwr")
return session.sid
|
gpl-2.0
| -4,123,180,664,764,723,000 | 26.47619 | 88 | 0.654313 | false | 3.539877 | false | false | false |
joshspeagle/frankenz
|
frankenz/bruteforce.py
|
1
|
23629
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Object used to fit data and compute PDFs using brute-force methods.
"""
from __future__ import (print_function, division)
import six
from six.moves import range
import sys
import os
import warnings
import math
import numpy as np
import warnings
from .pdf import *
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
__all__ = ["BruteForce"]
class BruteForce():
"""
Fits data and generates predictions using a simple brute-force approach.
"""
def __init__(self, models, models_err, models_mask):
"""
Load the model data into memory.
Parameters
----------
models : `~numpy.ndarray` of shape (Nmodel, Nfilt)
Model values.
models_err : `~numpy.ndarray` of shape (Nmodel, Nfilt)
Associated errors on the model values.
models_mask : `~numpy.ndarray` of shape (Nmodel, Nfilt)
Binary mask (0/1) indicating whether the model value was observed.
"""
# Initialize values.
self.models = models
self.models_err = models_err
self.models_mask = models_mask
self.NMODEL, self.NDIM = models.shape
self.fit_lnprior = None
self.fit_lnlike = None
self.fit_lnprob = None
self.fit_Ndim = None
self.fit_chi2 = None
self.fit_scale = None
self.fit_scale_err = None
def fit(self, data, data_err, data_mask, lprob_func=None,
lprob_args=None, lprob_kwargs=None, track_scale=False,
verbose=True):
"""
Fit all input models to the input data to compute the associated
log-posteriors.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Model values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
Ndata = len(data)
# Fit data.
for i, results in enumerate(self._fit(data, data_err, data_mask,
lprob_func=lprob_func,
lprob_args=lprob_args,
lprob_kwargs=lprob_kwargs,
track_scale=track_scale,
save_fits=True)):
if verbose:
sys.stderr.write('\rFitting object {0}/{1}'.format(i+1, Ndata))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
def _fit(self, data, data_err, data_mask, lprob_func=None,
lprob_args=None, lprob_kwargs=None, track_scale=False,
save_fits=True):
"""
Internal generator used to compute fits.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Model values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
save_fits : bool, optional
Whether to save fits internally while computing predictions.
Default is `True`.
Returns
-------
results : tuple
Output of `lprob_func` yielded from the generator.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
Ndata = len(data)
Nmodels = self.NMODEL
self.NDATA = Ndata
if save_fits:
self.fit_lnprior = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnlike = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnprob = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_Ndim = np.zeros((Ndata, Nmodels), dtype='int')
self.fit_chi2 = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_scale = np.ones((Ndata, Nmodels), dtype='float')
self.fit_scale_err = np.zeros((Ndata, Nmodels), dtype='float')
# Fit data.
for i, (x, xe, xm) in enumerate(zip(data, data_err, data_mask)):
results = lprob_func(x, xe, xm, self.models, self.models_err,
self.models_mask, *lprob_args, **lprob_kwargs)
if save_fits:
self.fit_lnprior[i] = results[0] # ln(prior)
self.fit_lnlike[i] = results[1] # ln(like)
self.fit_lnprob[i] = results[2] # ln(prob)
self.fit_Ndim[i] = results[3] # dimensionality of fit
self.fit_chi2[i] = results[4] # chi2
if track_scale:
self.fit_scale[i] = results[5] # scale-factor
self.fit_scale_err[i] = results[6] # std(s)
yield results
def predict(self, model_labels, model_label_errs, label_dict=None,
label_grid=None, logwt=None, kde_args=None, kde_kwargs=None,
return_gof=False, verbose=True):
"""
Compute photometric 1-D predictions to the target distribution.
Parameters
----------
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the data values.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
logwt : `~numpy.ndarray` of shape (Ndata, Nmodel), optional
A new set of log-weights used to compute the marginalized 1-D
PDFs in place of the log-probability.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
return_gof : bool, optional
Whether to return a tuple containing the ln(MAP) and
ln(evidence) values for the predictions
along with the pdfs. Default is `False`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
Returns
-------
pdfs : `~numpy.ndarray` of shape (Nobj, Ngrid)
Collection of 1-D PDFs for each object.
(lmap, levid) : 2-tuple of `~numpy.ndarray` with shape (Nobj), optional
Set of ln(MAP) and ln(evidence) values for each object.
"""
# Initialize values.
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if logwt is None:
logwt = self.fit_lnprob
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
if self.fit_lnprob is None and logwt is None:
raise ValueError("Fits have not been computed and weights have "
"not been provided.")
if label_dict is not None:
Nx = label_dict.Ngrid
else:
Nx = len(label_grid)
Ndata = self.NDATA
pdfs = np.zeros((Ndata, Nx))
if return_gof:
lmap = np.zeros(Ndata)
levid = np.zeros(Ndata)
# Compute PDFs.
for i, res in enumerate(self._predict(model_labels, model_label_errs,
label_dict=label_dict,
label_grid=label_grid,
logwt=logwt, kde_args=kde_args,
kde_kwargs=kde_kwargs)):
pdf, gof = res
pdfs[i] = pdf
if return_gof:
lmap[i], levid[i] = gof
if verbose:
sys.stderr.write('\rGenerating PDF {0}/{1}'
.format(i+1, Ndata))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
if return_gof:
return pdfs, (lmap, levid)
else:
return pdfs
def _predict(self, model_labels, model_label_errs, label_dict=None,
label_grid=None, logwt=None, kde_args=None, kde_kwargs=None):
"""
Internal generator used to compute photometric 1-D predictions.
Parameters
----------
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the data values.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
logwt : `~numpy.ndarray` of shape (Ndata, Nmodel), optional
A new set of log-weights used to compute the marginalized 1-D
PDFs in place of the log-posterior.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
Returns
-------
pdf : `~numpy.ndarray` of shape (Ngrid)
1-D PDF yielded by the generator.
(lmap, levid) : 2-tuple of floats
ln(MAP) and ln(evidence) values yielded by the generator.
"""
# Initialize values.
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if logwt is None:
logwt = self.fit_lnprob
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
if label_dict is not None:
y_idx, y_std_idx = label_dict.fit(model_labels, model_label_errs)
# Generate PDFs.
for i, lwt in enumerate(logwt):
lmap, levid = max(lwt), logsumexp(lwt)
wt = np.exp(lwt - levid)
if label_dict is not None:
# Use dictionary if available.
pdf = gauss_kde_dict(label_dict, y_idx=y_idx,
y_std_idx=y_std_idx, y_wt=wt,
*kde_args, **kde_kwargs)
else:
# Otherwise just use KDE.
pdf = gauss_kde(model_labels, model_label_errs, label_grid,
y_wt=wt, *kde_args, **kde_kwargs)
pdf /= pdf.sum()
yield pdf, (lmap, levid)
def fit_predict(self, data, data_err, data_mask, model_labels,
model_label_errs, lprob_func=None, label_dict=None,
label_grid=None, kde_args=None, kde_kwargs=None,
lprob_args=None, lprob_kwargs=None, return_gof=False,
track_scale=False, verbose=True, save_fits=True):
"""
Fit all input models to the input data to compute the associated
log-posteriors and 1-D predictions.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Model values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the data values.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
return_gof : bool, optional
Whether to return a tuple containing the ln(MAP) and
ln(evidence) values for the predictions
along with the pdfs. Default is `False`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
verbose : bool, optional
Whether to print progress to `~sys.stderr`. Default is `True`.
save_fits : bool, optional
Whether to save fits internally while computing predictions.
Default is `True`.
Returns
-------
pdfs : `~numpy.ndarray` of shape (Nobj, Ngrid)
Collection of 1-D PDFs for each object.
(lmap, levid) : 2-tuple of `~numpy.ndarray` with shape (Nobj), optional
Set of ln(MAP) and ln(evidence) values for each object.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
if label_dict is not None:
Nx = label_dict.Ngrid
else:
Nx = len(label_grid)
Ndata = len(data)
pdfs = np.zeros((Ndata, Nx))
if return_gof:
lmap = np.zeros(Ndata)
levid = np.zeros(Ndata)
# Generate predictions.
for i, res in enumerate(self._fit_predict(data, data_err, data_mask,
model_labels,
model_label_errs,
lprob_func=lprob_func,
label_dict=label_dict,
label_grid=label_grid,
kde_args=kde_args,
kde_kwargs=kde_kwargs,
lprob_args=lprob_args,
lprob_kwargs=lprob_kwargs,
track_scale=track_scale,
save_fits=save_fits)):
pdf, gof = res
pdfs[i] = pdf
if return_gof:
lmap[i], levid[i] = gof # save gof metrics
if verbose:
sys.stderr.write('\rGenerating PDF {0}/{1}'
.format(i+1, Ndata))
sys.stderr.flush()
if verbose:
sys.stderr.write('\n')
sys.stderr.flush()
if return_gof:
return pdfs, (lmap, levid)
else:
return pdfs
def _fit_predict(self, data, data_err, data_mask, model_labels,
model_label_errs, lprob_func=None, label_dict=None,
label_grid=None, kde_args=None, kde_kwargs=None,
lprob_args=None, lprob_kwargs=None,
track_scale=False, save_fits=True):
"""
Internal generator used to fit and compute predictions.
Parameters
----------
data : `~numpy.ndarray` of shape (Ndata, Nfilt)
Model values.
data_err : `~numpy.ndarray` of shape (Ndata, Nfilt)
Associated errors on the data values.
data_mask : `~numpy.ndarray` of shape (Ndata, Nfilt)
Binary mask (0/1) indicating whether the data value was observed.
model_labels : `~numpy.ndarray` of shape (Nmodel)
Model values.
model_label_errs : `~numpy.ndarray` of shape (Nmodel)
Associated errors on the data values.
lprob_func : str or func, optional
Log-posterior function to be used. Must return ln(prior), ln(like),
ln(post), Ndim, chi2, and (optionally) scale and std(scale).
If not provided, `~frankenz.pdf.logprob` will be used.
label_dict : `~frankenz.pdf.PDFDict` object, optional
Dictionary of pre-computed stationary kernels. If provided,
:meth:`~frankenz.pdf.gauss_kde_dict` will be used for KDE.
label_grid : `~numpy.ndarray` of shape (Ngrid), optional
Grid points to evaluate the 1-D PDFs over. Only used when
`label_dict` is not provided, at which point
:meth:`~frankenz.pdf.gauss_kde` will be used for KDE.
kde_args : args, optional
Arguments to be passed to the KDE function.
kde_kwargs : kwargs, optional
Keyword arguments to be passed to the KDE function.
lprob_args : args, optional
Arguments to be passed to `lprob_func`.
lprob_kwargs : kwargs, optional
Keyword arguments to be passed to `lprob_func`.
track_scale : bool, optional
Whether `lprob_func` also returns the scale-factor. Default is
`False`.
save_fits : bool, optional
Whether to save fits internally while computing predictions.
Default is `True`.
Returns
-------
pdfs : `~numpy.ndarray` of shape (Ngrid)
1-D PDF for each object yielded by the generator.
(lmap, levid) : 2-tuple of floats
ln(MAP) and ln(evidence) values for each object.
"""
# Initialize values.
if lprob_func is None:
lprob_func = logprob
if lprob_args is None:
lprob_args = []
if lprob_kwargs is None:
lprob_kwargs = dict()
if kde_args is None:
kde_args = []
if kde_kwargs is None:
kde_kwargs = dict()
if label_dict is None and label_grid is None:
raise ValueError("`label_dict` or `label_grid` must be specified.")
Ndata = len(data)
Nmodels = self.NMODEL
if save_fits:
self.fit_lnprior = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnlike = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_lnprob = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_Ndim = np.zeros((Ndata, Nmodels), dtype='int')
self.fit_chi2 = np.zeros((Ndata, Nmodels), dtype='float')
self.fit_scale = np.ones((Ndata, Nmodels), dtype='float')
self.fit_scale_err = np.zeros((Ndata, Nmodels), dtype='float')
self.NDATA = Ndata
if label_dict is not None:
y_idx, y_std_idx = label_dict.fit(model_labels, model_label_errs)
# Run generator.
for i, (x, xe, xm) in enumerate(zip(data, data_err, data_mask)):
# Compute fit.
results = lprob_func(x, xe, xm, self.models, self.models_err,
self.models_mask, *lprob_args, **lprob_kwargs)
if save_fits:
self.fit_lnprior[i] = results[0] # ln(prior)
self.fit_lnlike[i] = results[1] # ln(like)
self.fit_lnprob[i] = results[2] # ln(prob)
self.fit_Ndim[i] = results[3] # dimensionality of fit
self.fit_chi2[i] = results[4] # chi2
if track_scale:
self.fit_scale[i] = results[5] # scale-factor
self.fit_scale_err[i] = results[6] # std(s)
lnprob = results[2]
# Compute PDF and GOF metrics.
lmap, levid = max(lnprob), logsumexp(lnprob)
wt = np.exp(lnprob - levid)
if label_dict is not None:
pdf = gauss_kde_dict(label_dict, y_idx=y_idx,
y_std_idx=y_std_idx, y_wt=wt,
*kde_args, **kde_kwargs)
else:
pdf = gauss_kde(model_labels, model_label_errs,
label_grid, y_wt=wt,
*kde_args, **kde_kwargs)
pdf /= pdf.sum()
yield pdf, (lmap, levid)
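# Hedged usage sketch (added for illustration; not part of the original module).
# The array names below are hypothetical placeholders for (Nobj, Nfilt)
# photometry arrays and a 1-D label grid; kept as comments so importing the
# module is unaffected.
#
#   bf = BruteForce(models, models_err, models_mask)
#   pdfs, (lmap, levid) = bf.fit_predict(data, data_err, data_mask,
#                                        model_labels, model_label_errs,
#                                        label_grid=zgrid, return_gof=True)
#   zbest = zgrid[pdfs.argmax(axis=1)]   # crude point estimates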
|
mit
| -5,926,666,856,018,205,000 | 36.44691 | 79 | 0.534936 | false | 4.001524 | false | false | false |
archatas/whoosh
|
whoosh/highlight.py
|
1
|
17889
|
#===============================================================================
# Copyright 2008 Matt Chaput
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
"""The highlight module contains classes and functions for displaying short
excerpts from hit documents in the search results you present to the user, with
query terms highlighted.
"""
from __future__ import division
from heapq import nlargest
from cgi import escape as htmlescape
from whoosh.util.anyall import *
# Fragment object
class Fragment(object):
"""Represents a fragment (extract) from a hit document. This object is
mainly used to keep track of the start and end points of the fragment; it
does not contain the text of the fragment or do much else.
"""
def __init__(self, tokens, charsbefore=0, charsafter=0, textlen=999999):
"""
:param tokens: list of the Token objects in the fragment.
:param charsbefore: approx. how many characters before the start of the
first matched term to include in the fragment.
:param charsafter: approx. how many characters after the end of the
last matched term to include in the fragment.
:param textlen: length in characters of the document text.
"""
#: index of the first character of the fragment in the original
# document
self.startchar = max(0, tokens[0].startchar - charsbefore)
#: index after the last character of the fragment in the original
#document
self.endchar = min(textlen, tokens[-1].endchar + charsafter)
self.matches = [t for t in tokens if t.matched]
self.matched_terms = frozenset(t.text for t in self.matches)
def __len__(self):
return self.endchar - self.startchar
def overlaps(self, fragment):
sc = self.startchar
ec = self.endchar
fsc = fragment.startchar
fec = fragment.endchar
return (fsc > sc and fsc < ec) or (fec > sc and fec < ec)
def overlapped_length(self, fragment):
sc = self.startchar
ec = self.endchar
fsc = fragment.startchar
fec = fragment.endchar
return max(ec, fec) - min(sc, fsc)
    def has_matches(self):
        return bool(self.matches)
# Filters
def copyandmatchfilter(termset, tokens):
for t in tokens:
t = t.copy()
t.matched = t.text in termset
yield t
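# Hedged composition sketch (added for illustration; not part of the original
# module): the pieces in this file are typically wired together roughly as
#
#   tokens  = copyandmatchfilter(termset, analyzer(text, chars=True))
#   frags   = list(fragmenter(text, tokens))
#   best    = nlargest(3, frags, key=BasicFragmentScorer)
#   excerpt = formatter(text, best)
#
# where `analyzer`, `termset` and `formatter` are supplied by the caller; the
# exact driver function is not shown in this excerpt.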
# Fragmenters
def NullFragmenter(text, tokens):
"""Doesn't fragment the token stream. This object just returns the entire
stream as one "fragment". This is useful if you want to highlight the
entire text.
"""
tokens = list(tokens)
before = after = 0
if tokens:
before = tokens[0].startchar
after = len(text) - tokens[-1].endchar
return [Fragment(tokens, charsbefore=before, charsafter=after)]
class SimpleFragmenter(object):
"""Simply splits the text into roughly equal sized chunks.
"""
def __init__(self, size=70):
"""
:param size: size (in characters) to chunk to. The chunking is based on
tokens, so the fragments will usually be smaller.
"""
self.size = size
def __call__(self, text, tokens):
size = self.size
first = None
frag = []
for t in tokens:
if first is None:
first = t.startchar
if t.endchar - first > size:
first = None
if frag:
yield Fragment(frag)
frag = []
frag.append(t)
if frag:
yield Fragment(frag)
class SentenceFragmenter(object):
"""Breaks the text up on sentence end punctuation characters
(".", "!", or "?"). This object works by looking in the original text for a
sentence end as the next character after each token's 'endchar'.
When highlighting with this fragmenter, you should use an analyzer that
does NOT remove stop words, for example::
sa = StandardAnalyzer(stoplist=None)
"""
def __init__(self, maxchars=200, sentencechars=".!?"):
"""
:param maxchars: The maximum number of characters allowed in a fragment.
"""
self.maxchars = maxchars
self.sentencechars = frozenset(sentencechars)
def __call__(self, text, tokens):
maxchars = self.maxchars
sentencechars = self.sentencechars
textlen = len(text)
first = None
frag = []
for t in tokens:
if first is None:
first = t.startchar
endchar = t.endchar
if endchar - first > maxchars:
first = None
if frag:
yield Fragment(frag)
frag = []
frag.append(t)
if frag and endchar < textlen and text[endchar] in sentencechars:
# Don't break for two periods in a row (e.g. ignore "...")
if endchar + 1 < textlen and text[endchar + 1] in sentencechars:
continue
yield Fragment(frag, charsafter=0)
frag = []
first = None
if frag:
yield Fragment(frag)
class ContextFragmenter(object):
"""Looks for matched terms and aggregates them with their surrounding
context.
This fragmenter only yields fragments that contain matched terms.
"""
def __init__(self, termset, maxchars=200, surround=20):
"""
:param termset: A collection (probably a set or frozenset) containing
the terms you want to match to token.text attributes.
:param maxchars: The maximum number of characters allowed in a
fragment.
:param surround: The number of extra characters of context to add both
before the first matched term and after the last matched term.
"""
self.maxchars = maxchars
self.charsbefore = self.charsafter = surround
def __call__(self, text, tokens):
maxchars = self.maxchars
charsbefore = self.charsbefore
charsafter = self.charsafter
current = []
currentlen = 0
countdown = -1
for t in tokens:
if t.matched:
countdown = charsafter
current.append(t)
length = t.endchar - t.startchar
currentlen += length
if countdown >= 0:
countdown -= length
if countdown < 0 or currentlen >= maxchars:
yield Fragment(current)
current = []
currentlen = 0
else:
while current and currentlen > charsbefore:
t = current.pop(0)
currentlen -= t.endchar - t.startchar
if countdown >= 0:
yield Fragment(current)
#class VectorFragmenter(object):
# def __init__(self, termmap, maxchars=200, charsbefore=20, charsafter=20):
# """
# :param termmap: A dictionary mapping the terms you're looking for to
# lists of either (posn, startchar, endchar) or
# (posn, startchar, endchar, boost) tuples.
# :param maxchars: The maximum number of characters allowed in a fragment.
# :param charsbefore: The number of extra characters of context to add before
# the first matched term.
# :param charsafter: The number of extra characters of context to add after
# the last matched term.
# """
#
# self.termmap = termmap
# self.maxchars = maxchars
# self.charsbefore = charsbefore
# self.charsafter = charsafter
#
# def __call__(self, text, tokens):
# maxchars = self.maxchars
# charsbefore = self.charsbefore
# charsafter = self.charsafter
# textlen = len(text)
#
# vfrags = []
# for term, data in self.termmap.iteritems():
# if len(data) == 3:
# t = Token(startchar = data[1], endchar = data[2])
# elif len(data) == 4:
# t = Token(startchar = data[1], endchar = data[2], boost = data[3])
# else:
# raise ValueError(repr(data))
#
# newfrag = VFragment([t], charsbefore, charsafter, textlen)
# added = False
#
# for vf in vfrags:
# if vf.overlaps(newfrag) and vf.overlapped_length(newfrag) < maxchars:
# vf.merge(newfrag)
# added = True
# break
# Fragment scorers
def BasicFragmentScorer(f):
# Add up the boosts for the matched terms in this passage
score = sum(t.boost for t in f.matches)
# Favor diversity: multiply score by the number of separate
# terms matched
score *= len(f.matched_terms) * 100
return score
# Fragment sorters
def SCORE(fragment):
"Sorts higher scored passages first."
return None
def FIRST(fragment):
"Sorts passages from earlier in the document first."
return fragment.startchar
def LONGER(fragment):
"Sorts longer passages first."
return 0 - len(fragment)
def SHORTER(fragment):
"Sort shorter passages first."
return len(fragment)
# Formatters
class UppercaseFormatter(object):
"""Returns a string in which the matched terms are in UPPERCASE.
"""
def __init__(self, between="..."):
"""
:param between: the text to add between fragments.
"""
self.between = between
def _format_fragment(self, text, fragment):
output = []
index = fragment.startchar
for t in fragment.matches:
if t.startchar > index:
output.append(text[index:t.startchar])
ttxt = text[t.startchar:t.endchar]
if t.matched: ttxt = ttxt.upper()
output.append(ttxt)
index = t.endchar
output.append(text[index:fragment.endchar])
return "".join(output)
def __call__(self, text, fragments):
return self.between.join((self._format_fragment(text, fragment)
for fragment in fragments))
class HtmlFormatter(object):
"""Returns a string containing HTML formatting around the matched terms.
This formatter wraps matched terms in an HTML element with two class names.
The first class name (set with the constructor argument ``classname``) is
the same for each match. The second class name (set with the constructor
argument ``termclass``) is different depending on which term matched. This
allows you to give different formatting (for example, different background
colors) to the different terms in the excerpt.
>>> hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
>>> hf(mytext, myfragments)
"The <span class="match term0">template</span> <span class="match term1">geometry</span> is..."
This object maintains a dictionary mapping terms to HTML class names (e.g.
``term0`` and ``term1`` above), so that multiple excerpts will use the same
class for the same term. If you want to re-use the same HtmlFormatter
object with different searches, you should call HtmlFormatter.clear()
between searches to clear the mapping.
"""
template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'
def __init__(self, tagname="strong", between="...",
classname="match", termclass="term", maxclasses=5,
attrquote='"'):
"""
:param tagname: the tag to wrap around matching terms.
:param between: the text to add between fragments.
:param classname: the class name to add to the elements wrapped around
matching terms.
:param termclass: the class name prefix for the second class which is
different for each matched term.
:param maxclasses: the maximum number of term classes to produce. This
limits the number of classes you have to define in CSS by recycling
term class names. For example, if you set maxclasses to 3 and have
5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,
``term2``, ``term0``, ``term1``.
"""
self.between = between
self.tagname = tagname
self.classname = classname
self.termclass = termclass
self.attrquote = attrquote
self.maxclasses = maxclasses
self.seen = {}
def _format_fragment(self, text, fragment, seen):
htmlclass = " ".join((self.classname, self.termclass))
output = []
index = fragment.startchar
for t in fragment.matches:
if t.startchar > index:
output.append(text[index:t.startchar])
ttxt = htmlescape(text[t.startchar:t.endchar])
if t.matched:
if t.text in seen:
termnum = seen[t.text]
else:
termnum = len(seen) % self.maxclasses
seen[t.text] = termnum
ttxt = self.template % {"tag": self.tagname,
"q": self.attrquote,
"cls": htmlclass,
"t": ttxt, "tn": termnum}
output.append(ttxt)
index = t.endchar
if index < fragment.endchar:
output.append(text[index:fragment.endchar])
return "".join(output)
def __call__(self, text, fragments):
seen = self.seen
return self.between.join(self._format_fragment(text, fragment, seen)
for fragment in fragments)
def clear(self):
"""Clears the dictionary mapping terms to HTML classnames.
"""
self.seen = {}
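# Hedged sketch (added for illustration): re-using one HtmlFormatter across
# searches, as the class docstring recommends.  my_analyzer, my_fragmenter and
# the text/terms variables are placeholders.
#
#   hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
#   excerpt1 = highlight(text1, terms1, my_analyzer, my_fragmenter, hf)
#   hf.clear()  # reset the term -> "termN" class mapping before a new search
#   excerpt2 = highlight(text2, terms2, my_analyzer, my_fragmenter, hf)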
class GenshiFormatter(object):
"""Returns a Genshi event stream containing HTML formatting around the
matched terms.
"""
def __init__(self, qname="strong", between="..."):
"""
:param qname: the QName for the tag to wrap around matched terms.
:param between: the text to add between fragments.
"""
self.qname = qname
self.between = between
from genshi.core import START, END, TEXT, Attrs, Stream #@UnresolvedImport
self.START, self.END, self.TEXT = START, END, TEXT
self.Attrs, self.Stream = Attrs, Stream
def _add_text(self, text, output):
if output and output[-1][0] == self.TEXT:
output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])
else:
output.append((self.TEXT, text, (None, -1, -1)))
def _format_fragment(self, text, fragment):
START, TEXT, END, Attrs = self.START, self.TEXT, self.END, self.Attrs
qname = self.qname
output = []
index = fragment.startchar
lastmatched = False
for t in fragment.matches:
if t.startchar > index:
if lastmatched:
output.append((END, qname, (None, -1, -1)))
lastmatched = False
self._add_text(text[index:t.startchar], output)
ttxt = text[t.startchar:t.endchar]
if not lastmatched:
output.append((START, (qname, Attrs()), (None, -1, -1)))
lastmatched = True
output.append((TEXT, ttxt, (None, -1, -1)))
index = t.endchar
if lastmatched:
output.append((END, qname, (None, -1, -1)))
return output
def __call__(self, text, fragments):
output = []
first = True
for fragment in fragments:
if not first:
self._add_text(self.between, output)
first = False
output += self._format_fragment(text, fragment)
return self.Stream(output)
# Highlighting
def top_fragments(text, terms, analyzer, fragmenter, top=3,
scorer=BasicFragmentScorer, minscore=1):
termset = frozenset(terms)
tokens = copyandmatchfilter(termset, analyzer(text, chars=True,
keeporiginal=True))
scored_frags = nlargest(top, ((scorer(f), f)
for f in fragmenter(text, tokens)))
return [sf for score, sf in scored_frags if score > minscore]
def highlight(text, terms, analyzer, fragmenter, formatter, top=3,
scorer=BasicFragmentScorer, minscore=1,
order=FIRST):
fragments = top_fragments(text, terms, analyzer, fragmenter,
top=top, minscore=minscore)
fragments.sort(key=order)
return formatter(text, fragments)
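# Hedged end-to-end sketch (illustrative only): names starting with my_ are
# placeholders for objects created elsewhere, e.g. a whoosh-style analyzer and
# one of the fragmenters defined above.
#
#   terms = frozenset([u"render", u"shade"])
#   excerpt = highlight(document_text, terms, my_analyzer, my_fragmenter,
#                       UppercaseFormatter(), top=3)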
if __name__ == '__main__':
pass
|
apache-2.0
| -1,190,348,764,071,662,300 | 33.335893 | 99 | 0.565711 | false | 4.336727 | false | false | false |
JensTimmerman/easybuild-easyblocks
|
easybuild/easyblocks/n/ncl.py
|
1
|
6733
|
##
# Copyright 2009-2012 Ghent University
# Copyright 2009-2012 Stijn De Weirdt
# Copyright 2010 Dries Verdegem
# Copyright 2010-2012 Kenneth Hoste
# Copyright 2011 Pieter De Baets
# Copyright 2011-2012 Jens Timmerman
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NCL, implemented as an easyblock
"""
import fileinput
import os
import re
import sys
from distutils.version import LooseVersion
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.filetools import run_cmd
from easybuild.tools.modules import get_software_root, get_software_version
class EB_NCL(EasyBlock):
"""Support for building/installing NCL."""
def configure_step(self):
"""Configure build:
- create Makefile.ini using make and run ymake script to create config file
- patch config file with correct settings, and add missing config entries
- create config/Site.local file to avoid interactive install
- generate Makefile using config/ymkmf script
-
"""
try:
os.chdir('config')
except OSError, err:
self.log.error("Failed to change to the 'config' dir: %s" % err)
cmd = "make -f Makefile.ini"
run_cmd(cmd, log_all=True, simple=True)
cmd = "./ymake -config $PWD"
run_cmd(cmd, log_all=True, simple=True)
# figure out name of config file
cfg_regexp = re.compile('^\s*SYSTEM_INCLUDE\s*=\s*"(.*)"\s*$', re.M)
f = open("Makefile", "r")
txt = f.read()
f.close()
cfg_filename = cfg_regexp.search(txt).group(1)
# adjust config file as needed
ctof_libs = ''
ifort = get_software_root('ifort')
if ifort:
if LooseVersion(get_software_version('ifort')) < LooseVersion('2011.4'):
ctof_libs = '-lm -L%s/lib/intel64 -lifcore -lifport' % ifort
else:
ctof_libs = '-lm -L%s/compiler/lib/intel64 -lifcore -lifport' % ifort
elif get_software_root('GCC'):
ctof_libs = '-lgfortran -lm'
macrodict = {
'CCompiler': os.getenv('CC'),
'FCompiler': os.getenv('F77'),
'CcOptions': '-ansi %s' % os.getenv('CFLAGS'),
'FcOptions': os.getenv('FFLAGS'),
'COptimizeFlag': os.getenv('CFLAGS'),
'FOptimizeFlag': os.getenv('FFLAGS'),
'ExtraSysLibraries': os.getenv('LDFLAGS'),
'CtoFLibraries': ctof_libs
}
# replace config entries that are already there
for line in fileinput.input(cfg_filename, inplace=1, backup='%s.orig' % cfg_filename):
for (key, val) in macrodict.items():
regexp = re.compile("(#define %s\s*).*" % key)
match = regexp.search(line)
if match:
line = "#define %s %s\n" % (key, val)
macrodict.pop(key)
sys.stdout.write(line)
# add remaining config entries
f = open(cfg_filename, "a")
for (key, val) in macrodict.items():
f.write("#define %s %s\n" % (key, val))
f.close()
f = open(cfg_filename, "r")
self.log.debug("Contents of %s: %s" % (cfg_filename, f.read()))
f.close()
# configure
try:
os.chdir(self.cfg['start_dir'])
except OSError, err:
self.log.error("Failed to change to the build dir %s: %s" % (self.cfg['start_dir'], err))
# instead of running the Configure script that asks a zillion questions,
# let's just generate the config/Site.local file ourselves...
# order of deps is important
# HDF needs to go after netCDF, because both have a netcdf.h include file
deps = ["HDF5", "JasPer", "netCDF", "HDF", "g2lib", "g2clib", "Szip"]
libs = ''
includes = ''
for dep in deps:
root = get_software_root(dep)
if not root:
self.log.error('%s not available' % dep)
libs += ' -L%s/lib ' % root
includes += ' -I%s/include ' % root
cfgtxt="""#ifdef FirstSite
#endif /* FirstSite */
#ifdef SecondSite
#define YmakeRoot %(installdir)s
#define LibSearch %(libs)s
#define IncSearch %(includes)s
#define BuildNCL 1
#define HDFlib
#define HDFEOSlib
#define UdUnitslib
#define BuildGRIB2 1
#define BuildRasterHDF 0
#define BuildHDF4 0
#define BuildTRIANGLE 0
#define BuildUdunits 0
#define BuildHDFEOS 0
#define BuildHDFEOS5 0
#endif /* SecondSite */
""" % {
'installdir': self.installdir,
'libs': libs,
'includes': includes
}
f = open("config/Site.local", "w")
f.write(cfgtxt)
f.close()
# generate Makefile
cmd = "./config/ymkmf"
run_cmd(cmd, log_all=True, simple=True)
def build_step(self):
"""Building is done in install_step."""
pass
def install_step(self):
"""Build in install dir using build_step."""
cmd = "make Everything"
run_cmd(cmd, log_all=True, simple=True)
def sanity_check_step(self):
"""
Custom sanity check for NCL
"""
custom_paths = {
'files': ["bin/ncl", "lib/libncl.a", "lib/libncarg.a"],
'dirs': ["include/ncarg"]
}
super(EB_NCL, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Set NCARG_ROOT environment variable in module."""
txt = super(EB_NCL, self).make_module_extra()
txt += "setenv\tNCARG_ROOT\t$root\n"
return txt
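# Hedged illustration (not part of this easyblock): a minimal easyconfig that
# would exercise it.  All names, versions and dependencies below are made up
# for the example; the easyblock is normally selected automatically from the
# software name.
#
#   name = 'NCL'
#   version = '6.1.0'
#   homepage = 'http://www.ncl.ucar.edu'
#   description = "NCAR Command Language"
#   toolchain = {'name': 'goolf', 'version': '1.4.10'}
#   sources = ['ncl_ncarg-%(version)s.tar.gz']
#   dependencies = [('netCDF', '4.2'), ('HDF5', '1.8.9'), ('JasPer', '1.900.1')]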
|
gpl-2.0
| 1,901,864,687,388,564 | 32.167488 | 101 | 0.596168 | false | 3.623789 | true | false | false |
detly/dumat
|
dumat/cubicsuperpath.py
|
1
|
5417
|
#!/usr/bin/env python
# Copyright (C) 2005 Aaron Spike, [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from dumat import simplepath
from math import *
def matprod(mlist):
prod=mlist[0]
for m in mlist[1:]:
a00=prod[0][0]*m[0][0]+prod[0][1]*m[1][0]
a01=prod[0][0]*m[0][1]+prod[0][1]*m[1][1]
a10=prod[1][0]*m[0][0]+prod[1][1]*m[1][0]
a11=prod[1][0]*m[0][1]+prod[1][1]*m[1][1]
prod=[[a00,a01],[a10,a11]]
return prod
def rotmat(teta):
return [[cos(teta),-sin(teta)],[sin(teta),cos(teta)]]
def applymat(mat, pt):
x=mat[0][0]*pt[0]+mat[0][1]*pt[1]
y=mat[1][0]*pt[0]+mat[1][1]*pt[1]
pt[0]=x
pt[1]=y
def norm(pt):
return sqrt(pt[0]*pt[0]+pt[1]*pt[1])
def ArcToPath(p1,params):
A=p1[:]
rx,ry,teta,longflag,sweepflag,x2,y2=params[:]
teta = teta*pi/180.0
B=[x2,y2]
if rx==0 or ry==0 or A==B:
return([[A[:],A[:],A[:]],[B[:],B[:],B[:]]])
mat=matprod((rotmat(teta),[[1/rx,0],[0,1/ry]],rotmat(-teta)))
applymat(mat, A)
applymat(mat, B)
k=[-(B[1]-A[1]),B[0]-A[0]]
d=k[0]*k[0]+k[1]*k[1]
k[0]/=sqrt(d)
k[1]/=sqrt(d)
d=sqrt(max(0,1-d/4))
if longflag==sweepflag:
d*=-1
O=[(B[0]+A[0])/2+d*k[0],(B[1]+A[1])/2+d*k[1]]
OA=[A[0]-O[0],A[1]-O[1]]
OB=[B[0]-O[0],B[1]-O[1]]
start=acos(OA[0]/norm(OA))
if OA[1]<0:
start*=-1
end=acos(OB[0]/norm(OB))
if OB[1]<0:
end*=-1
if sweepflag and start>end:
end +=2*pi
if (not sweepflag) and start<end:
end -=2*pi
NbSectors=int(abs(start-end)*2/pi)+1
dTeta=(end-start)/NbSectors
#v=dTeta*2/pi*0.552
#v=dTeta*2/pi*4*(sqrt(2)-1)/3
v = 4*tan(dTeta/4)/3
#if not sweepflag:
# v*=-1
p=[]
for i in range(0,NbSectors+1,1):
angle=start+i*dTeta
v1=[O[0]+cos(angle)-(-v)*sin(angle),O[1]+sin(angle)+(-v)*cos(angle)]
pt=[O[0]+cos(angle) ,O[1]+sin(angle) ]
v2=[O[0]+cos(angle)- v *sin(angle),O[1]+sin(angle)+ v *cos(angle)]
p.append([v1,pt,v2])
p[ 0][0]=p[ 0][1][:]
p[-1][2]=p[-1][1][:]
mat=matprod((rotmat(teta),[[rx,0],[0,ry]],rotmat(-teta)))
for pts in p:
applymat(mat, pts[0])
applymat(mat, pts[1])
applymat(mat, pts[2])
return(p)
def CubicSuperPath(simplepath):
csp = []
subpath = -1
subpathstart = []
last = []
lastctrl = []
for s in simplepath:
cmd, params = s
if cmd == 'M':
if last:
csp[subpath].append([lastctrl[:],last[:],last[:]])
subpath += 1
csp.append([])
subpathstart = params[:]
last = params[:]
lastctrl = params[:]
elif cmd == 'L':
csp[subpath].append([lastctrl[:],last[:],last[:]])
last = params[:]
lastctrl = params[:]
elif cmd == 'C':
csp[subpath].append([lastctrl[:],last[:],params[:2]])
last = params[-2:]
lastctrl = params[2:4]
elif cmd == 'Q':
q0=last[:]
q1=params[0:2]
q2=params[2:4]
x0= q0[0]
x1=1./3*q0[0]+2./3*q1[0]
x2= 2./3*q1[0]+1./3*q2[0]
x3= q2[0]
y0= q0[1]
y1=1./3*q0[1]+2./3*q1[1]
y2= 2./3*q1[1]+1./3*q2[1]
y3= q2[1]
csp[subpath].append([lastctrl[:],[x0,y0],[x1,y1]])
last = [x3,y3]
lastctrl = [x2,y2]
elif cmd == 'A':
arcp=ArcToPath(last[:],params[:])
arcp[ 0][0]=lastctrl[:]
last=arcp[-1][1]
lastctrl = arcp[-1][0]
csp[subpath]+=arcp[:-1]
elif cmd == 'Z':
csp[subpath].append([lastctrl[:],last[:],last[:]])
last = subpathstart[:]
lastctrl = subpathstart[:]
#append final superpoint
csp[subpath].append([lastctrl[:],last[:],last[:]])
return csp
def unCubicSuperPath(csp):
a = []
for subpath in csp:
if subpath:
a.append(['M',subpath[0][1][:]])
for i in range(1,len(subpath)):
a.append(['C',subpath[i-1][2][:] + subpath[i][0][:] + subpath[i][1][:]])
return a
def parsePath(d):
return CubicSuperPath(simplepath.parsePath(d))
def formatPath(p, terminate=False):
# Modified by JH to add 'Z' termination when needed
simple_path = unCubicSuperPath(p)
if terminate:
simple_path.append(['Z', []])
return simplepath.formatPath(simple_path)
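# Hedged usage sketch (added for illustration): round-tripping an SVG path
# 'd' attribute through the cubic superpath representation.  The path string
# is arbitrary example data.
#
#   csp = parsePath("M 10,10 L 100,10 A 40,40 0 0 1 100,90 Z")
#   # each subpath point is a [ctrl_before, point, ctrl_after] triple
#   d = formatPath(csp, terminate=True)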
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
|
gpl-3.0
| -6,287,155,482,373,087,000 | 30.678363 | 88 | 0.512461 | false | 2.787957 | false | false | false |
pytorch/text
|
benchmark/benchmark_sentencepiece.py
|
1
|
1914
|
import time
import argparse
from torchtext.experimental.transforms import load_sp_model as load_pybind_sp_model
from torchtext.data.functional import load_sp_model as load_torchbind_sp_model
from torchtext.utils import download_from_url
from torchtext.datasets import DATASETS
def benchmark_sentencepiece(args):
def _run_benchmark(train, spm_processor):
t0 = time.monotonic()
for (_, text) in train:
spm_processor(text)
print("Sentencepiece processor time:", time.monotonic() - t0)
# Download a pretrained sentencepiece model
sp_model_path = download_from_url('https://pytorch.s3.amazonaws.com/models/text/pretrained_spm/text_unigram_15000.model')
# existing sentencepiece model with torchbind
train = DATASETS[args.dataset](split='train')
sp_model = load_torchbind_sp_model(sp_model_path)
print("SentencePiece EncodeAsIds - torchbind")
_run_benchmark(train, sp_model.EncodeAsIds)
# experimental sentencepiece model with pybind
train = DATASETS[args.dataset](split='train')
sp_model = load_pybind_sp_model(sp_model_path)
print("SentencePiece EncodeAsIds - pybind")
_run_benchmark(train, sp_model.EncodeAsIds)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='SentencePiece benchmark')
parser.add_argument('--dataset', type=str, default='AG_NEWS',
help='Dataset for performance benchmark')
args = parser.parse_args()
benchmark_sentencepiece(args)
# Running with AG_NEWS
# SentencePiece EncodeAsIds - torchbind
# Sentencepiece processor time: 11.536989663727582
# SentencePiece EncodeAsIds - pybind
# Sentencepiece processor time: 11.38821320142597
# Running with YelpReviewFull
# SentencePiece EncodeAsIds - torchbind
# Sentencepiece processor time: 224.23954573180526
# SentencePiece EncodeAsIds - pybind
# Sentencepiece processor time: 217.134037473239
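# Hedged usage note (not in the original script): run from the command line,
# for example
#
#   python benchmark_sentencepiece.py --dataset AG_NEWS
#
# Any key of torchtext.datasets.DATASETS (e.g. YelpReviewFull) should work,
# subject to download time.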
|
bsd-3-clause
| -530,106,896,059,488,200 | 38.061224 | 125 | 0.738767 | false | 3.461121 | false | false | false |
google/flax
|
flax/core/nn/attention.py
|
1
|
18495
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Attention core modules for Flax."""
from collections.abc import Iterable # pylint: disable=g-importing-member
import functools
import warnings
from typing import Any
from . import stochastic
from flax import jax_utils
from flax import struct
from flax.nn import initializers
from flax.core import Scope
import jax
from jax import lax
from jax import random
import jax.numpy as jnp
from .linear import default_kernel_init
from .linear import dense_general
import numpy as np
def dot_product_attention(scope,
query,
key,
value,
dtype=jnp.float32,
bias=None,
axis=None,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None):
"""Computes dot-product attention given query, key, and value.
This is the core function for applying attention based on
https://arxiv.org/abs/1706.03762. It calculates the attention weights given
query and key and combines the values using the attention weights. This
function supports multi-dimensional inputs.
Args:
query: queries for calculating attention with shape of `[batch_size, dim1,
dim2, ..., dimN, num_heads, mem_channels]`.
key: keys for calculating attention with shape of `[batch_size, dim1, dim2,
..., dimN, num_heads, mem_channels]`.
value: values to be used in attention with shape of `[batch_size, dim1,
dim2,..., dimN, num_heads, value_channels]`.
dtype: the dtype of the computation (default: float32)
bias: bias for the attention weights. This can be used for incorporating
autoregressive mask, padding mask, proximity bias.
axis: axes over which the attention is applied.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
Returns:
Output of shape `[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`.
"""
assert key.shape[:-1] == value.shape[:-1]
assert (query.shape[0:1] == key.shape[0:1] and
query.shape[-1] == key.shape[-1])
if axis is None:
axis = tuple(range(1, key.ndim - 2))
if not isinstance(axis, Iterable):
axis = (axis,)
assert key.ndim == query.ndim
assert key.ndim == value.ndim
for ax in axis:
if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2):
raise ValueError('Attention axis must be between the batch '
'axis and the last-two axes.')
depth = query.shape[-1]
n = key.ndim
# batch_dims is <bs, <non-attention dims>, num_heads>
batch_dims = tuple(np.delete(range(n), axis + (n - 1,)))
# q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels)
qk_perm = batch_dims + axis + (n - 1,)
key = key.transpose(qk_perm)
query = query.transpose(qk_perm)
# v -> (bs, <non-attention dims>, num_heads, channels, <attention dims>)
v_perm = batch_dims + (n - 1,) + axis
value = value.transpose(v_perm)
query = query / jnp.sqrt(depth).astype(dtype)
batch_dims_t = tuple(range(len(batch_dims)))
attn_weights = lax.dot_general(
query,
key, (((n - 1,), (n - 1,)), (batch_dims_t, batch_dims_t)),
precision=precision)
# apply attention bias: masking, dropout, proximity bias, etc.
if bias is not None:
attn_weights = attn_weights + bias
# normalize the attention weights
norm_dims = tuple(range(attn_weights.ndim - len(axis), attn_weights.ndim))
attn_weights = lax.exp(
attn_weights -
jax.scipy.special.logsumexp(attn_weights, axis=norm_dims, keepdims=True))
attn_weights = attn_weights.astype(dtype)
# apply dropout
if not deterministic and dropout_rate > 0.:
if dropout_rng is None:
dropout_rng = scope.make_rng('dropout')
keep_prob = jax.lax.tie_in(attn_weights, 1.0 - dropout_rate)
if broadcast_dropout:
# dropout is broadcast across the batch+head+non-attention dimension
dropout_dims = attn_weights.shape[-(2 * len(axis)):]
dropout_shape = (tuple([1] * len(batch_dims_t)) + dropout_dims)
keep = random.bernoulli(dropout_rng, keep_prob, dropout_shape)
else:
keep = random.bernoulli(dropout_rng, keep_prob, attn_weights.shape)
multiplier = (keep.astype(attn_weights.dtype) /
jnp.asarray(keep_prob, dtype=dtype))
attn_weights = attn_weights * multiplier
# compute the new values given the attention weights
wv_contracting_dims = (norm_dims, range(value.ndim - len(axis), value.ndim))
y = lax.dot_general(
attn_weights,
value, (wv_contracting_dims, (batch_dims_t, batch_dims_t)),
precision=precision)
# back to (bs, dim1, dim2, ..., dimN, num_heads, channels)
perm_inv = _invert_perm(qk_perm)
y = y.transpose(perm_inv)
return y
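# Hedged shape sketch (illustrative, not from the original file): for 1D
# self-attention the expected shapes are
#
#   query / key / value: [batch, seq_len, num_heads, head_dim]
#   output:              [batch, seq_len, num_heads, head_dim]
#
# with axis defaulting to (1,), i.e. attention over the sequence dimension.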
def _invert_perm(perm):
perm_inv = [0] * len(perm)
for i, j in enumerate(perm):
perm_inv[j] = i
return tuple(perm_inv)
@struct.dataclass
class CacheEntry:
key: np.ndarray
value: np.ndarray
i: np.ndarray
def multi_head_dot_product_attention(
scope: Scope,
inputs_q,
inputs_kv,
num_heads,
dtype=jnp.float32,
qkv_features=None,
out_features=None,
attention_axis=None,
causal_mask=False,
padding_mask=None,
key_padding_mask=None,
segmentation=None,
key_segmentation=None,
cache=False,
broadcast_dropout=True,
dropout_rng=None,
dropout_rate=0.,
deterministic=False,
precision=None,
kernel_init=default_kernel_init,
bias_init=initializers.zeros,
bias=True,
attention_fn=dot_product_attention):
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and project the results to an output vector.
This can be used for encoder-decoder attention by specifying both `inputs_q`
and `inputs_kv`, or for self-attention by only specifying `inputs_q` and
setting `inputs_kv` to None.
Args:
inputs_q: input queries of shape `[bs, dim1, dim2, ..., dimN, features]`.
inputs_kv: key/values of shape `[bs, dim1, dim2, ..., dimN, features]`
or None for self-attention, in which case key/values will be derived
from inputs_q.
num_heads: number of attention heads. Features (i.e. inputs_q.shape[-1])
should be divisible by the number of heads.
dtype: the dtype of the computation (default: float32)
qkv_features: dimension of the key, query, and value.
out_features: dimension of the last projection
attention_axis: axes over which the attention is applied ( 'None' means
attention over all axes, but batch, heads, and features).
causal_mask: boolean specifying whether to apply a causal mask on the
attention weights. If True, the output at timestep `t` will not depend
on inputs at timesteps strictly greater than `t`.
padding_mask: boolean specifying query tokens that are pad token.
key_padding_mask: boolean specifying key-value tokens that are pad token.
segmentation: segment indices for packed inputs_q data.
key_segmentation: segment indices for packed inputs_kv data.
cache: an instance of `flax.nn.attention.Cache` used for efficient
autoregressive decoding.
broadcast_dropout: bool: use a broadcasted dropout along batch dims.
dropout_rng: JAX PRNGKey: to be used for dropout
dropout_rate: dropout rate
deterministic: bool, deterministic or not (to apply dropout)
precision: numerical precision of the computation see `jax.lax.Precision`
for details.
kernel_init: initializer for the kernel of the Dense layers.
bias_init: initializer for the bias of the Dense layers.
bias: bool: whether pointwise QKVO dense transforms use bias.
attention_fn: dot_product_attention or compatible function. Accepts
query, key, value, and returns output of shape
`[bs, dim1, dim2, ..., dimN, num_heads, value_channels]`
Returns:
output of shape `[bs, dim1, dim2, ..., dimN, features]`.
"""
assert causal_mask or not cache, (
'Caching is only supported for causal attention.')
if inputs_kv is None:
inputs_kv = inputs_q
if attention_axis is None:
attention_axis = tuple(range(1, inputs_q.ndim - 1))
features = out_features or inputs_q.shape[-1]
qkv_features = qkv_features or inputs_q.shape[-1]
assert qkv_features % num_heads == 0, (
'Memory dimension must be divisible by number of heads.')
head_dim = qkv_features // num_heads
dense = functools.partial(
dense_general,
axis=-1,
dtype=dtype,
features=(num_heads, head_dim),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
precision=precision)
# project inputs_q to multi-headed q/k/v
# dimensions are then [bs, dims..., n_heads, n_features_per_head]
query = scope.child(dense, 'query')(inputs_q)
key = scope.child(dense, 'key')(inputs_kv)
value = scope.child(dense, 'value')(inputs_kv)
if cache:
if not scope.has_variable('cache', 'entry'):
ndim, tail_shape = (key.ndim, key.shape[-2:])
def init_fn(shape, dtype=jnp.float32):
full_shape = shape + tail_shape
if len(full_shape) != ndim:
raise ValueError('Shape should be a tuple with the shape of the batch'
'and attention dims.')
return CacheEntry(
key=jnp.zeros(full_shape, dtype),
value=jnp.zeros(full_shape, dtype),
i=jnp.zeros((), jnp.uint32))
cache_entry = init_fn
else:
cache_entry = scope.get_variable('cache', 'entry')
if not isinstance(cache_entry, CacheEntry):
raise ValueError('Cache is not initialized.')
expected_shape = list(cache_entry.key.shape[:-2])
for attn_dim in attention_axis:
expected_shape[attn_dim] = 1
expected_shape = tuple(expected_shape) + inputs_q.shape[-1:]
if expected_shape != inputs_q.shape:
raise ValueError('Invalid shape provided, '
'expected shape %s instead got %s.' %
(expected_shape, inputs_q.shape))
cshape = cache_entry.key.shape
indices = [0] * len(cshape)
i = cache_entry.i
attn_size = np.prod(np.take(cshape, attention_axis))
for attn_dim in attention_axis:
attn_size //= cshape[attn_dim]
indices[attn_dim] = i // attn_size
i = i % attn_size
key = lax.dynamic_update_slice(cache_entry.key, key, indices)
value = lax.dynamic_update_slice(cache_entry.value, value, indices)
one = jnp.array(1, jnp.uint32)
cache_entry = cache_entry.replace(i=cache_entry.i + one,
key=key,
value=value)
# TODO(levskaya): verify this is still needed in translation decoding.
key_padding_mask = jnp.broadcast_to(
(jnp.arange(cshape[1]) < cache_entry.i), cshape[:2])
key_padding_mask = key_padding_mask.astype(jnp.float32)[..., None]
scope.put_variable('cache', 'entry', cache_entry)
# create attention masks
mask_components = []
if causal_mask:
if cache and isinstance(cache_entry, CacheEntry):
bias_pre_shape = (1,) * (key.ndim - 1)
attn_shape = tuple(np.take(key.shape, attention_axis))
attn_size = np.prod(attn_shape)
ii = jnp.arange(attn_size, dtype=jnp.uint32)
mask = ii < cache_entry.i
mask_components.append(mask.reshape(bias_pre_shape + attn_shape))
else:
mask_components.append(_make_causal_mask(key, attention_axis))
if padding_mask is not None:
if key_padding_mask is None:
key_padding_mask = padding_mask
padding_mask = make_padding_mask(
padding_mask_query=padding_mask,
padding_mask_key=key_padding_mask,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=attention_axis)
mask_components.append(padding_mask)
if segmentation is not None:
if key_segmentation is None:
key_segmentation = segmentation
segmentation_mask = make_padding_mask(
padding_mask_query=segmentation,
padding_mask_key=key_segmentation,
query_shape=query.shape,
key_shape=key.shape,
attention_axis=attention_axis,
segmentation_mask=True)
mask_components.append(segmentation_mask)
if mask_components:
attention_mask = mask_components[0]
for component in mask_components[1:]:
attention_mask = jnp.logical_and(attention_mask, component)
# attention mask in the form of attention bias
attention_bias = lax.select(
attention_mask > 0, jnp.full(attention_mask.shape, 0.).astype(dtype),
jnp.full(attention_mask.shape, -1e10).astype(dtype))
else:
attention_bias = None
# apply attention
x = scope.child(attention_fn)(
query,
key,
value,
dtype=dtype,
axis=attention_axis,
bias=attention_bias,
precision=precision,
dropout_rng=dropout_rng,
dropout_rate=dropout_rate,
broadcast_dropout=broadcast_dropout,
deterministic=deterministic)
# back to the original inputs dimensions
out = scope.child(dense_general, name='out')(
x,
features=features,
axis=(-2, -1),
kernel_init=kernel_init,
bias_init=bias_init,
bias=bias,
dtype=dtype,
precision=precision)
return out
# TODO(flax-dev): Consider refactoring MultiHeadDotProductAttention and moving
# causal_mask and cache support into this class instead.
#SelfAttention = MultiHeadDotProductAttention.partial(inputs_kv=None)
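# Hedged usage sketch (illustrative only; the scope/rng plumbing is
# deliberately simplified):
#
#   def attend(scope, x):  # x: [batch, seq_len, features]
#       return multi_head_dot_product_attention(
#           scope, inputs_q=x, inputs_kv=None, num_heads=8,
#           causal_mask=True, deterministic=True)
#
# attend would then be initialised/applied through flax.core's init/apply
# helpers (or wrapped by a higher-level module); that wiring lives outside
# this file.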
def make_padding_mask(padding_mask_query,
padding_mask_key,
query_shape,
key_shape,
attention_axis=None,
segmentation_mask=False):
"""Makes padding mask for attention weights.
In case of 1d inputs (i.e., `[bs, len, features]`, the attention weights will
be `[bs, len, len]` and this function makes a square matrix [len, len].
Args:
padding_mask_query: padding mask of query <bs, qdim1,.., qdimn>
padding_mask_key: padding mask of query <bs, key1,.., keyn>
query_shape: shape of the query
key_shape: shape of the key, which is equal to the shape of value.
attention_axis: axis over which attention is applied.
segmentation_mask: bool: if true use equality on cartesian product rather
than outer product for constructing segmentation masks.
Returns:
The padding mask for attention weights.
"""
assert query_shape[0] == key_shape[0]
assert len(query_shape) == len(key_shape)
ndim = len(key_shape)
if attention_axis is None:
attention_axis = tuple(range(1, ndim - 2))
assert isinstance(attention_axis, tuple)
for ax in attention_axis:
if not (ndim >= 3 and 1 <= ax < ndim - 2):
raise ValueError(
'Attention axis must be between the batch axis and the last-two axes.'
)
mask_shape_final = (query_shape[0], 1)  # batch_size, 1 (for all heads)
for ax in attention_axis:
mask_shape_final += (query_shape[ax],)
for ax in attention_axis:
mask_shape_final += (key_shape[ax],)
padding_mask_query = padding_mask_query[..., None]
padding_mask_key = padding_mask_key[..., None]
perm = (0,) + tuple(np.flip(np.arange(padding_mask_key.ndim)))[:-1]
if segmentation_mask:
mask = jnp.equal(padding_mask_query, padding_mask_key.transpose(perm))
else:
mask = jnp.multiply(padding_mask_query, padding_mask_key.transpose(perm))
mask = mask.reshape(mask_shape_final)
mask = jax.lax.convert_element_type(mask, jnp.float32)
return mask
def _make_causal_mask(key, attention_axis=None, self_mask=False):
"""Makes a causal mask, to be used for masking out the future for attention.
In case of 1d inputs (i.e., `[bs, len, features]`, the attention weights will
be `[bs, len, len]` and this function makes a square matrix [len, len] with
zeros in upper triangle and ones in lower triangle.
Args:
key: shape of the key, which is equal to the shape of value and is
assumed to be equal to the shape of the query (since this is used in
self-attention when decoding).
attention_axis: axis over which attention is applied.
self_mask: if mask out the diagonal or not.
Returns:
A causal mask to be used to mask out future positions.
"""
if attention_axis is None:
attention_axis = tuple(range(1, key.ndim - 2))
assert isinstance(attention_axis, tuple)
for ax in attention_axis:
if not (key.ndim >= 3 and 1 <= ax < key.ndim - 2):
raise ValueError(
'Attention axis must be between the batch axis and the last-two axes.'
)
mask_shape = tuple([1] * (key.ndim - len(attention_axis) - 1))
mask_shape_final = mask_shape
for _ in range(2):
flatten_dim = 1
for ax in attention_axis:
mask_shape_final += (key.shape[ax],)
flatten_dim *= key.shape[ax]
mask_shape += (flatten_dim,)
def tri(n, m, k=0):
# Tie in the key to avoid the mask becoming a constant.
# This way XLA can construct the mask during computation and fuse it
# with the attention ops.
x = lax.tie_in(key, jnp.arange(n, dtype=jnp.int32))
y = lax.tie_in(key, jnp.arange(m, dtype=jnp.int32))
mask = lax.ge(
(lax.broadcast_in_dim(x, shape=(n, m), broadcast_dimensions=(0,))) + k,
lax.broadcast(y, [n]))
return mask
k = -1 if self_mask else 0
mask = tri(*mask_shape[-2:], k=k).reshape(mask_shape_final)
return mask
|
apache-2.0
| -5,751,349,227,438,841,000 | 35.916168 | 80 | 0.656394 | false | 3.598949 | false | false | false |
priestc/giotto
|
giotto/controllers/__init__.py
|
1
|
7907
|
from collections import deque
import inspect
import json
from giotto import get_config
from giotto.exceptions import (GiottoException, InvalidInput, ProgramNotFound,
MockNotFound, ControlMiddlewareInterrupt, NotAuthorized, InvalidInvocation)
from giotto.primitives import GiottoPrimitive, RAW_INVOCATION_ARGS
from giotto.keyvalue import DummyKeyValue
from giotto.control import GiottoControl
class GiottoController(object):
middleware_interrupt = None
persist_data = None
def __init__(self, request, manifest, model_mock=False, errors=None):
self.request = request
self.model_mock = model_mock
self.cache = get_config('cache_engine', DummyKeyValue())
self.errors = errors
self.manifest = manifest
self.middleware_interrupt_exc = None
self.middleware_control = None
self.display_data = 'Not calculated yet'
# the program that corresponds to this invocation
invocation = self.get_invocation()
name = self.get_controller_name()
parsed = self.manifest.parse_invocation(invocation, controller_tag=name)
self.raw_args = parsed['raw_args']
self.program = parsed['program']
self.program.name_on_manifest = parsed['program_name']
self.path_args = parsed['args']
if parsed['superformat']:
self.mimetype = parsed['superformat_mime'] or parsed['superformat']
else:
self.mimetype = self.mimetype_override() or self.default_mimetype
def get_response(self):
"""
High level function for getting a response. This is what the concrete
controller should call. Returns a controller specific response.
"""
last_good_request = self.request
middleware_result = None
try:
last_good_request, middleware_result = self.program.execute_input_middleware_stream(self.request, self)
except GiottoException as exc:
# save this exception so it can be re-raised from within
# get_data_response() so that get_concrete_response() can handle it
self.middleware_interrupt_exc = exc
self.request = last_good_request
else:
self.request = middleware_result # middleware ended cleanly
if GiottoControl in type(middleware_result).mro():
# middleware returned a control object
self.middleware_control = middleware_result
self.request = last_good_request
response = self.get_concrete_response()
if self.persist_data:
response = self.persist(self.persist_data, response)
return self.program.execute_output_middleware_stream(self.request, response, self)
def get_data_response(self):
"""
Execute the model and view, and handle the cache.
Returns controller-agnostic response data.
"""
if self.middleware_interrupt_exc:
## the middleware raised an exception, re-raise it here so
## get_concrete_response (defined in subclasses) can catch it.
raise self.middleware_interrupt_exc
if self.middleware_control:
## this redirect object came from middleware but return it as if it
## came from a view.
return {'body': self.middleware_control}
if self.model_mock and self.program.has_mock_defined():
model_data = self.program.get_model_mock()
else:
args, kwargs = self.program.get_model_args_kwargs()
data = self.get_data_for_model(args, kwargs)
self.display_data = data # just for displaying in __repr__
if self.program.cache and not self.errors:
key = self.get_cache_key(data)
hit = self.cache.get(key)
if hit:
return hit
model_data = self.program.execute_model(data)
response = self.program.execute_view(model_data, self.mimetype, self.errors)
if self.program.cache and not self.errors and not self.model_mock:
self.cache.set(key, response, self.program.cache)
if 'persist' in response:
self.persist_data = response['persist']
return response
def get_data_for_model(self, args, kwargs):
"""
In comes args and kwargs expected for the model. Out comes the data from
this invocation that will go to the model.
In other words, this function does the "data negotiation" between the
controller and the model.
"""
kwargs_from_invocation = self.get_raw_data()
args_from_invocation = deque(self.path_args)
defaults = kwargs
values = args + list(kwargs.keys())
output = {}
raw = False
for i, field in enumerate(values):
## going through each bit of data that the model needs
## `field` here is the name of each needed var.
# the 'default' value that may be defined in the model.
# this variable might be a string or int or might even be a primitive object.
# NotImplemented is used here to preserve the case where the default value is None.
# it is used here as a sort of MetaNone.
default_defined_in_model = defaults.get(field, NotImplemented)
# the value in kwarg arguments such as --values and GET params
from_data_kwargs = kwargs_from_invocation.get(field, None)
# The value that will end up being used.
value_to_use = None
if default_defined_in_model == RAW_INVOCATION_ARGS:
# flag that the RAW_INVOCATION_ARGS primitive has been invoked
# used later to suppress errors for unused program args
# when this primitive is invoked, all positional args are invalid.
raw = True
if type(default_defined_in_model) == GiottoPrimitive:
value_to_use = self.get_primitive(default_defined_in_model.name)
elif from_data_kwargs:
value_to_use = from_data_kwargs
elif not raw and args_from_invocation:
value_to_use = args_from_invocation.popleft()
elif default_defined_in_model is not NotImplemented:
value_to_use = default_defined_in_model
else:
raise InvalidInvocation("Data Missing For Program. Missing: %s" % field)
output[field] = value_to_use
if args_from_invocation and not raw:
msg = "Too many arguments. Program `%s` takes %s arguments, %s given" % (
self.program.name, len(args) + len(kwargs), len(args_from_invocation)
)
raise InvalidInvocation(msg)
return output
def persist(self, values):
"""
Persist this data between the user and the server.
"""
raise NotImplementedError("This controller does not support persistence")
def __repr__(self):
controller = self.get_controller_name()
model = self.program.name
data = self.display_data
return "<%s %s - %s - %s>" % (
self.__class__.__name__, controller, model, data
)
def mimetype_override(self):
"""
In some circumstances, the returned mimetype can be changed. Return that here.
Otherwise the default or superformat will be used.
"""
return None
def get_cache_key(self, data):
try:
controller_args = json.dumps(data, separators=(',', ':'), sort_keys=True)
except TypeError:
# controller contains info that can't be json serialized:
controller_args = str(data)
program = self.program.name
return "%s(%s)(%s)" % (controller_args, program, self.mimetype)
|
bsd-2-clause
| 1,328,981,605,514,870,500 | 38.934343 | 115 | 0.615404 | false | 4.414852 | false | false | false |
EuroPython/ep-tools
|
tasks.py
|
1
|
4203
|
"""
Invoke tasks to be run from the command line.
"""
import os
from invoke import task
from eptools import talks, people
from eptools.gspread_utils import get_api_key_file
from eptools.config import (
conference,
sponsors_billing_worksheet,
finaid_submissions_worksheet
)
@task
def sponsor_agreement(ctx, company_name, output_dir, template_file="", api_key_file=""):
""" Call docstamp to produce a sponsor agreement for `company_name`
using `template_file`. The output will be saved in `output_dir`.
Parameters
----------
company_name: str
Can be a substring of the company name in the spreadsheet.
template_file: str
output_dir: str
api_key_file: str
The path to the Google Credentials json file.
If left empty will try to look for its path in the config.py file.
"""
from eptools.sponsors import (
get_sponsor,
get_sponsors_ws_data,
create_sponsor_agreement,
contract_template,
company_name_column,
)
if not template_file:
template_file = contract_template
if not api_key_file:
api_key_file = get_api_key_file()
output_dir = os.path.abspath(output_dir)
responses = get_sponsors_ws_data(api_key_file=api_key_file, doc_key=sponsors_billing_worksheet[0])
try:
sponsor_data = get_sponsor(sponsor_name=company_name, sponsors=responses, col_name=company_name_column)
except Exception:
raise KeyError("Could not find data for sponsor {}.".format(company_name))
else:
fpath = create_sponsor_agreement(sponsor_data, template_file=template_file, output_dir=output_dir)
print("Created {}.".format(fpath))
@task
def finaid_receipt(ctx, applicant_name, output_dir, template_file="", api_key_file=""):
""" Call docstamp to produce a financial aid receipt
for `applicant_name` using `template_file`.
The output will be saved in `output_dir`.
Parameters
----------
applicant_name: str
template_file: str
output_dir: str
api_key_file: str
Path to the Google credentials json file.
If left empty will try to look for its path in the config.py file.
"""
from eptools.finaid import get_finaid_ws_data, get_applicant, receipt_template_spa, create_receipt
if not template_file:
template_file = receipt_template_spa
if not api_key_file:
api_key_file = get_api_key_file()
output_dir = os.path.abspath(output_dir)
responses = get_finaid_ws_data(api_key_file=api_key_file, doc_key=finaid_submissions_worksheet[0])
try:
applicant_data = get_applicant(applicant_name=applicant_name, submissions=responses, col_name="full_name")
except Exception:
raise KeyError("Could not find data for applicant {}.".format(applicant_name))
else:
fpath = create_receipt(applicant_data, template_file=template_file, output_dir=output_dir)
print("Created {}.".format(fpath))
@task
def fetch_ticket_profiles(ctx, out_filepath, conf=conference, status="all", nondups=False, raise_=False, ticket_id=""):
""" Create a json file with the all the tickets of the conference.
make_option('--status',
choices=['all', 'complete', 'incomplete'],
help='Status of the orders related with the tickets.',
make_option('--nondups',
help='If enables will remove the tickets with '
'same owner/email.',
make_option('--raise',
help='If enabled will raise any error that it may find.',
make_option('--ticket-id',
help='Will output the profile of the given ticket only.',
"""
return people.fetch_files(out_filepath, conf=conf, status=status, nondups=nondups, raise_=raise_, ticket_id=ticket_id)
@task
def fetch_talks_json(ctx, out_filepath="", status="proposed", conf=conference, host="europython.io", with_votes=False):
""" Return the talks in a json format. `status` choices: ['accepted', 'proposed']
"""
return talks.fetch_talks_json(out_filepath=out_filepath, status=status, conf=conf, host=host, with_votes=with_votes)
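# Hedged usage note (not in the original file): these tasks are meant to be
# run through Invoke's CLI, roughly as
#
#   invoke sponsor-agreement --company-name "ACME" --output-dir ./out
#   invoke finaid-receipt --applicant-name "Jane Doe" --output-dir ./out
#
# Exact task and flag spelling depends on the Invoke version (underscores are
# usually exposed as dashes).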
|
mit
| 804,876,676,924,397,200 | 32.624 | 122 | 0.656674 | false | 3.70962 | false | false | false |
previtus/MGR-Project-Code
|
Settings/independent_experiments/effect_of_data_shuffling/shuffle_effective_1200.py
|
1
|
3067
|
def Setup(Settings,DefaultModel):
# shuffle_effective_1200.py
# - in this case the shuffled variants always perform better than the unshuffled ones
# - and then osm only val is best, osm img mix is second and last is img only
Settings["experiment_name"] = "Test_Shuffling_3 models vs 3 models_1200x_markable_299x299_shuffleNowInMod6"
Settings["graph_histories"] = ['together', [0,3], [1,4], [2,5],[0,1,2],[3,4,5]]
n=0
Settings["models"][n]["dataset_name"] = "1200x_markable_299x299" # "1200x_markable_299x299", "5556x_mark_res_299x299", "5556x_markable_640x640"
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'notShuffled_mix'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = False
Settings["models"].append(DefaultModel.copy())
n=1
Settings["models"][n]["dataset_pointer"] = 0 # 0 - reuse the first dataset
Settings["models"][n]["model_type"] = 'osm_only'
Settings["models"][n]["unique_id"] = 'notShuffled_osm_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = False
Settings["models"].append(DefaultModel.copy())
n=2
Settings["models"][n]["dataset_pointer"] = 0 # 0 - reuse the first dataset
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'notShuffled_img_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = False
Settings["models"].append(DefaultModel.copy())
n=3
Settings["models"][n]["dataset_pointer"] = -1 # 0 - reuse the first dataset
Settings["models"][n]["dataset_name"] = "1200x_markable_299x299" # "1200x_markable_299x299", "5556x_mark_res_299x299", "5556x_markable_640x640"
Settings["models"][n]["pixels"] = 299
Settings["models"][n]["model_type"] = 'img_osm_mix'
Settings["models"][n]["unique_id"] = 'Shuffled_img_osm_mix'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = True
Settings["models"].append(DefaultModel.copy())
n=4
Settings["models"][n]["dataset_pointer"] = 1
Settings["models"][n]["model_type"] = 'osm_only'
Settings["models"][n]["unique_id"] = 'Shuffled_osm_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = True
Settings["models"].append(DefaultModel.copy())
n=5
Settings["models"][n]["dataset_pointer"] = 1
Settings["models"][n]["model_type"] = 'simple_cnn_with_top'
Settings["models"][n]["unique_id"] = 'Shuffled_img_only'
Settings["models"][n]["top_repeat_FC_block"] = 2
Settings["models"][n]["epochs"] = 300
Settings["models"][n]["shuffle_dataset"] = True
return Settings
|
mit
| 7,533,434,955,248,147,000 | 41.597222 | 147 | 0.632214 | false | 3.266241 | false | false | false |
skasamatsu/vaspgrid
|
zav.py
|
1
|
2327
|
# Beware! Only tested for non-spin-polarized case
import re
import sys
import rlcompleter
import readline
#from numpy import *
from enterfi import enterfi
from outputfi import outputfi
gridfname = enterfi("Enter VASP field data (CHGCAR, LOCPOT, etc.)")
outfname = outputfi("Enter output file name ")
gridfi = open(gridfname,"r")
gridfi.readline() # Skip system name
# Read lattice scaling constant
li = gridfi.readline().split()
scale = [0.0,0.0,0.0]
if len(li) == 1:
li = float(li[0])
for i in range(3):
scale[i] = li
else:
if len(li) == 3:
for i in range(3):
scale[i] = float(li[i])
# Read lattice vectors
latcons = [[0.0,0.0,0.0],[0.0,0.0,0.0],[0.0,0.0,0.0]]
for i in range(3):
li = gridfi.readline().split()
for j in range(3):
latcons[i][j] = float(li[j])*scale[j]
print latcons
# Is this lattice orthorhombic in z direction?
assert abs(latcons[0][2]) <= 1.0e-8
assert abs(latcons[1][2]) <= 1.0e-8
assert abs(latcons[2][0]) <= 1.0e-8
assert abs(latcons[2][1]) <= 1.0e-8
# Read number of atoms
# Is this from vasp5 or vasp4? vasp5 has element names on the sixth line
# while vasp 4 does not.
li = gridfi.readline().split()
if re.match("[0-9]",li[0].strip()):
# It's vasp4
nspecs = len(li)
natoms = 0
for i in range(nspecs):
li[i] = int(li[i])
natoms = natoms + li[i]
else:
# It's vasp5. Read one more line.
li = gridfi.readline().split()
nspecs = len(li)
natoms = 0
for i in range(nspecs):
li[i] = int(li[i])
natoms = natoms + li[i]
print natoms
gridfi.readline() # Skip one line. It probably says "Direct".
for i in range(natoms+1):
gridfi.readline() # Skip the atom coordinates plus 1 blank line
# Read the grid dimensions
grid = gridfi.readline().split()
for i in range(len(grid)):
grid[i]=int(grid[i])
ngrid = grid[0] * grid[1] * grid[2]
dz = latcons[2][2]/grid[2]
# Now read the rest of the file
data=gridfi.read().split()
for i in range(ngrid):
data[i]=float(data[i])
zavg=[]
for i in range(grid[2]):
zavgtmp=0.0
for j in range(grid[0]*grid[1]):
zavgtmp+=data[i*grid[0]*grid[1]+j]
zavgtmp=zavgtmp/(grid[0]*grid[1])
zavg.append(zavgtmp)
outfi = open(outfname,"w")
for i in range(len(zavg)):
outfi.write(str(dz*i) + " " + str(zavg[i]) + "\n")
#print zavg
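# Hedged usage note (added for illustration): the script is interactive; a
# typical session would look roughly like
#
#   $ python zav.py
#   Enter VASP field data (CHGCAR, LOCPOT, etc.)  LOCPOT
#   Enter output file name  locpot_zavg.dat
#
# and writes two columns per line: the z coordinate (in the length units of
# the input lattice) and the planar average of the field at that height.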
|
mit
| -8,205,839,579,492,689,000 | 23.755319 | 72 | 0.62312 | false | 2.650342 | false | false | false |
darkfeline/dantalian
|
src/dantalian/base.py
|
1
|
5280
|
# Copyright (C) 2015 Allen Li
#
# This file is part of Dantalian.
#
# Dantalian is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dantalian is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dantalian. If not, see <http://www.gnu.org/licenses/>.
"""This module defines basic interaction with a semantic filesystem.
This module primarily extends link(), unlink(), and rename() to work as though
they support directory linking. The rest of the functions exist as
implementation details to manage directory linking with symlinks and dtags.
"""
from itertools import chain
import os
import posixpath
from dantalian import dtags
from dantalian import oserrors
from dantalian import pathlib
from dantalian import tagnames
def link(rootpath, src, dst):
"""Link src to dst.
Args:
rootpath: Path for tagname conversions.
src: Source path.
dst: Destination path.
"""
if posixpath.isdir(src):
src = pathlib.readlink(src)
os.symlink(posixpath.abspath(src), dst)
dtags.add_tag(src, tagnames.path2tag(rootpath, dst))
else:
os.link(src, dst)
def unlink(rootpath, path):
"""Unlink given path.
If the target is a directory without any other links, raise OSError.
"""
target = path
# We unlink the target. However, if it is a directory, we want to swap it
# out for one of its symlinks, then unlink the symlink. If the directory
# doesn't have any tags, then we fail.
if posixpath.isdir(target):
if not posixpath.islink(target):
tags = dtags.list_tags(target)
if not tags:
raise oserrors.is_a_directory(target)
swap_candidate = tagnames.tag2path(rootpath, tags[0])
swap_dir(rootpath, swap_candidate)
assert posixpath.islink(target)
dtags.remove_tag(target, tagnames.path2tag(rootpath, target))
os.unlink(target)
def rename(rootpath, src, dst):
"""Rename src to dst and fix tags for directories.
Doesn't overwrite an existing file at dst.
Args:
rootpath: Rootpath for tagname conversions.
src: Source path.
dst: Destination path.
"""
link(rootpath, src, dst)
unlink(rootpath, src)
def swap_dir(rootpath, path):
"""Swap a symlink with its target directory.
Args:
rootpath: Rootpath for tag conversions.
path: Path of target symlink.
"""
target = path
if posixpath.islink(target) and posixpath.isdir(target):
here = target
there = pathlib.readlink(target)
# here is the symlink
# there is the dir
here_tag = tagnames.path2tag(rootpath, here)
there_tag = tagnames.path2tag(rootpath, there)
dtags.remove_tag(here, here_tag)
dtags.add_tag(here, there_tag)
os.unlink(here)
# here is now nothing
# there is now the dir
os.rename(there, here)
# here is now the dir
# there is now nothing
os.symlink(here, there)
else:
raise ValueError('{} is not a symlink to a directory'.format(target))
def list_links(top, path):
"""List all links to the target file.
Args:
top: Path to top of directory tree to search.
path: Path of file.
Returns:
Generator yielding paths.
"""
target = path
for (dirpath, dirnames, filenames) in os.walk(top):
for name in chain(dirnames, filenames):
filepath = posixpath.join(dirpath, name)
if posixpath.samefile(target, filepath):
yield filepath
def save_dtags(rootpath, top, dirpath):
"""Save symlinks to a directory's dtags, overwriting it.
Args:
rootpath: Path for tag conversions.
top: Path of directory in which to search.
dirpath: Path of directory whose dtags to update.
"""
dirpath = pathlib.readlink(dirpath)
tags = [tagnames.path2tag(rootpath, path)
for path in list_links(top, dirpath)]
dir_tagname = tagnames.path2tag(rootpath, dirpath)
tags = [tagname
for tagname in tags
if tagname != dir_tagname]
dtags.set_tags(dirpath, tags)
def load_dtags(rootpath, dirpath):
"""Create symlinks for a directory using its dtags."""
tags = dtags.list_tags(dirpath)
dirpath = pathlib.readlink(dirpath)
target = posixpath.abspath(dirpath)
for tagname in tags:
dstpath = tagnames.tag2path(rootpath, tagname)
os.symlink(target, dstpath)
def unload_dtags(rootpath, dirpath):
"""Remove symlinks using a directory's dtags."""
tags = dtags.list_tags(dirpath)
dirpath = pathlib.readlink(dirpath)
for tagname in tags:
tagpath = tagnames.tag2path(rootpath, tagname)
if posixpath.samefile(dirpath, tagpath):
os.unlink(tagpath)
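# Hedged usage sketch (not part of the module): tagging a directory by linking
# it under a second path.  The paths are made up for the example.
#
#   root = '/home/user/library'
#   link(root, root + '/photos/2015', root + '/tags/best/2015')
#   # the directory now records the new tag in its dtags and the new path is
#   # a symlink pointing back at it
#   unlink(root, root + '/tags/best/2015')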
|
gpl-3.0
| 4,279,770,812,570,900,500 | 29.697674 | 78 | 0.659848 | false | 3.955056 | false | false | false |
onitake/Uranium
|
UM/Scene/ToolHandle.py
|
1
|
3897
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from . import SceneNode
from UM.Resources import Resources
from UM.Application import Application
from UM.Math.Color import Color
from UM.Math.Vector import Vector
from UM.Scene.Selection import Selection
from UM.View.GL.OpenGL import OpenGL
from UM.View.RenderBatch import RenderBatch
## A tool handle is an object in the scene that gives cues for what the tool it is
# 'paired' with can do. ToolHandles are used for translation, rotation & scale handles.
# They can also be used as actual objects to interact with (in the case of translation,
# pressing one arrow of the toolhandle locks the translation in that direction)
class ToolHandle(SceneNode.SceneNode):
NoAxis = 1
XAxis = 2
YAxis = 3
ZAxis = 4
AllAxis = 5
DisabledColor = Color(0.5, 0.5, 0.5, 1.0)
XAxisColor = Color(1.0, 0.0, 0.0, 1.0)
YAxisColor = Color(0.0, 0.0, 1.0, 1.0)
ZAxisColor = Color(0.0, 1.0, 0.0, 1.0)
AllAxisColor = Color(1.0, 1.0, 1.0, 1.0)
def __init__(self, parent = None):
super().__init__(parent)
self._scene = Application.getInstance().getController().getScene()
self._solid_mesh = None
self._line_mesh = None
self._selection_mesh = None
self._shader = None
self._previous_dist = None
self._active_axis = None
self._auto_scale = True
self.setCalculateBoundingBox(False)
Selection.selectionCenterChanged.connect(self._onSelectionCenterChanged)
def getLineMesh(self):
return self._line_mesh
def setLineMesh(self, mesh):
self._line_mesh = mesh
self.meshDataChanged.emit(self)
def getSolidMesh(self):
return self._solid_mesh
def setSolidMesh(self, mesh):
self._solid_mesh = mesh
self.meshDataChanged.emit(self)
def getSelectionMesh(self):
return self._selection_mesh
def setSelectionMesh(self, mesh):
self._selection_mesh = mesh
self.meshDataChanged.emit(self)
def getMaterial(self):
return self._shader
def render(self, renderer):
if not self._shader:
self._shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "toolhandle.shader"))
if self._auto_scale:
camera_position = self._scene.getActiveCamera().getWorldPosition()
dist = (camera_position - self.getWorldPosition()).length()
scale = dist / 400
self.setScale(Vector(scale, scale, scale))
if self._line_mesh:
renderer.queueNode(self, mesh = self._line_mesh, mode = RenderBatch.RenderMode.Lines, overlay = True, shader = self._shader)
if self._solid_mesh:
renderer.queueNode(self, mesh = self._solid_mesh, overlay = True, shader = self._shader)
return True
def getSelectionMap(self):
return {
self.XAxisColor: self.XAxis,
self.YAxisColor: self.YAxis,
self.ZAxisColor: self.ZAxis,
self.AllAxisColor: self.AllAxis
}
def setActiveAxis(self, axis):
if axis == self._active_axis or not self._shader:
return
if axis:
self._shader.setUniformValue("u_activeColor", self._axisColorMap[axis])
else:
self._shader.setUniformValue("u_activeColor", self.DisabledColor)
self._active_axis = axis
self._scene.sceneChanged.emit(self)
@classmethod
def isAxis(cls, value):
return value in cls._axisColorMap
_axisColorMap = {
NoAxis: DisabledColor,
XAxis: XAxisColor,
YAxis: YAxisColor,
ZAxis: ZAxisColor,
AllAxis: AllAxisColor
}
def _onSelectionCenterChanged(self):
self.setPosition(Selection.getSelectionCenter())
|
agpl-3.0
| -6,628,176,053,176,245,000 | 30.176 | 136 | 0.641519 | false | 3.739923 | false | false | false |
AngryDevelopersLLC/res-scheduler
|
setup.py
|
1
|
3088
|
"""
Resystem Scheduling Service.
Released under New BSD License.
Copyright © 2015, Vadim Markovtsev :: Angry Developers LLC
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Angry Developers LLC nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL VADIM MARKOVTSEV BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from setuptools import setup
import os
def parse_requirements():
path = os.path.join(os.path.dirname(__file__), "requirements.txt")
reqs = []
with open(path, "r") as fin:
for r in fin.read().split("\n"):
r = r.strip()
if r.startswith("#") or not r:
continue
if r.startswith("git+"):
print("Warning: git dependencies cannot be used in setuptools "
"(%s)" % r)
continue
if not r.startswith("-r"):
reqs.append(r)
return reqs
setup(
name="res-scheduling",
description="Resystem Scheduling Service",
version="1.0.2",
license="New BSD",
author="Vadim Markovtsev",
author_email="[email protected]",
url="https://github.com/AngryDevelopersLLC/res-scheduler",
download_url='https://github.com/AngryDevelopersLLC/res-scheduler',
packages=["res.scheduling"],
install_requires=parse_requirements(),
package_data={"": [
'res/scheduling/requirements/base.txt',
'res/scheduling/res_scheduling.service',
'res/scheduling/run.sh']},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
]
)
|
bsd-3-clause
| -7,938,679,896,831,706,000 | 40.716216 | 79 | 0.688371 | false | 4.397436 | false | false | false |
cloudmesh/vagrant
|
cloudmesh_vagrant/cm_vbox.py
|
1
|
5612
|
from __future__ import print_function
from docopt import docopt
import cloudmesh_vagrant as vagrant
from cloudmesh_client.common.dotdict import dotdict
from pprint import pprint
from cloudmesh_client.common.Printer import Printer
from cloudmesh_client.common.Shell import Shell
import sys
import os
from cloudmesh_vagrant.version import __version__
# pprint (vagrant.vm.list())
# vagrant.vm.execute("w2", "uname")
# pprint (vagrant.image.list())
def defaults():
"""
default values
    :return: the default values for memory, image, port and script
:rtype: dotdict
"""
d = dotdict()
d.memory = 1024
# d.image = "ubuntu/xenial64"
d.image = "ubuntu/trusty64"
d.port = 8080
d.script = None
return d
def _convert(lst, id="name"):
d = {}
for entry in lst:
d[entry[id]] = entry
return d
def _LIST_PRINT(l, output, order=None):
if output in ["yaml", "dict", "json"]:
l = _convert(l)
result = Printer.write(l,
order=order,
output=output)
if output in ["table", "yaml", "json", "csv"]:
print(result)
else:
pprint(result)
def do_vbox(argv):
"""
::
Usage:
vbox version [--format=FORMAT]
vbox image list [--format=FORMAT]
vbox image find NAME
vbox image add NAME
vbox vm list [--format=FORMAT] [-v]
vbox vm delete NAME
vbox vm config NAME
vbox vm ip NAME [--all]
vbox create NAME ([--memory=MEMORY]
[--image=IMAGE]
[--script=SCRIPT] | list)
vbox vm boot NAME ([--memory=MEMORY]
[--image=IMAGE]
[--port=PORT]
[--script=SCRIPT] | list)
vbox vm ssh NAME [-e COMMAND]
"""
arg = dotdict(docopt(do_vbox.__doc__, argv))
arg.format = arg["--format"] or "table"
arg.verbose = arg["-v"]
arg.all = arg["--all"]
if arg.version:
versions = {
"vagrant": {
"attribute": "Vagrant Version",
"version": vagrant.version(),
},
"cloudmesh-vbox": {
"attribute":"cloudmesh vbox Version",
"version": __version__
}
}
_LIST_PRINT(versions, arg.format)
elif arg.image and arg.list:
l = vagrant.image.list(verbose=arg.verbose)
_LIST_PRINT(l, arg.format, order=["name", "provider", "date"])
elif arg.image and arg.add:
l = vagrant.image.add(arg.NAME)
print(l)
elif arg.image and arg.find:
l = vagrant.image.find(arg.NAME)
print(l)
elif arg.vm and arg.list:
l = vagrant.vm.list()
_LIST_PRINT(l,
arg.format,
order=["name", "state", "id", "provider", "directory"])
elif arg.create and arg.list:
result = Shell.cat("{NAME}/Vagrantfile".format(**arg))
print (result)
elif arg.create:
d = defaults()
arg.memory = arg["--memory"] or d.memory
arg.image = arg["--image"] or d.image
arg.script = arg["--script"] or d.script
vagrant.vm.create(
name=arg.NAME,
memory=arg.memory,
image=arg.image,
script=arg.script)
elif arg.config:
# arg.NAME
d = vagrant.vm.info(name=arg.NAME)
result = Printer.attribute(d, output=arg.format)
print (result)
elif arg.ip:
data = []
result = vagrant.vm.execute(arg.NAME, "ifconfig")
if result is not None:
lines = result.splitlines()[:-1]
for line in lines:
if "inet addr" in line:
line = line.replace("inet addr", "ip")
line = ' '.join(line.split())
_adresses = line.split(" ")
address = {}
for element in _adresses:
attribute, value = element.split(":")
address[attribute] = value
data.append(address)
if arg.all:
d = {}
i = 0
for e in data:
d[str(i)] = e
i = i + 1
result = Printer.attribute(d, output=arg.format)
print(result)
else:
for element in data:
ip = element['ip']
if ip == "127.0.0.1" or ip.startswith("10."):
pass
else:
print (element['ip'])
elif arg.boot:
d = defaults()
arg.memory = arg["--memory"] or d.memory
arg.image = arg["--image"] or d.image
arg.script = arg["--script"] or d.script
arg.port = arg["--port"] or d.port
vagrant.vm.boot(
name=arg.NAME,
memory=arg.memory,
image=arg.image,
script=arg.script,
port=arg.port)
elif arg.delete:
result = vagrant.vm.delete(name=arg.NAME)
print(result)
elif arg.ssh:
if arg.COMMAND is None:
os.system("cd {NAME}; vagrant ssh {NAME}".format(**arg))
else:
result = vagrant.vm.execute(arg.NAME, arg.COMMAND)
if result is not None:
lines = result.splitlines()[:-1]
for line in lines:
print (line)
else:
print ("use help")
def main():
args = sys.argv[1:]
do_vbox(args)
if __name__ == '__main__':
main()
|
apache-2.0
| -8,535,315,272,027,744,000 | 25.224299 | 74 | 0.498396 | false | 3.88912 | false | false | false |
balazssimon/ml-playground
|
udemy/lazyprogrammer/reinforcement-learning-python/approx_mc_prediction.py
|
1
|
2661
|
import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
# NOTE: this is only policy evaluation, not optimization
# we'll try to obtain the same result as our other MC script
from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS
LEARNING_RATE = 0.001
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
# found by policy_iteration_random on standard_grid
# MC method won't get exactly this, but should be close
# values:
# ---------------------------
# 0.43| 0.56| 0.72| 0.00|
# ---------------------------
# 0.33| 0.00| 0.21| 0.00|
# ---------------------------
# 0.25| 0.18| 0.11| -0.17|
# policy:
# ---------------------------
# R | R | R | |
# ---------------------------
# U | | U | |
# ---------------------------
# U | L | U | L |
policy = {
(2, 0): 'U',
(1, 0): 'U',
(0, 0): 'R',
(0, 1): 'R',
(0, 2): 'R',
(1, 2): 'U',
(2, 1): 'L',
(2, 2): 'U',
(2, 3): 'L',
}
# initialize theta
# our model is V_hat = theta.dot(x)
# where x = [row, col, row*col, 1] - 1 for bias term
theta = np.random.randn(4) / 2
def s2x(s):
return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1])
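  # Worked example (illustrative): for the state s = (2, 0) the features are
  # s2x((2, 0)) -> [2 - 1, 0 - 1.5, 2*0 - 3, 1] = [1, -1.5, -3, 1],
  # so the approximate value is simply theta.dot([1, -1.5, -3, 1]).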
# repeat until convergence
deltas = []
t = 1.0
for it in range(20000):
if it % 100 == 0:
t += 0.01
alpha = LEARNING_RATE/t
# generate an episode using pi
biggest_change = 0
states_and_returns = play_game(grid, policy)
seen_states = set()
for s, G in states_and_returns:
# check if we have already seen s
# called "first-visit" MC policy evaluation
if s not in seen_states:
old_theta = theta.copy()
x = s2x(s)
V_hat = theta.dot(x)
# grad(V_hat) wrt theta = x
theta += alpha*(G - V_hat)*x
biggest_change = max(biggest_change, np.abs(old_theta - theta).sum())
seen_states.add(s)
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
# obtain predicted values
V = {}
states = grid.all_states()
for s in states:
if s in grid.actions:
V[s] = theta.dot(s2x(s))
else:
# terminal state or state we can't otherwise get to
V[s] = 0
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
|
apache-2.0
| -1,145,871,072,479,624,100 | 26.153061 | 98 | 0.534386 | false | 3.04811 | false | false | false |
petrundiy2/arithmetic_dragons
|
enemies.py
|
1
|
3433
|
__author__ = 'student'
# coding: utf-8
# license: GPLv3
from gameunit import *
from random import randint, choice
class Enemy(Attacker):
pass
def generate_random_enemy():
RandomEnemyType = choice(enemy_types)
enemy = RandomEnemyType()
return enemy
def generate_dragon_list(enemy_number):
enemy_list = [generate_random_enemy() for i in range(enemy_number)]
return enemy_list
class Dragon(Enemy):
def set_answer(self, answer):
self.__answer = answer
def check_answer(self, answer):
return answer == self.__answer
class Troll(Enemy):
def set_answer(self,answer):
self.__answer = answer
def check_answer(self, answer):
return answer == self.__answer
class GreenDragon(Dragon):
def __init__(self):
self._health = 200
self._attack = 10
        self._color = 'green dragon'
def question(self):
x = randint(1,100)
y = randint(1,100)
self.__quest = str(x) + '+' + str(y)
self.set_answer(x + y)
return self.__quest
class RedDragon(Dragon):
def __init__(self):
self._health = 170
self._attack = 15
        self._color = 'red dragon'
def question(self):
x = randint(1,100)
y = randint(1,100)
self.__quest = str(x) + '-' + str(y)
self.set_answer(x - y)
return self.__quest
class BlackDragon(Dragon):
def __init__(self):
self._health = 250
self._attack = 9
        self._color = 'black dragon'
def question(self):
x = randint(1,100)
y = randint(1,100)
self.__quest = str(x) + '*' + str(y)
self.set_answer(x * y)
return self.__quest
class CleverTroll1(Troll):
def __init__(self):
self._health=300
self._attack=20
        self._color='green fat troll'
def question(self):
x = randint(1,5)
        self.__quest = 'Guess a number from 1 to 5!'
self.set_answer(x)
return self.__quest
class CleverTroll2(Troll):
def __init__(self):
self._health=280
self._attack=25
        self._color='blue thin troll'
    def question(self):
        x = randint(1,1000)
        self.__quest = 'Is the number'+' '+str(x)+' '+'prime?'+' Prime-1, Composite-0, Neither prime nor composite-00'
        # count every divisor of x, including x itself
        n=0
        for y in range(1,x+1):
            if x%y==0:
                n+=1
        if n==2:
            # exactly two divisors (1 and x): prime
            self.set_answer(1)
        if n>2:
            # more than two divisors: composite
            self.set_answer(0)
        if n==1:
            # only x == 1 lands here: neither prime nor composite
            # (note the literal 00 equals 0, so a plain 0 is also accepted)
            self.set_answer(00)
        return self.__quest
class CleverTroll3(Troll):
def __init__(self):
self._health=350
self._attack=20
        self._color='Huge grey troll'
def question(self):
x = randint(1,100)
        self.__quest = 'List the factors of'+' '+str(x)+' '+'in ascending order! The number itself is included!'
A=[]
for y in range (1,x+1):
if x%y==0:
A.append(y)
j=''
for t in range(len(A)):
j+=str(A[t])
u=int(j)
self.set_answer(u)
return self.__quest
enemy_types = [GreenDragon, RedDragon, BlackDragon,CleverTroll1,CleverTroll2,CleverTroll3]
|
gpl-3.0
| -4,596,253,404,611,616,300 | 25.733333 | 116 | 0.551434 | false | 2.818981 | false | false | false |
ric2b/Vivaldi-browser
|
chromium/third_party/blink/renderer/devtools/scripts/build/build_debug_applications.py
|
1
|
2057
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Builds applications in debug mode:
- Copies the module directories into their destinations.
- Copies app.html as-is.
"""
from os import path
from os.path import join
import os
import shutil
import sys
import modular_build
def main(argv):
try:
input_path_flag_index = argv.index('--input_path')
input_path = argv[input_path_flag_index + 1]
output_path_flag_index = argv.index('--output_path')
output_path = argv[output_path_flag_index + 1]
except:
print('Usage: %s app_1 app_2 ... app_N --input_path <input_path> --output_path <output_path>' % argv[0])
raise
symlink_dir_or_copy(input_path, output_path)
def symlink_dir_or_copy(src, dest):
if hasattr(os, 'symlink'):
if path.exists(dest):
if os.path.islink(dest):
os.unlink(dest)
else:
shutil.rmtree(dest)
os.symlink(join(os.getcwd(), src), dest)
else:
for filename in os.listdir(src):
new_src = join(os.getcwd(), src, filename)
if os.path.isdir(new_src):
copy_dir(new_src, join(dest, filename))
else:
copy_file(new_src, join(dest, filename), safe=True)
def copy_file(src, dest, safe=False):
if safe and path.exists(dest):
os.remove(dest)
shutil.copy(src, dest)
def copy_dir(src, dest):
if path.exists(dest):
shutil.rmtree(dest)
for src_dir, dirs, files in os.walk(src):
subpath = path.relpath(src_dir, src)
dest_dir = path.normpath(join(dest, subpath))
os.mkdir(dest_dir)
for name in files:
src_name = join(os.getcwd(), src_dir, name)
dest_name = join(dest_dir, name)
copy_file(src_name, dest_name)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
bsd-3-clause
| -6,639,254,433,883,959,000 | 27.444444 | 112 | 0.599121 | false | 3.357377 | false | false | false |
jabbalaci/jabbapylib
|
demos/browser_automation/splinter_2.py
|
1
|
1090
|
#!/usr/bin/env python
"""
splinter
http://splinter.cobrateam.info
"""
from time import sleep
from splinter.browser import Browser
#url = 'http://simile.mit.edu/crowbar/test.html'
#url = 'http://dl.dropbox.com/u/144888/hello_js.html'
url = 'http://www.ncbi.nlm.nih.gov/nuccore/CP002059.1'
#url = 'http://translate.google.com/#en|fr|game'
def main():
#browser = Browser('zope.testbrowser')
#browser = Browser('webdriver.chrome')
browser = Browser()
browser.visit(url)
#browser.execute_script("var win = window.open(); win.document.write('<html><head><title>Generated HTML of ' + location.href + '</title></head><pre>' + document.documentElement.innerHTML.replace(/&/g, '&').replace(/</g, '<') + '</pre></html>'); win.document.close(); void 0;")
while 'ORIGIN' not in browser.html:
sleep(5)
f = open("/tmp/source.html", "w")
print >>f, browser.html
f.close()
browser.quit()
print '__END__'
#############################################################################
if __name__ == "__main__":
main()
|
gpl-3.0
| -3,163,800,249,660,252,700 | 26.974359 | 288 | 0.577064 | false | 3.30303 | false | false | false |
OpusVL/odoo
|
openerp/addons/base/module/wizard/base_module_upgrade.py
|
1
|
5096
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
""" Module Upgrade """
_name = "base.module.upgrade"
_description = "Module Upgrade"
_columns = {
'module_info': fields.text('Modules to Update',readonly=True),
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if view_type != 'form':
return res
context = {} if context is None else context
record_id = context and context.get('active_id', False) or False
active_model = context.get('active_model')
if (not record_id) or (not active_model):
return res
ids = self.get_module_list(cr, uid, context=context)
if not ids:
res['arch'] = '''<form string="Upgrade Completed" version="7.0">
<separator string="Upgrade Completed" colspan="4"/>
<footer>
<button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
<button special="cancel" string="Close" class="oe_link"/>
</footer>
</form>'''
return res
def get_module_list(self, cr, uid, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove', 'to install'])])
return ids
def default_get(self, cr, uid, fields, context=None):
mod_obj = self.pool.get('ir.module.module')
ids = self.get_module_list(cr, uid, context=context)
res = mod_obj.read(cr, uid, ids, ['name','state'], context)
return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}
def upgrade_module_cancel(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.module.module')
to_installed_ids = mod_obj.search(cr, uid, [
('state', 'in', ['to upgrade', 'to remove'])])
if to_installed_ids:
mod_obj.write(cr, uid, to_installed_ids, {'state': 'installed'}, context=context)
to_uninstalled_ids = mod_obj.search(cr, uid, [
('state', '=', 'to install')])
if to_uninstalled_ids:
mod_obj.write(cr, uid, to_uninstalled_ids, {'state': 'uninstalled'}, context=context)
return {'type': 'ir.actions.act_window_close'}
def upgrade_module(self, cr, uid, ids, context=None):
ir_module = self.pool.get('ir.module.module')
# install/upgrade: double-check preconditions
ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
if ids:
cr.execute("""SELECT d.name FROM ir_module_module m
JOIN ir_module_module_dependency d ON (m.id = d.module_id)
LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
(tuple(ids), ('uninstalled',)))
unmet_packages = [x[0] for x in cr.fetchall()]
if unmet_packages:
raise osv.except_osv(_('Unmet Dependency!'),
_('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))
ir_module.download(cr, uid, ids, context=context)
cr.commit() # save before re-creating cursor below
openerp.api.Environment.reset()
openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
return {'type': 'ir.actions.act_window_close'}
def config(self, cr, uid, ids, context=None):
return self.pool.get('res.config').next(cr, uid, [], context=context)
|
agpl-3.0
| -3,829,129,742,672,154,600 | 45.327273 | 157 | 0.566719 | false | 3.938176 | false | false | false |
neothemachine/crowfood
|
crowfood/cli.py
|
1
|
5675
|
from __future__ import absolute_import, print_function
import sys
import os
import argparse
from warnings import warn
import crowfood.engine
from crowfood.utils import is_subdir
description = '''
See sfood for output format.
'''
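# Typical invocation (paths illustrative): scan a source tree, add an external include
# search path and merge .c/.h pairs into modules:
#   cfood src/ -I /usr/include --merge module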
def getParser():
parser = argparse.ArgumentParser(prog='cfood',
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('path', help='file or directory to scan (directory becomes a hierarchy root)',
nargs='+',
)
parser.add_argument('--quotetypes', help=
                        'Select for parsing the files included by double quotes or angle brackets:\n'
                        'both - the default, parse all headers\n'
                        'angle - include only "system" headers included by angle brackets (<>)\n'
                        'quote - include only "user" headers included by double quotes ("")',
default='both', choices=['both', 'angle', 'quote'])
parser.add_argument('--ext', help='an additional extension for files to be scanned\n'
'default: c, h, cc, cpp, cxx, hpp, hxx',
action='append', default=[], dest='additional_exts',
)
parser.add_argument('--merge', help='file - the default, treats each file as separate\n'
'module - merges .c/.cc/.cpp/.cxx and .h/.hpp/.hxx pairs',
default='file', choices=['file', 'module'])
parser.add_argument('-i','--ignore', help='directory to ignore',
dest='ignore_paths', metavar='IGNORE',
action='append', default=[],
)
parser.add_argument('-I','--include', help=
'additional include search path (for external dependencies\n'
'or when directory to scan does not correspond to #include path)',
dest='include_paths', metavar='INCLUDE',
action='append', default=[],
)
parser.add_argument('--no-include-current', help=
'Do not search for includes in the folder of the '
'currently scanned file',
dest='no_include_current',
action='store_true',
)
parser.add_argument('--fuzzy', help=
'Try to locate all non-found includes by matching '
'with file name only. Note that this may lead to '
'wrong dependencies.',
dest='fuzzy',
action='store_true',
)
parser.add_argument('-E','--external-root', help=
                        'root directory to use for additional -I include paths for external dependencies\n'
                        'if not given, then the -I directories become the roots instead',
dest='external_roots', metavar='ROOT',
action='append', default=[],
)
parser.add_argument('--print-roots', help='Only print the roots, useful for testing',
dest='print_roots',
action='store_true',
)
parser.add_argument('-v', '--verbose', help='be more verbose',
dest='verbose',
action='store_true',
)
return parser
def parseargs(argv):
parser = getParser()
if not argv:
parser.print_help()
sys.exit(1)
args = parser.parse_args(argv)
for path in args.include_paths:
if not os.path.isdir(path):
parser.error('{} is not a directory'.format(path))
for path in args.ignore_paths:
if not os.path.isdir(path):
            warn('{} is not a directory'.format(path))
for path in args.path:
if not os.path.exists(path):
parser.error('{} does not exist'.format(path))
for ext_root in args.external_roots:
if not os.path.isdir(ext_root):
parser.error('{} is not a directory'.format(ext_root))
if not any(is_subdir(include_path, ext_root) for include_path in args.include_paths):
            parser.error(('The external root {} must have at least '
                          'one matching -I subdirectory').format(ext_root))
args.include_paths = list(map(os.path.abspath, args.include_paths))
args.external_roots = list(map(os.path.abspath, args.external_roots))
args.ignore_paths = list(map(os.path.abspath, args.ignore_paths))
args.path = list(map(os.path.abspath, args.path))
return args
def main():
args = parseargs(sys.argv[1:])
if args.print_roots:
input_roots, input_include_paths, external_roots, external_include_paths =\
crowfood.engine.get_roots_and_include_paths(args)
print('input roots:')
print(input_roots)
print('input roots search paths:')
print(list(input_include_paths.values()))
print('external roots:')
print(external_roots)
print('external roots search paths:')
print(list(external_include_paths.values()))
sys.exit()
deps = crowfood.engine.run(args)
for dep in deps:
print(dep)
if __name__ == '__main__':
main()
|
mit
| -2,339,957,922,401,396,000 | 38.971831 | 105 | 0.52141 | false | 4.744983 | false | false | false |
jirenz/CS229_Project
|
hearthbreaker/replay.py
|
1
|
21593
|
import re
import json
import hearthbreaker
from hearthbreaker.cards.heroes import hero_from_name
import hearthbreaker.constants
from hearthbreaker.engine import Game, card_lookup, Deck
import hearthbreaker.game_objects
import hearthbreaker.cards
import hearthbreaker.proxies
from hearthbreaker.serialization.move import Move, AttackMove, PowerMove, TurnEndMove, \
TurnStartMove, ConcedeMove, PlayMove, GameEndMove
from pprint import pprint
__doc__ = """
Responsible for reading and writing replays in either the compact or complete replay format (see the `replay format
<https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_ for details).
Recording a game
~~~~~~~~~~~~~~~~
Recording a game is a matter of creating a game, calling :meth:record on that game, playing the game, and then saving
the replay. For example: ::
game = create_a_game() # Create a game somehow
replay = record(game) # Create a replay that will track the game's moves
game.start() # Play the game
replay.write_json("my_replay.hsreplay") # Save the replay to a file
Playing back a game
~~~~~~~~~~~~~~~~~~~
Playing back a game is a matter of loading the replay, getting a game for playing it back, and then starting the game
For example: ::
replay = Replay() # create a new replay object
replay.read_json("my_replay.hsreplay") # load the replay (this can be combined with the previous line)
game = playback(replay) # create a game associated with the replay
game.start() # play the recorded game
"""
class Replay:
"""
Encapsulates the data stored in a replay, along with functions to read and write replays. The data
stored in this class can be used for either recording or playing back replays.
"""
def __init__(self, filename=None):
"""
Create a new Replay. This replay can be used for recording or playing back a game.
If the `filename` string is present, then this will also load the file located at `filename` for playback
:param string filename: A string representing a filename for a replay file to load or None (the default).
If present, it will load the selected replay and prepare it for playback.
The replay file must be in the complete format
"""
self._moves = []
self.__next_target = None
self.__next_index = -1
self.decks = []
self.keeps = []
self.random = []
schema_file = open("replay.schema.json", "r")
self.schema = json.load(schema_file)
schema_file.close()
if filename is not None:
self.read_json(filename)
def _save_decks(self, deck1, deck2):
"""
Save the decks specified by the parameters
:param hearthbreaker.game_objects.Deck deck1: The deck for player 1
:param hearthbreaker.game_objects.Deck deck2: The deck for player 2
"""
self.decks = [deck1, deck2]
def _record_random(self, result):
"""
Record a random number that has been generated by the system.
This random number will be added to the header if the game hasn't started, or top the most recent
move if it has.
"""
if len(self._moves) > 0:
if self._moves[-1].__class__.__name__ != 'GameEndMove':
self._moves[-1].random_numbers.append(result)
else:
self._moves[-2].random_numbers.append(result)
else:
self.random.append(result)
def _record_card_played(self, card, index):
"""
Record that a card has been played. This will add a new PlayMove to the moves array
"""
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(index), target=card.target))
if self.__next_index >= 0:
self._moves[-1].index = self.__next_index
self.__next_index = -1
def _record_option_chosen(self, option):
"""
Record that an option was chosen. This will update whichever is the most recent move
"""
self._moves[-1].card.set_option(option)
def _record_attack(self, attacker, target):
"""
Record that an attack occurred. This will create a new AttackMove in the moves array
"""
self._moves.append(AttackMove(attacker, target))
self.__next_target = None
def _record_power(self):
"""
        Record that the current player used their hero power
"""
self._moves.append(PowerMove(self.__next_target))
self.__next_target = None
def _record_target(self, target):
"""
Record that a target was chosen. This affects PlayMoves and PowerMoves. AttackMoves have
their target passed in as an argument
"""
self.__next_target = target
def _record_index(self, index):
"""
Records the index that a minion is played at. Will update the most recent move with this index
"""
self.__next_index = index
def _record_kept_index(self, cards, card_index):
"""
Records the index of the cards that a player kept.
"""
k_arr = []
for index in range(0, len(cards)):
if card_index[index]:
k_arr.append(index)
self.keeps.append(k_arr)
def _record_game_end(self, winner):
"""
Record the end of the game
"""
self._moves.append(GameEndMove(winner))
def __shorten_deck(self, cards):
"""
Mostly for testing, this function will check if the deck is made up of a repeating pattern and if so, shorten
the output, since the parser will generate the pattern from a shorter sample
:param cards: The deck of cards to replace
:return: an array of cards that represents the deck if repeated until 30 cards are found
"""
for pattern_length in range(1, 15):
matched = True
for index in range(pattern_length, 30):
if not isinstance(cards[index % pattern_length], type(cards[index])):
matched = False
break
if matched:
return cards[0:pattern_length]
return cards
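    # For example (illustrative): a 30 card deck built by cycling two cards collapses
    # to just those two entries; the readers rebuild the full deck by repeating the
    # pattern back up to 30 cards (see read_json/read below).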
def write(self, file):
"""
Write a replay in the compact format. This format is a series of directives, and isn't as flexible
or well structured as the json format (in :meth:write_json). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
was_filename = False
writer = file
for deck in self.decks:
writer.write("deck(")
writer.write(deck.hero.short_name)
writer.write(",")
writer.write(",".join([card.name for card in self.__shorten_deck(deck.cards)]))
writer.write(")\n")
found_random = False
if self.random.count(0) == len(self.random):
for move in self._moves:
if move.random_numbers.count(0) != len(move.random_numbers):
found_random = True
break
else:
found_random = True
if not found_random:
writer.write("random()\n")
else:
writer.write("random(")
writer.write(",".join([str(num) for num in self.random]))
writer.write(")\n")
for keep in self.keeps:
writer.write("keep(")
writer.write(",".join([str(k) for k in keep]))
writer.write(")\n")
for move in self._moves:
writer.write(move.to_output_string() + "\n")
if len(move.random_numbers) > 0:
writer.write("random(")
writer.write(",".join([str(num) for num in move.random_numbers]))
writer.write(")\n")
if was_filename:
writer.close()
def write_json(self, file):
"""
Write a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file should be written. If an IO object, then the IO object should be opened for
writing.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'write' not in dir(file):
was_filename = True
writer = open(file, 'w')
else:
writer = file
header_cards = [{"cards": [card.name for card in self.__shorten_deck(deck.cards)],
"hero": deck.hero.short_name} for deck in self.decks]
header = {
'decks': header_cards,
'keep': self.keeps,
'random': self.random,
}
json.dump({'header': header, 'moves': self._moves}, writer, default=lambda o: o.__to_json__(), indent=2,
sort_keys=True)
if was_filename:
writer.close()
def read_json(self, file):
"""
Read a replay in the complete json format. This format is compatible with the netplay format, and is
also designed to be more future proof. For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
from jsonschema import validate
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
jd = json.load(file)
validate(jd, self.schema)
self.decks = []
for deck in jd['header']['decks']:
deck_size = len(deck['cards'])
cards = [card_lookup(deck['cards'][index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(deck['hero'])))
self.random = jd['header']['random']
self.keeps = jd['header']['keep']
if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
self._moves = [Move.from_json(**js) for js in jd['moves']]
if was_filename:
file.close()
def read(self, file):
"""
Read a replay in the compact format. This format is a series of directives, and isn't as flexible
or well structured as the json format (in :meth:write_json). For more info, see the
`replay format <https://github.com/danielyule/hearthbreaker/blob/master/replay_format.md>`_
:param file: Either a string or an IO object. If a string, then it is assumed to be a filename describing
where a replay file is to be found. If an IO object, then the IO object should be opened for
reading.
:type file: :class:`str` or :class:`io.TextIOBase`
"""
was_filename = False
if 'read' not in dir(file):
was_filename = True
file = open(file, 'r')
line_pattern = re.compile("\s*(\w*)\s*\(([^)]*)\)\s*(;.*)?$")
for line in file:
(move, args) = line_pattern.match(line).group(1, 2)
args = [arg.strip() for arg in args.split(",")]
if move == 'play':
card = args[0]
if len(args) > 1:
target = args[1]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), target=target))
elif move == 'summon':
card = args[0]
index = int(args[1])
if len(args) > 2:
target = args[2]
else:
target = None
self._moves.append(PlayMove(hearthbreaker.proxies.ProxyCard(card), index, target))
elif move == 'attack':
self._moves.append(AttackMove(args[0], args[1]))
elif move == 'power':
if len(args) > 0 and args[0] != '':
self._moves.append(PowerMove(args[0]))
else:
self._moves.append(PowerMove())
elif move == 'end':
self._moves.append(TurnEndMove())
elif move == 'start':
self._moves.append(TurnStartMove())
elif move == 'random':
if len(self._moves) == 0:
if len(args[0]) > 0:
for num in args:
self.random.append(int(num))
else:
for num in args:
if num.isdigit():
self._moves[-1].random_numbers.append(int(num))
else:
self._moves[-1].random_numbers.append(hearthbreaker.proxies.ProxyCharacter(num))
elif move == 'deck':
if len(self.decks) > 1:
raise Exception("Maximum of two decks per file")
deck_size = len(args) - 1
cards = [card_lookup(args[1 + index % deck_size]) for index in range(0, 30)]
self.decks.append(
Deck(cards, hero_from_name(args[0])))
elif move == 'keep':
if len(self.keeps) > 1:
raise Exception("Maximum of two keep directives per file")
self.keeps.append([int(a) for a in args])
elif move == 'concede':
self._moves.append(ConcedeMove())
elif move == 'game_end':
pass # currently we are not putting in game end because it will end anyways
if was_filename:
file.close()
        if len(self.keeps) == 0:
self.keeps = [[0, 1, 2], [0, 1, 2, 3]]
def record(game):
"""
Ready a game for recording. This function must be called before the game is played.
Several methods of the game and its agents are modified. These modifications will not affect the operation
of the game or its agents, although any further modifications to these methods will not be recorded.
:param game: A game which has not been started
:type game: :class:`Game <hearthbreaker.game_objects.Game>`
:return: A replay that will track the actions of the game as it is played. Once the game is complete,
this replay can be written to a file to remember the state of this game.
:rtype: :class:`Replay`
"""
class RecordingAgent:
__slots__ = ['agent']
def __init__(self, proxied_agent):
object.__setattr__(self, "agent", proxied_agent)
def choose_index(self, card, player):
index = self.agent.choose_index(card, player)
replay._record_index(index)
return index
def choose_target(self, targets):
target = self.agent.choose_target(targets)
replay._record_target(target)
return target
def choose_option(self, options, player):
option = self.agent.choose_option(options, player)
replay._record_option_chosen(options.index(option))
return option
def __getattr__(self, item):
return self.agent.__getattribute__(item)
def __setattr__(self, key, value):
setattr(self.__getattribute__("agent"), key, value)
replay = hearthbreaker.replay.Replay()
replay.random.append(game.first_player)
game.players[0].agent = RecordingAgent(game.players[0].agent)
game.players[1].agent = RecordingAgent(game.players[1].agent)
if game.first_player == 0:
replay._save_decks(game.players[0].deck, game.players[1].deck)
else:
replay._save_decks(game.players[1].deck, game.players[0].deck)
game.bind("kept_cards", replay._record_kept_index)
game.bind("game_ended", replay._record_game_end)
for player in game.players:
player.bind("used_power", replay._record_power)
player.hero.bind("found_power_target", replay._record_target)
player.bind("card_played", replay._record_card_played)
player.bind("character_attack", replay._record_attack)
_old_random_choice = game.random_choice
_old_generate_random_between = game._generate_random_between
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
def random_choice(choice):
result = _old_random_choice(choice)
if isinstance(result, hearthbreaker.game_objects.Character):
replay._moves[-1].random_numbers[-1] = hearthbreaker.proxies.ProxyCharacter(result)
return result
def _generate_random_between(lowest, highest):
result = _old_generate_random_between(lowest, highest)
replay._record_random(result)
return result
def _end_turn():
replay._moves.append(TurnEndMove())
_old_end_turn()
def _start_turn():
replay._moves.append(TurnStartMove())
_old_start_turn()
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
return replay
def playback(replay):
"""
Create a game which can be replayed back out of a replay.
:param replay: The replay to load the game out of
:type replay: :class:`Replay`
:return: A game which when played will perform all of the actions in the replay.
:rtype: :class:`Game <hearthbreaker.game_objects.Game>`
"""
move_index = -1
k_index = 0
random_index = 0
game = None
class ReplayAgent:
def __init__(self):
self.next_target = None
self.next_index = -1
self.next_option = None
def do_card_check(self, cards):
nonlocal k_index
keep_arr = [False] * len(cards)
for index in replay.keeps[k_index]:
keep_arr[int(index)] = True
k_index += 1
return keep_arr
def do_turn(self, player):
nonlocal move_index, random_index
while move_index < len(replay._moves) and not player.hero.dead and type(
replay._moves[move_index]) is not hearthbreaker.serialization.move.TurnEndMove:
random_index = 0
print(replay._moves[move_index].to_output_string())
replay._moves[move_index].play(game)
move_index += 1
if move_index == len(replay._moves):
player.game.game_ended = True
def set_game(self, game):
pass
def choose_target(self, targets):
return self.next_target
def choose_index(self, card, player):
return self.next_index
def choose_option(self, options, player):
return options[self.next_option]
game = Game.__new__(Game)
_old_random_choice = game.random_choice
_old_start_turn = game._start_turn
_old_end_turn = game._end_turn
_old_pre_game = game.pre_game
def _generate_random_between(lowest, highest):
nonlocal random_index
if len(replay.random) == 0:
return 0
else:
random_index += 1
if move_index == -1:
return replay.random[random_index - 1]
return replay._moves[move_index].random_numbers[random_index - 1]
def random_choice(choice):
nonlocal move_index, random_index
if isinstance(replay._moves[move_index].random_numbers[random_index], hearthbreaker.proxies.ProxyCharacter):
result = replay._moves[move_index].random_numbers[random_index].resolve(game)
random_index += 1
return result
return _old_random_choice(choice)
def _start_turn():
nonlocal move_index, random_index
random_index = 0
_old_start_turn()
move_index += 1
def _end_turn():
nonlocal move_index, random_index
random_index = 0
_old_end_turn()
move_index += 1
def pre_game():
nonlocal move_index
_old_pre_game()
move_index = 0
game.random_choice = random_choice
game._generate_random_between = _generate_random_between
game._end_turn = _end_turn
game._start_turn = _start_turn
game.pre_game = pre_game
game.__init__(replay.decks, [ReplayAgent(), ReplayAgent()])
return game
|
mit
| -7,057,704,691,699,661,000 | 37.150177 | 118 | 0.574538 | false | 4.042876 | false | false | false |
Iconoclasteinc/tgit
|
test/ui/file_dialogs/test_file_dialog.py
|
1
|
1231
|
# -*- coding: utf-8 -*-
import pytest
from PyQt5.QtWidgets import QFileDialog
from hamcrest import ends_with, assert_that, equal_to
from cute.widgets import QFileDialogDriver, window
from test.ui import show_, close_
from tgit.ui import locations
from tgit.ui.dialogs.file_dialogs import make_file_dialog, name_filter
pytestmark = pytest.mark.ui
@pytest.yield_fixture()
def driver(prober, automaton):
dialog_driver = QFileDialogDriver(window(QFileDialog), prober, automaton)
yield dialog_driver
close_(dialog_driver)
def show_dialog(name_filters="", file_mode=QFileDialog.ExistingFile, directory="", parent=None):
dialog = make_file_dialog(name_filters, file_mode, directory, parent, False)
show_(dialog)
return dialog
def test_shows_name_filters(driver):
_ = show_dialog("PNG Images (*.png)")
driver.filter_files_of_type("PNG Images (*.png)")
def test_initially_starts_in_directory(driver):
_ = show_dialog(directory=locations.Documents)
driver.has_current_directory(ends_with("Documents"))
def test_builds_name_filters():
assert_that(name_filter(["type1", "type2"], "caption"), equal_to("caption (*.type1 *.type2)"), "The name filters")
|
gpl-3.0
| -7,738,104,564,986,402,000 | 30.447368 | 118 | 0.70593 | false | 3.487252 | true | false | false |
Skeletrox/usb-backend-pinut
|
file_upload/fileupload/views.py
|
1
|
15896
|
import json, os, subprocess, getpass, shutil
import logging
from .USBFinder import attemptMount,transfer_file, get_usb_name
from hashlib import sha1
from django.http import HttpResponse,HttpResponseRedirect, JsonResponse
from django.views.decorators.csrf import ensure_csrf_cookie
from django.template import Context, loader
from django.shortcuts import render,get_object_or_404
from django.views.generic import CreateView, DeleteView, ListView
from .models import EkFile, Content
from django.contrib.auth import logout
from django.contrib.auth.models import User
from .response import JSONResponse, response_mimetype
from .serialize import serialize
from django.urls import reverse
from .extract import extractit
from .deleteExtract import deleteit
from distutils.dir_util import copy_tree
from django.conf import settings
staticFileLocRoot = None
old_files = []
files = []
total_amount = 0
total_done = 0
count = 0
is_auth = True
optional_flag = False
percentage_done = 0
perm_dict = None
user = None
telemetry = None
local_files = []
allowed_exts = settings.ACCEPTED_EXTNS
class User_Permissions:
def __init__(self, user):
self.permissions = user.permission.get_permissions()
def get_permissions(self):
return self.permissions
class NoFilesError(ValueError):
def __init__ (self, arg = None):
self.strerror = arg
self.args = {arg}
def user_logout(request):
logout(request)
return HttpResponseRedirect('../../upload/')
def index(request):
return render(request,'fileupload/LOGIN.html')
'''
Dev's code that is not actually called in the program, can be ignored, kept for future references if needed
@ensure_csrf_cookie
def upload(request):
if request.method=='POST':
instance=EkFile(file=request.FILES['files'])
obj=instance.save();
print (instance)
values=serialize(instance)
data={"files":values}
response=json.dumps(data)
print (response)
if instance.type_of_file=="ecar":
print instance.path_of_file
files=extractit(instance.path_of_file)
instance=Content(ekfile=instance,folder_file=files,json_file=files+".json")
instance.save()
return HttpResponse(response,content_type="application/json")
@ensure_csrf_cookie
def list_the_files(request):
values=[serialize(instance) for instance in EkFile.objects.all()]
data={"files":values}
response=json.dumps(data)
print (response)
return HttpResponse(response,content_type="application/json")
@ensure_csrf_cookie
def delete_files(request):
print ("Delete this file: "+request.POST['id'])
instance=EkFile.objects.get(id=request.POST['id'])
print (instance)
if instance.type_of_file=="ecar":
obj=Content.objects.get(ekfile=instance.id)
deleteit({'folder_file':obj.folder_file,'json_file':obj.json_file})
obj.delete()
instance.delete()
return HttpResponse(json.dumps({"id":4}),content_type="application/json")
'''
def verify(request, optional=False):
flag='INIT'
global optional_flag
optional_flag = False
    global is_auth, user, password, telemetry, staticFileLocRoot
if optional:
optional_flag = True
return HttpResponseRedirect('../new')
try:
user=User.objects.get(username=request.POST['email'])
logger = logging.getLogger(__name__)
password=request.POST.get('password', '')
#_,salt,hashpw=user.password.split('$')
logger.error(request.POST.get('email', '')+","+request.POST.get('password', '')+" \n")
logger.error(user.password+", username is "+user.username)
flag='REAL'
except User.DoesNotExist:
flag = 'FAKE'
if(flag == 'REAL' and user.check_password(password)):
global perm_dict
perm_dict = User_Permissions(user)
is_auth = True
############################################################
# Load values from res.json file #
############################################################
staticFileLocRoot = settings.MEDIA_ROOT
telemetry = settings.TELEMETRY
return HttpResponseRedirect('new/')
else:
return render(request,'fileupload/LOGIN.html',{'invalid':'not a valid username or password',})
config_json_dir = settings.CONFIG_JSON_DIR
class EkFileCreateView(CreateView):
model = EkFile
fields = "__all__"
def form_valid(self, form):
self.object = form.save()
print "self Object: "
print unicode(self.object)
self.object.file_upload = self.object.slug
files = [serialize(self.object)]
data = {'files': files}
response = JSONResponse(data, mimetype=response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
print 'Before you send post request'
print self.object.path_of_file
print '-'*10 + 'WE GON EXTRACT IT YO' + '-'*10
#print self.object.slug
if(self.object.path_of_file.endswith(".json")):
if not os.path.exists(config_json_dir):
os.makedirs(config_json_dir)
shutil.copy2(self.object.path_of_file, config_json_dir)
else:
if(settings.ACTIVE_PROFILE == "ekstep"):
files = extractit(self.object.path_of_file)
for f in files:
obj=Content(ekfile=self.object,filename=f)
obj.save()
return response
def form_invalid(self, form):
data = json.dumps(form.errors)
print data + ' omg fail '
return HttpResponse(content=data, status=400, content_type='application/json')
class EkFileDeleteView(DeleteView):
model = EkFile
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
print 'Attempting to delete ' + unicode(self.object)
if(self.object.path_of_file.endswith(".json")):
json_file = unicode(self.object.file_upload)
file_name = config_json_dir+json_file
os.remove(file_name)
else:
if(settings.ACTIVE_PROFILE == "ekstep"):
files = Content.objects.filter(ekfile = self.object.id)
filename = []
for f in files:
filename.append(f.filename)
f.delete()
deleteit(filename)
self.object.delete()
response = JSONResponse(True, mimetype=response_mimetype(request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
class EkFileListView(ListView):
model = EkFile
def render_to_response(self, context, **response_kwargs):
files = [ serialize(p) for p in self.get_queryset() ]
data = {'files': files}
response = JSONResponse(data, mimetype=response_mimetype(self.request))
response['Content-Disposition'] = 'inline; filename=files.json'
return response
def verify_USB(request):
value = attemptMount()
response_data = 'disabled'
if value is not None:
response_data = 'active '
return JsonResponse({'data':response_data})
def serve_extensions(requests):
global allowed_exts
return JSONResponse({"exts":allowed_exts})
def download_to_USBx(request):
usb_name = get_usb_name()
if usb_name is not None:
local_files_dir = '/' + getpass.getuser() + '/FILES/'
if os.geteuid() != 0: #If not root, user location is /home/user/files
local_files_dir = '/home/' + getpass.getuser() + '/FILES/'
print local_files_dir
local_files = []
for root, folders, files in os.walk(local_files_dir):
for file in files:
if (not os.path.isdir(file)) and file.endswith(".json"):
local_files.append(os.path.join(root, file))
print local_files
actual_index = local_files[0].split('/').index('FILES') + 1
for file in local_files:
os.chdir('/media/' + getpass.getuser() + '/' + usb_name)
split_list = file.split('/')
for i in range (actual_index, len(split_list) - 1):
if not os.path.exists(split_list[i]):
os.makedirs(split_list[i])
os.chdir(split_list[i])
command = 'cp "' + file + '" "' + os.getcwd() + '"'
t = subprocess.Popen(command, shell=True)
t.communicate()[0]
result = t.returncode
if result != 0:
return JsonResponse ({'res': 'Copy aborted! [USB Unplugged/Insufficient Space?]'})
return JsonResponse({'res': 'Copy successful'})
return JsonResponse({'res':'Reinsert USB'})
'''
def download_to_USB(request):
print request.method
usb_name = get_usb_name()
val = request.POST.get("counter", None)
print "HAI " + str(val)
if val is None:
return HttpResponseRedirect('/upload/new/')
if val == 'INIT':
global local_files
if usb_name is None:
return HttpResponseRedirect('/upload/new/')
local_files = []
for root, folders, files in os.walk(telemetry):
for file in files:
if not os.path.isdir(file):
local_files.append(os.path.join(root, file))
return JsonResponse({'value': '-1', 'length' : str(len(local_files))})
else:
try:
current = int(val)
# global local_files
curr_file = local_files[current]
file_localized_name = curr_file[curr_file.find("telemetry") + len("telemetry/"):]
shutil.copy2(curr_file, usb_name + file_localized_name)
return JsonResponse({'value', str(current+1)})
except ValueError:
return JsonResponse({'res': 'Use an integer for manual inputs!'})
except IndexError:
return JsonResponse({'res': 'Files have been successfully copied!'})
except OSError:
return JsonResponse({'res': 'Copy error! USB unplugged/insufficient storage space?'})
'''
def split_dirs(text): #Splits the entire path to get the file name
splitty = text.split('/')
value = splitty[len(splitty) - 1]
return value
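# e.g. split_dirs('/home/user/FILES/course/content.json') -> 'content.json'
# (path purely illustrative; equivalent to os.path.basename for '/'-separated paths)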
def transfer(request):
try:
if not is_auth:
return HttpResponse("Please access this URL properly")
elif request.method == 'GET' or request.method == 'POST':
global percentage_done
global total_amount, total_done, count, files, old_files
files_existing = []
if request.method == 'GET':
new_files = attemptMount()
if new_files is None:
print "new_files none"
return HttpResponseRedirect('../new')
old_files = [fModel.file_upload for fModel in EkFile.objects.all()]
files = [thing for thing in new_files if split_dirs(thing) not in old_files]
total_done = 0
total_amount = len(files)
fileCount = 0
else:
fileCount = request.POST.get("file_descriptor", "")
download_more = True
file_to_transfer = None
if len(files) > 0:
temp_value = 0
for file in files:
try:
#Runs each time. Can be optimized further to handle JSON requests and responses
value = split_dirs(file)
x = EkFile.objects.get(file_upload=str(value))
except EkFile.DoesNotExist:
file_size = os.stat(file).st_size
value = split_dirs(file)
fModel = EkFile(id = temp_value+1, file_upload = str(value))
temp_value += 1
if fModel not in files_existing:
files_existing.append(fModel)
try:
if len(files_existing) == 0:
raise NoFilesError('No Files')
file_to_transfer = files[int(fileCount)]
return_code = transfer_file(file_to_transfer)
if return_code != 0:
print 'USB unexpectedly removed!'
removeCorruptFile(file_to_transfer)
except NoFilesError as error:
global optional_flag #If a simple refresh occurs without a change in USB attached
if optional_flag:
return HttpResponseRedirect('../new')
template = loader.get_template('fileupload/downloadFiles.html')
total_files_in_db = EkFile.objects.all()
context = {
'files_existing' : None,
'show_output' : False,
'percentage_done' : 0,
'current_count' : 0,
'btn_check_flag' : 'disabled',
'download_more' : False,
}
return HttpResponse(template.render(context, request))
count += 1
total_done += 1
percentage_done = int(total_done*100/total_amount)
#Code below updates the file transferred list
if file_to_transfer is not None:
print "file_to_transfer " + file_to_transfer
value = split_dirs(file_to_transfer)
file_to_save = EkFile(id = count, file_upload = value)
file_to_save.save()
if(value.endswith(".json")):
if not os.path.exists(config_json_dir):
os.makedirs(config_json_dir)
shutil.copy2(file_to_save.path_of_file, config_json_dir)
else:
if(settings.ACTIVE_PROFILE == "ekstep"):
files2 = extractit(file_to_save.path_of_file)
for f in files2:
obj=Content(ekfile=file_to_save,filename=f)
obj.save()
print '[Z]Saved ' + value
#list_of_files.append(file_to_save)
#files.remove(file_to_transfer)
#extractit(file_to_save.path_of_file)
#Code above updates the file transferred list
if (total_done <= total_amount - 1 or len(files_existing) == 0):
#We still have files to download
template = loader.get_template('fileupload/downloadFiles.html')
context = {
'files_existing' : files_existing,
'show_output' : True,
'percentage_done' : percentage_done,
'current_count' : total_done,
'btn_check_flag' : 'disabled',
'download_more' : True,
}
return HttpResponse(template.render(context, request))
#Code below is for final condition
if total_done == total_amount and len(files_existing) > 0:
optional_flag = True #Any further refreshes will not attempt to show "no new files available"
download_more = None
return HttpResponseRedirect('../new')
#Code above is for final condition
return JsonResponse({'null':'null'}) #For testing only, report if thrown anytime!
except OSError:
return HttpResponseRedirect('../new/');
def removeCorruptFile(file):
global staticFileLocRoot
    # look the file up by its stored name rather than an unnamed positional argument
    delete_from_db_file = EkFile.objects.get(file_upload=split_dirs(file))
    delete_from_db_file.delete()
    sendString = "rm " + staticFileLocRoot + file
    t = subprocess.Popen(sendString, shell=True)
    t.communicate()[0]
|
apache-2.0
| 7,018,819,054,383,964,000 | 37.865526 | 109 | 0.57417 | false | 4.178759 | true | false | false |
vodkina/GlobaLeaks
|
backend/globaleaks/models/validators.py
|
1
|
5111
|
# -*- coding: UTF-8
#
# validator
# *********
#
# Utilities to validate data recorded in the ORM
import re
from globaleaks import LANGUAGES_SUPPORTED_CODES
from globaleaks.settings import GLSettings
from globaleaks.rest import errors
from globaleaks.utils.utility import log
def natnum_v(self, attr, value):
"""
Validates that the passed value is a natural number (in Z+)
"""
if not isinstance(value, int) or value < 0:
raise errors.InvalidModelInput("natnum_v: expected val to be in Z+ (%s:%d)" % (attr, value))
return value
class range_v(object):
def __call__(self, model_obj, attr, value):
if not isinstance(value, int):
raise errors.InvalidModelInput("range_v: expected int (%s)" % attr)
if value < self.start or value > self.stop:
m = "range_v(%d, %d): value outside of acceptable range (%d)" % (self.start, self.stop, value)
raise errors.InvalidModelInput(m)
return value
def __init__(self, start, stop):
self.start = start
self.stop = stop
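# Illustrative usage of the two validators above (not part of the original source;
# the attribute names are made up):
#   natnum_v(None, 'retention_days', 5)       # returns 5
#   natnum_v(None, 'retention_days', -1)      # raises errors.InvalidModelInput
#   range_v(0, 100)(None, 'percentage', 150)  # raises errors.InvalidModelInput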
def shorttext_v(self, attr, value):
if isinstance(value, str):
value = unicode(value)
if not isinstance(value, unicode):
raise errors.InvalidModelInput("shorttext_v: expected unicode (%s:%s)" % (attr, value))
if GLSettings.enable_input_length_checks and len(value) > GLSettings.memory_copy.maximum_namesize:
raise errors.InvalidModelInput("shorttext_v: length need to be < of %d"
% GLSettings.memory_copy.maximum_namesize)
return value
def longtext_v(self, attr, value):
"""
"""
if not attr:
return value
if isinstance(value, str):
value = unicode(value)
if not isinstance(value, unicode):
raise errors.InvalidModelInput("longtext_v: expected unicode (%s:%s)" %
(attr, value))
if GLSettings.enable_input_length_checks and len(value) > GLSettings.memory_copy.maximum_textsize:
raise errors.InvalidModelInput("longtext_v: unicode text in %s " \
"overcomes length " \
"limit %d" % (attr, GLSettings.memory_copy.maximum_textsize))
return value
def dict_v(self, attr, value):
if not value:
return {}
if not isinstance(value, dict):
raise errors.InvalidModelInput("dict_v: expected dict (%s)" % attr)
for key, subvalue in value.iteritems():
if isinstance(subvalue, str):
subvalue = unicode(subvalue)
if isinstance(subvalue, unicode):
if GLSettings.enable_input_length_checks and len(subvalue) > GLSettings.memory_copy.maximum_textsize:
raise errors.InvalidModelInput("dict_v: text for key %s in %s " \
"overcomes length limit of %d" % (key, attr,
GLSettings.memory_copy.maximum_textsize))
if isinstance(subvalue, dict):
dict_v(self, attr, subvalue)
return value
def shortlocal_v(self, attr, value):
dict_v(None, attr, value)
if not value:
return value
    # An unsupported language code does not necessarily mean malicious input was provided;
    # this can happen when a language is removed from the package. The proper way to
    # handle it is simply to log the issue and discard the input.
    # https://github.com/globaleaks/GlobaLeaks/issues/879
remove = [lang for lang in value if lang not in LANGUAGES_SUPPORTED_CODES]
for k in remove:
try:
del value[unicode(k)]
except KeyError:
pass
log.debug("shortlocal_v: (%s) Invalid language code in %s, skipped" %
(k, attr))
for lang, text in value.iteritems():
shorttext_v(None, None, text)
return value
def longlocal_v(self, attr, value):
dict_v(None, attr, value)
if not value:
return value
    # An unsupported language code does not necessarily mean malicious input was provided;
    # this can happen when a language is removed from the package. The proper way to
    # handle it is simply to log the issue and discard the input.
    # https://github.com/globaleaks/GlobaLeaks/issues/879
remove = [lang for lang in value if lang not in LANGUAGES_SUPPORTED_CODES]
for k in remove:
try:
del value[unicode(k)]
except KeyError:
pass
log.debug("longlocal_v: (%s) Invalid language code in %s, skipped" %
(k, attr))
for lang, text in value.iteritems():
longtext_v(None, attr, text)
return value
def shorturl_v(self, attr, value):
if not re.match(r'^(/s/[a-z0-9]{1,30})$', value):
raise errors.InvalidModelInput("invalid shorturl")
return value
def longurl_v(self, attr, value):
if not re.match(r'^(/[a-z0-9#=_&?/-]{1,255})$', value):
raise errors.InvalidModelInput("invalid longurl")
return value
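# Illustrative examples (not part of the original source; the paths are made up):
#   shorturl_v(None, 'shorturl', u'/s/abc123')        # accepted: '/s/' plus 1-30 chars of [a-z0-9]
#   longurl_v(None, 'longurl', u'/path/to/page?x=1')  # accepted by the longurl pattern
# Any value that does not match the corresponding regex raises errors.InvalidModelInput.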
|
agpl-3.0
| -8,436,190,481,681,134,000 | 31.974194 | 123 | 0.6126 | false | 3.88079 | false | false | false |
vapor-ware/synse-server
|
synse_server/backoff.py
|
1
|
1845
|
"""Retry backoff strategies."""
import random
import time
from typing import Union
__all__ = ['ExponentialBackoff']
class ExponentialBackoff:
"""An implementation of the exponential backoff strategy.
This is useful for getting an exponentially backed-off delay for
reconnection or retry actions.
Each call to ``delay`` will return the next exponentially backed-off
value, in seconds, to use for waiting. The backoff will continue for
each call, up to a maximum of 2^10 * base.
Args:
base: The base delay, in seconds. This is the starting point for
the returned exponentially backed off time values.
cap: The cap on the exponent, after which the backoff will not
grow exponentially. This is 9 by default (2^9 = 512 ~= 8.5 minutes)
"""
def __init__(self, base: int = 1, cap: int = 9) -> None:
self._base = base
self._exp = 0
self._max = cap
self._reset_time = base * 2 ** (self._max + 1)
self._last_invocation = time.monotonic()
self.rand = random.Random()
self.rand.seed()
def delay(self) -> Union[int, float]:
"""Get the next exponentially backed off time delay, in seconds.
The delay value is incremented exponentially with every call, up
to the defined max. If a period of time greater than 2^(max+1) * base
has passed, the backoff is reset.
Returns:
The time, in seconds, to be used as the next delay interval.
"""
invocation = time.monotonic()
interval = invocation - self._last_invocation
self._last_invocation = invocation
if interval > self._reset_time:
self._exp = 0
self._exp = min(self._exp + 1, self._max)
return self.rand.uniform(0, self._base * 2 ** self._exp)
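# A minimal usage sketch (illustrative only, not part of the original module;
# try_reconnect() is a hypothetical helper):
#
#   backoff = ExponentialBackoff(base=1)
#   connected = False
#   while not connected:
#       time.sleep(backoff.delay())   # waits 0..2s, then 0..4s, ... capped at 0..512s
#       connected = try_reconnect()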
|
gpl-3.0
| 5,880,698,989,741,114,000 | 31.946429 | 79 | 0.625474 | false | 4.072848 | false | false | false |
ecreall/lagendacommun
|
urlshortener/main.py
|
1
|
5007
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 by Ecreall under licence AGPL terms
# available on http://www.gnu.org/licenses/agpl.html
# Source: https://github.com/narenaryan/Pyster
# licence: AGPL
# author: Amen Souissi
import sqlite3
import string
from flask import Flask, request, render_template, redirect, jsonify
from flask.ext.cors import CORS, cross_origin
from sqlite3 import OperationalError
from urllib.parse import urlparse
#host = 'http://localhost:5000/'
host = 'http://6li.eu/'
BASE = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
BASE.extend(list(string.ascii_lowercase))
BASE.extend(list(string.ascii_uppercase))
BASE_LEN = len(BASE)
#Assuming urls.db is in your app root folder
app = Flask(__name__)
cors = CORS(app, resources={r"/": {"origins": "*"}})
def get_base_next(char):
if char == '':
return False, '0'
char_index = BASE.index(char)
char_index += 1
return (False, BASE[char_index]) if \
char_index < BASE_LEN else (True, '0')
def next_id(id_=None):
new_id = id_
if id_ is None:
new_id = '0'
else:
index = -1
to_inc = new_id[index]
final, next = get_base_next(to_inc)
new_id = new_id[:index] + next
index -= 1
len_id = len(new_id)
while index+6 >= 0 and final:
if index+len_id >= 0:
to_inc = new_id[index]
final, next = get_base_next(to_inc)
new_id = new_id[:index] + next + new_id[index+1:]
else:
to_inc = ''
final, next = get_base_next(to_inc)
new_id = next + new_id[index+1:]
index -= 1
return new_id
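# Illustrative behaviour of the base-62 counter above (comment added for clarity):
#   next_id()     -> '0'
#   next_id('0')  -> '1'
#   next_id('9')  -> 'a'   (digits roll over into lowercase letters)
#   next_id('z')  -> 'A'   (lowercase rolls over into uppercase)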
def table_check():
create_table = """
CREATE TABLE WEB_URL(
ID INTEGER PRIMARY KEY AUTOINCREMENT,
NUM TEXT NOT NULL UNIQUE,
URL TEXT NOT NULL UNIQUE
);
"""
with sqlite3.connect('var/urls.db') as conn:
cursor = conn.cursor()
try:
cursor.execute(create_table)
except OperationalError:
pass
@app.route('/', methods=['GET', 'POST'])
# @cross_origin(origin='localhost',headers=['Content- Type','Authorization'])
def home():
    method = request.method
    original_url = None  # defined up-front so the except handler below can reference it even on GET requests
with sqlite3.connect('var/urls.db') as conn:
try:
cursor = conn.cursor()
rows_query = """
SELECT NUM, max(ID) FROM WEB_URL"""
result_cursor = cursor.execute(rows_query)
result_fetch = result_cursor.fetchone()
last_num = result_fetch[0]
number_of_rows = result_fetch[1]
number_of_rows = 0 if number_of_rows is None else number_of_rows
if method == 'POST':
original_url = request.form.get('url')
if original_url:
if urlparse(original_url).scheme == '':
original_url = 'http://' + original_url
exist_row = """
SELECT NUM FROM WEB_URL
WHERE URL='{url}'
""".format(url=original_url)
result_cursor = cursor.execute(exist_row)
result_fetch = result_cursor.fetchone()
if result_fetch:
new_num = result_fetch[0]
else:
new_num = next_id(last_num)
insert_row = """
INSERT INTO WEB_URL (URL, NUM)
VALUES ('{url}', '{num}')
""".format(url=original_url, num=new_num)
cursor.execute(insert_row)
number_of_rows += 1
encoded_string = new_num
return jsonify(**{'short_url': host + encoded_string,
'code': 'SUCCESS',
'original_url': original_url})
return render_template('home.html', number_of_rows=number_of_rows)
except Exception as error:
return jsonify(**{'code': 'ERROR',
'error': str(error),
'original_url': original_url
})
@app.route('/<short_url>')
def redirect_short_url(short_url):
decoded_string = short_url
with sqlite3.connect('var/urls.db') as conn:
cursor = conn.cursor()
select_row = """
SELECT URL FROM WEB_URL
WHERE NUM='{num}'
""".format(num=decoded_string)
result_cursor = cursor.execute(select_row)
try:
return redirect(result_cursor.fetchone()[0])
except Exception:
pass
return render_template(
'home.html',
error=True)
if __name__ == '__main__':
# This code checks whether database table is created or not
table_check()
# app.run(debug=True)
app.run(host='0.0.0.0')
|
agpl-3.0
| 2,148,260,794,711,410,000 | 31.72549 | 78 | 0.505892 | false | 3.905616 | false | false | false |
liverliu/netmusichacker
|
python/hacker/hacker.py
|
1
|
7384
|
import logging
import web
import config
import requests
import json
import hashlib
import random
logger = logging.getLogger('route')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s','%m-%d %H:%M:%S')
#console
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
#file
fh = logging.FileHandler('hacker.log')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
urls = (
'/.*', 'route',
'/api/.*', 'route',
'/eapi/.*', 'route',
)
valid_header = ['HTTP_ORIGIN', 'HTTP_COOKIE', 'HTTP_ACCEPT', 'HTTP_CONNECTION', 'HTTP_USER_AGENT',
'HTTP_ACCEPT_LANGUAGE', 'HTTP_ACCEPT_ENCODING', 'CONTENT_LENGTH', 'CONTENT_TYPE',
'HTTP_BATCH_METHOD', 'HTTP_REFERER']
new_header = {'HTTP_ORIGIN':'Origin', 'HTTP_COOKIE':'Cookie', 'HTTP_ACCEPT':'Accept',
'HTTP_CONNECTION':'Connection', 'HTTP_USER_AGENT':'User-Agent', 'HTTP_HOST':'Host',
'HTTP_ACCEPT_LANGUAGE':'Accept-Language', 'HTTP_ACCEPT_ENCODING':'Accept-Encoding',
'CONTENT_LENGTH':'Content-Length', 'CONTENT_TYPE':'Content-Type', 'HTTP_BATCH_METHOD':'Batch-Method',
'HTTP_REFERER':'Referer'}
response_headers = ['Content-Type', 'Connection', 'Pragrma', 'Cache-Control', 'Expires', 'Vary', 'Server', 'Date']
class MyApplication(web.application):
def run(self, host='127.0.0.1', port=8080, *middleware):
return web.httpserver.runsimple(self.wsgifunc(*middleware), (host, port))
class route:
def GET(self):
return handle()
def POST(self):
return handle()
def handle():
logger.info('-------------------')
logger.info(web.ctx.path)
try:
headers={}
for k,v in web.ctx.env.items():
if(k.upper()=='HTTP_HOST'):
headers[new_header[k]]='music.163.com'
continue
if(k.upper() in valid_header):
headers[new_header[k]] = v
if web.ctx.env['REQUEST_METHOD'].upper() == 'POST':
response = requests.post(config.host+web.ctx.path, data=web.data(), headers=headers)
elif web.ctx.env['REQUEST_METHOD'].upper() == 'GET':
response = requests.get(config.host+web.ctx.env['REQUEST_URI'], headers=headers)
else:
return None
for k,v in response.headers.items():
if k in response_headers:
web.header(k, v)
return modify(response.content)
except Exception, ex:
logger.error(ex)
return None
def modify(message):
try:
result = json.loads(message)
if web.ctx.path.startswith('/eapi/v1/album/'):
logger.info('modify album info')
if result['songs']:
for song in result['songs']:
modify_privilege(song['privilege'])
if song['fee'] and song['fee']>0:
song['fee']=0
elif web.ctx.path=='/eapi/v3/song/detail/':
logger.info('modify songs privileges')
map(modify_privilege, result['privileges']) if result['privileges'] else None
elif web.ctx.path=='/eapi/v3/playlist/detail':
logger.info('modify songs info')
map(modify_privilege, result['privileges']) if result['privileges'] else None
elif web.ctx.path=='/eapi/song/enhance/player/url':
data = result['data'][0]
if data['code'] != 200:
logger.info('try to generate url: {}'.format(data['id']))
song = music_detail(data['id'])
music = get_music(song)
data['code']=200
data['type']='mp3'
data['url']=gen_url(song)
data['gain']=music['volumeDelta']
data['br']=music['bitrate']
data['size']=music['size']
data['md5']=music['dfsId']
logger.info(result)
elif web.ctx.path=='/eapi/batch':
logger.info('modify search result')
search = result['/api/cloudsearch/pc']
[modify_privilege(song['privilege']) for song in search['result']['songs']] if search['code']==200 else None
elif web.ctx.path=='/eapi/cloudsearch/pc':
logger.info('modify search result')
[modify_privilege(song['privilege']) for song in result['result']['songs']] if result['code']==200 else None
elif web.ctx.path.startswith('/eapi/v1/artist'):
logger.info('modify singer info')
[modify_privilege(hot_song['privilege']) for hot_song in result['hotSongs']]
elif web.ctx.path.startswith('/eapi/song/enhance/download/url'):
logger.info(message)
return json.dumps(result)
except Exception, ex:
logger.info(ex)
return message
def get_music(song):
if(song['hMusic']):
return song['hMusic']
elif song['mMusic']:
return song['mMusic']
elif song['lMusic']:
return song['lMusic']
elif song['bMusic']:
return song['bMusic']
else:
return song['audition']
def modify_privilege(privilege):
if privilege:
if privilege['st'] and privilege['st']<0:
privilege['st']=0
privilege['cs']=False
privilege['subp']=1
privilege['fl']=privilege['maxbr']
privilege['dl']=privilege['maxbr']
privilege['pl']=privilege['maxbr']
privilege['sp']=7
privilege['cp']=1
if privilege['fee'] and privilege['fee']>0:
privilege['fee']=0
privilege['st']=0
privilege['cs']=False
privilege['subp']=1
privilege['fl']=privilege['maxbr']
privilege['dl']=privilege['maxbr']
privilege['pl']=privilege['maxbr']
privilege['sp']=7
privilege['cp']=1
def music_detail(id):
url = '{}/api/song/detail?ids=[{}]'.format(config.host, id)
response = requests.get(url, headers=config.headers).text.encode('utf-8')
song = json.loads(response)['songs'][0]
if not song['hMusic'] and not song['mMusic'] and not song['lMusic'] and not song['bMusic']:
album = album_detail(song)
for song1 in album['songs']:
if song1['id'] == song['id']:
return song1
return song
def album_detail(song):
url = '{}/api/album/{}'.format(config.host, song['album']['id'])
response = requests.get(url, headers=config.headers).text.encode('utf-8')
return json.loads(response)['album']
def gen_url(song):
music = get_music(song)
song_id = music['dfsId']
enc_id = encrypt(song_id)
return 'http://m{}.music.126.net/{}/{}.mp3'.format(random.randint(1,2), enc_id, song_id)
def encrypt(id):
magic = bytearray('3go8&$8*3*3h0k(2)2')
song_id = bytearray(str(id))
magic_len = len(magic)
for i in xrange(len(song_id)):
song_id[i] = song_id[i] ^ magic[i % magic_len]
m = hashlib.md5(song_id)
result = m.digest().encode('base64')[:-1]
result = result.replace('/', '_')
result = result.replace('+', '-')
return result
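# Note (added for clarity, not in the original source): encrypt() derives the URL path
# segment from a song's dfsId by XOR-ing the id bytes with a fixed magic string,
# MD5-hashing the result, base64-encoding it and making it URL-safe ('/' -> '_', '+' -> '-').
# gen_url() then builds http://m1.music.126.net/<encrypted>/<dfsId>.mp3 (or m2).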
app = MyApplication(urls, globals())
application = app.wsgifunc()
if __name__ == '__main__':
app.run(host=config.server_host, port=config.server_port)
|
apache-2.0
| 2,123,856,222,579,568,600 | 35.20098 | 120 | 0.574485 | false | 3.632071 | true | false | false |
repotvsupertuga/tvsupertuga.repository
|
script.module.streamlink.base/resources/lib/streamlink/plugin/api/validate.py
|
1
|
12873
|
"""This module provides an API to validate and to some extent
manipulate data structures, such as JSON and XML parsing results.
Example usage:
>>> validate(int, 5)
5
>>> validate({text: int}, {'foo': '1'})
ValueError: Type of '1' should be 'int' but is 'str'
>>> validate({'foo': transform(int)}, {'foo': '1'})
{'foo': 1}
"""
from xml.etree import ElementTree as ET
from copy import copy as copy_obj
try:
from functools import singledispatch
except ImportError:
from streamlink.utils.singledispatch import singledispatch
from ...compat import is_py2, urlparse
from ...exceptions import PluginError
__all__ = [
"any", "all", "filter", "get", "getattr", "hasattr", "length", "optional",
"transform", "text", "union", "url", "startswith", "endswith", "contains",
"xml_element", "xml_find", "xml_findall", "xml_findtext",
"validate", "Schema", "SchemaContainer"
]
#: Alias for text type on each Python version
text = is_py2 and basestring or str
# References to original functions that we override in this module
_all = all
_getattr = getattr
_hasattr = hasattr
_filter = filter
_map = map
_re_match_attr = ("group", "groups", "groupdict", "re")
def _is_re_match(value):
return _all(_hasattr(value, a) for a in _re_match_attr)
class any(tuple):
"""At least one of the schemas must be valid."""
def __new__(cls, *args):
return super(any, cls).__new__(cls, args)
class all(tuple):
"""All schemas must be valid."""
def __new__(cls, *args):
return super(all, cls).__new__(cls, args)
class SchemaContainer(object):
def __init__(self, schema):
self.schema = schema
class transform(object):
"""Applies function to value to transform it."""
def __init__(self, func):
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and func == text:
func = unicode
self.func = func
class optional(object):
"""An optional key used in a dict or union-dict."""
def __init__(self, key):
self.key = key
class union(SchemaContainer):
"""Extracts multiple validations based on the same value."""
class attr(SchemaContainer):
"""Validates an object's attributes."""
class xml_element(object):
"""A XML element."""
def __init__(self, tag=None, text=None, attrib=None):
self.tag = tag
self.text = text
self.attrib = attrib
def length(length):
"""Checks value for minimum length using len()."""
def min_len(value):
if not len(value) >= length:
raise ValueError(
"Minimum length is {0} but value is {1}".format(length, len(value))
)
return True
return min_len
def startswith(string):
"""Checks if the string value starts with another string."""
def starts_with(value):
validate(text, value)
if not value.startswith(string):
raise ValueError("'{0}' does not start with '{1}'".format(value, string))
return True
return starts_with
def endswith(string):
"""Checks if the string value ends with another string."""
def ends_with(value):
validate(text, value)
if not value.endswith(string):
raise ValueError("'{0}' does not end with '{1}'".format(value, string))
return True
return ends_with
def contains(string):
"""Checks if the string value contains another string."""
def contains_str(value):
validate(text, value)
if string not in value:
raise ValueError("'{0}' does not contain '{1}'".format(value, string))
return True
return contains_str
def get(item, default=None):
"""Get item from value (value[item]).
If the item is not found, return the default.
Handles XML elements, regex matches and anything that has __getitem__.
"""
def getter(value):
if ET.iselement(value):
value = value.attrib
try:
# Use .group() if this is a regex match object
if _is_re_match(value):
return value.group(item)
else:
return value[item]
except (KeyError, IndexError):
return default
except (TypeError, AttributeError) as err:
raise ValueError(err)
return transform(getter)
def getattr(attr, default=None):
"""Get a named attribute from an object.
When a default argument is given, it is returned when the attribute
doesn't exist.
"""
def getter(value):
return _getattr(value, attr, default)
return transform(getter)
def hasattr(attr):
"""Verifies that the object has an attribute with the given name."""
def has_attr(value):
return _hasattr(value, attr)
return has_attr
def filter(func):
"""Filters out unwanted items using the specified function.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
def expand_kv(kv):
return func(*kv)
def filter_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_filter(expand_kv, value.items()))
else:
return cls(_filter(func, value))
return transform(filter_values)
def map(func):
"""Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and text == func:
func = unicode
def expand_kv(kv):
return func(*kv)
def map_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_map(expand_kv, value.items()))
else:
return cls(_map(func, value))
return transform(map_values)
def url(**attributes):
"""Parses an URL and validates its attributes."""
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError("'{0}' is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute '{0}'".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute '{0}': {1}".format(
name, err
)
)
return True
# Convert "http" to be either any("http", "https") for convenience
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url
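# A small illustrative schema (not from the original module):
#
#   _stream_url = Schema(
#       url(scheme="http", path=endswith(".m3u8"))
#   )
#   _stream_url.validate("https://example.com/live/index.m3u8")
#
# "http" is expanded to any("http", "https") by the convenience rule above.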
def xml_find(xpath):
"""Find a XML element via xpath."""
def xpath_find(value):
validate(ET.iselement, value)
value = value.find(xpath)
if value is None:
raise ValueError("XPath '{0}' did not return an element".format(xpath))
return validate(ET.iselement, value)
return transform(xpath_find)
def xml_findall(xpath):
"""Find a list of XML elements via xpath."""
def xpath_findall(value):
validate(ET.iselement, value)
return value.findall(xpath)
return transform(xpath_findall)
def xml_findtext(xpath):
"""Find a XML element via xpath and extract its text."""
return all(
xml_find(xpath),
getattr("text"),
)
@singledispatch
def validate(schema, value):
if callable(schema):
if schema(value):
return value
else:
raise ValueError("{0}({1!r}) is not true".format(schema.__name__, value))
if schema == value:
return value
else:
raise ValueError("{0!r} does not equal {1!r}".format(value, schema))
@validate.register(any)
def validate_any(schema, value):
errors = []
for subschema in schema:
try:
return validate(subschema, value)
except ValueError as err:
errors.append(err)
else:
err = " or ".join(_map(str, errors))
raise ValueError(err)
@validate.register(all)
def validate_all(schemas, value):
for schema in schemas:
value = validate(schema, value)
return value
@validate.register(transform)
def validate_transform(schema, value):
validate(callable, schema.func)
return schema.func(value)
@validate.register(list)
@validate.register(tuple)
@validate.register(set)
@validate.register(frozenset)
def validate_sequence(schema, value):
validate(type(schema), value)
return type(schema)(validate(any(*schema), v) for v in value)
@validate.register(dict)
def validate_dict(schema, value):
validate(type(schema), value)
new = type(schema)()
for key, subschema in schema.items():
if isinstance(key, optional):
if key.key not in value:
continue
key = key.key
if type(key) in (type, transform, any, all, union):
for subkey, subvalue in value.items():
new[validate(key, subkey)] = validate(subschema, subvalue)
break
else:
if key not in value:
raise ValueError("Key '{0}' not found in {1!r}".format(key, value))
try:
new[key] = validate(subschema, value[key])
except ValueError as err:
raise ValueError("Unable to validate key '{0}': {1}".format(key, err))
return new
@validate.register(type)
def validate_type(schema, value):
if isinstance(value, schema):
return value
else:
raise ValueError(
"Type of {0!r} should be '{1}' but is '{2}'".format(
value, schema.__name__, type(value).__name__
)
)
@validate.register(xml_element)
def validate_xml_element(schema, value):
validate(ET.iselement, value)
new = ET.Element(value.tag, attrib=value.attrib)
if schema.attrib is not None:
try:
new.attrib = validate(schema.attrib, value.attrib)
except ValueError as err:
raise ValueError("Unable to validate XML attributes: {0}".format(err))
if schema.tag is not None:
try:
new.tag = validate(schema.tag, value.tag)
except ValueError as err:
raise ValueError("Unable to validate XML tag: {0}".format(err))
if schema.text is not None:
try:
new.text = validate(schema.text, value.text)
except ValueError as err:
raise ValueError("Unable to validate XML text: {0}".format(err))
for child in value:
new.append(child)
return new
@validate.register(attr)
def validate_attr(schema, value):
new = copy_obj(value)
for attr, schema in schema.schema.items():
if not _hasattr(value, attr):
raise ValueError("Attribute '{0}' not found on object '{1}'".format(
attr, value
))
setattr(new, attr, validate(schema, _getattr(value, attr)))
return new
@singledispatch
def validate_union(schema, value):
raise ValueError("Invalid union type: {0}".format(type(schema).__name__))
@validate_union.register(dict)
def validate_union_dict(schema, value):
new = type(schema)()
for key, schema in schema.items():
optional_ = isinstance(key, optional)
if optional_:
key = key.key
try:
new[key] = validate(schema, value)
except ValueError as err:
if optional_:
continue
raise ValueError("Unable to validate union '{0}': {1}".format(key, err))
return new
@validate_union.register(list)
@validate_union.register(tuple)
@validate_union.register(set)
@validate_union.register(frozenset)
def validate_union_sequence(schemas, value):
return type(schemas)(validate(schema, value) for schema in schemas)
@validate.register(union)
def validate_unions(schema, value):
return validate_union(schema.schema, value)
class Schema(object):
"""Wraps a validator schema into a object."""
def __init__(self, *schemas):
self.schema = all(*schemas)
def validate(self, value, name="result", exception=PluginError):
try:
return validate(self.schema, value)
except ValueError as err:
raise exception("Unable to validate {0}: {1}".format(name, err))
@validate.register(Schema)
def validate_schema(schema, value):
return schema.validate(value, exception=ValueError)
|
gpl-2.0
| 4,050,243,957,748,727,000 | 25.325153 | 86 | 0.61027 | false | 4.095768 | false | false | false |
doc-cloud/ds
|
string/string-rotate/rotate.py
|
1
|
1026
|
# -*- coding: utf-8 -*-
import copy
def simple_rotate(s, m):
if m < 0:
raise Exception('m is less than 0')
m %= len(s)
t = copy.copy(s)
del s[:]
s += t[m:] + t[:m]
def left_shift_m(s, m):
if m < 0:
raise Exception('m is less than 0')
length = len(s)
m %= length
for i in xrange(m):
t = s[0]
for j in xrange(1, length):
s[j - 1] = s[j]
s[length - 1] = t
def reverse(s, b, e):
    n = e - b + 1
for i in xrange(n / 2):
s[b + i], s[e - i] = s[e - i], s[b + i]
def rotate_reverse(s, m):
if m < 0:
raise Exception('m is less than 0')
length = len(s)
m %= length
reverse(s, 0, m - 1)
reverse(s, m, length - 1)
reverse(s, 0, length - 1)
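# How the three-reversal trick works (comment added for clarity): to rotate
# ['a','b','c','d','e','f','g'] left by m=3, reverse the first 3 -> c b a d e f g,
# reverse the rest -> c b a g f e d, then reverse everything -> d e f g a b c.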
if __name__ == '__main__':
s = ['a', 'b', 'c', 'd', 'e', 'f', 'g']
s0 = copy.copy(s)
simple_rotate(s0, 3)
print s0
s1 = copy.copy(s)
left_shift_m(s1, 3)
print s1
s2 = copy.copy(s)
rotate_reverse(s2, 4)
print s2
|
gpl-2.0
| 102,612,010,791,808,480 | 19.52 | 47 | 0.457115 | false | 2.597468 | false | false | false |
elfnor/sverchok
|
nodes/transforms/scale_mk2.py
|
1
|
2683
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from mathutils import Vector
import bpy
from bpy.props import FloatProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode
from sverchok.utils.sv_recursive import sv_recursive_transformations
class SvScaleNodeMK2(bpy.types.Node, SverchCustomTreeNode):
''' Scale MK2 '''
bl_idname = 'SvScaleNodeMK2'
bl_label = 'Scale'
bl_icon = 'MAN_SCALE'
    factor_ = FloatProperty(name='multiplier', description='scaling factor',
default=1.0,
options={'ANIMATABLE'}, update=updateNode)
separate = BoolProperty(name='separate', description='Separate UV coords',
default=False,
update=updateNode)
def sv_init(self, context):
self.inputs.new('VerticesSocket', "vertices", "vertices")
self.inputs.new('VerticesSocket', "centers", "centers")
self.inputs.new('StringsSocket', "multiplier", "multiplier").prop_name = "factor_"
self.outputs.new('VerticesSocket', "vertices", "vertices")
def draw_buttons(self, context, layout):
layout.prop(self, 'separate')
def process(self):
# inputs
vers = self.inputs['vertices'].sv_get()
vecs = self.inputs['centers'].sv_get(default=[[[0.0, 0.0, 0.0]]])
mult = self.inputs['multiplier'].sv_get()
# outputs
if self.outputs[0].is_linked:
sca = sv_recursive_transformations(self.scaling,vers,vecs,mult,self.separate)
self.outputs['vertices'].sv_set(sca)
def scaling(self, v, c, m):
print(c,v,m)
return [(Vector(c) + m * (Vector(v) - Vector(c)))[:]]
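    # Note (added for clarity): scaling() scales point v about center c by factor m
    # using the affine formula c + m * (v - c); with m == 1.0 the vertex is unchanged.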
def register():
bpy.utils.register_class(SvScaleNodeMK2)
def unregister():
bpy.utils.unregister_class(SvScaleNodeMK2)
if __name__ == '__main__':
register()
|
gpl-3.0
| -1,494,444,262,516,776,700 | 34.315789 | 90 | 0.656355 | false | 3.784203 | false | false | false |
lucidfrontier45/flasktest
|
test/test.py
|
1
|
2489
|
from flask_app import application
import unittest
import logging
import json
import uuid
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
def make_logger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(ch)
return logger
class FlaskTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.app = application.app
self.app.config["TESTING"] = True
self.logger = make_logger(self.__class__.__name__)
def setUp(self):
self.client = self.app.test_client()
def test_index(self):
r = self.client.get("/")
self.logger.debug("code={0}, data={1}".format(r.status_code, r.data))
assert r.status_code == 200
def test_user_model(self):
model = application.model
u = model.User(name="Tom", data="hoge")
assert str(u) == "User[None,Tom,hoge]"
def test_put_user(self):
r = self.client.put("/user/")
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 400
name = str(uuid.uuid4())
r = self.client.put("/user/{0}/".format(name), data={"data":"fuga2"})
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 200
assert data["result"]["data"] == "fuga2"
r = self.client.put("/user/{0}/".format(name), data={"data":"fuga2"})
self.logger.info(r.status_code)
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 200
assert data["result"]["data"] == "fuga2"
def test_get_user(self):
r = self.client.get("/user/")
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 200
assert isinstance(data["result"], list)
self.client.put("/user/tom/", data={"data":"test"})
r = self.client.get("/user/tom/")
self.logger.info(r.status_code)
assert r.status_code == 200
data = json.loads(r.data)
self.logger.info(data)
assert data["code"] == 200
r = self.client.get("/user/tom2/")
assert r.status_code == 200
data = json.loads(r.data)
assert data["code"] == 404
if __name__ == "__main__":
unittest.main()
|
mit
| -5,974,267,527,050,064,000 | 29 | 85 | 0.575733 | false | 3.505634 | true | false | false |
awni/tensorflow
|
tensorflow/python/training/training.py
|
1
|
7130
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=wildcard-import,unused-import,g-bad-import-order,line-too-long
"""This library provides a set of classes and functions that helps train models.
## Optimizers
The Optimizer base class provides methods to compute gradients for a loss and
apply gradients to variables. A collection of subclasses implement classic
optimization algorithms such as GradientDescent and Adagrad.
You never instantiate the Optimizer class itself, but instead instantiate one
of the subclasses.
@@Optimizer
@@GradientDescentOptimizer
@@AdadeltaOptimizer
@@AdagradOptimizer
@@MomentumOptimizer
@@AdamOptimizer
@@FtrlOptimizer
@@RMSPropOptimizer
## Gradient Computation
TensorFlow provides functions to compute the derivatives for a given
TensorFlow computation graph, adding operations to the graph. The
optimizer classes automatically compute derivatives on your graph, but
creators of new Optimizers or expert users can call the lower-level
functions below.
@@gradients
@@AggregationMethod
@@stop_gradient
## Gradient Clipping
TensorFlow provides several operations that you can use to add clipping
functions to your graph. You can use these functions to perform general data
clipping, but they're particularly useful for handling exploding or vanishing
gradients.
@@clip_by_value
@@clip_by_norm
@@clip_by_average_norm
@@clip_by_global_norm
@@global_norm
## Decaying the learning rate
@@exponential_decay
## Moving Averages
Some training algorithms, such as GradientDescent and Momentum often benefit
from maintaining a moving average of variables during optimization. Using the
moving averages for evaluations often improve results significantly.
@@ExponentialMovingAverage
## Coordinator and QueueRunner
See [Threading and Queues](../../how_tos/threading_and_queues/index.md)
for how to use threads and queues. For documentation on the Queue API,
see [Queues](../../api_docs/python/io_ops.md#queues).
@@Coordinator
@@QueueRunner
@@add_queue_runner
@@start_queue_runners
## Summary Operations
The following ops output
[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
protocol buffers as serialized string tensors.
You can fetch the output of a summary op in a session, and pass it to
a [SummaryWriter](../../api_docs/python/train.md#SummaryWriter) to append it
to an event file. Event files contain
[`Event`](https://www.tensorflow.org/code/tensorflow/core/util/event.proto)
protos that can contain `Summary` protos along with the timestamp and
step. You can then use TensorBoard to visualize the contents of the
event files. See [TensorBoard and
Summaries](../../how_tos/summaries_and_tensorboard/index.md) for more
details.
@@scalar_summary
@@image_summary
@@histogram_summary
@@zero_fraction
@@merge_summary
@@merge_all_summaries
## Adding Summaries to Event Files
See [Summaries and
TensorBoard](../../how_tos/summaries_and_tensorboard/index.md) for an
overview of summaries, event files, and visualization in TensorBoard.
@@SummaryWriter
@@summary_iterator
## Training utilities
@@global_step
@@write_graph
"""
# Optimizers.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from tensorflow.python.ops import gradients
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training.adadelta import AdadeltaOptimizer
from tensorflow.python.training.adagrad import AdagradOptimizer
from tensorflow.python.training.adam import AdamOptimizer
from tensorflow.python.training.ftrl import FtrlOptimizer
from tensorflow.python.training.momentum import MomentumOptimizer
from tensorflow.python.training.moving_averages import ExponentialMovingAverage
from tensorflow.python.training.optimizer import Optimizer
from tensorflow.python.training.rmsprop import RMSPropOptimizer
from tensorflow.python.training.gradient_descent import GradientDescentOptimizer
from tensorflow.python.training.sync_replicas_optimizer import SyncReplicasOptimizer
# Utility classes for training.
from tensorflow.python.training.coordinator import Coordinator
from tensorflow.python.training.coordinator import LooperThread
from tensorflow.python.training.queue_runner import *
# For the module level doc.
from tensorflow.python.training import input as _input
from tensorflow.python.training.input import *
from tensorflow.python.training.device_setter import replica_device_setter
from tensorflow.python.training.saver import generate_checkpoint_state_proto
from tensorflow.python.training.saver import get_checkpoint_state
from tensorflow.python.training.saver import latest_checkpoint
from tensorflow.python.training.saver import Saver
from tensorflow.python.training.saver import update_checkpoint_state
from tensorflow.python.training.saver import export_meta_graph
from tensorflow.python.training.saver import import_meta_graph
from tensorflow.python.training.session_manager import SessionManager
from tensorflow.python.training.summary_io import summary_iterator
from tensorflow.python.training.summary_io import SummaryWriter
from tensorflow.python.training.supervisor import Supervisor
from tensorflow.python.training.training_util import write_graph
from tensorflow.python.training.training_util import global_step
from tensorflow.python.pywrap_tensorflow import NewCheckpointReader
# Training data protos.
from tensorflow.core.example.example_pb2 import *
from tensorflow.core.example.feature_pb2 import *
from tensorflow.core.protobuf.saver_pb2 import *
# Utility op. Open Source. TODO(touts): move to nn?
from tensorflow.python.training.learning_rate_decay import exponential_decay
from tensorflow.python.util.all_util import make_all
# Include extra modules for docstrings because:
# * Input methods in tf.train are documented in io_ops.
# * Saver methods in tf.train are documented in state_ops.
__all__ = make_all(__name__, [sys.modules[__name__], io_ops, state_ops])
# Symbols whitelisted for export without documentation.
# TODO(cwhipkey): review these and move to contrib or expose through
# documentation.
__all__.extend([
"BytesList",
"Example",
"Feature",
"FeatureList",
"FeatureLists",
"Features",
"FloatList",
"InferenceExample",
"Int64List",
"LooperThread",
"SaverDef",
"SequenceExample",
"export_meta_graph",
"generate_checkpoint_state_proto",
"import_meta_graph",
"queue_runner",
])
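# Illustrative use of this module (comment only, not part of the original file):
#
#   optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
#   train_op = optimizer.minimize(loss)
#   saver = tf.train.Saver()
#
# See the module docstring above for the documented groups of exported symbols.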
|
apache-2.0
| 2,207,461,269,452,418,600 | 32.952381 | 84 | 0.78892 | false | 4.055745 | false | false | false |
pacoqueen/bbinn
|
formularios/utils_almacen.py
|
1
|
5346
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# ([email protected], [email protected]) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## utils_almacen.py - Utilities for the almacén (warehouse) module.
###################################################################
## NOTES:
##
## ----------------------------------------------------------------
##
###################################################################
## Changelog:
## October 4, 2005 -> Started
## January 19, 2006 -> Fork to v02.
###################################################################
import sys
import time
sys.path.append('../framework')
import pclases, sqlobject, time
def id_propia_empresa_proveedor():
"""
    Returns the id of the company itself in the proveedores (suppliers) table.
"""
try:
empresa = pclases.DatosDeLaEmpresa.select()[0]
except IndexError:
print "ERROR: No hay datos de la empresa."
return 0
try:
empresa = pclases.Proveedor.select(pclases.Proveedor.q.nombre==empresa.nombre)[0]
except: #IndexError? SQLObjectNotFound?
print "ERROR: La empresa no está en la tabla de de proveedores."
return 0
return empresa.id
def id_propia_empresa():
"""
    Returns the id of the company itself in the clientes (customers) table.
"""
try:
empresa = pclases.DatosDeLaEmpresa.select()[0]
except IndexError:
print "ERROR: No hay datos de la empresa."
return 0
try:
empresa = pclases.Cliente.select(pclases.Cliente.q.nombre==empresa.nombre)[0]
except: #IndexError? SQLObjectNotFound?
print "ERROR: La empresa no está en la tabla de clientes."
return 0
return empresa.id
def ultimo_pedido_de_compra_mas_uno():
"""
    Returns the last valid purchase order number,
    or 0 if there are no purchase orders.
    The order number is returned as a numeric value (even
    though it is actually a str in the DB).
    Orders whose number cannot be interpreted as a number are
    ignored, and only orders from the current year are taken
    into account.
    The criterion used to find the last order number is the date.
    If the number following that of the last order by date is
    already taken (it should not be), it keeps adding 1 until
    it reaches a free order number.
"""
import mx
from mx.DateTime import localtime as ahora
strnumspedido = pclases.PedidoCompra._connection.queryAll("SELECT numpedido FROM pedido_compra WHERE date_part('year', fecha) = %d ORDER BY fecha, numpedido;" % (ahora().year))
intnumspedido = []
for numpedido in strnumspedido:
try:
intnumspedido.append(int(numpedido[0]))
except (ValueError, IndexError):
pass
if len(intnumspedido) == 0:
ultimo = 0
else:
ultimo = intnumspedido[-1]
while pclases.PedidoCompra.select(pclases.PedidoCompra.q.numpedido == str(ultimo)).count() != 0:
ultimo += 1
return ultimo
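# Example (added for clarity, values made up): if this year's numeric order numbers
# are '7' and '8', ultimo starts at 8; since an order numbered '8' already exists the
# while loop bumps it to 9, which is returned as the first free order number.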
def ultimo_numalbaran(venta, interno):
"""
    Returns the last delivery note (albarán) number that meets
    the conditions venta==True/False and interno==True/False,
    or 0 if there is none.
"""
albs = pclases.Albaran.select(sqlobject.AND(pclases.Albaran.q.venta == venta,
pclases.Albaran.q.interno == interno),
orderBy="-numalbaran")
if albs.count() == 0:
return 0
return albs[0].numalbaran
def productosConFicha():
"""
    Returns a list of identifiers of products that have a
    production sheet.
"""
fichas = pclases.FichaDeProduccion.select()
return [f.idproducto.id for f in fichas]
|
gpl-2.0
| 4,870,805,919,252,435,000 | 39.5 | 181 | 0.532548 | false | 3.597577 | false | false | false |