code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---|
# -*- encoding: utf-8 -*-
from abjad import *
def test_agenttools_InspectionAgent__select_vertical_moment_at_01():
score = Score([])
tuplet = scoretools.FixedDurationTuplet(Duration(4, 8), [])
tuplet.extend("d''8 c''8 b'8")
score.append(Staff([tuplet]))
staff_group = StaffGroup([])
staff_group.context_name = 'PianoStaff'
staff_group.append(Staff("a'4 g'4"))
staff_group.append(Staff("f'8 e'8 d'8 c'8"))
clef = Clef('bass')
attach(clef, staff_group[1])
score.append(staff_group)
assert systemtools.TestManager.compare(
score,
r'''
\new Score <<
\new Staff {
\tweak #'text #tuplet-number::calc-fraction-text
\times 4/3 {
d''8
c''8
b'8
}
}
\new PianoStaff <<
\new Staff {
a'4
g'4
}
\new Staff {
\clef "bass"
f'8
e'8
d'8
c'8
}
>>
>>
'''
)
def staff_group_moment(offset):
return inspect_(staff_group).get_vertical_moment_at(offset)
moment = staff_group_moment(Offset(0, 8))
assert moment.leaves == (staff_group[0][0], staff_group[1][0])
moment = staff_group_moment(Offset(1, 8))
assert moment.leaves == (staff_group[0][0], staff_group[1][1])
moment = staff_group_moment(Offset(2, 8))
assert moment.leaves == (staff_group[0][1], staff_group[1][2])
moment = staff_group_moment(Offset(3, 8))
assert moment.leaves == (staff_group[0][1], staff_group[1][3])
moment = staff_group_moment(Offset(99, 8))
assert moment.leaves == ()
def test_agenttools_InspectionAgent__select_vertical_moment_at_02():
score = Score([])
tuplet = scoretools.FixedDurationTuplet(Duration(4, 8), [])
tuplet.extend("d''8 c''8 b'8")
score.append(Staff([tuplet]))
staff_group = StaffGroup([])
staff_group.context_name = 'PianoStaff'
staff_group.append(Staff("a'4 g'4"))
staff_group.append(Staff("f'8 e'8 d'8 c'8"))
clef = Clef('bass')
attach(clef, staff_group[1])
score.append(staff_group)
assert systemtools.TestManager.compare(
score,
r'''
\new Score <<
\new Staff {
\tweak #'text #tuplet-number::calc-fraction-text
\times 4/3 {
d''8
c''8
b'8
}
}
\new PianoStaff <<
\new Staff {
a'4
g'4
}
\new Staff {
\clef "bass"
f'8
e'8
d'8
c'8
}
>>
>>
'''
)
def scorewide_vertical_moment(offset):
return inspect_(score).get_vertical_moment_at(offset)
moment = scorewide_vertical_moment(Offset(0, 8))
assert moment.leaves == (
score[0][0][0],
staff_group[0][0],
staff_group[1][0],
)
moment = scorewide_vertical_moment(Offset(1, 8))
assert moment.leaves == (
score[0][0][0],
staff_group[0][0],
staff_group[1][1],
)
moment = scorewide_vertical_moment(Offset(2, 8))
assert moment.leaves == (
score[0][0][1],
staff_group[0][1],
staff_group[1][2],
)
moment = scorewide_vertical_moment(Offset(3, 8))
assert moment.leaves == (
score[0][0][2],
staff_group[0][1],
staff_group[1][3],
)
moment = scorewide_vertical_moment(Offset(99, 8))
assert moment.leaves == () | mscuthbert/abjad | abjad/tools/agenttools/test/test_agenttools_InspectionAgent__select_vertical_moment_at.py | Python | gpl-3.0 | 3,867 |
from nose.tools import *
import builder.automate as ba
from pywinauto import WindowNotFoundError, application
def test_exe():
exe = ba.exe("/")
assert exe.split('.')[1] == 'exe'
@raises(WindowNotFoundError)
def test_connect_not_running():
ba.connect()
def test_connect():
ba.start(ba.exe(ba.default_path))
app = ba.connect()
assert type(app) == application.Application
app.kill_()
def test_start():
app = ba.start(ba.exe(ba.default_path))
assert type(app) == application.Application
app.kill_()
def test_builder():
bd = ba.Builder()
assert type(bd.app) == application.Application
assert bd.app.window_(title_re=ba.match_untitled).Exists()
bd.app.kill_()
def test_builder_exit():
bd = ba.Builder()
bd.exit()
assert not bd.app.window_(title_re=ba.match_untitled).Exists()
def test_builder_main():
bd = ba.Builder()
main = bd.main()
assert type(main) == application.WindowSpecification
bd.app.kill_()
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-v'])
| degiere/builder-automation | tests/test_automate.py | Python | gpl-2.0 | 1,084 |
# -*- coding: utf-8 -*-
# django-djcopyright
# tests/templatetags/test_djcopyright_tags.py
from typing import List
from django.test import TestCase
from django.utils import timezone
from djcopyright.templatetags.djcopyright_tags import djcopyright_years
__all__: List[str] = ["DjcopyrightYearsTemplatetagTest"]
YEAR: int = timezone.now().today().year
class DjcopyrightYearsTemplatetagTest(TestCase):
"""Djcopyright years templatetag tests."""
def test_djcopyright_years(self) -> None:
"""Must return formatted copyright tag."""
result: str = djcopyright_years()
self.assertIsInstance(obj=result, cls=str)
self.assertEqual(first=result, second=str(YEAR))
| vint21h/django-djcopyright | tests/templatetags/test_djcopyright_tags.py | Python | gpl-3.0 | 707 |
#coding:utf-8
from flask import Flask,request, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('demo.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
| gensmusic/test | third_party/python/flask/third-party/pagedown/usage.py | Python | gpl-2.0 | 230 |
# -*- coding: utf-8 -*-
#
# This file is distributed under MIT License or default open-tamil license.
# (C) 2013-2015 Muthiah Annamalai
#
# This file is part of 'open-tamil' examples
# It can be used to identify patterns in a Tamil text files;
# e.g. it has been used to identify patterns in Tamil Wikipedia
# articles.
#
from __future__ import print_function
import tamil
import sys
import codecs
from transliterate import *
import re
from functools import cmp_to_key
import operator
PYTHON3 = sys.version[0] > '2'
if not PYTHON3:
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
# use generators for better memory footprint -- 04/04/15
class WordFrequency(object):
# get words
@staticmethod
def get_tamil_words_iterable( letters ):
""" given a list of UTF-8 letters section them into words, grouping them at spaces """
#punctuations = u'-,+,/,*,>,<,_,],[,{,},(,)'.split(',')+[',']
#isspace_or_tamil = lambda x: not x in punctuations and tamil.utf8.istamil(x)
# correct algorithm for get-tamil-words
buf = []
for idx,let in enumerate(letters):
if tamil.utf8.istamil( let ):
buf.append( let )
else:
if len(buf) > 0:
yield u"".join( buf )
buf = []
if len(buf) > 0:
yield u"".join(buf)
# sentinel
def __init__(self,tatext=u''):
object.__init__(self)
self.frequency = {}
# process data
def process(self,new_text):
for taline in new_text.split(u"\n"):
self.tamil_words_process( taline )
return
# finalize
def display(self):
self.print_tamil_words( )
return
# processor / core
def tamil_words_process( self, taline ):
taletters = tamil.utf8.get_letters_iterable(taline)
# raw words
#for word in re.split(u"\s+",tatext):
# print(u"-> ",word)
# tamil words only
for pos,word in enumerate(WordFrequency.get_tamil_words_iterable(taletters)):
if len(word) < 1:
continue
self.frequency[word] = 1 + self.frequency.get(word,0)
return
# closer/results
def print_tamil_words(self):
# sort words by descending order of occurence
print(u"# unique words = %d"%(len(self.frequency)))
for l in sorted(self.frequency.items(), key=operator.itemgetter(1)):
print( l[0],':',l[1])
print(u"#"*80)
print(u"# sorted in Tamil order")
for l in sorted(self.frequency.keys(), key=cmp_to_key(tamil.utf8.compare_words_lexicographic)):
print( l,':',self.frequency[l])
return
# driver
def demo_tamil_text_filter( file_urls ):
#url = u"../tawiki-20150316-all-titles"
if not type(file_urls) is list:
file_urls = [file_urls]
obj = WordFrequency( )
for filepath in file_urls:
try:
tatext = codecs.open(filepath,'r','utf-8').read()
obj.process(tatext)
except Exception as e:
sys.stderr.write("Skipping the file :"+filepath+" due to exception\n\t\t " + str(e)+"\n")
obj.display()
return obj
if __name__ == u"__main__":
if len(sys.argv) < 2:
print("usage: python solpattiyal.py <filename>")
print(" this command shows list of unique words in Tamil and their frequencies in document(s);")
print(" it also relists the words in the sorted order")
sys.exit(-1)
demo_tamil_text_filter(sys.argv[1:])
| tshrinivasan/open-tamil | examples/solpattiyal.py | Python | mit | 3,639 |
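The word-splitting generator above buffers letters and emits a word each time a non-Tamil character is reached. A minimal self-contained sketch of that same buffering pattern follows; str.isalpha stands in for tamil.utf8.istamil purely so the example runs without the open-tamil package (that substitution is an assumption for illustration only).
# Illustrative sketch only: the real code tests tamil.utf8.istamil(letter);
# str.isalpha is used here so the example is self-contained.
def words_from_letters(letters):
    buf = []
    for letter in letters:
        if letter.isalpha():
            buf.append(letter)
        else:
            if buf:
                yield "".join(buf)
                buf = []
    if buf:
        yield "".join(buf)
print(list(words_from_letters("one, two;three ")))  # ['one', 'two', 'three']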
from __future__ import absolute_import
__author__ = 'chris'
from django.forms.models import model_to_dict
import six
class UpdateScriptsMixin(object):
def save(self, **kwargs):
super(UpdateScriptsMixin, self).save(**kwargs)
from ..backend.utils import load_scripts
load_scripts()
class WooeyPy2Mixin(object):
def __unicode__(self):
return unicode(self.__str__())
# from
# http://stackoverflow.com/questions/1355150/django-when-saving-how-can-you-check-if-a-field-has-changed
class ModelDiffMixin(object):
"""
A model mixin that tracks model fields' values and provides some useful API
to know which fields have been changed.
"""
def __init__(self, *args, **kwargs):
super(ModelDiffMixin, self).__init__(*args, **kwargs)
self.__initial = self._dict
@property
def diff(self):
d1 = self.__initial
d2 = self._dict
diffs = [(k, (v, d2[k])) for k, v in d1.items() if v != d2[k]]
return dict(diffs)
@property
def has_changed(self):
return bool(self.diff)
@property
def changed_fields(self):
return self.diff.keys()
def get_field_diff(self, field_name):
"""
Returns a diff for field if it's changed and None otherwise.
"""
return self.diff.get(field_name, None)
def save(self, *args, **kwargs):
"""
Saves model and set initial state.
"""
super(ModelDiffMixin, self).save(*args, **kwargs)
self.__initial = self._dict
@property
def _dict(self):
return model_to_dict(self, fields=[field.name for field in
self._meta.fields])
| wooey/django-djangui | wooey/models/mixins.py | Python | bsd-3-clause | 1,696 |
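ModelDiffMixin works by snapshotting the model's field values at __init__ and save time and comparing that snapshot with the current values. Below is a hypothetical usage sketch on an invented Django model; the import path is guessed from the file path above and a configured Django project is assumed.
# Hypothetical sketch -- the Profile model and its fields are invented for illustration.
from django.db import models
from wooey.models.mixins import ModelDiffMixin  # import path assumed from the file path above
class Profile(ModelDiffMixin, models.Model):
    name = models.CharField(max_length=64)
    age = models.IntegerField(default=0)
# profile = Profile.objects.get(pk=1)
# profile.name = 'new name'
# profile.has_changed               # True
# list(profile.changed_fields)      # ['name']
# profile.get_field_diff('name')    # ('old name', 'new name')
# profile.save()                    # snapshot refreshed; has_changed is False again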
#!/usr/bin/python
#
# Retrieve information on an existing VPC.
#
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
import boto.vpc
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource_tags=dict(type='dict', required=True)
))
module = AnsibleModule(argument_spec=argument_spec)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg="region must be specified")
try:
connection = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
vpcs = connection.get_all_vpcs()
vpcs_w_resources = filter(
lambda x: x.tags == module.params.get('resource_tags'), vpcs)
if len(vpcs_w_resources) != 1:
if len(vpcs_w_resources) == 0:
module.fail_json(msg="No vpc found")
else:
module.fail_json(msg="Multiple VPCs with specified resource_tags")
vpc = vpcs_w_resources[0]
subnets = connection.get_all_subnets(filters={'vpc_id': vpc.id})
def subnet_data(s):
d = s.__dict__
del d["connection"]
del d["region"]
return d
data = map(subnet_data, subnets)
facts = {
'ec2_vpc': {
'id': vpc.id,
'subnets': data
}
}
module.exit_json(changed=False, ansible_facts=facts)
main()
| rackn/container-networking-ansible | test/common/library/ec2_vpc_facts.py | Python | apache-2.0 | 1,587 |
# encoding: UTF-8
import sys
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
sys.path.append('../')
sys.path.append('../../')
sys.path.append('../../vnpy/trader/')
import multiprocessing
import signal
from time import sleep
from datetime import datetime, time
import vnpy.trader.vtPath
from vnpy.event.eventType import *
from vnpy.trader.vtEngine import MainEngine,LogEngine
from vnpy.event.eventEngine import Event
from vnpy.trader.vtEvent import *
from vnpy.trader.vtFunction import isRecordingTime, isTradingTime
# Load low-level gateway interfaces
try:
from vnpy.trader.gateway import ctpGateway
except ImportError:
print("ctpGateway load failed!")
pass
# Load upper-level applications
from vnpy.trader.app import (riskManager,dataRecorder,rpcService,ctaStrategy,spreadTrading,algoTrading)
def isRestartTime():
currentTime = datetime.now().time()
currentTime = int(currentTime.strftime("%H%M"))
restart = False
# Determine which time window we are currently in
if (currentTime == 850 or
currentTime == 2050):
restart = True
return restart
#----------------------------------------------------------------------
def processErrorEvent(event):
"""
Handle error events.
After each login the server re-pushes every error generated that day, so these messages are not suitable for the log file.
"""
error = event.dict_['data']
print(u'Error code: %s, error message: %s' % (error.errorID, error.errorMsg))
#----------------------------------------------------------------------
def runChildProcess():
"""子进程运行函数"""
# print '-'*20
# Create the log engine
le = LogEngine()
le.setLogLevel(le.LEVEL_INFO)
le.addConsoleHandler()
le.addFileHandler()
le.info(u'Starting the market-data recording child process')
# Create the main engine
me = MainEngine()
ee = me.eventEngine
ee.register(EVENT_LOG, le.processLogEvent)
ee.register(EVENT_CTA_LOG, le.processLogEvent)
ee.register(EVENT_ERROR, processErrorEvent)
try:
# Add trading gateways
try:
me.addGateway(ctpGateway)
except:
pass
# Add upper-level applications
me.addApp(riskManager)
me.addApp(dataRecorder)
# fix: start rpcService only after the server side has finished initializing
#me.addApp(rpcService)
me.addApp(ctaStrategy)
me.addApp(spreadTrading)
me.addApp(algoTrading)
le.info(u'Main engine created')
# Connect to the MongoDB database automatically
me.dbConnect()
le.info(u'connect MongoDB')
# Connect to CTP automatically
me.connect('CTP')
le.info(u'Connecting the CTP gateway')
# Query position information
me.qryPosition("CTP")
while not me.getAllContracts():
sleep(5)
le.info(u'Collecting contract information...')
sleep(3)
le.info(u'Number of contracts received: %s' % len(me.getAllContracts()))
# Persist the contract data held by the data engine to disk right away
me.dataEngine.saveContracts()
# Server-side initialization finished
# Start RPC
me.addApp(rpcService)
le.info(u'Starting RPCService')
'''
bug-fix: sleep briefly so clients have time to connect and receive the CTP info
'''
sleep(5.)
# CTP connection finished, send the restart signal
event = Event(EVENT_CTP_RESTARTED)
me.eventEngine.put(event)
le.info(u'Notifying clients of CTP RESTART')
while True:
sleep(1)
except KeyboardInterrupt:
le.info(u"Keyboard interrupt in process")
finally:
le.info(u"cleaning up")
# exit() sometimes hangs and never returns, so it is disabled for now
# try:
# me.exit()
# except Exception as e:
# self.writeLog(u'Error while exiting the engine: %s' % e)
#----------------------------------------------------------------------
# @Daemon('TradeServer.pid')
def runParentProcess():
"""父进程运行函数"""
# 创建日志引擎
print(u'启动行情记录守护父进程')
p = None # 子进程句柄
while True:
# The child process must be running during recording hours
if p is None:
print(u'Starting the child process')
p = multiprocessing.Process(target=runChildProcess)
p.daemon = True
p.start()
print(u'Child process started')
sleep(60) # one-minute window (avoids an immediate second restart)
# Restart the child process at market open (close it here; the outer loop will start it again)
if p is not None and isRestartTime() :
print(u'Stopping the child process')
p.terminate()
p.join()
p = None
print(u'Child process stopped')
sleep(7)
if __name__ == '__main__':
runParentProcess() | bigdig/vnpy | account/runServer.py | Python | mit | 4,757 |
import urllib2,cookielib
import random
import signal
import sys
from sys import argv
from bs4 import BeautifulSoup
USAGE = """Usage:
python s3knock.py wordlist term position
Example: python s3knock.py wordlist.txt tumblr 1
"""
SEPARATORS = ["", "-", ".","_"]
class bcolors:
public = '\033[92m'
exists = '\033[93m'
problem = '\033[91m'
stop = '\033[0m'
discovered = []
def main(wordlist, base, position):
with open(wordlist) as wordlist_file:
lines = [line.rstrip('\n') for line in wordlist_file]
print "Starting search..."
for line in lines:
for sep in SEPARATORS:
if position == 1:
site = "http://%s%s%s.s3.amazonaws.com/" % (base, sep, line if line[-1] != "." else line[-1])
else:
site = "https://%s%s%s.s3.amazonaws.com/" % (line, sep, base)
sys.stdout.write("\033[K")
print "Testing: %s" % site
sys.stdout.write("\033[F")
hdr1 = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
hdr2 = {'User-Agent': 'Mozilla/5.0 AppleWebKit/537.11 Chrome/23.0.1271.64 Safari/537.16',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
req = urllib2.Request(site, headers=random.choice([hdr1, hdr2]))
try:
page = urllib2.urlopen(req)
xml = page.read()
print bcolors.public + '[*] found : ' + site + " Public! " + bcolors.stop
discovered.append(site)
except urllib2.HTTPError, e:
xml = e.fp.read()
soup = BeautifulSoup(xml, features='xml')
for q in soup.find_all('Error'):
if q.find('Code').text != 'NoSuchBucket':
print bcolors.exists + '[*] found : ' + site + " " + q.find('Code').text + bcolors.stop
except urllib2.URLError, e:
print 'INFO: Invalid domain format. No DNS resolution for site %s' % site
print_summary()
def print_summary():
print ""
if not discovered:
print "No public sites found!"
return
print "Summary of public sites found: "
for s in discovered:
print s
def signal_handler(signal, frame):
print "\nCtrl+C detected. Exiting..."
print_summary()
sys.exit(0)
if __name__ == '__main__':
if len(argv) < 4:
print "ERROR: Not enough arguments given"
print USAGE
sys.exit(1)
wordlist = argv[1]
base = argv[2]
try:
position = int(argv[3])
except ValueError as e:
print "ERROR: position argument not a number"
sys.exit(1)
signal.signal(signal.SIGINT, signal_handler)
main(wordlist, base, position)
| theanti9/s3knock | s3knock.py | Python | mit | 3,409 |
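For each wordlist entry the scanner tries every separator and, depending on the position argument, places the base term before or after the word, so one word expands into several candidate bucket hostnames. A small sketch of just that name-expansion step is below; no HTTP requests are made and the example term and word are invented.
# Name-expansion sketch only -- no network calls.
SEPARATORS = ["", "-", ".", "_"]
def candidate_urls(base, word, position):
    for sep in SEPARATORS:
        if position == 1:
            yield "http://%s%s%s.s3.amazonaws.com/" % (base, sep, word)
        else:
            yield "https://%s%s%s.s3.amazonaws.com/" % (word, sep, base)
for url in candidate_urls("tumblr", "backup", 1):
    print(url)
# http://tumblrbackup.s3.amazonaws.com/
# http://tumblr-backup.s3.amazonaws.com/
# http://tumblr.backup.s3.amazonaws.com/
# http://tumblr_backup.s3.amazonaws.com/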
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, json
KEY1 = "Key1"
KEY2 = "Key2"
KEY3 = "Key3"
# Basic template of configuration
DEFAULT_CONF = {'mailbox0':{'username':'',
'password':'',
'imap_server':'',
'imap_port':'993',
'smtp_server':'',
'smtp_port':'587',
'auto_check_freq':60 # in second
},
'settings':{'num_of_mailbox':0, # be supported more than 1 email
'auto_check':False,
'startup':True
}
}
conf = {} # configurations
dirs = {} # important directories of project
tmp = {} # tmp variables
def init():
global conf, dirs
# Initialize dirs
dirs["console"] = os.getcwd()
dirs["src"] = os.path.dirname(__file__)
dirs["config"] = os.path.join(dirs["src"], "config.json")
dirs["project"] = dirs["src"]
dirs["res"] = os.path.join(dirs["project"], "res")
dirs["app_icon"] = os.path.join(dirs["res"], "app_icon.png")
dirs["tray_icon"] = os.path.join(dirs["res"], "tray_icon.png")
dirs["mail_icon"] = os.path.join(dirs["res"], "mail_icon.png")
# Initialize conf
if os.path.isfile(dirs["config"]):
try:
conf = json.load(open('config.json'))
except:
print("[!] Config file is not valid. Loading default configurations.")
set_default_conf()
else:
print("[!] There is no config file. I am creating one for you :)")
set_default_conf()
def set_default_conf():
global conf
conf = DEFAULT_CONF.copy()
save_conf()
def save_conf():
json.dump(conf, open(dirs["config"], 'w'))
def encrypt(value, k=KEY1):
if value=="":
return ""
return xor(value, k)
def decrypt(value, k=KEY1):
if value=="":
return ""
value = "".join(map(chr, value))
value = xor(value, k)
value = "".join(map(chr, value))
return value
def xor(s, k=KEY1):
k = k*(int(len(s)/len(k))+1)
return [ord(s[i]) ^ ord(k[i]) for i in range(len(s))]
init()
| furkantokac/Fmail | Fmail/config.py | Python | mit | 2,219 |
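encrypt() XORs every character of the value with a repeating key and returns a list of integers, and decrypt() maps those integers back to characters and XORs them with the same key to recover the original text. A standalone round-trip sketch of the same scheme follows; the logic is re-implemented here only so the example runs without importing the module, and the sample key and password are invented.
# Standalone re-implementation of the XOR round-trip above (illustrative only).
KEY = "Key1"
def xor(s, k):
    k = k * (int(len(s) / len(k)) + 1)          # repeat the key to cover the text
    return [ord(s[i]) ^ ord(k[i]) for i in range(len(s))]
def encrypt(value, k=KEY):
    return xor(value, k) if value else ""
def decrypt(value, k=KEY):
    if not value:
        return ""
    value = "".join(map(chr, value))            # ints -> obfuscated characters
    return "".join(map(chr, xor(value, k)))     # XOR again restores the plain text
token = encrypt("secret-password")
print(decrypt(token) == "secret-password")      # True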
import logging
from grab.selector.selector import * # noqa
from grab.util.warning import warn
warn('Module `grab.selector` is deprecated. Use `selection` package.')
| kevinlondon/grab | grab/selector/__init__.py | Python | mit | 167 |
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.graph_models import MultiBarChart, Axis
from corehq.apps.reports.sqlreport import TableDataFormat
from custom.care_pathways.reports import CareBaseReport
from custom.care_pathways.filters import GeographyFilter, GenderFilter, GroupLeadershipFilter, CBTNameFilter, \
GroupByFilter, PPTYearFilter, ScheduleFilter, DisaggregateByFilter, TypeFilter
from custom.care_pathways.sqldata import AdoptionDisaggregatedSqlData
from custom.care_pathways.utils import CareDataFormatter, _chunks
class AdoptionDisaggregatedReport(CareBaseReport):
name = 'Adoption Disaggregated'
slug = 'adoption_disaggregated'
report_title = 'Adoption Disaggregated'
report_template_path = 'care_pathways/adoption_disaggregated_report.html'
default_rows = 100
@property
def fields(self):
filters = [GeographyFilter,
PPTYearFilter,
GenderFilter,
GroupLeadershipFilter,
CBTNameFilter]
if self.domain == 'pathways-india-mis':
filters.append(ScheduleFilter)
filters.append(TypeFilter)
filters.append(GroupByFilter)
filters.append(DisaggregateByFilter)
return filters
@property
def report_config(self):
config = super(AdoptionDisaggregatedReport, self).report_config
config.update(dict(
group=self.request.GET.get('group_by', ''),
disaggregate_by=self.request.GET.get('disaggregate_by', '')
))
self.chunk_size = 1 if (config['gender'] or config['group_leadership']) else 3
return config
@property
def report_context(self):
context = super(AdoptionDisaggregatedReport, self).report_context
context.update({'chunk_size': self.chunk_size+1})
return context
@property
def data_provider(self):
return AdoptionDisaggregatedSqlData(domain=self.domain, config=self.report_config, request_params=self.request_params)
@property
def headers(self):
columns = [DataTablesColumn(c.header, sortable=False) for c in self.data_provider.columns]
headers = DataTablesHeader(*columns)
return headers
@property
def rows(self):
formatter = CareDataFormatter(TableDataFormat(self.data_provider.columns, no_value=self.data_provider.no_value))
return formatter.format(self.data_provider.data, keys=self.data_provider.keys,
group_by=self.data_provider.group_by, domain=self.domain, chunk_size=self.chunk_size)
def get_chart(self, rows, x_label, y_label):
chunks = _chunks(list(rows), self.chunk_size + 1)
charts = []
if self.request.GET.get('group_by', '') == 'domain':
chunks = sorted(chunks, key=lambda k: k[0][0])
for chunk in chunks:
chart = MultiBarChart(chunk[0][0], x_axis=Axis(x_label), y_axis=Axis(y_label, '.0%'))
chart.height = 300
chart.rotateLabels = 0
chart.marginBottom = 80
chart.marginLeft = 100
self._chart_data(chart, chunk[1:])
charts.append(chart)
return charts
def _chart_data(self, chart, rows):
def p2f(column):
return float(column['html'].strip('%')) / 100.0
if rows:
charts = [[], [], []]
for row in rows:
group_name = row[0]
total = sum(row[1:])
total = float(total) if total else 1.0
for ix, column in enumerate(row[1:]):
charts[ix].append({'x': group_name, 'y': float(column) / total})
chart.add_dataset('None', charts[0], "red")
chart.add_dataset('Some', charts[1], "yellow")
chart.add_dataset('All', charts[2], "green")
@property
def charts(self):
rows = self.rows
return self.get_chart(rows, '', '')
| puttarajubr/commcare-hq | custom/care_pathways/reports/adoption_disaggregated_report.py | Python | bsd-3-clause | 4,008 |
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def main():
args = demisto.args()
query = args.get('query')
size = int(args.get('size'))
query_result = demisto.searchIndicators(query=query, size=1, page=0)
total = query_result.get('total', 0)
outputs = {
'Query': query,
'Size': total,
'ConditionMet': total >= size
}
return_results(CommandResults(outputs=outputs, outputs_key_field='Query', outputs_prefix='IndicatorsCheck'))
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| VirusTotal/content | Packs/DeveloperTools/Scripts/VerifyEnoughIndicators/VerifyEnoughIndicators.py | Python | mit | 593 |
from datetime import datetime
now = datetime.now()
year, month = now.year, now.month
from calendar import Calendar
def consumed_hours():
from numpy.random import normal
return round(normal(10.5, 0.7), 1)
cal = Calendar()
for d in cal.itermonthdates(year, month):
if d.month != month:
print '%5s'%('%d/%d'%(d.month, d.day)),
else:
print '%5d'%d.day,
if d.weekday() >= 6:
print
print ' '.join(['%5.1f'%consumed_hours() for _ in range(5)]),
print ' '.join(['%5s'%'-' for _ in range(2)])
print '-'*42
| ayokyun/si-epm-timetable | random_timetable.py | Python | mit | 572 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import os.path as path
import zipfile
from lxml import etree
import difflib
from translate.storage import factory
from translate.convert import odf2xliff
from translate.convert import xliff2odf
def setup_module(module):
os.chdir(path.dirname(__file__))
def args(src, tgt, **kwargs):
arg_list = [u'--errorlevel=traceback', src, tgt]
for flag, value in kwargs.iteritems():
value = unicode(value)
if len(flag) == 1:
arg_list.append(u'-%s' % flag)
else:
arg_list.append(u'--%s' % flag)
if value is not None:
arg_list.append(value)
return arg_list
def xliff___eq__(self, other):
return self.units == other.units
factory.classes[u'xlf'].__eq__ = xliff___eq__
def print_diff(store1, store2):
for line in difflib.unified_diff(str(store1).split('\n'), str(store2).split('\n')):
print line
SOURCE_ODF = u'test_2.odt'
REFERENCE_XLF = u'test_2-test_odf2xliff-reference.xlf'
GENERATED_XLF_ITOOLS = u'test_2-test_odf2xliff-itools.xlf'
GENERATED_XLF_TOOLKIT = u'test_2-test_odf2xliff-toolkit.xlf'
TARGET_XLF = u'test_2-test_roundtrip.xlf'
REFERENCE_ODF = u'test_2.odt'
GENERATED_ODF = u'test_2-test_roundtrip-generated.odt'
def test_odf2xliff():
reference_xlf = factory.getobject(REFERENCE_XLF)
odf2xliff.main(args(SOURCE_ODF, GENERATED_XLF_TOOLKIT))
generated_xlf_toolkit = factory.getobject(GENERATED_XLF_TOOLKIT)
print_diff(reference_xlf, generated_xlf_toolkit)
assert reference_xlf == generated_xlf_toolkit
odf2xliff.main(args(SOURCE_ODF, GENERATED_XLF_ITOOLS))
generated_xlf_itools = factory.getobject(GENERATED_XLF_ITOOLS)
print_diff(reference_xlf, generated_xlf_itools)
assert reference_xlf == generated_xlf_itools
def is_content_file(filename):
return filename in (u'content.xml', u'meta.xml', u'styles.xml')
class ODF(object):
def __init__(self, filename):
self.odf = zipfile.ZipFile(filename)
def _get_data(self, filename):
return self.odf.read(filename)
def _get_doc_root(self, filename):
return etree.tostring(etree.fromstring(self._get_data(filename)), pretty_print=True)
def __eq__(self, other):
if other == None:
return False
l1 = sorted(zi.filename for zi in self.odf.infolist())
l2 = sorted(zi.filename for zi in other.odf.infolist())
if l1 != l2:
print "File lists don't match:"
print l1
print l2
return False
for filename in l1:
if is_content_file(filename):
l = self._get_doc_root(filename)
r = other._get_doc_root(filename)
if l != r:
print "difference for file named", filename
return False
else:
if self._get_data(filename) != other._get_data(filename):
print "difference for file named", filename
return False
return True
def __str__(self):
return self._get_doc_root('content.xml')
def test_roundtrip():
odf2xliff.main(args(SOURCE_ODF, TARGET_XLF))
xliff2odf.main(args(TARGET_XLF, GENERATED_ODF, t=SOURCE_ODF))
reference_odf = ODF(REFERENCE_ODF)
generated_odf = ODF(GENERATED_ODF)
print_diff(reference_odf, generated_odf)
assert reference_odf == generated_odf
def remove(filename):
"""Removes the file if it exists."""
if os.path.exists(filename):
os.unlink(filename)
def teardown_module(module):
remove(GENERATED_XLF_TOOLKIT)
remove(GENERATED_ODF)
remove(GENERATED_XLF_ITOOLS)
remove(TARGET_XLF)
if __name__ == '__main__':
setup_module(None)
test_roundtrip()
teardown_module(None)
| lehmannro/translate | tests/odf_xliff/test_odf_xliff.py | Python | gpl-2.0 | 4,609 |
from Components.config import config
from Extra.SAPCL import SAPCL
import os
def autoSendCrashLogs(session, id, arg):
if not config.sifteam.cloud.crashlogs.value:
return
api = SAPCL()
files = os.listdir("/hdd/")
if not files:
return
for file in files:
try:
if file[:14] != "sifteam_crash_" or file[-4:] != ".log" or file[-9:] == "_sent.log":
continue
buff = open("/hdd/" + file).read()
api.sendCrashLog(buff)
os.rename("/hdd/" + file, "/hdd/" + file.replace(".log", "_sent.log"))
except Exception, e:
pass
| SIFTeam/enigma2 | lib/python/SIFTeam/Crashlogs.py | Python | gpl-2.0 | 552 |
"""The tests for the Template switch platform."""
from homeassistant.core import callback
from homeassistant import setup
import homeassistant.components as core
from homeassistant.const import STATE_ON, STATE_OFF
from tests.common import (
get_test_home_assistant, assert_setup_component)
class TestTemplateSwitch:
"""Test the Template switch."""
hass = None
calls = None
# pylint: disable=invalid-name
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.calls = []
@callback
def record_call(service):
"""Track function calls.."""
self.calls.append(service)
self.hass.services.register('test', 'automation', record_call)
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_template_state_text(self):
"""Test the state text of a template."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
state = self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
def test_template_state_boolean_on(self):
"""Test the setting of the state with boolean on."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ 1 == 1 }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
def test_template_state_boolean_off(self):
"""Test the setting of the state with off."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ 1 == 2 }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
def test_icon_template(self):
"""Test icon template."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
'icon_template':
"{% if states.switch.test_state.state %}"
"mdi:check"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes.get('icon') == ''
state = self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes['icon'] == 'mdi:check'
def test_entity_picture_template(self):
"""Test entity_picture template."""
with assert_setup_component(1, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
'entity_picture_template':
"{% if states.switch.test_state.state %}"
"/local/switch.png"
"{% endif %}"
}
}
}
})
self.hass.start()
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes.get('entity_picture') == ''
state = self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.attributes['entity_picture'] == '/local/switch.png'
def test_template_syntax_error(self):
"""Test templating syntax error."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{% if rubbish %}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_name_does_not_create(self):
"""Test invalid name."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test INVALID switch': {
'value_template':
"{{ rubbish }",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_invalid_switch_does_not_create(self):
"""Test invalid switch."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': 'Invalid'
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_no_switches_does_not_create(self):
"""Test if there are no switches no creation."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template'
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_template_does_not_create(self):
"""Test missing template."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'not_value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_on_does_not_create(self):
"""Test missing on."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'not_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_missing_off_does_not_create(self):
"""Test missing off."""
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'not_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
assert self.hass.states.all() == []
def test_on_action(self):
"""Test on action."""
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'test.automation'
},
'turn_off': {
'service': 'switch.turn_off',
'entity_id': 'switch.test_state'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_OFF)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_OFF
core.switch.turn_on(self.hass, 'switch.test_template_switch')
self.hass.block_till_done()
assert len(self.calls) == 1
def test_off_action(self):
"""Test off action."""
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'template',
'switches': {
'test_template_switch': {
'value_template':
"{{ states.switch.test_state.state }}",
'turn_on': {
'service': 'switch.turn_on',
'entity_id': 'switch.test_state'
},
'turn_off': {
'service': 'test.automation'
},
}
}
}
})
self.hass.start()
self.hass.block_till_done()
self.hass.states.set('switch.test_state', STATE_ON)
self.hass.block_till_done()
state = self.hass.states.get('switch.test_template_switch')
assert state.state == STATE_ON
core.switch.turn_off(self.hass, 'switch.test_template_switch')
self.hass.block_till_done()
assert len(self.calls) == 1
| persandstrom/home-assistant | tests/components/switch/test_template.py | Python | apache-2.0 | 16,305 |
import os
import sys
import time
import datetime
import pandas as pd
from math import log
from algorithm import Algorithm
epoch = datetime.datetime(1970, 1, 1)
class Hot:
"""
Ranks stocks based on whether the daily close price is above or below
the opening price.
"""
def __init__(self, ups, downs, date):
self.ups = ups
self.downs = downs
self.date = date
self.score = self.ups - self.downs
self.start_time = 1
def set_start_time(self, timestamp):
self.start_time = timestamp
#timestamp
def epoch_seconds(self):
""" Return the number of seconds from the epoch to date."""
td = self.date - epoch
return td.days * 86400 + td.seconds + (float(td.microseconds) / 1000000)
def execute(self):
""" The hot formula. Should match the equivalent function in postgres."""
order = log(max(abs(self.score), 1), 10)
sign = 1 if self.score > 0 else -1 if self.score < 0 else 0
seconds = self.epoch_seconds() - self.start_time
return round(sign * order + seconds / 45000, 7)
class Main(Algorithm):
name = "Reddit HOT Ranking"
def __init__(self, **kwargs):
''' Set local variables '''
self.good_time = time.mktime(time.strptime(kwargs['start_date'], "%Y-%m-%d"))
year = kwargs['start_date'].split('-')[0]
month = kwargs['start_date'].split('-')[1]
day = kwargs['start_date'].split('-')[2]
self.last_time = datetime.datetime(int(year), int(month), int(day))
super(Main, self).__init__()
def main(self):
self.results = {}
for symbol in self.symbols:
ups = 0
downs = 0
last_time = None
symbol_data = self.data[symbol]
try:
for i in range(len(symbol_data['Date'])):
str_open = "Open"
str_close = "Close"
s_time = str(symbol_data['Date'][i])
s_time = time.mktime(time.strptime(s_time, "%Y-%m-%d %H:%M:%S"))
s_open = symbol_data[str_open][i]
s_close = symbol_data[str_close][i]
if s_time >= self.good_time:
if s_close >= s_open:
ups = ups + 1
elif s_close < s_open:
downs = downs + 1
hot = Hot(ups, downs, self.last_time)
#hot.set_start_time(1101798784)
rank = hot.execute(), symbol
self.results[symbol] = rank
except:
self.results[symbol] = -1
return self.results
| hirodotp/mdata | algorithms/reddit.py | Python | gpl-2.0 | 2,221 |
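Hot.execute() is Reddit's "hot" formula: the sign of the up/down margin times the base-10 log of its magnitude, plus an age term that grows by one unit for every 45000 seconds (12.5 hours) past the chosen start time. A self-contained restatement of the same arithmetic is sketched below; the vote counts and date are invented.
# Self-contained restatement of the hot formula above; numbers are made up.
from datetime import datetime
from math import log
EPOCH = datetime(1970, 1, 1)
def hot(ups, downs, date, start_time=1):
    score = ups - downs
    order = log(max(abs(score), 1), 10)
    sign = 1 if score > 0 else -1 if score < 0 else 0
    seconds = (date - EPOCH).total_seconds() - start_time
    return round(sign * order + seconds / 45000, 7)
# More up-days pushes the rank up; the two results differ by exactly 2.0,
# i.e. +log10(10) versus -log10(10), since the age term is identical.
print(hot(15, 5, datetime(2015, 6, 1)))
print(hot(5, 15, datetime(2015, 6, 1)))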
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import sys
import signal
import argparse
import numpy as np
np.set_printoptions(precision=3, suppress=True)
np.set_printoptions(formatter={'float': '{: 0.3f}'.format})
# see: http://stackoverflow.com/questions/2891790/pretty-printing-of-numpy-array
import rospy
import roslib
roslib.load_manifest('vehicle_core')
from vehicle_interface.msg import PilotRequest, PilotStatus
from vehicle_interface.srv import BooleanService
# config
TOPIC_STATUS = 'pilot/status'
TOPIC_POS = 'pilot/position_req'
TOPIC_VEL = 'pilot/velocity_req'
TOPIC_STY = 'pilot/stay_req'
TOPIC_BOD = 'pilot/body_req'
SRV_SWITCH = 'pilot/switch'
DEFAULT_SLEEP = 1.0
TYPE_POS = 'position'
TYPE_VEL = 'velocity'
TYPE_STY = 'stay'
TYPE_BOD = 'body'
TYPE_TOPIC = {
TYPE_POS: TOPIC_POS,
TYPE_VEL: TOPIC_VEL,
TYPE_STY: TOPIC_STY,
TYPE_BOD: TOPIC_BOD
}
MODES = (
'depth',
'altitude',
)
STATUS_FIELDS = ('status', 'mode', 'des_pos', 'des_vel', 'err_pos', 'err_vel','lim_vel_user', 'lim_vel_ctrl')
# info
DESC='''
This utility sends position, velocity, body or stay requests to the pilot module.
The mode parameter (depth or altitude) requests either depth or altitude control on the z-axis.
If the axis value is missing or equals to 'n' the control will be disabled for that axis.
'''
EPI='''
Stay requests require all arguments to be provided by the user (e.g. all zeros).
Please refer to the package's documentation for further info.
'''
def main():
parser = argparse.ArgumentParser(description=DESC, epilog=EPI)
parser.add_argument('type', choices=TYPE_TOPIC.keys(), metavar='type', help='select request to send', default='position')
parser.add_argument('x', help='north coordinate', default='n', nargs='?')
parser.add_argument('y', help='east coordinate', default='n', nargs='?')
parser.add_argument('z', help='depth or altitude', default='n', nargs='?')
parser.add_argument('m', help='pitch attitude', default='n', nargs='?')
parser.add_argument('n', help='yaw attitude', default='n', nargs='?')
parser.add_argument('mode', choices=MODES, metavar='mode', help='select z-axis mode', default='depth', nargs='?')
parser.add_argument('-v', '--verbose', action='store_true', help='Print detailed information.')
if len(sys.argv) <= 1:
parser.print_help()
sys.exit(1)
# parse input
args = parser.parse_args()
# default request
des_pos = np.zeros(6)
des_vel = np.zeros(6)
dis_axi = np.zeros(6)
lim_vel = np.zeros(6)
mode = args.mode
type = args.type
topic = TYPE_TOPIC[type]
# parse degrees of freedom
for i, d in enumerate((args.x, args.y, args.z, 0, args.m, args.n)):
if d == 'n':
dis_axi[i] = 1
else:
if type == TYPE_VEL:
des_vel[i] = float(d)
else:
des_pos[i] = float(d)
# log info
print('Sending %s request at 1 Hz:' % type)
print('-----------------------------------------------------------------------------------')
print('mode: %s' % mode)
print('position: %s' % des_pos)
print('velocity: %s' % des_vel)
print('disable: %s\n' % dis_axi)
# ros code
rospy.init_node('pilot_cli', anonymous=True)
def send_request():
pr = PilotRequest()
pr.header.stamp = rospy.Time.now()
pr.position = des_pos.tolist()
pr.velocity = des_vel.tolist()
pr.disable_axis = dis_axi.tolist()
pr.limit_velocity = lim_vel.tolist()
pub.publish(pr)
def handle_status(data):
print('Pilot Status [%s]' % rospy.Time.now().to_sec())
print('-----------------------------------------------------------------------------------')
for field in STATUS_FIELDS:
value = getattr(data, field)
if field not in ('status', 'mode'):
value = np.array(value)
print('%s:\t%s' % (field, value))
print('dis_axi:\t%s' % dis_axi)
print('-----------------------------------------------------------------------------------')
print('Press Ctrl+C to interrupt ...\n\n')
# ros interface
pub = rospy.Publisher(topic, PilotRequest, tcp_nodelay=True, queue_size=1)
sub = rospy.Subscriber(TOPIC_STATUS, PilotStatus, handle_status, tcp_nodelay=True, queue_size=1)
srv = rospy.ServiceProxy(SRV_SWITCH, BooleanService)
def enable_pilot():
try:
srv.call(request=True)
except rospy.ServiceException as se:
print('Pilot Service not available!')
print(se)
sys.exit(-1)
# shutdown hook (intercepts Ctrl+C)
def disable_pilot():
try:
print('Got interrupt! Disabling the pilot on exit!\n')
srv.call(request=False)
except rospy.ServiceException as se:
print('Pilot Service not available!')
print(se)
rospy.on_shutdown(disable_pilot)
# start the processing
enable_pilot()
# send initial burst
for n in xrange(5):
send_request()
# main loop
while not rospy.is_shutdown():
# keep sending new requests only for specific types
if type not in (TYPE_STY, TYPE_BOD):
send_request()
rospy.sleep(DEFAULT_SLEEP)
if __name__ == '__main__':
main()
| decabyte/vehicle_core | scripts/pilot_cli.py | Python | bsd-3-clause | 5,387 |
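On the command line each of the six axes (x, y, z, roll, pitch, yaw) is either a number or the literal 'n', and 'n' marks that axis as disabled rather than commanded; roll is always fixed to 0. The sketch below isolates just that parsing step, detached from ROS; the argument values are invented.
# Axis-parsing sketch only; no ROS imports, argument values are invented.
import numpy as np
def parse_axes(x, y, z, m, n, request_type='position'):
    des_pos = np.zeros(6)
    des_vel = np.zeros(6)
    dis_axi = np.zeros(6)
    # roll is fixed to 0 in the CLI, hence the literal 0 in fourth place
    for i, d in enumerate((x, y, z, 0, m, n)):
        if d == 'n':
            dis_axi[i] = 1                      # axis left uncontrolled
        elif request_type == 'velocity':
            des_vel[i] = float(d)
        else:
            des_pos[i] = float(d)
    return des_pos, des_vel, dis_axi
pos, vel, dis = parse_axes('10', '5', '2', 'n', '1.57')
print(pos)   # x, y, z commanded; yaw set to 1.57 rad; pitch left at 0
print(dis)   # pitch (index 4) is flagged as disabled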
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for support of portable globes."""
import json
import os
import shlex
import subprocess
import sys
import time
import urlparse
import xml.sax.saxutils as saxutils
import distutils.dir_util
import distutils.errors
import errors
BYTES_PER_MEGABYTE = 1024.0 * 1024.0
NAME_TEMPLATE = "%s_%s"
class OsCommandError(Exception):
"""Thrown if os command fails."""
pass
# TODO: consider using a lib like bleach that is specifically
# aimed at foiling XSS attacks.
# Additional characters that need to be escaped for HTML are defined in a dictionary
# mapping each character to its escape string.
# xml.sax.saxutils.escape() takes care of &, < and >.
_HTML_ESCAPE_TABLE = {
'"': """,
"'": "'",
"`": "`",
"|": "|"
}
def HtmlEscape(text):
"""Escapes a string for HTML.
Args:
text: source string that needs to be escaped for HTML.
Returns:
HTML escaped string.
"""
if not text:
return text
return saxutils.escape(text, _HTML_ESCAPE_TABLE)
def FileSize(file_path):
"""Returns size of file in megabytes."""
return os.path.getsize(file_path) / BYTES_PER_MEGABYTE
def SizeAsString(size):
"""Converts megabyte float to a string."""
if size < 1000.0:
return "%0.2fMB" % size
size /= 1024.0
if size < 1000.0:
return "%0.2fGB" % size
else:
return "%0.2fTB" % (size / 1024.0)
def FileSizeAsString(file_path):
"""Returns size of file as a string."""
return SizeAsString(FileSize(file_path))
def DirectorySize(directory):
"""Returns size of directory in megabytes."""
directory_size = 0
if os.path.isdir(directory):
for (path, unused_dirs, files) in os.walk(directory):
for file_name in files:
file_path = os.path.join(path, file_name)
directory_size += os.path.getsize(file_path)
return directory_size / BYTES_PER_MEGABYTE
def DirectorySizeAsString(directory):
"""Returns size of directory as a string."""
return SizeAsString(DirectorySize(directory))
def CreateDirectory(directory):
"""Create entire directory path."""
if os.path.exists(directory):
return
try:
os.makedirs(directory)
except OSError:
PrintAndLog("Raising error: Cannot create directory \'%s\'" % directory)
raise
def CopyDirectory(source, destination, logger):
"""Copy from source to destination, which will be created if it does not exist."""
cmd = "Copying %s to %s" % (source, destination)
PrintAndLog(cmd, logger)
try:
distutils.dir_util.copy_tree(source, destination)
except distutils.errors.DistutilsFileError:
PrintAndLog("Raising error: Cannot copy to directory %s" % destination)
raise
def DiskSpace(path):
"""Returns remaining disk space in Megabytes."""
mount_info = os.statvfs(path)
return mount_info.f_bsize * mount_info.f_bavail / BYTES_PER_MEGABYTE
def Uid():
"""Returns a uid for identifying a globe building sequence."""
return "%d_%f" % (os.getpid(), time.time())
def GlobesToText(globes, template, sort_item, reverse=False, is_text=False):
"""Fills in globe template for each globe and returns as array of strings."""
result = []
# If it is text, sort the lower case version of the text.
if is_text:
items = sorted(globes.iteritems(),
key=lambda globe_pair: globe_pair[1][sort_item].lower(),
reverse=reverse)
# If it is NOT text, use default less than comparison.
else:
items = sorted(globes.iteritems(),
key=lambda globe_pair: globe_pair[1][sort_item],
reverse=reverse)
for [unused_key, globe] in iter(items):
next_entry = template
for [globe_term, globe_value] in globe.iteritems():
replace_item = "[$%s]" % globe_term.upper()
if globe_term == "globe" or globe_term == "info_loaded":
pass
elif globe_term == "size":
next_entry = next_entry.replace(replace_item, SizeAsString(globe_value))
else:
next_entry = next_entry.replace(replace_item, globe_value)
result.append(next_entry)
return result
def GlobeNameReplaceParams(globe_name):
"""Returns a single replacement parameter for the globe name."""
return {"[$GLOBE_NAME]": globe_name}
def ReplaceParams(text, replace_params):
"""Replace keys with values in the given text."""
for (key, value) in replace_params.iteritems():
text = text.replace(key, value)
return text
def OutputFile(file_name, replace_params):
"""Outputs a file to standard out with the globe name replaced."""
fp = open(file_name)
text = fp.read()
fp.close()
print ReplaceParams(text, replace_params)
def CreateInfoFile(path, description):
"""Create globe info file."""
content = "Portable Globe\n"
content += GmTimeStamp()
content += "\n%s" % TimeStamp()
content += "Globe description: %s\n" % description
CreateFile(path, content)
def CreateFile(path, content):
"""Create globe info file."""
try:
fp = open(path, "w")
fp.write(content)
fp.close()
except IOError as error:
print error
sys.exit(1)
def TimeStamp():
"""Create timestamp based on local time."""
return time.strftime("%Y-%m-%d %H:%M:%S\n", time.localtime())
def GmTimeStamp():
"""Create timestamp based on Greenwich Mean Time."""
return time.strftime("%Y-%m-%d %H:%M:%S GMT\n", time.gmtime())
def ConvertToQtNode(level, col, row):
"""Converts col, row, and level to corresponding qtnode string."""
qtnode = "0"
half_ndim = 1 << (level - 1)
for unused_ in xrange(level):
if row >= half_ndim and col < half_ndim:
qtnode += "0"
row -= half_ndim
elif row >= half_ndim and col >= half_ndim:
qtnode += "1"
row -= half_ndim
col -= half_ndim
elif row < half_ndim and col >= half_ndim:
qtnode += "2"
col -= half_ndim
else:
qtnode += "3"
half_ndim >>= 1
return qtnode
def JsBoolString(bool_value):
"""Write boolean value as javascript boolean."""
if bool_value:
return "true"
else:
return "false"
def WriteHeader(content_type="text/html"):
"""Output header for web page."""
# The trailing newline, plus the newline added by print, terminates the HTTP header block.
print "Content-Type: %s\n" % content_type
def ExecuteCmd(os_cmd, logger, dry_run=False):
"""Execute os command and log results.
Runs command, waits until it finishes, then analyses the return code, and
reports either "SUCCESS" or "FAILED".
Use if output of command is not desired, otherwise it should be redirected
to a file or use RunCmd below.
Args:
os_cmd: Linux shell command to execute.
logger: Logger responsible for outputting log messages.
dry_run: Whether command should only be printed but not run.
Throws:
OsCommandError
"""
PrintAndLog("Executing: %s" % os_cmd, logger)
if dry_run:
PrintAndLog("-- dry run --", logger)
return
try:
if isinstance(os_cmd, str):
os_cmd = shlex.split(os_cmd)
p = subprocess.Popen(os_cmd, shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
err_data = p.communicate()[1]
return_code = p.returncode
if return_code != 0:
PrintAndLog("Raising error: %s (return code %d)\n"
% (err_data, return_code), logger)
raise OsCommandError()
else:
PrintAndLog("SUCCESS", logger, None)
except Exception, e:
PrintAndLog("FAILED: %s" % e.__str__(), logger)
raise OsCommandError()
def ExecuteCmdInBackground(os_cmd, logger):
"""Execute os command in the background and log results.
Runs command in the background and returns immediately without waiting for
the execution to finish.
Use if the command will take longer time to finish than request timeout.
Args:
os_cmd: Linux shell command to execute.
logger: Logger responsible for outputting log messages.
Throws:
OsCommandError
"""
PrintAndLog("Executing in background: %s" % os_cmd, logger)
try:
subprocess.Popen(os_cmd + " &", shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except Exception, e:
PrintAndLog("FAILED: %s" % e.__str__(), logger)
raise OsCommandError()
def RunCmd(os_cmd):
"""Execute os command and return list of results and errors.
Runs command, waits until it finishes, then returns the output of execution
(if succeeded) or error information (if failed).
Use if output of command is needed.
Args:
os_cmd: Linux shell command to execute.
Returns:
Array of result lines.
"""
try:
if isinstance(os_cmd, str):
os_cmd = shlex.split(os_cmd)
results = subprocess.check_output(os_cmd)
return results.split("\n")
except subprocess.CalledProcessError as e:
# print "FAILURE: %s" % e.__str__()
return ["", e.__str__()]
def PrintAndLog(msg, logger=None, prefix="\n"):
if prefix:
print "%s%s" % (prefix, msg)
else:
print msg
if logger:
logger.Log(msg)
def GetDbrootInfoJson(globe, name):
"""Get dbroot info as a json string.
Args:
globe: portable_globe object.
name: name of portable globe
Returns:
Dbroot info in Json formatted string.
"""
dbroot_info = {"name": name,
"has_imagery": globe.HasImagery(),
"has_terrain": globe.HasTerrain(),
"is_proto_imagery": globe.IsProtoImagery(),
}
return json.dumps(dbroot_info)
def NormalizeTargetPath(target):
"""Normalizes the target path.
Adds leading slash if needed, strips ending slashes.
Args:
target: The target path (fusion db publish point).
Returns:
Normalized target path.
"""
if not target:
return target
target = target.strip()
target = target.rstrip("/")
if not target:
return target
if target[0] != "/":
target = "/{0}".format(target)
return target
def GetServerAndPathFromUrl(url):
"""Gets a server and a path from the url.
Args:
url: the URL.
Returns:
tuple (server, path). The server is 'scheme://host:port'.
The path can be empty string.
Raises:
InvalidValueError: when the url is not valid.
"""
server = ""
path = ""
url_obj = urlparse.urlparse(url)
if url_obj.scheme and url_obj.netloc and url_obj.path:
server = "{0}://{1}".format(url_obj.scheme, url_obj.netloc)
path = url_obj.path
elif url_obj.scheme and url_obj.netloc:
server = "{0}://{1}".format(url_obj.scheme, url_obj.netloc)
elif url_obj.path:
path = url_obj.path
else:
raise errors.InvalidValueError("Invalid URL: %s" % url)
return (server, path)
def IsProcessRunningForGlobe(tool_name, base_dir):
"""Checks whether specified job is running for portable.
Checks if process is running by detecting it in the output returned by
executing "ps -ef | grep base_dir".
Args:
tool_name: tool name to check if it is present in list of running
processes.
base_dir: base directory for corresponding portable.
Returns:
whether specified job is running.
"""
ps_cmd = "ps -ef"
grep_cmd = "grep \"%s\"" % base_dir
ps_subprocess = subprocess.Popen(shlex.split(ps_cmd),
shell=False,
stdout=subprocess.PIPE)
grep_subprocess = subprocess.Popen(shlex.split(grep_cmd),
shell=False,
stdin=ps_subprocess.stdout,
stdout=subprocess.PIPE)
ps_subprocess.stdout.close() # Allow ps_subprocess to receive a SIGPIPE if grep_subprocess exits.
procs = grep_subprocess.communicate()[0]
if procs:
procs = procs.split("/n")
for proc in procs:
if proc.find(tool_name) > 0:
return True
return False
class Log(object):
"""Simple logger class."""
def __init__(self, log_file, enabled=True):
self.log_file_ = log_file
self.enabled_ = enabled
def CheckLogFolder(self):
return os.path.exists(os.path.dirname(self.log_file_))
def Clear(self):
"""Clear the log file."""
if not self.CheckLogFolder():
return
fp = open(self.log_file_, "w")
fp.close()
def Log(self, message):
"""Log message to cutter log."""
if not self.enabled_ or not self.CheckLogFolder():
return
fp = open(self.log_file_, "a")
fp.write("%s" % TimeStamp())
fp.write("%s\n" % message)
fp.close()
| tst-ahernandez/earthenterprise | earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/common/utils.py | Python | apache-2.0 | 12,945 |
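Several of the helpers above are pure string and number utilities whose behaviour is easy to pin down with a few calls. A usage sketch follows; the import name "utils" is a guess based on the file name, not a documented package path.
# Usage sketch; the import name "utils" is an assumption.
import utils
print(utils.SizeAsString(512.0))                # 512.00MB
print(utils.SizeAsString(2048.0))               # 2.00GB
print(utils.NormalizeTargetPath("my/db/"))      # /my/db
print(utils.HtmlEscape('he said "hi" & left'))  # he said &quot;hi&quot; &amp; left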
from runner import StoryRunner
from finder import (find_steps_modules,
find_text_specs,
find_before_all,
find_after_all,
find_before_each,
find_after_each)
from hooks import BeforeAll, AfterAll, BeforeEach, AfterEach
from console import pycukes_console
from pyhistorian import Given, When, Then, DadoQue, Quando, Entao
import console
| hltbra/pycukes | pycukes/__init__.py | Python | mit | 434 |
# -*- coding: utf-8 -*-
from pySupRST.main import sup_rst
from pprint import pprint
c= sup_rst()
a = c.get_tm("RTR_P_DG1")
pprint(a)
| sourceperl/pySupRST | lab/with_sqlalchemy/test.py | Python | mit | 137 |
#!/usr/bin/env python
## Copyright (c) 2012 The WebM project authors. All Rights Reserved.
##
## Use of this source code is governed by a BSD-style license
## that can be found in the LICENSE file in the root of the source
## tree. An additional intellectual property rights grant can be found
## in the file PATENTS. All contributing project authors may
## be found in the AUTHORS file in the root of the source tree.
##
"""Classes for representing diff pieces."""
__author__ = "[email protected]"
import re
class DiffLines(object):
"""A container for one half of a diff."""
def __init__(self, filename, offset, length):
self.filename = filename
self.offset = offset
self.length = length
self.lines = []
self.delta_line_nums = []
def Append(self, line):
l = len(self.lines)
if line[0] != " ":
self.delta_line_nums.append(self.offset + l)
self.lines.append(line[1:])
assert l+1 <= self.length
def Complete(self):
return len(self.lines) == self.length
def __contains__(self, item):
return item >= self.offset and item <= self.offset + self.length - 1
class DiffHunk(object):
"""A container for one diff hunk, consisting of two DiffLines."""
def __init__(self, header, file_a, file_b, start_a, len_a, start_b, len_b):
self.header = header
self.left = DiffLines(file_a, start_a, len_a)
self.right = DiffLines(file_b, start_b, len_b)
self.lines = []
def Append(self, line):
"""Adds a line to the DiffHunk and its DiffLines children."""
if line[0] == "-":
self.left.Append(line)
elif line[0] == "+":
self.right.Append(line)
elif line[0] == " ":
self.left.Append(line)
self.right.Append(line)
elif line[0] == "\\":
# Ignore newline messages from git diff.
pass
else:
assert False, ("Unrecognized character at start of diff line "
"%r" % line[0])
self.lines.append(line)
def Complete(self):
return self.left.Complete() and self.right.Complete()
def __repr__(self):
return "DiffHunk(%s, %s, len %d)" % (
self.left.filename, self.right.filename,
max(self.left.length, self.right.length))
def ParseDiffHunks(stream):
"""Walk a file-like object, yielding DiffHunks as they're parsed."""
file_regex = re.compile(r"(\+\+\+|---) (\S+)")
range_regex = re.compile(r"@@ -(\d+)(,(\d+))? \+(\d+)(,(\d+))?")
hunk = None
while True:
line = stream.readline()
if not line:
break
if hunk is None:
# Parse file names
diff_file = file_regex.match(line)
if diff_file:
if line.startswith("---"):
a_line = line
a = diff_file.group(2)
continue
if line.startswith("+++"):
b_line = line
b = diff_file.group(2)
continue
# Parse offset/lengths
diffrange = range_regex.match(line)
if diffrange:
if diffrange.group(2):
start_a = int(diffrange.group(1))
len_a = int(diffrange.group(3))
else:
start_a = 1
len_a = int(diffrange.group(1))
if diffrange.group(5):
start_b = int(diffrange.group(4))
len_b = int(diffrange.group(6))
else:
start_b = 1
len_b = int(diffrange.group(4))
header = [a_line, b_line, line]
hunk = DiffHunk(header, a, b, start_a, len_a, start_b, len_b)
else:
# Add the current line to the hunk
hunk.Append(line)
# See if the whole hunk has been parsed. If so, yield it and prepare
# for the next hunk.
if hunk.Complete():
yield hunk
hunk = None
# Partial hunks are a parse error
assert hunk is None
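# Minimal usage sketch (illustrative; "changes.diff" is a hypothetical file):
#   with open("changes.diff") as stream:
#       for hunk in ParseDiffHunks(stream):
#           print "%r left lines: %s" % (hunk, hunk.left.delta_line_nums)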
| jian929/stagefright | vp9/omx-components/videocodec/libvpx_internal/libvpx/tools/diff.py | Python | gpl-2.0 | 4,218 |
#!/usr/bin/env python
# coding=utf-8
rship = [
# id, from(sid), to(pid)
['b','a'],
['c','a'],
['c','d'],
['e','c']
]
def add_data(_from,_to):
rship.append([_from,_to])
def search_parent(_id):
pass
def search_son(_id):
pass
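# Illustrative queries, assuming each entry is [from(sid), to(pid)] as the
# comment above suggests:
#   parents of 'c': [to for frm, to in rship if frm == 'c'] -> ['a', 'd']
#   sons of 'a':    [frm for frm, to in rship if to == 'a'] -> ['b', 'c']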
| zhaochl/python-utils | agrith_util/graph/tag_relationship.py | Python | apache-2.0 | 258 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# === This file is part of Calamares - <http://github.com/calamares> ===
#
# Copyright 2014, Philip Müller <[email protected]>
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import libcalamares
import subprocess
from libcalamares.utils import check_target_env_call, target_env_call
from libcalamares.utils import *
def run_mkinitcpio():
""" Runs mkinitcpio with given kernel profile """
kernel = libcalamares.job.configuration['kernel']
check_target_env_call(['mkinitcpio', '-p', kernel])
def run():
""" Calls routine to create kernel initramfs image.
:return:
"""
root_mount_point = libcalamares.globalstorage.value("rootMountPoint")
subprocess.check_call(["cp", "/run/archiso/bootmnt/arch/boot/x86_64/vmlinuz", root_mount_point + "/boot/vmlinuz-linux"])
run_mkinitcpio()
target_env_call(["/usr/bin/cleanup.sh"])
return None
| Archman-OS/calamares-configs | src/modules/initcpio/main.py | Python | gpl-3.0 | 1,527 |
from enum import Enum
__author__ = 'dirkfuchs'
class Format(Enum):
oneLine = 0
oracle = 1
| erget/tnsmaster | tnsnames/format.py | Python | mit | 101 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Parameter file for the plot_Stairway.py program
"""
# String, path to the file containing the summary of the Stairway plot
stairway_file="../Data/YRI_start2_200files_summary"
# Float, Stairway inference "factor" (if genome length was divided by 10 for the stairway inference to reduce computation time, factor=0.1)
factor=0.1
# String, type of plot needed ("year_Ne" for Ne depending on years, or "mut_theta" for theta per site depending on mutation per site)
graph="year_Ne" | lapierreM/Yoruba_demography | Programs/plot/plot_Stairway_parameters.py | Python | lgpl-2.1 | 528 |
from unittest import TestCase
import numpy as np
import keras as ks
import keras.layers as layers
from orcanet.model_builder import change_dropout_rate
class TestDropoutChange(TestCase):
def setUp(self):
def dropout_model(rate_before, rate_after):
inp1 = layers.Input((5, 1))
x1 = layers.Dropout(rate_before)(inp1)
x1 = layers.Dense(5)(x1)
inp2 = layers.Input((5, 1))
x2 = layers.Dropout(rate_before)(inp2)
x = layers.Concatenate(axis=-1)([x1, x2])
x = layers.Dense(5)(x)
out = layers.Dropout(rate_after)(x)
model = ks.models.Model([inp1, inp2], out)
return model
def get_layer_output(model, samples, layer_no=-1):
l_out = ks.backend.function(
model.input + [ks.backend.learning_phase(), ],
[model.layers[layer_no].output])
# output in train mode = 1
layer_output = l_out(samples + [1, ])[0]
return layer_output
def calculate_rate(model, samples, layer_no):
layer_output = get_layer_output(model, samples, layer_no)
rate = np.sum(layer_output == 0)/layer_output.size
return rate
self.calculate_rate = calculate_rate
self.concat_layer_no = 6
self.model_0 = dropout_model(0., 0.)
self.xs = [np.ones((50, 5, 1)), np.ones((50, 5, 1))]
def test_change_dropout_after_concat(self):
model_changed = change_dropout_rate(self.model_0,
before_concat=0.0,
after_concat=0.999)
rate_before_conc = self.calculate_rate(model_changed, self.xs,
self.concat_layer_no)
rate_after_conc = self.calculate_rate(model_changed, self.xs, -1)
print("rate before Concatenate: {}\tafter: {}".format(rate_before_conc,
rate_after_conc))
self.assertLess(rate_before_conc, 0.2)
self.assertGreater(rate_after_conc, 0.6)
def test_change_dropout_before_and_after_concat(self):
model_changed = change_dropout_rate(self.model_0,
before_concat=0.999,
after_concat=0.999)
rate_before_conc = self.calculate_rate(model_changed, self.xs,
self.concat_layer_no)
rate_after_conc = self.calculate_rate(model_changed, self.xs, -1)
print("rate before Concatenate: {}\tafter: {}".format(rate_before_conc,
rate_after_conc))
self.assertGreater(rate_before_conc, 0.6)
self.assertGreater(rate_after_conc, 0.6)
def test_weights_are_copied_over(self):
model_changed = change_dropout_rate(self.model_0,
before_concat=0.999,
after_concat=0.999)
for layer_no in range(len(self.model_0.layers)):
weights_0 = self.model_0.layers[layer_no].get_weights()
weights_changed = model_changed.layers[layer_no].get_weights()
for i in range(len(weights_0)):
self.assertTrue(np.array_equal(weights_0[i], weights_changed[i]))
| ViaFerrata/DL_pipeline_TauAppearance | orcanet/tests/test_model_setup.py | Python | agpl-3.0 | 3,419 |
# -*- coding: utf-8 -*-
class FakeDirectoryNode(object):
def __init__(self):
self.unlocker = None
self.triggered = False
self.waiting_nodes = []
self.removed_nodes = []
self.prior_nodes = []
self.waiting_for_node = None
def unlock(self, owner):
self.unlocker = owner
def trigger_waiting_task(self):
self.triggered = True
def add_waiting_node(self, node, prior_node=False):
self.waiting_nodes.append((node, prior_node))
def remove_waiting_node(self, node):
self.removed_nodes.append(node)
def set_prior(self, node):
self.prior_nodes.append(node)
| Bajoo/client-pc | tests/unit_tests/index/fake_directory_node.py | Python | gpl-3.0 | 665 |
from __future__ import print_function, division
import unittest
import numpy as np
from pyscf.nao import nao
from os.path import dirname, abspath
class KnowValues(unittest.TestCase):
def test_vna_n2(self):
dname = dirname(abspath(__file__))
n = nao(label='n2', cd=dname)
m = 200
dvec,midv = 2*(n.atom2coord[1] - n.atom2coord[0])/m, (n.atom2coord[1] + n.atom2coord[0])/2.0
vgrid = np.tensordot(np.array(range(-m,m+1)), dvec, axes=0) + midv
sgrid = np.array(range(-m,m+1)) * np.sqrt((dvec*dvec).sum())
vna = n.vna(vgrid)
#print(vna.shape, sgrid.shape)
#np.savetxt('vna_n2_0004.txt', np.row_stack((sgrid, vna)).T)
ref = np.loadtxt(dname+'/vna_n2_0004.txt-ref')
for r,d in zip(ref[:,1],vna): self.assertAlmostEqual(r,d)
def test_vna_lih(self):
dname = dirname(abspath(__file__))
n = nao(label='lih', cd=dname)
m = 200
dvec,midv = 2*(n.atom2coord[1] - n.atom2coord[0])/m, (n.atom2coord[1] + n.atom2coord[0])/2.0
vgrid = np.tensordot(np.array(range(-m,m+1)), dvec, axes=0) + midv
sgrid = np.array(range(-m,m+1)) * np.sqrt((dvec*dvec).sum())
#vgrid = np.array([[-1.517908564663352e+00, 1.180550033093826e+00,0.000000000000000e+00]])
vna = n.vna(vgrid)
#for v,r in zip(vna,vgrid):
# print("%23.15e %23.15e %23.15e %23.15e"%(r[0], r[1], r[2], v))
#print(vna.shape, sgrid.shape)
np.savetxt('vna_lih_0004.txt', np.row_stack((sgrid, vna)).T)
ref = np.loadtxt(dname+'/vna_lih_0004.txt-ref')
for r,d in zip(ref[:,1],vna): self.assertAlmostEqual(r,d)
def test_water_vkb(self):
""" This """
from numpy import einsum, array
import os
dname = os.path.dirname(os.path.abspath(__file__))
sv = nao(label='water', cd=dname)
if __name__ == "__main__": unittest.main()
| gkc1000/pyscf | pyscf/nao/test/test_0004_vna.py | Python | apache-2.0 | 1,811 |
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from workspace_tools.export.exporters import Exporter
import re
import os
class IAREmbeddedWorkbench(Exporter):
"""
Exporter class for IAR Systems.
"""
NAME = 'IAR'
TOOLCHAIN = 'IAR'
TARGETS = [
'LPC1768',
'LPC1347',
'LPC11U24',
'LPC11U35_401',
'LPC11U35_501',
#Removed LPCCAPPUCCINO linker file and startup file missing
#'LPCCAPPUCCINO',
'LPC1114',
'LPC1549',
'LPC812',
'LPC4088',
'LPC4088_DM',
'LPC824',
'UBLOX_C027',
'ARCH_PRO',
'K20D50M',
'KL05Z',
'KL25Z',
'KL46Z',
'K22F',
'K64F',
'NUCLEO_F030R8',
'NUCLEO_F031K6',
'NUCLEO_F070RB',
'NUCLEO_F072RB',
'NUCLEO_F091RC',
'NUCLEO_F103RB',
'NUCLEO_F302R8',
'NUCLEO_F303RE',
'NUCLEO_F334R8',
'NUCLEO_F401RE',
'NUCLEO_F411RE',
'NUCLEO_F446RE',
'NUCLEO_L053R8',
'NUCLEO_L073RZ',
'NUCLEO_L152RE',
'NUCLEO_L476RG',
'DISCO_L053C8',
'DISCO_F334C8',
'DISCO_F746NG',
'DISCO_L476VG',
#'STM32F407', Fails to build same for GCC
'MAXWSNENV',
'MAX32600MBED',
'MTS_MDOT_F405RG',
'MTS_MDOT_F411RE',
'MTS_DRAGONFLY_F411RE',
'NRF51822',
'NRF51_DK',
'NRF51_DONGLE',
'DELTA_DFCM_NNN40',
'SEEED_TINY_BLE',
'HRM1017',
'ARCH_BLE',
'MOTE_L152RC',
]
def generate(self):
"""
Generates the project files
"""
sources = []
sources += self.resources.c_sources
sources += self.resources.cpp_sources
sources += self.resources.s_sources
iar_files = IarFolder("", "", [])
for source in sources:
iar_files.insert_file(source)
ctx = {
'name': self.program_name,
'include_paths': self.resources.inc_dirs,
'linker_script': self.resources.linker_script,
'object_files': self.resources.objects,
'libraries': self.resources.libraries,
'symbols': self.get_symbols(),
'source_files': iar_files.__str__(),
'binary_files': self.resources.bin_files,
}
self.gen_file('iar_%s.ewp.tmpl' % self.target.lower(), ctx, '%s.ewp' % self.program_name)
self.gen_file('iar.eww.tmpl', ctx, '%s.eww' % self.program_name)
self.gen_file('iar_%s.ewd.tmpl' % self.target.lower(), ctx, '%s.ewd' % self.program_name)
class IarFolder():
"""
    This is a recursive folder object, used to present the folder structure
    in the IDE the same way it is laid out on disk.
This can be used for uvision as well if you replace the __str__ method.
Example:
files: ./main.cpp, ./apis/I2C.h, ./mbed/common/I2C.cpp
in the project this would look like:
main.cpp
common/I2C.cpp
input:
folder_level : folder path to current folder
folder_name : name of current folder
source_files : list of source_files (all must be in same directory)
"""
def __init__(self, folder_level, folder_name, source_files):
self.folder_level = folder_level
self.folder_name = folder_name
self.source_files = source_files
self.sub_folders = {}
def __str__(self):
"""
        Converts the folder structure to IAR project format.
"""
group_start = ""
group_end = ""
if self.folder_name != "":
group_start = "<group>\n<name>%s</name>\n" %(self.folder_name)
group_end = "</group>\n"
str_content = group_start
#Add files in current folder
if self.source_files:
for src in self.source_files:
str_content += "<file>\n<name>$PROJ_DIR$/%s</name>\n</file>\n" % src
#Add sub folders
if self.sub_folders:
for folder_name in self.sub_folders.iterkeys():
str_content += self.sub_folders[folder_name].__str__()
str_content += group_end
return str_content
def insert_file(self, source_input):
"""
Inserts a source file into the folder tree
"""
if self.source_files:
#All source_files in a IarFolder must be in same directory.
dir_sources = IarFolder.get_directory(self.source_files[0])
#Check if sources are already at their deepest level.
if not self.folder_level == dir_sources:
_reg_exp = r"^" + re.escape(self.folder_level) + r"[/\\]?([^/\\]+)"
folder_name = re.match(_reg_exp, dir_sources).group(1)
self.sub_folders[folder_name] = IarFolder(os.path.join(self.folder_level, folder_name), folder_name, self.source_files)
self.source_files = []
dir_input = IarFolder.get_directory(source_input)
if dir_input == self.folder_level:
self.source_files.append(source_input)
else:
_reg_exp = r"^" + re.escape(self.folder_level) + r"[/\\]?([^/\\]+)"
folder_name = re.match(_reg_exp, dir_input).group(1)
if self.sub_folders.has_key(folder_name):
self.sub_folders[folder_name].insert_file(source_input)
else:
if self.folder_level == "":
#Top level exception
self.sub_folders[folder_name] = IarFolder(folder_name, folder_name, [source_input])
else:
self.sub_folders[folder_name] = IarFolder(os.path.join(self.folder_level, folder_name), folder_name, [source_input])
@staticmethod
def get_directory(file_path):
"""
Returns the directory of the file
"""
return os.path.dirname(file_path)
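# Illustrative use of IarFolder (file names are hypothetical):
#   root = IarFolder("", "", [])
#   root.insert_file("main.cpp")
#   root.insert_file(os.path.join("mbed", "common", "I2C.cpp"))
#   ewp_groups = str(root)  # nested <group>/<file> XML for the .ewp template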
| mnlipp/mbed | workspace_tools/export/iar.py | Python | apache-2.0 | 6,420 |
import pygame
import src.colours as colours
from src.entity import Entity
from pygame.math import Vector2
class Particle(Entity):
def __init__(
self,
pos: Vector2, # Pretty sure this is unused
velocity: Vector2,
angle: float,
angular_velocity: float,
width: int,
height: int,
ttl: int,
colour):
Entity.__init__(self, pos.x, pos.y, width, height, None)
# Not sure why we need to manually set pygame's layer?
# even calling move_to_front didn't work.
self._layer = 1
self.pos = pos
self.image = pygame.Surface((width, height)).convert_alpha()
self.image.fill(colour)
self.rect = self.image.get_rect()
self.rect.x = int(pos.x)
self.rect.y = int(pos.y)
self.velocity = velocity
self.angle = angle
self.angular_velocity = angular_velocity
self.ttl = ttl
def update(self, dt):
self.ttl -= 1
self.rect.x += self.velocity.x
self.rect.y += self.velocity.y
if self.ttl <= 0:
pygame.sprite.Sprite.kill(self)
| joereynolds/Mr-Figs | src/game_object/particle.py | Python | gpl-3.0 | 1,181 |
# -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard Python
datetime module.
"""
from __future__ import unicode_literals
from __future__ import division
__license__ = "Simplified BSD"
import datetime
import string
import time
import sys
import os
import collections
try:
from io import StringIO
except ImportError:
    from StringIO import StringIO  # Python 2 fallback
from six import text_type, binary_type, integer_types
from . import relativedelta
from . import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
class _timelex(object):
def __init__(self, instream):
if isinstance(instream, text_type):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.numchars = '0123456789'
self.whitespace = ' \t\r\n'
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
wordchars = self.wordchars
numchars = self.numchars
whitespace = self.whitespace
while not self.eof:
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
token = nextchar
if nextchar in wordchars:
state = 'a'
elif nextchar in numchars:
state = '0'
elif nextchar in whitespace:
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
seenletters = True
if nextchar in wordchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
if nextchar in numchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
seenletters = True
if nextchar == '.' or nextchar in wordchars:
token += nextchar
elif nextchar in numchars and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
if nextchar == '.' or nextchar in numchars:
token += nextchar
elif nextchar in wordchars and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and
(seenletters or token.count('.') > 1 or token[-1] == '.')):
l = token.split('.')
token = l[0]
for tok in l[1:]:
self.tokenstack.append('.')
if tok:
self.tokenstack.append(tok)
return token
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def next(self):
return self.__next__() # Python 2.x support
def split(cls, s):
return list(cls(s))
split = classmethod(split)
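# Illustrative tokenizer output (approximate, based on the state machine above):
#   _timelex.split("Sep 25 2003") -> ['Sep', ' ', '25', ' ', '2003']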
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo(object):
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "Sept", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year//100*100
def _convert(self, lst):
dct = {}
for i in range(len(lst)):
v = lst[i]
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()]+1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year):
if year < 100:
year += self._century
if abs(year-self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year)
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
class parser(object):
def __init__(self, info=None):
self.info = info or parserinfo()
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError("unknown string format")
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
value = getattr(res, attr)
if value is not None:
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret+relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if isinstance(tzinfos, collections.Callable) or tzinfos and res.tzname in tzinfos:
if isinstance(tzinfos, collections.Callable):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, text_type):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, integer_types):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError("offset must be tzinfo subclass, " \
"tz string, or int offset")
ret = ret.replace(tzinfo=tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
res.second, res.microsecond = _parsems(s[4:])
elif len_li == 8:
# YYYYMMDD
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value%1:
res.minute = int(60*(value%1))
elif idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value_repr = l[i]
value = float(value_repr)
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i == len_l and l[i-2] == ' ' and info.hms(l[i-3]) is not None:
# X h MM or X m SS
idx = info.hms(l[i-3]) + 1
if idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second, res.microsecond = \
_parsems(value_repr)
i += 1
elif i+1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
i += 1
if i < len_l and l[i] == ':':
res.second, res.microsecond = _parsems(l[i+1])
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
mstridx = len(ymd)-1
assert mstridx == -1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1, 1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
                                # Give precedence to day-first, since
# two-digit years is usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
# WTF!?
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
# Python 2.x support: datetimes return their string presentation as
# bytes in 2.x and unicode in 3.x, so it's reasonable to expect that
# the parser will get both kinds. Internally we use unicode only.
if isinstance(timestr, binary_type):
timestr = timestr.decode()
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
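# Illustrative calls (the dates are arbitrary examples):
#   parse("2003-09-25T10:49:41") -> datetime.datetime(2003, 9, 25, 10, 49, 41)
#   parse("10-09-2003", dayfirst=True) -> datetime.datetime(2003, 9, 10, 0, 0)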
class _tzparser(object):
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and
(l[i] in ('+', '-') or l[i][0] in "0123456789")):
if l[i] in ('+', '-'):
# Yes, that's right. See the TZ variable
# documentation.
signal = (1, -1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i])*3600+int(l[i+2])*60)*signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2])*3600*signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';': l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i+1])*-1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i])-1)%7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-', '+'):
signal = (-1, 1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset+int(l[i]))*signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',', '/', 'J', 'M',
'.', '-', ':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i])-1)%7
else:
# year day (zero based)
x.yday = int(l[i])+1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
x.time = int(l[i])*3600+int(l[i+2])*60
i += 2
if i+1 < len_l and l[i+1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2])*3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
def _parsems(value):
"""Parse a I[.F] seconds value into (seconds, microseconds)."""
if "." not in value:
return int(value), 0
else:
i, f = value.split(".")
return int(i), int(f.ljust(6, "0")[:6])
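# Illustrative results:
#   _parsems("41")   -> (41, 0)
#   _parsems("41.5") -> (41, 500000)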
# vim:ts=4:sw=4:et
| klahnakoski/cloc | cloc/util/vendor/dateutil/parser.py | Python | mpl-2.0 | 33,572 |
#
# GeoTiler - library to create maps using tiles from a map provider
#
# Copyright (C) 2014 by Artur Wroblewski <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice (restored, based on setup.py file from
# https://github.com/stamen/modestmaps-py):
#
# Copyright (C) 2007-2013 by Michal Migurski and other contributors
# License: BSD
#
from geotiler.geo import Transformation, MercatorProjection, zoom_to
import unittest
class TransformationTestCase(unittest.TestCase):
def test_1(self):
t = Transformation(1, 0, 0, 0, 1, 0)
p = 1, 1
pt = t.transform(p)
self.assertEqual(1.0, pt[0])
self.assertEqual(1.0, pt[1])
ptt = t.untransform(pt)
self.assertEqual(1.0, ptt[0])
self.assertEqual(1.0, ptt[1])
def test_2(self):
t = Transformation(0, 1, 0, 1, 0, 0)
p = 0, 1
pt = t.transform(p)
self.assertEqual(1.0, pt[0])
self.assertEqual(0.0, pt[1])
ptt = t.untransform(pt)
self.assertEqual(0.0, ptt[0])
self.assertEqual(1.0, ptt[1])
def test_3(self):
t = Transformation(1, 0, 1, 0, 1, 1)
p = 0, 0
pt = t.transform(p)
self.assertEqual(1.0, pt[0])
self.assertEqual(1.0, pt[1])
ptt = t.untransform(pt)
self.assertEqual(0.0, ptt[0])
self.assertEqual(0.0, ptt[1])
class MercatorProjectionTestCase(unittest.TestCase):
def test_1(self):
m = MercatorProjection(10)
coord = m.rev_geocode((0, 0))
self.assertAlmostEqual(0.0, coord[0])
self.assertAlmostEqual(0.0, coord[1])
pt = m.geocode((0, 0), 10)
self.assertAlmostEqual(0.0, pt[0])
self.assertAlmostEqual(0.0, pt[1])
def test_2(self):
m = MercatorProjection(10)
coord = m.rev_geocode((-122, 37))
self.assertAlmostEqual(-2.129, coord[0], 3)
self.assertAlmostEqual(0.696, coord[1], 3)
pt = m.geocode((-2.129, 0.696), 10.000)
self.assertAlmostEqual(-121.983, pt[0], 3)
self.assertAlmostEqual(37.001, pt[1], 3)
class ZoomTestCase(unittest.TestCase):
"""
Test tile coordinates zoom functions.
"""
def test_zoom(self):
"""
Test zooming tile coordinates
"""
coord = zoom_to((1, 0), 2, 3)
self.assertEqual((2, 0), coord)
coord = zoom_to((1, 0), 2, 1)
self.assertEqual((0.5, 0), coord)
# vim: sw=4:et:ai
| pikeBishop/OMP_gpxReport | geotiler/tests/test_geo.py | Python | gpl-2.0 | 3,153 |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# pylint: disable=maybe-no-member, invalid-name
"""Test request import and updates."""
import csv
from collections import OrderedDict
from cStringIO import StringIO
from itertools import izip
from flask.json import dumps
from ggrc import db
from ggrc import models
from ggrc.converters import errors
from integration.ggrc.models import factories
from integration.ggrc import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestAssessmentImport(TestCase):
"""Basic Assessment import tests with.
This test suite should test new Assessment imports, exports, and updates.
The main focus of these tests is checking error messages for invalid state
transitions.
"""
def setUp(self):
"""Set up for Assessment test cases."""
super(TestAssessmentImport, self).setUp()
self.client.get("/login")
def test_import_assessments_with_templates(self):
"""Test importing of assessments with templates."""
self.import_file("assessment_template_no_warnings.csv")
response = self.import_file("assessment_with_templates.csv")
self._check_csv_response(response, {})
assessment = models.Assessment.query.filter(
models.Assessment.slug == "A 4").first()
values = set(v.attribute_value for v in assessment.custom_attribute_values)
self.assertIn("abc", values)
self.assertIn("2015-07-15", values)
def _test_assessment_users(self, asmt, users):
""" Test that all users have correct roles on specified Assessment"""
verification_errors = ""
for user_name, expected_types in users.items():
try:
user = models.Person.query.filter_by(name=user_name).first()
rel = models.Relationship.find_related(asmt, user)
if expected_types:
self.assertNotEqual(
rel, None,
"User {} is not mapped to {}".format(user.email, asmt.slug))
self.assertIn("AssigneeType", rel.relationship_attrs)
self.assertEqual(
set(rel.relationship_attrs[
"AssigneeType"].attr_value.split(",")),
expected_types
)
else:
self.assertEqual(
rel, None,
"User {} is mapped to {}".format(user.email, asmt.slug))
except AssertionError as error:
verification_errors += "\n\nChecks for Users-Assessment mapping "\
"failed for user '{}' with:\n{}".format(user_name, str(error))
self.assertEqual(verification_errors, "", verification_errors)
def test_assessment_full_no_warnings(self):
""" Test full assessment import with no warnings
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
"""
response = self.import_file("assessment_full_no_warnings.csv")
self._check_csv_response(response, {})
# Test first Assessment line in the CSV file
asmt_1 = models.Assessment.query.filter_by(slug="Assessment 1").first()
users = {
"user 1": {"Assessor"},
"user 2": {"Assessor", "Creator"}
}
self._test_assessment_users(asmt_1, users)
self.assertEqual(asmt_1.status, models.Assessment.START_STATE)
# Test second Assessment line in the CSV file
asmt_2 = models.Assessment.query.filter_by(slug="Assessment 2").first()
users = {
"user 1": {"Assessor"},
"user 2": {"Creator"},
"user 3": {},
"user 4": {},
"user 5": {},
}
self._test_assessment_users(asmt_2, users)
self.assertEqual(asmt_2.status, models.Assessment.PROGRESS_STATE)
audit = [obj for obj in asmt_1.related_objects() if obj.type == "Audit"][0]
self.assertEqual(audit.context, asmt_1.context)
evidence = models.Document.query.filter_by(title="some title 2").first()
self.assertEqual(audit.context, evidence.context)
def test_assessment_import_states(self):
""" Test Assessment state imports
These tests are an intermediate part for zucchini release and will be
updated in the next release.
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=299569476
"""
self.import_file("assessment_full_no_warnings.csv")
response = self.import_file("assessment_update_intermediate.csv")
expected_errors = {
"Assessment": {
"block_errors": set(),
"block_warnings": set(),
"row_errors": set(),
"row_warnings": set(),
}
}
self._check_csv_response(response, expected_errors)
assessments = {r.slug: r for r in models.Assessment.query.all()}
self.assertEqual(assessments["Assessment 60"].status,
models.Assessment.START_STATE)
self.assertEqual(assessments["Assessment 61"].status,
models.Assessment.PROGRESS_STATE)
self.assertEqual(assessments["Assessment 62"].status,
models.Assessment.DONE_STATE)
self.assertEqual(assessments["Assessment 63"].status,
models.Assessment.FINAL_STATE)
self.assertEqual(assessments["Assessment 64"].status,
models.Assessment.FINAL_STATE)
self.assertEqual(assessments["Assessment 3"].status,
models.Assessment.FINAL_STATE)
self.assertEqual(assessments["Assessment 4"].status,
models.Assessment.FINAL_STATE)
# Check that there is only one attachment left
asmt1 = assessments["Assessment 1"]
self.assertEqual(len(asmt1.documents), 1)
    # Check that there are only the two new URLs present in assessment 1
url_titles = set(obj.title for obj in asmt1.related_objects()
if isinstance(obj, models.Document))
self.assertEqual(url_titles, set(["a.b.com", "c d com"]))
def test_assessment_warnings_errors(self):
""" Test full assessment import with warnings and errors
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=889865936
"""
self.import_file("assessment_full_no_warnings.csv")
response = self.import_file("assessment_with_warnings_and_errors.csv")
expected_errors = {
"Assessment": {
"block_errors": set([]),
"block_warnings": {
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="error description - non existing column will "
"be ignored"
),
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="actual error message"
),
errors.UNKNOWN_COLUMN.format(
line=2,
column_name="map:project"
),
},
"row_errors": {
errors.MISSING_VALUE_ERROR.format(
line=19,
column_name="Audit"
),
errors.DUPLICATE_VALUE_IN_CSV.format(
line_list="20, 22",
column_name="Code",
value="Assessment 22",
s="",
ignore_lines="22",
),
},
"row_warnings": {
errors.UNKNOWN_OBJECT.format(
line=19,
object_type="Audit",
slug="not existing"
),
errors.WRONG_VALUE_DEFAULT.format(
line=20,
column_name="State",
value="open",
),
},
}
}
self._check_csv_response(response, expected_errors)
def test_mapping_control_through_snapshot(self):
"Test for add mapping control on assessment"
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = models.Revision.query.filter(
models.Revision.resource_id == control.id,
models.Revision.resource_type == control.__class__.__name__
).order_by(
models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertFalse(db.session.query(
models.Relationship.get_related_query(
assessment, models.Snapshot()
).exists()).first()[0])
self.import_data(OrderedDict([
("object_type", "Assessment"),
("Code*", assessment.slug),
("map:control", control.slug),
]))
self.assertTrue(db.session.query(
models.Relationship.get_related_query(
assessment, models.Snapshot()
).exists()).first()[0])
def test_create_new_assessment_with_mapped_control(self):
"Test for creation assessment with mapped controls"
audit = factories.AuditFactory()
control = factories.ControlFactory()
revision = models.Revision.query.filter(
models.Revision.resource_id == control.id,
models.Revision.resource_type == control.__class__.__name__
).order_by(
models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertFalse(db.session.query(
models.Relationship.get_related_query(
models.Assessment(), models.Snapshot()
).exists()).first()[0])
slug = "TestAssessment"
self.import_data(OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Audit*", audit.slug),
("Assignees*", models.Person.query.all()[0].email),
("Creators", models.Person.query.all()[0].email),
("Title", "Strange title"),
("map:control", control.slug),
]))
assessment = models.Assessment.query.filter(
models.Assessment.slug == slug
).first()
self.assertTrue(db.session.query(models.Relationship.get_related_query(
assessment, models.Snapshot()).exists()).first()[0]
)
def test_create_import_assignee(self):
"Test for creation assessment with mapped assignees"
audit = factories.AuditFactory()
name = "test_name"
email = "[email protected]"
assignee_id = factories.PersonFactory(name=name, email=email).id
slug = "TestAssessment"
self.import_data(OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Audit*", audit.slug),
("Assignees*", email),
("Creators", models.Person.query.all()[0].email),
("Title", "Strange title"),
]))
assessment = models.Assessment.query.filter(
models.Assessment.slug == slug
).first()
self.assertEqual([assignee_id], [i.id for i in assessment.assessors])
def test_create_import_creators(self):
"Test for creation assessment with mapped creator"
audit = factories.AuditFactory()
name = "test_name"
email = "[email protected]"
creator_id = factories.PersonFactory(name=name, email=email).id
slug = "TestAssessment"
self.import_data(OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Audit*", audit.slug),
("Assignees*", models.Person.query.all()[0].email),
("Creators", email),
("Title", "Strange title"),
]))
assessment = models.Assessment.query.filter(
models.Assessment.slug == slug
).first()
self.assertEqual([creator_id], [i.id for i in assessment.creators])
def test_update_import_creators(self):
"Test for creation assessment with mapped creator"
slug = "TestAssessment"
assessment = factories.AssessmentFactory(slug=slug)
name = "test_name"
email = "[email protected]"
creator_id = factories.PersonFactory(name=name, email=email).id
self.assertNotEqual([creator_id], [i.id for i in assessment.creators])
self.import_data(OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Creators", email),
]))
assessment = models.Assessment.query.filter(
models.Assessment.slug == slug
).first()
self.assertEqual([creator_id], [i.id for i in assessment.creators])
def test_update_import_assignee(self):
"Test for creation assessment with mapped creator"
slug = "TestAssessment"
assessment = factories.AssessmentFactory(slug=slug)
name = "test_name"
email = "[email protected]"
assignee_id = factories.PersonFactory(name=name, email=email).id
self.assertNotEqual([assignee_id], [i.id for i in assessment.assessors])
self.import_data(OrderedDict([
("object_type", "Assessment"),
("Code*", slug),
("Assignees", email),
]))
assessment = models.Assessment.query.filter(
models.Assessment.slug == slug
).first()
self.assertEqual([assignee_id], [i.id for i in assessment.assessors])
class TestAssessmentExport(TestCase):
"""Test Assessment object export."""
def setUp(self):
""" Set up for Assessment test cases """
super(TestAssessmentExport, self).setUp()
self.client.get("/login")
self.headers = ObjectGenerator.get_header()
def export_csv(self, data):
return self.client.post("/_service/export_csv", data=dumps(data),
headers=self.headers)
def test_simple_export(self):
""" Test full assessment export with no warnings
CSV sheet:
https://docs.google.com/spreadsheets/d/1Jg8jum2eQfvR3kZNVYbVKizWIGZXvfqv3yQpo2rIiD8/edit#gid=704933240&vpid=A7
"""
self.import_file("assessment_full_no_warnings.csv")
data = [{
"object_name": "Assessment",
"filters": {
"expression": {}
},
"fields": "all",
}]
response = self.export_csv(data)
self.assertIn(u"\u5555", response.data.decode("utf8"))
def assertColumnExportedValue(self, value, instance, column):
"Assertion checks is value equal to exported instance column value."
data = [{
"object_name": instance.__class__.__name__,
"fields": "all",
"filters": {
"expression": {
"text": str(instance.id),
"op": {
"name": "text_search",
}
},
},
}]
response = self.export_csv(data)
keys, vals = response.data.strip().split("\n")[9:11]
keys = next(csv.reader(StringIO(keys), delimiter=","), [])
vals = next(csv.reader(StringIO(vals), delimiter=","), [])
instance_dict = dict(izip(keys, vals))
self.assertEqual(value, instance_dict[column])
def test_export_assesments_without_map_control(self):
"""Test export assesment without related control instance"""
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = models.Revision.query.filter(
models.Revision.resource_id == control.id,
models.Revision.resource_type == control.__class__.__name__
).order_by(
models.Revision.id.desc()
).first()
factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
self.assertColumnExportedValue("", assessment, "map:control")
def test_export_assesments_with_map_control(self):
"""Test export assesment with related control instance
relation snapshot -> assessment
"""
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = models.Revision.query.filter(
models.Revision.resource_id == control.id,
models.Revision.resource_type == control.__class__.__name__
).order_by(
models.Revision.id.desc()
).first()
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
factories.RelationshipFactory(source=snapshot, destination=assessment)
self.assertColumnExportedValue(control.slug, assessment, "map:control")
def test_export_assesments_with_map_control_mirror_relation(self):
"""Test export assesment with related control instance
relation assessment -> snapshot
"""
audit = factories.AuditFactory()
assessment = factories.AssessmentFactory(audit=audit)
factories.RelationshipFactory(source=audit, destination=assessment)
control = factories.ControlFactory()
revision = models.Revision.query.filter(
models.Revision.resource_id == control.id,
models.Revision.resource_type == control.__class__.__name__
).order_by(
models.Revision.id.desc()
).first()
snapshot = factories.SnapshotFactory(
parent=audit,
child_id=control.id,
child_type=control.__class__.__name__,
revision_id=revision.id
)
db.session.commit()
factories.RelationshipFactory(destination=snapshot, source=assessment)
self.assertColumnExportedValue(control.slug, assessment, "map:control")
def test_export_assessments_with_filters_and_conflicting_ca_names(self):
"""Test exporting assessments with conflicting custom attribute names."""
self.import_file(u"assessment_template_no_warnings.csv")
self.import_file(u"assessment_with_templates.csv")
# also create an object level custom attribute with a name that clashes
# with a name of a "regular" attribute
assessment = models.Assessment.query.filter(
models.Assessment.slug == u"A 2").first()
cad = models.CustomAttributeDefinition(
attribute_type=u"Text",
title=u"ca title",
definition_type=u"assessment",
definition_id=assessment.id
)
db.session.add(cad)
db.session.commit()
data = [{
"object_name": "Assessment",
"fields": ["slug", "title", "description", "status"],
"filters": {
"expression": {
"left": {
"left": "code",
"op": {"name": "~"},
"right": "A 2"
},
"op": {"name": "AND"},
"right": {
"left": "title",
"op": {"name": "~"},
"right": "no template Assessment"
}
},
"keys": ["code", "title", "status"],
"order_by": {
"keys": [],
"order": "",
"compare": None
}
}
}]
response = self.export_csv(data)
self.assertIn(u"No template Assessment 2", response.data)
| AleksNeStu/ggrc-core | test/integration/ggrc/converters/test_import_assessments.py | Python | apache-2.0 | 19,212 |
# -*- coding: utf-8 -*-
from openerp import api, fields, models
class AccountCommonAccountReport(models.TransientModel):
_name = 'account.common.account.report'
_description = 'Account Common Account Report'
_inherit = "account.common.report"
display_account = fields.Selection([('all','All'), ('movement','With movements'),
('not_zero','With balance is not equal to 0'),],
string='Display Accounts', required=True, default='movement')
@api.multi
def pre_print_report(self, data):
data['form'].update(self.read(['display_account'])[0])
return data
| vileopratama/vitech | src/addons/account/wizard/account_report_common_account.py | Python | mit | 679 |
'''
Created on Aug 6, 2014
@author: Amit
'''
if __name__ == '__main__':
pass | amitgupta151/tweetstorm | statistics.py | Python | gpl-2.0 | 82 |
def square_dict(num):
return {n: n * n for n in range(1, int(num) + 1)}
# py.test exercise_10_27_16.py --cov=exercise_10_27_16.py --cov-report=html
def test_square_dict():
assert square_dict(3) == {1: 1, 2: 4, 3: 9}
assert square_dict(0) == {}
assert square_dict(-1) == {}
if __name__ == '__main__':
print(square_dict(input("Number: ")))
| JSBCCA/pythoncode | exercises/exercise_10_27_16.py | Python | mit | 362 |
"""
Video Tag
---------
This implements a Liquid-style video tag for Pelican,
based on the octopress video tag [1]_
Syntax
------
{% video url/to/video [url/to/video] [url/to/video] [width height] [url/to/poster] %}
Example
-------
{% video http://site.com/video.mp4 720 480 http://site.com/poster-frame.jpg %}
Output
------
<video width='720' height='480' preload='none' controls poster='http://site.com/poster-frame.jpg'>
<source src='http://site.com/video.mp4' type='video/mp4; codecs=\"avc1.42E01E, mp4a.40.2\"'/>
</video>
[1] https://github.com/imathis/octopress/blob/master/plugins/video_tag.rb
"""
import os
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% video url/to/video [url/to/video] [url/to/video] [width height] [url/to/poster] %}"
VIDEO = re.compile(r'(/\S+|https?:\S+)(\s+(/\S+|https?:\S+))?(\s+(/\S+|https?:\S+))?(\s+(\d+)\s(\d+))?(\s+(/\S+|https?:\S+))?')
VID_TYPEDICT = {'.mp4':"type='video/mp4; codecs=\"avc1.42E01E, mp4a.40.2\"'",
'.ogv':"type='video/ogg; codecs=theora, vorbis'",
'.webm':"type='video/webm; codecs=vp8, vorbis'"}
@LiquidTags.register('video')
def video(preprocessor, tag, markup):
videos = []
width = None
height = None
poster = None
match = VIDEO.search(markup)
if match:
groups = match.groups()
videos = [g for g in groups[0:6:2] if g]
width = groups[6]
height = groups[7]
poster = groups[9]
if any(videos):
video_out = "<video width='{width}' height='{height}' preload='none' controls poster='{poster}'>".format(width=width, height=height, poster=poster)
for vid in videos:
base, ext = os.path.splitext(vid)
if ext not in VID_TYPEDICT:
raise ValueError("Unrecognized video extension: "
"{0}".format(ext))
video_out += ("<source src='{0}' "
"{1}>".format(vid, VID_TYPEDICT[ext]))
video_out += "</video>"
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return video_out
#----------------------------------------------------------------------
# This import allows the video tag to be a Pelican plugin
from liquid_tags import register
| wsy1607/Data-Analysis-of-Campus-Crime-Index | website/plugins/liquid_tags/video.py | Python | mit | 2,289 |
import analysis as an
import exchange_API as API
import portfolio as pf
import strategy as st
import database as db
import logging
def backtester_update(df, candle_data, index):
"""
Copies the candle_data row (dataframe) and adds it to the
primary dataframe. The following columns are expected:
[date, close, high, low, open, quoteVolume, volume, weightedAverage]. If
these columns do not exist in the primary dataframe they will be created.
If the columns do not exist in the candle dataframe an error will be thrown.
Parameters
----------
df : primary dataframe
candle_data : A single row dataframe containing candlestick data
index : index of the primary dataframe that the candle data is being copied to
Returns
-------
The results get written to the primary dataframe in the index row
"""
# Adds candle data to dataframe
df.at[index, 'date'] = candle_data['date']
df.at[index, 'close'] = candle_data['close']
df.at[index, 'high'] = candle_data['high']
df.at[index, 'low'] = candle_data['low']
df.at[index, 'open'] = candle_data['open']
df.at[index, 'quoteVolume'] = candle_data['quoteVolume']
df.at[index, 'volume'] = candle_data['volume']
df.at[index, 'weightedAverage'] = candle_data['weightedAverage']
# slow
# df = df.append(candle_data, ignore_index=True)
logging.debug('Candle data written to primary dataframe. Index {}'.format(index))
def run_backtest(start_time, end_time, period, currency_pair, strats,
instrument_1_qty, instrument_2_qty, results_filename):
"""
Primary loop for running a backtest. This function triggers API calls to the
exchange, initializes the portfolio, drip feeds data into the
main dataframe, updates the strategy and portfolio objects, and triggers
data recording and data analysis at the end of a backtest.
Parameters
----------
start_time : the start time of the historic data in MM-DD-YYYY HH:MM format. GMT timezone
end_time : the end time of the historic data in MM-DD-YYYY HH:MM format. GMT timezone
period : period between datapoints
currency_pair : two instruments to be trading between
strats : nested dictionary of all buy and sell criteria, and technicals
instrument_1_qty : starting quantity of instrument 1
instrument_2_qty : starting quantity of instrument 2
results_filename : filename to save the results of the backtest
"""
# imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# removing slice copy warning
pd.options.mode.chained_assignment = None
# requesting candle data
candle_data = API.request_candlesticks(currency_pair, start_time, end_time, period)
# candle_data = read_data(filename)
# Starting with blank dataframe
df = pd.DataFrame(index=[], columns=[])
# Initializing technicals, strategy, portfolio. Adding columns and initial values
portfolio = pf.Portfolio(df, instrument_1_qty, instrument_2_qty)
# Drip-feed candle data to avoid look-ahead bias
for index, row in candle_data.iterrows():
# Updating backtester, technicals, strategy, portfolio
backtester_update(df, row, index)
st.strategy_update(df, index, strats)
portfolio.update(df, index)
# Analyze final dataframe
stats = an.stats_update(df, start_time, end_time, period, instrument_1_qty,
instrument_2_qty)
an.stats_print(df, stats)
an.plotter(df)
# writing all backtest data
db.write_data(df, filename = results_filename)
logging.debug('Backtest complete')
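# Hedged usage sketch (not part of the original module). The argument values
# below are hypothetical; 'strats' must be the nested buy/sell/technicals
# dict that strategy.strategy_update expects:
#   run_backtest(start_time='01-01-2017 00:00', end_time='02-01-2017 00:00',
#                period=300, currency_pair='USDT_BTC', strats=my_strats,
#                instrument_1_qty=1000, instrument_2_qty=0,
#                results_filename='backtest_results.csv')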
| Shutch/Cralgo | cralgo/backtester.py | Python | mit | 3,655 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import datetime
from turbogears.database import session
from bkr.server.model import TaskStatus, TaskResult, LogRecipe
from bkr.inttest import data_setup
from bkr.inttest.assertions import assert_datetime_within
from bkr.inttest.server.selenium import XmlRpcTestCase
class RecipesXmlRpcTest(XmlRpcTestCase):
def setUp(self):
with session.begin():
self.lc = data_setup.create_labcontroller()
self.lc.user.password = u'logmein'
self.server = self.get_server()
self.server.auth.login_password(self.lc.user.user_name, u'logmein')
# https://bugzilla.redhat.com/show_bug.cgi?id=817518
def test_by_log_server_only_returns_completed_recipesets(self):
with session.begin():
dt = data_setup.create_distro_tree()
completed_recipe = data_setup.create_recipe(distro_tree=dt)
incomplete_recipe = data_setup.create_recipe(distro_tree=dt)
job = data_setup.create_job_for_recipes(
[completed_recipe, incomplete_recipe])
job.recipesets[0].lab_controller = self.lc
data_setup.mark_recipe_running(incomplete_recipe,
system=data_setup.create_system(lab_controller=self.lc))
data_setup.mark_recipe_complete(completed_recipe,
system=data_setup.create_system(lab_controller=self.lc))
result = self.server.recipes.by_log_server(self.lc.fqdn)
self.assertEqual(result, [])
# https://bugzilla.redhat.com/show_bug.cgi?id=962901
def test_by_log_server_skips_recently_completed_recipes(self):
with session.begin():
recently_completed = data_setup.create_completed_job(
lab_controller=self.lc, finish_time=datetime.datetime.utcnow())
completed_yesterday = data_setup.create_completed_job(
lab_controller=self.lc, finish_time=datetime.datetime.utcnow()
- datetime.timedelta(days=1))
result = self.server.recipes.by_log_server(self.lc.fqdn)
self.assertEqual(result, [completed_yesterday.recipesets[0].recipes[0].id])
#https://bugzilla.redhat.com/show_bug.cgi?id=1293010
def test_by_log_server_skips_deleted_recipes(self):
with session.begin():
job = data_setup.create_completed_job(lab_controller=self.lc,
finish_time=datetime.datetime.utcnow() - datetime.timedelta(minutes=2))
job.soft_delete()
result = self.server.recipes.by_log_server(self.lc.fqdn)
self.assertEqual(result, [])
def test_install_done_updates_resource_fqdn(self):
with session.begin():
distro_tree = data_setup.create_distro_tree()
recipe = data_setup.create_recipe(distro_tree=distro_tree)
guestrecipe = data_setup.create_guestrecipe(host=recipe,
distro_tree=distro_tree)
data_setup.create_job_for_recipes([recipe, guestrecipe])
data_setup.mark_recipe_running(recipe)
data_setup.mark_recipe_waiting(guestrecipe)
fqdn = 'theguestname'
result = self.server.recipes.install_done(guestrecipe.id, fqdn)
self.assertEqual(result, fqdn)
with session.begin():
session.expire(guestrecipe.resource)
self.assertEqual(guestrecipe.resource.fqdn, fqdn)
# https://bugzilla.redhat.com/show_bug.cgi?id=879146
def test_install_done_preserves_system_resource_fqdn(self):
with session.begin():
distro_tree = data_setup.create_distro_tree()
recipe = data_setup.create_recipe(distro_tree=distro_tree)
system = data_setup.create_system(lab_controller=self.lc)
initial_fqdn = system.fqdn
data_setup.create_job_for_recipes([recipe])
data_setup.mark_recipe_waiting(recipe, system=system)
self.assertEqual(recipe.resource.fqdn, initial_fqdn)
result = self.server.recipes.install_done(recipe.id, 'somename')
self.assertEqual(result, initial_fqdn)
with session.begin():
session.expire(recipe.resource)
self.assertEqual(recipe.resource.fqdn, initial_fqdn)
def test_install_start(self):
with session.begin():
system = data_setup.create_system(lab_controller=self.lc)
recipe = data_setup.create_recipe()
data_setup.create_job_for_recipes([recipe])
data_setup.mark_recipe_waiting(recipe, system=system)
self.server.recipes.install_start(recipe.id)
with session.begin():
session.expire_all()
assert_datetime_within(recipe.installation.install_started,
tolerance=datetime.timedelta(seconds=10),
reference=datetime.datetime.utcnow())
assert_datetime_within(recipe.watchdog.kill_time,
tolerance=datetime.timedelta(seconds=10),
reference=datetime.datetime.utcnow() + datetime.timedelta(hours=3))
def test_change_files(self):
with session.begin():
job = data_setup.create_completed_job()
recipe = job.recipesets[0].recipes[0]
# beaker-transfer calls something like this, after it finishes copying
# the logs from the LC cache to the archive server
self.server.recipes.change_files(recipe.id,
'http://archive.example.com/beaker-logs',
'/var/www/html/beaker-logs')
with session.begin():
session.expire_all()
# The actual value of .server and .basepath will depend on the date
# and database IDs, so let's just check that it starts with the new
# expected location.
for log in [recipe.logs[0], recipe.tasks[0].logs[0], recipe.tasks[0].results[0].logs[0]]:
self.assert_(log.server.startswith('http://archive.example.com/beaker-logs/'), log.server)
self.assert_(log.basepath.startswith('/var/www/html/beaker-logs/'), log.basepath)
def test_gets_logs(self):
with session.begin():
system = data_setup.create_system(lab_controller=self.lc)
recipe = data_setup.create_recipe()
recipe.logs.append(LogRecipe(filename=u'test.log'))
data_setup.create_job_for_recipes([recipe])
logs = self.server.recipes.files(recipe.id)
self.assertEqual(len(logs), 1)
self.assertEqual(logs[0]['filename'], u'test.log')
# https://bugzilla.redhat.com/show_bug.cgi?id=963492
def test_duplicate_logs_are_filtered_out(self):
# Even if the db contains multiple rows referencing the same filename
# (which it shouldn't) we want recipe.files() to filter those out
# before returning them, to avoid breaking beaker-transfer.
with session.begin():
job = data_setup.create_running_job()
recipe = job.recipesets[0].recipes[0]
recipe.logs.extend([
LogRecipe(path=u'/', filename=u'imadupe.log'),
LogRecipe(path=u'/', filename=u'imadupe.log'),
])
logs = self.server.recipes.files(recipe.id)
self.assertEqual(len(logs), 1)
self.assertEqual(logs[0]['filename'], u'imadupe.log')
| jtoppins/beaker | IntegrationTests/src/bkr/inttest/server/selenium/test_recipes_xmlrpc.py | Python | gpl-2.0 | 7,534 |
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag.annotation import Type
from ztag.annotation import Manufacturer
from ztag import protocols
import ztag.test
class FtpOverlandStorage(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
version_re = re.compile(
"ProFTPD (\d+\.\d+\.\d+)([a-z])? Server",
re.IGNORECASE
)
tests = {
"FtpOverlandStorage_1": {
"global_metadata": {
"device_type": Type.NAS,
"manufacturer": Manufacturer.OVERLAND_STORAGE,
"product": "Snap Appliance"
},
"local_metadata": {
"product": "ProFTPD",
"version": "1.2.9"
}
}
}
def process(self, obj, meta):
banner = obj["banner"]
if "(Snap Appliance FTP Server)" in banner:
meta.global_metadata.device_type = Type.NAS
meta.global_metadata.manufacturer = Manufacturer.OVERLAND_STORAGE
meta.global_metadata.product = "Snap Appliance"
if banner.startswith("220 ProFTPD"):
meta.local_metadata.product = "ProFTPD"
version = self.version_re.search(banner).group(1)
meta.local_metadata.version = version
rev = self.version_re.search(banner).group(2)
meta.local_metadata.revision = rev
return meta
""" Tests
"220 ProFTPD 1.2.9 Server (Snap Appliance FTP Server) [SNAP2252876.spec.local]\r\n"
"220 ProFTPD 1.2.9 Server (Snap Appliance FTP Server) [HAFServer.uconn.edu]\r\n"
"""
| zmap/ztag | ztag/annotations/FtpOverlandStorage.py | Python | apache-2.0 | 1,698 |
"""
Robust MLR via iteratively reweighted least squares.
"""
import numpy as np
from utide.utilities import Bunch
# Weighting functions:
def andrews(r):
r = np.abs(r)
    r = np.maximum(np.sqrt(np.spacing(1)), r)
w = (r < np.pi) * np.sin(r) / r
return w
def bisquare(r):
r = np.abs(r)
w = (r < 1) * (1 - r ** 2) ** 2
return w
def cauchy(r):
r = np.abs(r)
w = 1 / (1 + r ** 2)
return w
def fair(r):
w = 1 / (1 + np.abs(r))
return w
def huber(r):
    w = 1 / np.maximum(1, np.abs(r))
return w
def logistic(r):
r = np.abs(r)
    r = np.maximum(np.sqrt(np.spacing(1)), r)
w = np.tanh(r) / r
return w
def ols(r):
w = np.ones(len(r))
return w
def talwar(r):
w = (np.abs(r) < 1).astype(float)
return w
def welsch(r):
r = np.abs(r)
w = np.exp(-(r ** 2))
return w
wfuncdict = dict(
andrews=andrews,
bisquare=bisquare,
cauchy=cauchy,
fair=fair,
huber=huber,
logistic=logistic,
ols=ols,
talwar=talwar,
welsch=welsch,
)
tune_defaults = {
"andrews": 1.339,
"bisquare": 4.685,
"cauchy": 2.385,
"fair": 1.400,
"huber": 1.345,
"logistic": 1.205,
"ols": 1,
"talwar": 2.795,
"welsch": 2.985,
}
def sigma_hat(x):
"""
Robust estimate of standard deviation based on medians.
"""
# The center could be based on the mean or some other function.
return np.median(np.abs(x - np.median(x))) / 0.6745
def leverage(x):
"""
Calculate leverage as the diagonal of the "Hat" matrix of the
model matrix, x.
"""
# The Hat is x times its pseudo-inverse.
# In einum, the diagonal is calculated for each row of x
# and column of pinv as the dot product of column j of x.T
# and column j of pinv; hence the 'j' in the output means
# *don't* sum over j.
hdiag = np.einsum("ij, ij -> j", x.T, np.linalg.pinv(x))
# This should be real and positive, but with floating point
# arithmetic the imaginary part is not exactly zero.
return np.abs(hdiag)
def r_normed(R, rfac):
"""
Normalized residuals from raw residuals and a multiplicative factor.
"""
return rfac * R / sigma_hat(R)
def robustfit(
X,
y,
weight_function="bisquare",
tune=None,
rcond=1,
tol=0.001,
maxit=50,
):
"""
Multiple linear regression via iteratively reweighted least squares.
Parameters
----------
X : ndarray (n, p)
MLR model with `p` parameters (independent variables) at `n` times
y : ndarray (n,)
dependent variable
weight_function : string, optional
name of weighting function
tune : None or float, optional
Tuning parameter for normalizing residuals in weight calculation;
larger numbers *decrease* the sensitivity to outliers. If `None`,
a default will be provided based on the `weight_function`.
rcond : float, optional
minimum condition number parameter for `np.linalg.lstsq`
tol : float, optional
When the fractional reduction in mean squared weighted residuals
is less than `tol`, the iteration stops.
maxit : integer, optional
Maximum number of iterations.
Returns
-------
rf : `utide.utilities.Bunch`
- rf.b: model coefficients of the solution
- rf.w: weights used for the solution
- rf.s: singular values for each model component
- rf.rms_resid: rms residuals (unweighted) from the fit
- rf.leverage: sensitivity of the OLS estimate to each point in `y`
- rf.ols_b: OLS model coefficients
- rf.ols_rms_resid: rms residuals from the OLS fit
- rf.iterations: number of iterations completed
"""
if tune is None:
tune = tune_defaults[weight_function]
_wfunc = wfuncdict[weight_function]
if X.ndim == 1:
        X = X.reshape((X.size, 1))
n, p = X.shape
lev = leverage(X)
out = Bunch(
weight_function=weight_function,
tune=tune,
rcond=rcond,
tol=tol,
maxit=maxit,
leverage=lev,
)
# LJ2009 has an incorrect expression for leverage in the
# appendix, and an incorrect version of the following
# multiplicative factor for scaling the residuals.
rfac = 1 / (tune * np.sqrt(1 - lev))
# We probably only need to keep track of the rmeansq, but
# it's cheap to carry along rsumsq until we are positive.
oldrsumsq = None
oldrmeansq = None
oldlstsq = None
oldw = None
iterations = 0 # 1-based iteration exit number
w = np.ones(y.shape)
for i in range(maxit):
wX = w[:, np.newaxis] * X
wy = w * y
b, rsumsq, rank, sing = np.linalg.lstsq(wX, wy, rcond)
rsumsq = rsumsq[0]
if i == 0:
rms_resid = np.sqrt(rsumsq / n)
out.update(dict(ols_b=b, ols_rms_resid=rms_resid))
# Weighted mean of squared weighted residuals:
rmeansq = rsumsq / w.sum()
if oldrsumsq is not None:
# improvement = (oldrsumsq - rsumsq) / oldrsumsq
improvement = (oldrmeansq - rmeansq) / oldrmeansq
# print("improvement:", improvement)
if improvement < 0:
b, rsumsq, rank, sing = oldlstsq
w = oldw
iterations = i
break
if improvement < tol:
iterations = i + 1
break
# Save these values in case the next iteration
# makes things worse.
oldlstsq = b, rsumsq, rank, sing
oldw = w
oldrsumsq = rsumsq
oldrmeansq = rmeansq
# Residuals (unweighted) from latest fit:
resid = y - np.dot(X, b)
# Update weights based on these residuals.
w = _wfunc(r_normed(resid, rfac))
if iterations == 0:
iterations = maxit # Did not converge.
rms_resid = np.sqrt(np.mean(np.abs(resid) ** 2))
out.update(
dict(
iterations=iterations,
b=b,
s=sing,
w=w,
rank=rank,
rms_resid=rms_resid,
),
)
return out
# Some simple test cases; this probably will be removed.
if __name__ == "__main__":
np.random.seed(1)
n = 10000
x = np.arange(n)
x0 = np.ones_like(x)
x1 = np.exp(1j * x / 9)
x2 = np.exp(1j * x / 7)
y = (
(1 + 1j) * x1
+ (2 - 1j) * x2
+ (0.1 * np.random.randn(n) + 0.1 * 1j * np.random.randn(n))
)
y[::10] = (np.random.randn(n) + 1j * np.random.randn(n))[::10]
y[10] = 3
y[20] = 2 * 1j
y[30] = -2 - 3 * 1j
A = np.vstack((x0, x1, x2)).T
c = np.linalg.lstsq(A, y)
print("OLS:", c[0])
rf1 = robustfit(A, y)
print("robust:", rf1.b)
print("another test: a very short real series")
x = np.arange(1, 21, dtype=float)
x0 = np.ones_like(x)
xx = np.vstack((x0, x)).T
# Signal for the model: linear trend.
y = 2 * x
# Some outliers.
y[0] = 1.5
y[2] = -2
y[4] = 9.6
# Use a sine as the "noise" component; not part of the model.
y = y + 0.1 * np.sin(x)
rf2 = robustfit(xx, y)
print(np.linalg.lstsq(xx, y)[0])
print(rf2.b)
| wesleybowman/UTide | utide/robustfit.py | Python | mit | 7,233 |
# -*- coding: utf-8 -*-
import os
import re
import sys
import json
import time
import codecs
import urllib2
import logging
kill_ssh_command = 'taskkill /F /IM ssh.exe'
ssh_nat_command = r'ssh -f -NR %(server_port)s:127.0.0.1:%(proxy_port)s %(server_login_name)s@%(server_name)s'
PATH = os.path.dirname(sys.argv[0])
conf_file_path = os.path.join(PATH, 'nat.conf') # configuration file
proxy_alive_check_url = ''
conf_dic=dict([line.strip().split(':') for line in codecs.open(conf_file_path, encoding='utf-8').readlines() if not line.startswith('#') and line.strip()])
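# Hedged example of the expected nat.conf layout (key names come from
# ssh_nat_command above; the values are hypothetical). One "key:value" pair
# per line; lines starting with '#' are ignored:
#   server_name:example.com
#   server_login_name:nat
#   server_port:10022
#   proxy_port:22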
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=os.path.join(PATH, 'nat.log'),
filemode='a')
def create_nat_ssh_tunnel():
    '''Create a new NAT traversal tunnel.'''
nat_create_command = ssh_nat_command%(conf_dic)
print nat_create_command
os.system(nat_create_command)
def proxy_alive_check():
    '''Check whether the proxy is still alive.'''
alive_check_url = proxy_alive_check_url % conf_dic['server_port']
content = urllib2.urlopen(alive_check_url, timeout=30).read()
try:
json_data = json.loads(content)
return json_data.get('is_alive')
except:
return False
def kill_all_ssh_threads():
    '''Kill all currently running ssh processes.'''
os.system(kill_ssh_command)
def main():
    '''Kill all running ssh processes, then create a new traversal tunnel.'''
kill_all_ssh_threads()
create_nat_ssh_tunnel()
if __name__ == '__main__':
current = time.strftime('%Y-%m-%d %H:%M:%S')
is_proxy_alive = proxy_alive_check()
if not is_proxy_alive:
print '%s proxy dead, restart now...'%current
logging.info('proxy dead, restart now...')
main()
else:
print '%s proxy work normal...' %current
logging.info('proxy work normal...')
| wanghuafeng/spider_tools | nat/daemon_nat_win.py | Python | mit | 1,964 |
import sys
# where RobotControl.py, etc lives
sys.path.append('/home/pi/Desktop/ADL/YeastRobot/PythonLibrary')
from RobotControl import *
#################################
### Define Deck Layout
#################################
deck="""\
SW24P BLANK BLANK DW24P BLANK BLANK SW96P SW96P
SW24P BLANK BLANK DW24P BLANK BLANK BLANK BLANK
SW24P BLANK BLANK DW24P BLANK BLANK BLANK BLANK
SW24P BLANK BLANK DW24P BLANK BLANK BLANK BLANK
"""
# 2 3 4 5 6
# note the 1st user defined column is "2" not zero or one, since tips are at 0 & 1
##################################
myvol = 140
# 1 = UL of BoxA, 2 = UR of BoxA, 3 = LL of BoxA, etc.
OffsetDict={0: 'UL', 1: 'UR', 2: 'LL', 3: 'LR'}
# read in deck, etc
DefineDeck(deck)
printDeck()
InitializeRobot()
CurrentTipPosition = 1
for col in [2]:
for row in [0,1,2,3]:
CurrentTipPosition = retrieveTips(CurrentTipPosition)
# initial mix
position(row,col)
mix(300,95,100,5)
# from SW24 to SW96 empty
#position(row,col)
#aspirate(myvol,depth=95,speed=50, mix=3)
#position(col-2,8,position = OffsetDict[row])
#dispense(myvol, depth=90, speed=50)
# from SW24 to SW96 empty
#position(row,col)
#aspirate(myvol,depth=95,speed=50, mix=3)
#position(col-2,9,position = OffsetDict[row])
#dispense(myvol, depth=90, speed=50)
# from SW24 to DW24 300 ul X3 = 900ul
# this is the most we can get from a plate when we start with 1.4ml
for i in [1,2,3]:
position(row,col)
aspirate(myvol*2.14,depth=98,speed=50, mix=3)
position(row,col+3,position = OffsetDict[row])
dispense(myvol*2.14, depth=92, speed=50)
disposeTips()
position(0,0)
ShutDownRobot()
quit()
| tdlong/YeastRobot | UserPrograms/Rob_Sexual_OD630_Spore_4.py | Python | gpl-3.0 | 1,727 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import codecs
import datetime
import functools
import inspect
import itertools
import sys
if sys.version_info < (2, 7):
# On Python <= 2.6, json module is not C boosted, so try to use
# simplejson module if available
try:
import simplejson as json
except ImportError:
import json
else:
import json
import six
import six.moves.xmlrpc_client as xmlrpclib
from keystone.openstack.common import gettextutils
from keystone.openstack.common import importutils
from keystone.openstack.common import strutils
from keystone.openstack.common import timeutils
netaddr = importutils.try_import("netaddr")
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
inspect.isfunction, inspect.isgeneratorfunction,
inspect.isgenerator, inspect.istraceback, inspect.isframe,
inspect.iscode, inspect.isbuiltin, inspect.isroutine,
inspect.isabstract]
_simple_types = (six.string_types + six.integer_types
+ (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
level=0, max_depth=3):
"""Convert a complex object into primitives.
Handy for JSON serialization. We can optionally handle instances,
but since this is a recursive function, we could have cyclical
data structures.
To handle cyclical data structures we could track the actual objects
visited in a set, but not all objects are hashable. Instead we just
track the depth of the object inspections and don't go too deep.
Therefore, convert_instances=True is lossy ... be aware.
"""
# handle obvious types first - order of basic types determined by running
# full tests on nova project, resulting in the following counts:
# 572754 <type 'NoneType'>
# 460353 <type 'int'>
# 379632 <type 'unicode'>
# 274610 <type 'str'>
# 199918 <type 'dict'>
# 114200 <type 'datetime.datetime'>
# 51817 <type 'bool'>
# 26164 <type 'list'>
# 6491 <type 'float'>
# 283 <type 'tuple'>
# 19 <type 'long'>
if isinstance(value, _simple_types):
return value
if isinstance(value, datetime.datetime):
if convert_datetime:
return timeutils.strtime(value)
else:
return value
# value of itertools.count doesn't get caught by nasty_type_tests
# and results in infinite loop when list(value) is called.
if type(value) == itertools.count:
return six.text_type(value)
# FIXME(vish): Workaround for LP bug 852095. Without this workaround,
# tests that raise an exception in a mocked method that
# has a @wrap_exception with a notifier will fail. If
# we up the dependency to 0.5.4 (when it is released) we
# can remove this workaround.
if getattr(value, '__module__', None) == 'mox':
return 'mock'
if level > max_depth:
return '?'
# The try block may not be necessary after the class check above,
# but just in case ...
try:
recursive = functools.partial(to_primitive,
convert_instances=convert_instances,
convert_datetime=convert_datetime,
level=level,
max_depth=max_depth)
if isinstance(value, dict):
return dict((k, recursive(v)) for k, v in six.iteritems(value))
elif isinstance(value, (list, tuple)):
return [recursive(lv) for lv in value]
# It's not clear why xmlrpclib created their own DateTime type, but
# for our purposes, make it a datetime type which is explicitly
# handled
if isinstance(value, xmlrpclib.DateTime):
value = datetime.datetime(*tuple(value.timetuple())[:6])
if convert_datetime and isinstance(value, datetime.datetime):
return timeutils.strtime(value)
elif isinstance(value, gettextutils.Message):
return value.data
elif hasattr(value, 'iteritems'):
return recursive(dict(value.iteritems()), level=level + 1)
elif hasattr(value, '__iter__'):
return recursive(list(value))
elif convert_instances and hasattr(value, '__dict__'):
# Likely an instance of something. Watch for cycles.
# Ignore class member vars.
return recursive(value.__dict__, level=level + 1)
elif netaddr and isinstance(value, netaddr.IPAddress):
return six.text_type(value)
else:
if any(test(value) for test in _nasty_type_tests):
return six.text_type(value)
return value
except TypeError:
# Class objects are tricky since they may define something like
# __iter__ defined but it isn't callable as list().
return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
return json.dumps(value, default=default, **kwargs)
def dump(obj, fp, *args, **kwargs):
return json.dump(obj, fp, *args, **kwargs)
def loads(s, encoding='utf-8', **kwargs):
return json.loads(strutils.safe_decode(s, encoding), **kwargs)
def load(fp, encoding='utf-8', **kwargs):
return json.load(codecs.getreader(encoding)(fp), **kwargs)
try:
import anyjson
except ImportError:
pass
else:
anyjson._modules.append((__name__, 'dumps', TypeError,
'loads', ValueError, 'load'))
anyjson.force_implementation(__name__)
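# Hedged usage sketch (not part of the original module): shows how dumps()
# relies on to_primitive() to serialize objects that plain json.dumps would
# reject. The sample payload is hypothetical.
if __name__ == '__main__':
    sample = {'when': datetime.datetime(2014, 1, 1, 12, 0, 0),
              'tags': ('a', 'b'),
              'count': 3}
    # to_primitive() turns the datetime into a string and the tuple into a
    # list; dumps() applies the same conversion through its default= hook.
    print(to_primitive(sample))
    print(dumps(sample))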
| scrapinghub/keystone | keystone/openstack/common/jsonutils.py | Python | apache-2.0 | 6,840 |
# -*- coding: utf-8 -*-
'''
Connection module for Amazon SQS
.. versionadded:: 2014.7.0
:configuration: This module accepts explicit sqs credentials but can also utilize
IAM roles assigned to the instance through Instance Profiles. Dynamic
credentials are then automatically obtained from AWS API and no further
configuration is necessary. More Information available at:
.. code-block:: text
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
If IAM roles are not used you need to specify them either in a pillar or
in the minion's config file:
.. code-block:: yaml
sqs.keyid: GKTADJGHEIQSXMKKRBJ08H
sqs.key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
A region may also be specified in the configuration:
.. code-block:: yaml
sqs.region: us-east-1
If a region is not specified, the default is us-east-1.
It's also possible to specify key, keyid and region via a profile, either
as a passed in dict, or as a string to pull from pillars or minion config:
.. code-block:: yaml
myprofile:
keyid: GKTADJGHEIQSXMKKRBJ08H
key: askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs
region: us-east-1
:depends: boto
'''
# keep lint from choking on _get_conn and _cache_id
#pylint: disable=E0602
from __future__ import absolute_import
# Import Python libs
import logging
import json
import salt.ext.six as six
log = logging.getLogger(__name__)
# Import third party libs
try:
# pylint: disable=unused-import
import boto
import boto.sqs
# pylint: enable=unused-import
logging.getLogger('boto').setLevel(logging.CRITICAL)
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from salt.ext.six import string_types
def __virtual__():
'''
Only load if boto libraries exist.
'''
if not HAS_BOTO:
return (False, 'The boto_sqs module could not be loaded: boto libraries not found')
__utils__['boto.assign_funcs'](__name__, 'sqs', pack=__salt__)
return True
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI example::
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if conn.get_queue(name):
return True
else:
return False
def create(name, region=None, key=None, keyid=None, profile=None):
'''
Create an SQS queue.
CLI example to create a queue::
salt myminion boto_sqs.create myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn.get_queue(name):
try:
conn.create_queue(name)
except boto.exception.SQSError:
msg = 'Failed to create queue {0}'.format(name)
log.error(msg)
return False
log.info('Created queue {0}'.format(name))
return True
def delete(name, region=None, key=None, keyid=None, profile=None):
'''
Delete an SQS queue.
CLI example to delete a queue::
salt myminion boto_sqs.delete myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
queue_obj = conn.get_queue(name)
if queue_obj:
deleted_queue = conn.delete_queue(queue_obj)
if not deleted_queue:
msg = 'Failed to delete queue {0}'.format(name)
log.error(msg)
return False
return True
def get_attributes(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if attributes are set on an SQS queue.
CLI example::
salt myminion boto_sqs.get_attributes myqueue
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {}
queue_obj = conn.get_queue(name)
if not queue_obj:
log.error('Queue {0} does not exist.'.format(name))
return {}
return conn.get_queue_attributes(queue_obj)
def set_attributes(name, attributes, region=None, key=None, keyid=None,
profile=None):
'''
Set attributes on an SQS queue.
CLI example to set attributes on a queue::
salt myminion boto_sqs.set_attributes myqueue '{ReceiveMessageWaitTimeSeconds: 20}' region=us-east-1
'''
ret = True
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
queue_obj = conn.get_queue(name)
if not queue_obj:
log.error('Queue {0} does not exist.'.format(name))
ret = False
if isinstance(attributes, string_types):
attributes = json.loads(attributes)
for attr, val in six.iteritems(attributes):
attr_set = queue_obj.set_attribute(attr, val)
if not attr_set:
msg = 'Failed to set attribute {0} = {1} on queue {2}'
log.error(msg.format(attr, val, name))
ret = False
else:
msg = 'Set attribute {0} = {1} on queue {2}'
log.info(msg.format(attr, val, name))
return ret
| stephane-martin/salt-debian-packaging | salt-2016.3.3/salt/modules/boto_sqs.py | Python | apache-2.0 | 5,122 |
import threadly, time, random
import unittest
clf = 0
llf = 0
def callLF(lf):
# print "CALLED"
lf.setter(True)
def listenFromFuture():
global llf
# print "GotCalled"
llf +=1
def callFromFuture(s):
global clf
# print "GotCalled", s
clf +=1
def listenException():
raise Exception("TEST1")
def callException(s):
raise Exception("TEST1")
class TestFutures(unittest.TestCase):
def test_futureTest1(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
LF2.add_listener(listenFromFuture)
LF2.add_callable(callFromFuture)
LF1.add_listener(listenFromFuture)
LF1.add_callable(callFromFuture)
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
self.assertEqual(2, llf)
self.assertEqual(2, clf)
LF2.add_listener(listenFromFuture)
LF2.add_callable(callFromFuture)
LF1.add_listener(listenFromFuture)
LF1.add_callable(callFromFuture)
self.assertEqual(4, llf)
self.assertEqual(4, clf)
sch.shutdown().get()
def test_futureCallerExceptions(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF1.add_listener(listenException)
LF1.add_listener(listenException)
LF1.add_callable(callException)
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
sch.shutdown().get()
def test_futureDoubleSet(self):
global clf, llf
sch = threadly.Scheduler(10)
LF1 = threadly.ListenableFuture()
LF2 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertTrue(LF1.get())
self.assertTrue(LF2.get())
LF3 = sch.schedule_with_future(callLF, delay=100, args=(LF1,))
self.assertFalse(LF3.get())
self.assertEqual(10, sch.get_poolsize())
sch.shutdown().get()
if __name__ == '__main__':
unittest.main()
| threadly/python-threadly | tests/futureCheck.py | Python | unlicense | 1,961 |
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
MODELTRANSLATION_DEBUG = DEBUG
WSGI_APPLICATION = 'openbudgets.wsgi.application'
SECRET_KEY = 'pvh9d)+7aui4=evh$yv!qgbr3oyz-4=^oj_%6g8+v57b=de5)7'
ALLOWED_HOSTS = ['.openbudgets.dev:8000']
SESSION_COOKIE_DOMAIN = 'openbudgets.dev'
SITE_ID = 1
TIME_ZONE = 'UTC'
USE_TZ = True
USE_I18N = True
USE_L10N = True
ROOT_URLCONF = 'openbudgets.urls'
SUBDOMAIN_URLCONFS = {
'': 'openbudgets.urls',
'www': 'openbudgets.urls',
'he': 'openbudgets.urls',
'en': 'openbudgets.urls',
'ru': 'openbudgets.urls',
'ar': 'openbudgets.urls',
'api': 'openbudgets.urls',
}
gettext = lambda s: s
LANGUAGES = (
('he', gettext('Hebrew')),
('en', gettext('English')),
('ar', gettext('Arabic')),
('ru', gettext('Russian')),
)
LANGUAGE_CODE = LANGUAGES[0][0]
MODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE
MODELTRANSLATION_FALLBACK_LANGUAGES = (LANGUAGES[0][0], LANGUAGES[1][0],
LANGUAGES[2][0], LANGUAGES[3][0])
MEDIA_URL = '/static/media/'
STATIC_URL = '/static/'
SETTINGS_ROOT = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.abspath(os.path.dirname(SETTINGS_ROOT))
MEDIA_ROOT = os.path.abspath(os.path.join(os.path.dirname(PROJECT_ROOT),
'static', 'media'),)
STATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname(PROJECT_ROOT),
'static'),)
STATICFILES_DIRS = (os.path.abspath(os.path.join(PROJECT_ROOT, 'commons',
'static')),)
TEMPLATE_DIRS = (
os.path.abspath(os.path.join(PROJECT_ROOT, 'commons', 'templates')),
# TODO: This below was added, check if it really should be here.
os.path.abspath(os.path.join(PROJECT_ROOT, 'apps', 'entities', 'static', 'entities', 'explorer', 'templates')),
)
FIXTURE_DIRS = (os.path.abspath(os.path.join(PROJECT_ROOT, 'fixtures')),)
LOCALE_PATHS = (os.path.abspath(os.path.join(PROJECT_ROOT, 'locale')),)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'openbudgets.commons.stache.PystacheFilesystemLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.cache.UpdateCacheMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.locale.LocaleMiddleware',
'openbudgets.apps.international.middleware.InterfaceLanguage',
'django.middleware.common.CommonMiddleware',
'subdomains.middleware.SubdomainURLRoutingMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.comments',
'grappelli.dashboard',
'grappelli',
'grappelli_modeltranslation',
'django.contrib.admin',
'django.contrib.sitemaps',
'oauth2_provider',
'corsheaders',
'south',
'subdomains',
'registration',
'rest_framework',
'modeltranslation',
'raven.contrib.django.raven_compat',
'taggit',
'django_gravatar',
'openbudgets.apps.accounts',
'openbudgets.apps.sheets',
'openbudgets.apps.contexts',
'openbudgets.apps.entities',
'openbudgets.apps.interactions',
'openbudgets.apps.international',
'openbudgets.apps.pages',
'openbudgets.apps.sources',
'openbudgets.apps.taxonomies',
'openbudgets.apps.tools',
'openbudgets.apps.transport',
'openbudgets.apps.api',
'openbudgets.commons',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
'openbudgets.commons.context_processors.site',
'openbudgets.commons.context_processors.forms',
'openbudgets.commons.context_processors.openbudgets',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
ACCOUNT_ACTIVATION_DAYS = 7
AUTH_USER_MODEL = 'accounts.Account'
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_URL = '/accounts/auth/logout/'
ABSOLUTE_URL_OVERRIDES = {
'auth.user': lambda u: '/accounts/{uuid}/'.format(uuid=u.uuid)
}
GRAVATAR_DEFAULT_IMAGE = 'retro'
REDIS = {
'HOST': '127.0.0.1',
'PORT': 6379,
'DB': 0,
'PASSWORD': '',
'SCHEME': 'redis://'
}
REDIS_URL = REDIS['SCHEME'] + REDIS['HOST'] + ':' + \
str(REDIS['PORT']) + '/' + str(REDIS['DB'])
GRAPPELLI_ADMIN_TITLE = 'Open Budgets'
GRAPPELLI_INDEX_DASHBOARD = 'openbudget.dashboard.OpenBudgetsDashboard'
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'oauth2_provider.ext.rest_framework.OAuth2Authentication',
#'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.AllowAny',
#'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.UnicodeJSONRenderer',
#'rest_framework.renderers.BrowsableAPIRenderer',
),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.SearchFilter',
'rest_framework.filters.OrderingFilter',
),
'PAGINATE_BY': 500,
'PAGINATE_BY_PARAM': 'page_by'
}
OAUTH2_PROVIDER = {
'SCOPES': ['read', 'write']
}
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_METHODS = (
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'OPTIONS'
)
CORS_ALLOW_HEADERS = (
'x-requested-with',
'content-type',
'accept',
'origin',
'authorization',
'x-csrftoken'
)
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
ADMINS = (('', ''),)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'openbudgets',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
'OPTIONS': {
'autocommit': True,
}
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
CACHE_MIDDLEWARE_SECONDS = 604800
CACHE_MIDDLEWARE_KEY_PREFIX = 'openbudgets::'
SOUTH_TESTS_MIGRATE = False
RAVEN_CONFIG = {'dsn': ''}
OPENBUDGETS_TEMP_DIR = os.path.abspath(
os.path.join(os.path.dirname(PROJECT_ROOT), 'tmp'))
OPENBUDGETS_NAME_APP = gettext('Open Local Budgets')
OPENBUDGETS_NAME_SPONSOR = gettext('Public Knowledge Workshop')
OPENBUDGETS_GROUP_ID_CORE = 1
OPENBUDGETS_GROUP_ID_CONTENT = 2
OPENBUDGETS_GROUP_ID_PUBLIC = 3
OPENBUDGETS_PERIOD_RANGES = ('yearly',)
OPENBUDGETS_AVATAR_ANON = STATIC_URL + 'img/avatar_anon.png'
OPENBUDGETS_IMPORT_FIELD_DELIMITER = ','
OPENBUDGETS_IMPORT_INTRA_FIELD_DELIMITER = '|'
OPENBUDGETS_IMPORT_INTRA_FIELD_MULTIPLE_VALUE_DELIMITER = ';'
OPENBUDGETS_COMPARABLE_TEMPLATENODE_DEFAULT = True
OPENBUDGETS_COMPARABLE_TEMPLATENODE_NOT_IN_BLUEPRINT_DEFAULT = True
OPENBUDGETS_COMPARABLE_WITHIN_ENTITY = True
OPENBUDGETS_COMPARABLE_ACROSS_ENTITIES = True
OPENBUDGETS_CKAN = [
{
'name': 'Datahub',
'base_url': 'http://datahub.io/api',
'package_url': 'http://datahub.io/dataset/',
'api_key': '884da76c-87b6-4974-97dc-cfd3f639d15a'
}
]
OPENBUDGETS_SETTING = {
'tags': ['budget', 'municipalities', 'israel'],
'notes': 'This is the Budget and the Actual of',
'owner_org': 'israel-municipalities'
}
# if we are on a deploy env, we should have a settings.deploy module to load.
try:
from .deploy import *
except ImportError:
# if we are on local, we accept overrides in a settings.local module.
# For safety, we only try to load settings.local if settings.production
# does not exist.
try:
from .local import *
except ImportError:
pass
# if we are on the CI server, load some CI-specific settings
try:
ci = os.environ.get('CI')
if ci:
from .ci import *
except KeyError:
pass
| kobiluria/openbudgets | openbudgets/settings/__init__.py | Python | bsd-3-clause | 9,314 |
#!/usr/bin/env python
from yape.initialize import initialize
from state import game_state
from listeners import dispatcher
from config import ASSETS_DIR, Config
from assets import Player, Level, Questions
from logic import logic
from graphics import render
def main():
# Initialize display screen and load assets
game_data = initialize(game_state, dispatcher, Config, ASSETS_DIR)
# Load the player, questions, and level before the game begins
player = Player(game_data.manager)
questions = Questions(game_data.manager, game_data.config)
level = Level(game_data.manager, game_data.config)
# Place player at the start location
player.x = level.map.player_start['x']
player.y = level.map.player_start['y']
# Run game loop
game_loop('exit', game_data, questions, level, player)
def game_loop(exit_state, game_data, questions, level, player):
while not game_data.state.is_state(exit_state):
game_data.dispatcher.handle_events(game_data, questions, level, player)
logic(game_data, questions, level, player)
render(game_data, questions, level, player)
if __name__ == "__main__":
main()
| calebsmith/razzytails | src/main.py | Python | gpl-3.0 | 1,163 |
# Copyright 2011 Kalamazoo College Computer Science Club
# <[email protected]>
# This file is part of LitHub.
#
# LitHub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LitHub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LitHub. If not, see <http://www.gnu.org/licenses/>.
from django.utils.http import urlencode
from django.conf import settings
from django.core.urlresolvers import reverse
import urllib2
from urlparse import parse_qs
import json
def lazy_prop(func):
"""Wrapper for properties that should be evaluated lazily
This calls the actual method only once per instance. On the first time
the property is read, it's value is stored in self.__dict__. The next
time onwards, the stored value is returned.
Note that this wrapper also wraps the property wrapper on the method, so
only the @lazy_prop wrapper needs to be used.
"""
def wrap(self, *args, **kwargs):
if not func.__name__ in self.__dict__:
self.__dict__[func.__name__] = func(self, *args, **kwargs)
return self.__dict__[func.__name__]
return property(wrap)
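# Hedged illustration (not part of the original module): a minimal class
# showing how @lazy_prop is meant to be used; the class and attribute names
# are hypothetical.
class _LazyPropExample(object):
    @lazy_prop
    def expensive_value(self):
        # Runs once on first access; the result is cached in self.__dict__
        # under the method name and returned directly on later reads.
        return sum(range(1000))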
class FBConnect(object):
"""Access and run queries using the Facebook Connect API"""
def __init__(self, code=None, view=None, access_token=None):
if code != None:
self.access_token = ""
self._get_access_token(code, view)
elif access_token != None:
self.access_token = access_token
elif access_token==None and code==None:
raise ValueError('code and access_token cannot both be None.')
def _get_access_token(self, code, view=None):
LOOKUP_URL = "https://graph.facebook.com/oauth/access_token?"
opts = {'client_id':settings.FB_APP_ID,
'redirect_uri':_url_receiving_code(view),
'client_secret':settings.FB_APP_SECRET,
'code':code}
try:
fb_resp = urllib2.urlopen(LOOKUP_URL + urlencode(opts))
result = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError:
raise ValueError("The code was invalid or there was a problem" \
+ " connecting to Facebook")
resp = parse_qs(result)
if not resp.has_key('access_token'):
raise ValueError("No access token returned")
self.access_token = resp['access_token'][0]
@lazy_prop
def basic_info(self):
LOOKUP_URL = "https://graph.facebook.com/me?"
opts = {'access_token':self.access_token,}
try:
fb_resp = urllib2.urlopen(LOOKUP_URL + urlencode(opts))
results = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError:
raise ValueError("The token was invalid or there was a " +\
"problem connecting to facebook")
return json.loads(results)
@lazy_prop
def networks(self):
LOOKUP_URL = "https://api.facebook.com/method/fql.query?"
opts = {'query':"SELECT affiliations FROM user WHERE uid=%s"%\
self.userid, 'access_token':self.access_token,
'format':'json'}
try:
fb_resp = urllib2.urlopen(LOOKUP_URL + urlencode(opts))
results = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError:
raise ValueError("The token was invalid or there was a" + \
"problem connecting to facebook")
return json.loads(results)[0]['affiliations']
@lazy_prop
def userid(self):
return self.basic_info['id']
def publish_og(self, action, obj_type, obj, params=None):
opts = {'access_token':self.access_token,
obj_type:obj}
if params:
opts.update(params)
# Allows overriding any of the options in opts
try:
fb_resp = urllib2.urlopen(\
'https://graph.facebook.com/me/%s:%s'%(\
settings.FB_APP_NAMESPACE, action),
urlencode(opts))
id = fb_resp.read()
fb_resp.close()
except urllib2.HTTPError as e:
raise ValueError("There was a problem connecting to facebook.")
return id
def _url_receiving_code(view=None):
view = view or 'fbconnect.views.receive_code'
extra = reverse(view)
return settings.FB_REDIRECT_URL + extra
def redirect_to_fb_url(view=None):
base_url = "https://www.facebook.com/dialog/oauth?"
opts = {'client_id':settings.FB_APP_ID,
'redirect_uri':_url_receiving_code(view),
'scope':'email,publish_actions',}
return base_url + urlencode(opts)
| umangv/LitHub | LitHub/fbconnect/utils.py | Python | gpl-3.0 | 5,141 |
# Generated by Django 3.0 on 2019-12-03 17:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(help_text='Maximum 250 characters.', max_length=250)),
('slug', models.SlugField(help_text='Suggested value automatically generated from title. Must be unique.', unique=True)),
],
options={
'verbose_name_plural': 'Categories',
'ordering': ['title'],
},
),
migrations.CreateModel(
name='Entry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('slug', models.SlugField(help_text='Suggested value automatically generated from title. Must be unique.', unique_for_date='pub_date')),
('body', models.TextField(help_text='Use Markdown to mark this up. http://daringfireball.net/projects/markdown/syntax')),
('body_html', models.TextField(blank=True, editable=False)),
('pub_date', models.DateTimeField(default=django.utils.timezone.now)),
('status', models.IntegerField(choices=[(1, 'Live'), (2, 'Draft'), (3, 'Hidden')], default=1)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_blog.Author')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='django_blog.Category')),
],
options={
'verbose_name_plural': 'Entries',
'ordering': ['-pub_date'],
},
),
]
| jbergantine/django-blog | django_blog/migrations/0001_initial.py | Python | mit | 2,509 |
from python_kemptech_api import *
# Specify the LoadMaster connection credentials here:
loadmaster_ip = ""
username = ""
password = ""
vs_ip_1 = ""
vs_ip_2 = ""
rs_ip_1 = ""
rs_ip_2 = ""
vs_port = ""
rs_port = ""
class RealServerPool(object):
healthcheck_parameters = [
"checktype",
"checkport",
"checkurl",
"checkheaders",
"checkuse1_1",
"checkuseget",
"checkpostdata",
"checkpattern",
"checkcodes",
"matchlen",
"enhancedhealthchecks",
"rsminimum"
]
rs_parameters = [
"enable",
"forward",
"weight",
"limit",
"critical",
"follow"
]
def __init__(self, rs_list=None, vs=None):
if rs_list is not None:
self.rs = []
for rs in rs_list:
if isinstance(rs, RealServer):
self.rs.append(rs)
else:
ip, port = rs.split(":")
mock_lm = {"endpoint": "", "ip_address": "", "vs": ""}
self.rs.append(RealServer(mock_lm, ip, port))
self.checktype = None
self.checkport = None
self.checkurl = None
self.checkheaders = None
self.checkuse1_1 = None
self.checkuseget = None
self.checkpostdata = None
self.checkpattern = None
self.checkcodes = None
self.matchlen = None
self.enhancedhealthchecks = None
self.rsminimum = None
elif vs is not None:
self.rs = vs.servers.values()
self.checktype = vs.checktype
self.checkport = vs.checkport
self.checkurl = vs.checkurl
self.checkheaders = vs.checkheaders
self.checkuse1_1 = vs.checkuse1_1
self.checkuseget = vs.checkuseget
self.checkpostdata = vs.checkpostdata
self.checkpattern = vs.checkpattern
self.checkcodes = vs.checkcodes
self.matchlen = vs.matchlen
self.enhancedhealthchecks = vs.enhancedhealthchecks
self.rsminimum = vs.rsminimum
def apply(self, vs):
[rs.delete() for rs in vs.servers.values()]
for rs in self.rs:
new_rs = vs.create_real_server(rs.rs, rs.rsport)
# Apply other settings
new_rs.save()
for attr in self.rs_parameters:
print("attr: {}".format(attr))
if hasattr(rs, attr) and rs.__getattribute__(attr) is not None:
print("set attr: {}={}".format(attr, rs.__getattribute__(attr)))
new_rs.__setattr__(attr, rs.__getattribute__(attr))
new_rs.update()
for attr in self.healthcheck_parameters:
print("attr: {}".format(attr))
if hasattr(self, attr) and self.__getattribute__(attr) is not None:
print("set attr: {}={}".format(attr, self.__getattribute__(attr)))
vs.__setattr__(attr, self.__getattribute__(attr))
vs.update()
# Create the LoadMaster object
lm = LoadMaster(loadmaster_ip, username, password)
# Delete all the existing VSs
[vs.delete() for vs in lm.vs.values()]
# Create a new VS
vs = lm.create_virtual_service(vs_ip_1, vs_port, "tcp")
vs.save()
# Configure some healthcheck options
vs.checktype = 'HTTPS'
vs.checkport = "8443"
vs.update()
# Add and save the first real server
rs1 = vs.create_real_server(rs_ip_1, rs_port)
rs1.save()
# Configure the weighting
rs1.weight = 200
rs1.update()
# Add and save the second real server
rs2 = vs.create_real_server(rs_ip_2, rs_port)
rs2.save()
# Disable the server
rs2.enable = 'N'
rs2.update()
# This will create a pool based on the VS and healthcheck settings of the VS
pool1 = RealServerPool(vs=vs)
# Create the second VS
vs2 = lm.create_virtual_service(vs_ip_2, vs_port, "tcp")
vs2.save()
# Apply the pool to the new VS. The RS and healthcheck settings will be applied
pool1.apply(vs2)
# Alternately, you can use a list of IP and ports to create a pool
rs_list = ["172.22.100.6:88", "172.22.100.7:88", "172.22.100.8:88", "172.22.100.9:88"]
pool2 = RealServerPool(rs_list)
# You can also apply healthcheck settings directly to a pool
pool2.checktype = "ICMP"
# Apply the pool to both VSs
pool2.apply(vs)
pool2.apply(vs2)
| KEMPtechnologies/python-kemptech-api | examples/real_server_pooling.py | Python | apache-2.0 | 4,377 |
"""
Implements a feature set based off of stemmer applied to words.
.. autoclass:: revscoring.languages.features.Stemmed
:members:
:member-order: bysource
Supporting classes
------------------
.. autoclass:: revscoring.languages.features.stemmed.Revision
:members:
:member-order: bysource
.. autoclass:: revscoring.languages.features.stemmed.Diff
:members:
:member-order: bysource
"""
from .stemmed import Stemmed
from .features import Revision, Diff
__all__ = [Stemmed, Revision, Diff]
| yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/languages/features/stemmed/__init__.py | Python | mit | 516 |
""" Locate features in images: combine find and refine steps """
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import pandas as pd
from .find import find_ellipse, find_ellipsoid, find_disks
from .refine import (refine_ellipse, refine_ellipsoid,
refine_ellipsoid_fast, refine_disks)
def locate_ellipse(frame, mode='ellipse_aligned', n=None, rad_range=None,
maxfit_size=2, spline_order=3, threshold=0.1):
"""Locates an ellipse in a 2D image and returns center coordinates and
radii along x, y.
Parameters
----------
    frame: ndarray
    mode: string
        mode passed on to find_ellipse and refine_ellipse
        (e.g. 'ellipse_aligned', the default)
    n : int
        number of points on the ellipse that are used for refine
rad_range: tuple of floats
length of the line (distance inwards, distance outwards)
maxfit_size: integer
pixels around maximum pixel that will be used in linear regression
spline_order: integer
interpolation order for edge crossections
threshold: float
a threshold is calculated based on the global maximum
fitregions are rejected if their average value is lower than this
Returns
-------
Series with yr, xr, yc, xc indices
ndarray with (y, x) contour
"""
assert frame.ndim == 2
columns = ['yr', 'xr', 'yc', 'xc']
params = find_ellipse(frame, mode)
params, r = refine_ellipse(frame, params, mode, n, rad_range,
maxfit_size, spline_order, threshold)
# params = [np.nan] * 4
# r = None
return pd.Series(params, index=columns), r
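# Hedged usage sketch (the 'frame' image is hypothetical):
#   params, contour = locate_ellipse(frame, mode='ellipse_aligned')
#   yc, xc = params['yc'], params['xc']  # fitted center
#   yr, xr = params['yr'], params['xr']  # fitted radii along y and x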
def locate_ellipsoid_fast(frame, n_xy=None, n_xz=None, rad_range=None,
maxfit_size=2, spline_order=3, threshold=0.1,
radius_rtol=0.5, radius_atol=30.0, center_atol=30.0):
"""Locates an ellipsoid in a 3D image and returns center coordinates and
radii along x, y, z. The function only analyzes YX and ZX middle slices.
Parameters
----------
image3d: 3D ndarray
n_xy: integer
number of points on the ellipse that are used for refine in xy plane
n_xz: integer
number of points on the ellipse that are used for refine in xz plane
rad_range: tuple of floats
length of the line (distance inwards, distance outwards)
maxfit_size: integer
pixels around maximum pixel that will be used in linear regression
spline_order: integer
interpolation order for edge crossections
threshold: float
a threshold is calculated based on the global maximum
fitregions are rejected if their average value is lower than this
radius_rtol : float, optional
the maximum relative tolerance for the difference between initial
and refined radii, Default 0.5
radius_atol : float, optional
the maximum absolute tolerance for the difference between initial
and refined radii, Default 30.
center_atol : float, optional
the maximum absolute tolerance for the difference between initial
and refined radii, Default 30.
Returns
-------
Series with zr, yr, xr, zc, yc, xc indices, ndarray with (y, x) contour
"""
assert frame.ndim == 3
columns = ['zr', 'yr', 'xr', 'zc', 'yc', 'xc']
params = find_ellipsoid(frame)
params, r = refine_ellipsoid_fast(frame, params, n_xy, n_xz, rad_range,
maxfit_size, spline_order, threshold,
radius_rtol, radius_atol, center_atol)
# except Exception:
# params = [np.nan] * 6
# r = None
return pd.Series(params, index=columns), r
def locate_ellipsoid(frame, spacing=1, rad_range=None, maxfit_size=2,
spline_order=3, threshold=0.1):
"""Locates an ellipsoid in a 3D image and returns center coordinates and
radii along x, y, z. The function fully analyzes the vesicle.
Parameters
----------
image3d: 3D ndarray
spacing: float
spacing between points on an xy circle, for grid
rad_range: tuple of floats
length of the line (distance inwards, distance outwards)
maxfit_size: integer
pixels around maximum pixel that will be used in linear regression
spline_order: integer
        interpolation order for edge cross-sections
    threshold: float
        a threshold is calculated based on the global maximum;
        fit regions are rejected if their average value is lower than this
Returns
-------
Series with zr, yr, xr, zc, yc, xc, skew_y, skew_x indices
ndarray with (y, x) contour
"""
assert frame.ndim == 3
columns = ['zr', 'yr', 'xr', 'zc', 'yc', 'xc', 'skew_y', 'skew_x']
params = find_ellipsoid(frame)
params, r = refine_ellipsoid(frame, params, spacing, rad_range,
maxfit_size, spline_order, threshold)
r = r[np.abs(r[:, 0] - params[3]) < 0.5] # extract center coords
# except Exception:
# params = [np.nan] * 8
# r = None
return pd.Series(params, index=columns), r
def locate_disks(image, size_range, maximum=100, rad_range=None,
threshold=0.5, max_dev=1, min_points=10, min_contrast=0,
canny_sigma=1):
""" Find circular particles in the image """
blobs = find_disks(image, size_range, maximum, canny_sigma)
if blobs.empty:
return pd.DataFrame(columns=['r', 'y', 'x', 'dev'])
result = refine_disks(image, blobs, rad_range, threshold, max_dev,
min_points, min_contrast)
result = result.dropna()
result.reset_index(drop=True, inplace=True)
return result
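# Minimal usage sketch (not part of the original module; ``frame2d`` and
# ``frame3d`` are assumed to be 2D/3D ndarrays obtained elsewhere):
#
#     params, contour = locate_ellipse(frame2d)
#     print(params[['yc', 'xc', 'yr', 'xr']])
#
#     params3d, contour3d = locate_ellipsoid(frame3d)
#     print(params3d[['zc', 'yc', 'xc']])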
| caspervdw/circletracking | circletracking/locate.py | Python | bsd-3-clause | 5,786 |
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script provides methods for clobbering build directories."""
import argparse
import os
import shutil
import subprocess
import sys
def extract_gn_build_commands(build_ninja_file):
"""Extracts from a build.ninja the commands to run GN.
The commands to run GN are the gn rule and build.ninja build step at the
top of the build.ninja file. We want to keep these when deleting GN builds
since we want to preserve the command-line flags to GN.
On error, returns the empty string."""
result = ""
with open(build_ninja_file, 'r') as f:
# Read until the second blank line. The first thing GN writes to the file
# is the "rule gn" and the second is the section for "build build.ninja",
# separated by blank lines.
num_blank_lines = 0
while num_blank_lines < 2:
line = f.readline()
if len(line) == 0:
return '' # Unexpected EOF.
result += line
if line[0] == '\n':
num_blank_lines = num_blank_lines + 1
return result
def delete_dir(build_dir):
# For unknown reasons (anti-virus?) rmtree of Chromium build directories
# often fails on Windows.
if sys.platform.startswith('win'):
subprocess.check_call(['rmdir', '/s', '/q', build_dir], shell=True)
else:
shutil.rmtree(build_dir)
def delete_build_dir(build_dir):
# GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
if not os.path.exists(build_ninja_d_file):
delete_dir(build_dir)
return
# GN builds aren't automatically regenerated when you sync. To avoid
# messing with the GN workflow, erase everything but the args file, and
# write a dummy build.ninja file that will automatically rerun GN the next
# time Ninja is run.
build_ninja_file = os.path.join(build_dir, 'build.ninja')
build_commands = extract_gn_build_commands(build_ninja_file)
try:
gn_args_file = os.path.join(build_dir, 'args.gn')
with open(gn_args_file, 'r') as f:
args_contents = f.read()
except IOError:
args_contents = ''
e = None
try:
# delete_dir and os.mkdir() may fail, such as when chrome.exe is running,
# and we still want to restore args.gn/build.ninja/build.ninja.d, so catch
# the exception and rethrow it later.
delete_dir(build_dir)
os.mkdir(build_dir)
except Exception as e:
pass
# Put back the args file (if any).
if args_contents != '':
with open(gn_args_file, 'w') as f:
f.write(args_contents)
# Write the build.ninja file sufficiently to regenerate itself.
with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
if build_commands != '':
f.write(build_commands)
else:
# Couldn't parse the build.ninja file, write a default thing.
f.write('''rule gn
command = gn -q gen //out/%s/
description = Regenerating ninja files
build build.ninja: gn
generator = 1
depfile = build.ninja.d
''' % (os.path.split(build_dir)[1]))
  # Write a .d file for the build which references a nonexistent file. This
# will make Ninja always mark the build as dirty.
with open(build_ninja_d_file, 'w') as f:
f.write('build.ninja: nonexistant_file.gn\n')
if e:
# Rethrow the exception we caught earlier.
raise e
def clobber(out_dir):
"""Clobber contents of build directory.
Don't delete the directory itself: some checkouts have the build directory
mounted."""
for f in os.listdir(out_dir):
path = os.path.join(out_dir, f)
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
delete_build_dir(path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('out_dir', help='The output directory to clobber')
args = parser.parse_args()
clobber(args.out_dir)
return 0
if __name__ == '__main__':
sys.exit(main())
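# Example invocation (the output directory below is hypothetical):
#
#     python clobber.py out/Release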
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/build/clobber.py | Python | gpl-3.0 | 4,013 |
from __future__ import unicode_literals
from copy import deepcopy
import datetime
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import F
from django.db import transaction
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import Company, Employee, Number, Experiment
class ExpressionsTests(TestCase):
def test_filter(self):
Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
Company.objects.create(
name="Foobar Ltd.", num_employees=3, num_chairs=4,
ceo=Employee.objects.create(firstname="Frank", lastname="Meyer")
)
Company.objects.create(
name="Test GmbH", num_employees=32, num_chairs=1,
ceo=Employee.objects.create(firstname="Max", lastname="Mustermann")
)
company_query = Company.objects.values(
"name", "num_employees", "num_chairs"
).order_by(
"name", "num_employees", "num_chairs"
)
# We can filter for companies where the number of employees is greater
# than the number of chairs.
self.assertQuerysetEqual(
company_query.filter(num_employees__gt=F("num_chairs")), [
{
"num_chairs": 5,
"name": "Example Inc.",
"num_employees": 2300,
},
{
"num_chairs": 1,
"name": "Test GmbH",
"num_employees": 32
},
],
lambda o: o
)
# We can set one field to have the value of another field
# Make sure we have enough chairs
company_query.update(num_chairs=F("num_employees"))
self.assertQuerysetEqual(
company_query, [
{
"num_chairs": 2300,
"name": "Example Inc.",
"num_employees": 2300
},
{
"num_chairs": 3,
"name": "Foobar Ltd.",
"num_employees": 3
},
{
"num_chairs": 32,
"name": "Test GmbH",
"num_employees": 32
}
],
lambda o: o
)
# We can perform arithmetic operations in expressions
# Make sure we have 2 spare chairs
company_query.update(num_chairs=F("num_employees") + 2)
self.assertQuerysetEqual(
company_query, [
{
'num_chairs': 2302,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 5,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 34,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
        # The standard order of operations is followed
company_query.update(
num_chairs=F('num_employees') + 2 * F('num_employees')
)
self.assertQuerysetEqual(
company_query, [
{
'num_chairs': 6900,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 9,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 96,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
        # The order of operations can be overridden with parentheses
company_query.update(
num_chairs=((F('num_employees') + 2) * F('num_employees'))
)
self.assertQuerysetEqual(
company_query, [
{
'num_chairs': 5294600,
'name': 'Example Inc.',
'num_employees': 2300
},
{
'num_chairs': 15,
'name': 'Foobar Ltd.',
'num_employees': 3
},
{
'num_chairs': 1088,
'name': 'Test GmbH',
'num_employees': 32
}
],
lambda o: o,
)
        # The value of a foreign key can be copied over to another
        # foreign key.
self.assertEqual(
Company.objects.update(point_of_contact=F('ceo')),
3
)
self.assertQuerysetEqual(
Company.objects.all(), [
"Joe Smith",
"Frank Meyer",
"Max Mustermann",
],
lambda c: six.text_type(c.point_of_contact),
ordered=False
)
c = Company.objects.all()[0]
c.point_of_contact = Employee.objects.create(firstname="Guido", lastname="van Rossum")
c.save()
# F Expressions can also span joins
self.assertQuerysetEqual(
Company.objects.filter(ceo__firstname=F("point_of_contact__firstname")), [
"Foobar Ltd.",
"Test GmbH",
],
lambda c: c.name,
ordered=False
)
Company.objects.exclude(
ceo__firstname=F("point_of_contact__firstname")
).update(name="foo")
self.assertEqual(
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).get().name,
"foo",
)
with transaction.atomic():
with self.assertRaises(FieldError):
Company.objects.exclude(
ceo__firstname=F('point_of_contact__firstname')
).update(name=F('point_of_contact__lastname'))
# F expressions can be used to update attributes on single objects
test_gmbh = Company.objects.get(name="Test GmbH")
self.assertEqual(test_gmbh.num_employees, 32)
test_gmbh.num_employees = F("num_employees") + 4
test_gmbh.save()
test_gmbh = Company.objects.get(pk=test_gmbh.pk)
self.assertEqual(test_gmbh.num_employees, 36)
# F expressions cannot be used to update attributes which are foreign
# keys, or attributes which involve joins.
test_gmbh.point_of_contact = None
test_gmbh.save()
self.assertIsNone(test_gmbh.point_of_contact)
def test():
test_gmbh.point_of_contact = F("ceo")
self.assertRaises(ValueError, test)
test_gmbh.point_of_contact = test_gmbh.ceo
test_gmbh.save()
test_gmbh.name = F("ceo__last_name")
self.assertRaises(FieldError, test_gmbh.save)
# F expressions cannot be used to update attributes on objects which do
# not yet exist in the database
acme = Company(
name="The Acme Widget Co.", num_employees=12, num_chairs=5,
ceo=test_gmbh.ceo
)
acme.num_employees = F("num_employees") + 16
self.assertRaises(TypeError, acme.save)
def test_ticket_11722_iexact_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
Employee.objects.create(firstname="Test", lastname="test")
queryset = Employee.objects.filter(firstname__iexact=F('lastname'))
self.assertQuerysetEqual(queryset, ["<Employee: Test test>"])
@skipIfDBFeature('has_case_insensitive_like')
def test_ticket_16731_startswith_lookup(self):
Employee.objects.create(firstname="John", lastname="Doe")
e2 = Employee.objects.create(firstname="Jack", lastname="Jackson")
e3 = Employee.objects.create(firstname="Jack", lastname="jackson")
self.assertQuerysetEqual(
Employee.objects.filter(lastname__startswith=F('firstname')),
[e2], lambda x: x)
self.assertQuerysetEqual(
Employee.objects.filter(lastname__istartswith=F('firstname')).order_by('pk'),
[e2, e3], lambda x: x)
def test_ticket_18375_join_reuse(self):
# Test that reverse multijoin F() references and the lookup target
# the same join. Pre #18375 the F() join was generated first, and the
# lookup couldn't reuse that join.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'))
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering(self):
        # The next query was dict-randomization dependent - if the "gte=1"
        # was seen first, then the F() reused the join generated by the
        # gte lookup; if F() was seen first, then it generated a join the
        # other lookups could not reuse.
qs = Employee.objects.filter(
company_ceo_set__num_chairs=F('company_ceo_set__num_employees'),
company_ceo_set__num_chairs__gte=1)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_kwarg_ordering_2(self):
        # Another case for F() similar to the above. Now we have the same join
        # in two filter kwargs, one in the lhs lookup, one in F. Here, pre
        # #18375, the number of joins generated was random if dict
        # randomization was enabled - that is, the generated query depended
        # on which clause was seen first.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk'),
pk=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 1)
def test_ticket_18375_chained_filters(self):
# Test that F() expressions do not reuse joins from previous filter.
qs = Employee.objects.filter(
company_ceo_set__num_employees=F('pk')
).filter(
company_ceo_set__num_employees=F('company_ceo_set__num_employees')
)
self.assertEqual(str(qs.query).count('JOIN'), 2)
def test_F_object_deepcopy(self):
"""
Make sure F objects can be deepcopied (#23492)
"""
f = F("foo")
g = deepcopy(f)
self.assertEqual(f.name, g.name)
def test_f_reuse(self):
f = F('id')
n = Number.objects.create(integer=-1)
c = Company.objects.create(
name="Example Inc.", num_employees=2300, num_chairs=5,
ceo=Employee.objects.create(firstname="Joe", lastname="Smith")
)
c_qs = Company.objects.filter(id=f)
self.assertEqual(c_qs.get(), c)
# Reuse the same F-object for another queryset
n_qs = Number.objects.filter(id=f)
self.assertEqual(n_qs.get(), n)
# The original query still works correctly
self.assertEqual(c_qs.get(), c)
class ExpressionsNumericTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
        We can fill a value in all objects with another value of the
        same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
],
ordered=False
)
def test_increment_value(self):
"""
We can increment a value of all objects in a query set.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where a value is not equal to the value
        of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
],
ordered=False
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk).update(
float=F('integer') + F('float') * 2), 1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
def test_incorrect_field_expression(self):
with self.assertRaisesRegexp(FieldError, "Cannot resolve keyword u?'nope' into field.*"):
list(Employee.objects.filter(firstname=F('nope')))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_power(self):
        # LH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') ** 2,
float=F('float') ** 1.5)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 1764)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(61.02, places=2))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_righthand_power(self):
        # RH Power arithmetic operation on floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=2 ** F('integer'),
float=1.5 ** F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 4398046511104)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(536.308, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
sday = datetime.date(2010, 6, 25)
stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime + delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date())
self.deltas.append(delta0)
self.delays.append(e0.start -
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed - e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime + delay, end=end, completed=end.date())
self.deltas.append(delta1)
self.delays.append(e1.start -
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed - e1.assigned)
# e2: started three days after assigned, small duration
end = stime + delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday - datetime.timedelta(3), start=stime, end=end,
completed=end.date())
self.deltas.append(delta2)
self.delays.append(e2.start -
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed - e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime + delay, end=end, completed=end.date())
self.deltas.append(delta3)
self.delays.append(e3.start -
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed - e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday - datetime.timedelta(10), start=stime, end=end,
completed=end.date())
self.deltas.append(delta4)
self.delays.append(e4.start -
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed - e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_multiple_query_compilation(self):
# Ticket #21643
queryset = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
q1 = str(queryset.query)
q2 = str(queryset.query)
self.assertEqual(q1, q2)
def test_query_clone(self):
# Ticket #21643
qs = Experiment.objects.filter(end__lt=F('start') + datetime.timedelta(hours=1))
qs2 = qs.all()
list(qs)
list(qs2)
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end') - delta)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start') + delta)]
self.assertEqual(test_set, self.expnames[i + 1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned') + days)]
self.assertEqual(test_set, self.expnames[:i + 1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start') - delay)]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned') + delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned') + delay +
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i + 1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start + delta for e in exps]
expected_ends = [e.end + delta for e in exps]
Experiment.objects.update(start=F('start') + delta, end=F('end') + delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_delta_invalid_op_mult(self):
raised = False
try:
repr(Experiment.objects.filter(end__lt=F('start') * self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to multiply datetime by timedelta.")
def test_delta_invalid_op_div(self):
raised = False
try:
repr(Experiment.objects.filter(end__lt=F('start') / self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to divide datetime by timedelta.")
def test_delta_invalid_op_mod(self):
raised = False
try:
repr(Experiment.objects.filter(end__lt=F('start') % self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to modulo divide datetime by timedelta.")
def test_delta_invalid_op_and(self):
raised = False
try:
repr(Experiment.objects.filter(end__lt=F('start').bitand(self.deltas[0])))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to binary and a datetime with a timedelta.")
def test_delta_invalid_op_or(self):
raised = False
try:
repr(Experiment.objects.filter(end__lt=F('start').bitor(self.deltas[0])))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to binary or a datetime with a timedelta.")
| ulope/django | tests/expressions/tests.py | Python | bsd-3-clause | 28,502 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from unittest import mock
from osc_lib.cli import format_columns
from osc_lib import exceptions
from osc_lib import utils as common_utils
from openstackclient.compute.v2 import server_backup
from openstackclient.tests.unit.compute.v2 import fakes as compute_fakes
from openstackclient.tests.unit.image.v2 import fakes as image_fakes
class TestServerBackup(compute_fakes.TestComputev2):
def setUp(self):
super(TestServerBackup, self).setUp()
# Get a shortcut to the compute client ServerManager Mock
self.app.client_manager.sdk_connection = mock.Mock()
self.app.client_manager.sdk_connection.compute = mock.Mock()
self.sdk_client = self.app.client_manager.sdk_connection.compute
# Get a shortcut to the image client ImageManager Mock
self.images_mock = self.app.client_manager.image
self.images_mock.find_image.reset_mock()
        # Set object attributes to be tested. Could be overridden in a subclass.
        self.attrs = {}
        # Set object methods to be tested. Could be overridden in a subclass.
self.methods = {}
def setup_servers_mock(self, count):
servers = compute_fakes.FakeServer.create_sdk_servers(
attrs=self.attrs,
methods=self.methods,
count=count,
)
# This is the return value for compute_client.find_server()
self.sdk_client.find_server = compute_fakes.FakeServer.get_servers(
servers,
0,
)
return servers
class TestServerBackupCreate(TestServerBackup):
# Just return whatever Image is testing with these days
def image_columns(self, image):
# columnlist = tuple(sorted(image.keys()))
columnlist = (
'id', 'name', 'owner', 'protected', 'status', 'tags', 'visibility'
)
return columnlist
def image_data(self, image):
datalist = (
image['id'],
image['name'],
image['owner_id'],
image['is_protected'],
'active',
format_columns.ListColumn(image.get('tags')),
image['visibility'],
)
return datalist
def setUp(self):
super(TestServerBackupCreate, self).setUp()
# Get the command object to test
self.cmd = server_backup.CreateServerBackup(self.app, None)
self.methods = {
'backup': None,
}
def setup_images_mock(self, count, servers=None):
if servers:
images = image_fakes.create_images(
attrs={
'name': servers[0].name,
'status': 'active',
},
count=count,
)
else:
images = image_fakes.create_images(
attrs={
'status': 'active',
},
count=count,
)
# self.images_mock.get = mock.Mock(side_effect=images)
self.images_mock.find_image = mock.Mock(side_effect=images)
return images
def test_server_backup_defaults(self):
servers = self.setup_servers_mock(count=1)
images = self.setup_images_mock(count=1, servers=servers)
arglist = [
servers[0].id,
]
verifylist = [
('name', None),
('type', None),
('rotate', None),
('wait', False),
('server', servers[0].id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.backup_server.assert_called_with(
servers[0].id,
servers[0].name,
'',
1,
)
self.assertEqual(self.image_columns(images[0]), columns)
self.assertCountEqual(self.image_data(images[0]), data)
def test_server_backup_create_options(self):
servers = self.setup_servers_mock(count=1)
images = self.setup_images_mock(count=1, servers=servers)
arglist = [
'--name', 'image',
'--type', 'daily',
'--rotate', '2',
servers[0].id,
]
verifylist = [
('name', 'image'),
('type', 'daily'),
('rotate', 2),
('server', servers[0].id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.backup_server.assert_called_with(
servers[0].id,
'image',
'daily',
2,
)
self.assertEqual(self.image_columns(images[0]), columns)
self.assertCountEqual(self.image_data(images[0]), data)
@mock.patch.object(common_utils, 'wait_for_status', return_value=False)
def test_server_backup_wait_fail(self, mock_wait_for_status):
servers = self.setup_servers_mock(count=1)
images = self.setup_images_mock(count=1, servers=servers)
self.images_mock.get_image = mock.Mock(
side_effect=images[0],
)
arglist = [
'--name', 'image',
'--type', 'daily',
'--wait',
servers[0].id,
]
verifylist = [
('name', 'image'),
('type', 'daily'),
('wait', True),
('server', servers[0].id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
self.assertRaises(
exceptions.CommandError,
self.cmd.take_action,
parsed_args,
)
self.sdk_client.backup_server.assert_called_with(
servers[0].id,
'image',
'daily',
1,
)
mock_wait_for_status.assert_called_once_with(
self.images_mock.get_image,
images[0].id,
callback=mock.ANY
)
@mock.patch.object(common_utils, 'wait_for_status', return_value=True)
def test_server_backup_wait_ok(self, mock_wait_for_status):
servers = self.setup_servers_mock(count=1)
images = self.setup_images_mock(count=1, servers=servers)
self.images_mock.get_image = mock.Mock(
side_effect=images[0],
)
arglist = [
'--name', 'image',
'--type', 'daily',
'--wait',
servers[0].id,
]
verifylist = [
('name', 'image'),
('type', 'daily'),
('wait', True),
('server', servers[0].id),
]
parsed_args = self.check_parser(self.cmd, arglist, verifylist)
# In base command class ShowOne in cliff, abstract method take_action()
# returns a two-part tuple with a tuple of column names and a tuple of
# data to be shown.
columns, data = self.cmd.take_action(parsed_args)
self.sdk_client.backup_server.assert_called_with(
servers[0].id,
'image',
'daily',
1,
)
mock_wait_for_status.assert_called_once_with(
self.images_mock.get_image,
images[0].id,
callback=mock.ANY
)
self.assertEqual(self.image_columns(images[0]), columns)
self.assertCountEqual(self.image_data(images[0]), data)
| openstack/python-openstackclient | openstackclient/tests/unit/compute/v2/test_server_backup.py | Python | apache-2.0 | 8,309 |
# coding: utf8
import logging
from django.conf import settings
from django.utils import six
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.generic import View
from django.shortcuts import redirect, get_object_or_404
from django.forms import ValidationError
from django.db.models.loading import get_model
from getpaid.backends.epaydk import PaymentProcessor
from getpaid.signals import order_additional_validation
from getpaid.utils import qs_to_ordered_params
from .forms import EpaydkOnlineForm, EpaydkCancellForm
if six.PY3:
unicode = str
logger = logging.getLogger(__name__)
class CallbackView(View):
"""
    This view answers the Epay.dk online request that acknowledges a payment
    status change.
    The most important logic of this view is delegated
    to the ``PaymentProcessor.online()`` method.
"""
http_method_names = ['get', ]
def get(self, request, *args, **kwargs):
cb_secret_path = PaymentProcessor\
.get_backend_setting('callback_secret_path', '')
if cb_secret_path:
if not kwargs.get('secret_path', ''):
logger.debug("empty secret path")
return HttpResponseBadRequest('400 Bad Request')
if cb_secret_path != kwargs.get('secret_path', ''):
logger.debug("invalid secret path")
return HttpResponseBadRequest('400 Bad Request')
form = EpaydkOnlineForm(request.GET)
if form.is_valid():
params = qs_to_ordered_params(request.META['QUERY_STRING'])
if PaymentProcessor.is_received_request_valid(params):
try:
PaymentProcessor.confirmed(form.cleaned_data)
return HttpResponse('OK')
except AssertionError:
pass
else:
logger.error("MD5 hash check failed")
logger.error('CallbackView received invalid request')
logger.debug("GET: %s", request.GET)
logger.debug("form errors: %s", form.errors)
return HttpResponseBadRequest('400 Bad Request')
class AcceptView(View):
"""
This view is called after the payment is submitted for processing.
Redirects to GETPAID_SUCCESS_URL_NAME if it's defined
otherwise to getpaid-success-fallback.
"""
http_method_names = ['get', ]
def get(self, request):
Payment = get_model('getpaid', 'Payment')
form = EpaydkOnlineForm(request.GET)
if not form.is_valid():
logger.debug("EpaydkOnlineForm not valid")
logger.debug("form errors: %s", form.errors)
return HttpResponseBadRequest("Bad request")
params = qs_to_ordered_params(request.META['QUERY_STRING'])
if not PaymentProcessor.is_received_request_valid(params):
logger.error("MD5 hash check failed")
return HttpResponseBadRequest("Bad request")
payment = get_object_or_404(Payment,
id=form.cleaned_data['orderid'])
try:
order_additional_validation\
.send(sender=self, request=self.request,
order=payment.order,
backend=PaymentProcessor.BACKEND)
except ValidationError:
logger.debug("order_additional_validation raised ValidationError")
return HttpResponseBadRequest("Bad request")
try:
PaymentProcessor.accepted_for_processing(payment_id=payment.id)
except AssertionError as exc:
logger.debug("PaymentProcessor.accepted_for_processing"
" raised AssertionError %s", exc, exc_info=1)
return HttpResponseBadRequest("Bad request")
url_name = getattr(settings, 'GETPAID_SUCCESS_URL_NAME', None)
if url_name:
return redirect(url_name, pk=payment.order.pk)
return redirect('getpaid-success-fallback', pk=payment.pk)
class CancelView(View):
"""
This view is called after the payment is submitted for processing.
Redirects to GETPAID_FAILURE_URL_NAME if it's defined
otherwise to getpaid-failure-fallback.
"""
http_method_names = ['get', ]
def get(self, request):
"""
        Receives params: ``orderid`` (an int payment id) and ``error`` (a negative int).
@warning: epay.dk doesn't send hash param!
"""
Payment = get_model('getpaid', 'Payment')
form = EpaydkCancellForm(request.GET)
if not form.is_valid():
logger.debug("EpaydkCancellForm not valid")
logger.debug("form errors: %s", form.errors)
return HttpResponseBadRequest("Bad request")
payment = get_object_or_404(Payment, id=form.cleaned_data['orderid'])
try:
order_additional_validation\
.send(sender=self, request=self.request,
order=payment.order,
backend=PaymentProcessor.BACKEND)
except ValidationError:
logger.debug("order_additional_validation raised ValidationError")
return HttpResponseBadRequest("Bad request")
PaymentProcessor.cancelled(payment_id=payment.id)
url_name = getattr(settings, 'GETPAID_FAILURE_URL_NAME', None)
if url_name:
return redirect(url_name, pk=payment.order.pk)
return redirect('getpaid-failure-fallback', pk=payment.pk)
| pawciobiel/django-getpaid | getpaid/backends/epaydk/views.py | Python | mit | 5,437 |
"""
byceps.blueprints.api.v1.tourney.match.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from __future__ import annotations
from datetime import datetime
from itertools import chain
from typing import Any, Iterator, Optional
from flask import abort, jsonify, request, url_for
from marshmallow import ValidationError
from marshmallow.schema import SchemaMeta
from .......services.orga_team import service as orga_team_service
from .......services.tourney import (
match_comment_service as comment_service,
match_service,
)
from .......services.tourney.transfer.models import (
Match,
MatchID,
MatchComment,
MatchCommentID,
)
from .......services.user import service as user_service
from .......services.user.transfer.models import User
from .......signals import tourney as tourney_signals
from .......typing import UserID
from .......util.framework.blueprint import create_blueprint
from .......util.views import respond_created, respond_no_content
from .....decorators import api_token_required
from .schemas import (
CreateMatchCommentRequest,
ModerateMatchCommentRequest,
UpdateMatchCommentRequest,
)
blueprint = create_blueprint('tourney_match_comments', __name__)
@blueprint.get('/match_comments/<uuid:comment_id>')
@api_token_required
def get_comment(comment_id):
"""Return the comment."""
comment = _get_comment_or_404(comment_id)
party_id = request.args.get('party_id')
if party_id:
user_ids = set(_get_user_ids_for_comment(comment))
orga_ids = orga_team_service.select_orgas_for_party(user_ids, party_id)
else:
orga_ids = set()
comment_dict = _comment_to_json(comment, orga_ids)
return jsonify(comment_dict)
@blueprint.get('/matches/<uuid:match_id>/comments')
@api_token_required
def get_comments_for_match(match_id):
"""Return the comments on the match."""
match = _get_match_or_404(match_id)
comments = comment_service.get_comments(match.id, include_hidden=True)
party_id = request.args.get('party_id')
if party_id:
user_ids = set(
chain.from_iterable(map(_get_user_ids_for_comment, comments))
)
orga_ids = orga_team_service.select_orgas_for_party(user_ids, party_id)
else:
orga_ids = set()
comment_dicts = [
_comment_to_json(comment, orga_ids) for comment in comments
]
return jsonify(
{
'comments': comment_dicts,
}
)
def _get_user_ids_for_comment(comment: MatchComment) -> Iterator[UserID]:
yield comment.created_by.id
last_editor = comment.last_edited_by
if last_editor:
yield last_editor.id
moderator = comment.hidden_by
if moderator:
yield moderator.id
def _comment_to_json(
comment: MatchComment, orga_ids: set[UserID]
) -> dict[str, Any]:
creator = comment.created_by
last_editor = comment.last_edited_by
moderator = comment.hidden_by
return {
'comment_id': str(comment.id),
'match_id': str(comment.match_id),
'created_at': comment.created_at.isoformat(),
'creator': _user_to_json(creator, orga_ids),
'body_text': comment.body_text,
'body_html': comment.body_html,
'last_edited_at': _potential_datetime_to_json(comment.last_edited_at),
'last_editor': _potential_user_to_json(last_editor, orga_ids),
'hidden': comment.hidden,
'hidden_at': _potential_datetime_to_json(comment.hidden_at),
'hidden_by': _potential_user_to_json(moderator, orga_ids),
}
def _potential_datetime_to_json(dt: Optional[datetime]) -> Optional[str]:
return dt.isoformat() if (dt is not None) else None
def _potential_user_to_json(
user: Optional[User], orga_ids: set[UserID]
) -> Optional[dict[str, Any]]:
return _user_to_json(user, orga_ids) if (user is not None) else None
def _user_to_json(user: User, orga_ids: set[UserID]) -> dict[str, Any]:
return {
'user_id': str(user.id),
'screen_name': user.screen_name,
'suspended': user.suspended,
'deleted': user.deleted,
'avatar_url': user.avatar_url,
'is_orga': user.id in orga_ids,
}
blueprint.add_url_rule(
'/match_comments/<uuid:comment_id>',
endpoint='view',
build_only=True,
)
@blueprint.post('/match_comments')
@api_token_required
@respond_created
def create():
"""Create a comment on a match."""
req = _parse_request(CreateMatchCommentRequest)
match = match_service.find_match(req['match_id'])
if not match:
abort(400, 'Unknown match ID')
creator = user_service.find_active_user(req['creator_id'])
if not creator:
abort(400, 'Creator ID does not reference an active user.')
body = req['body'].strip()
comment = comment_service.create_comment(match.id, creator.id, body)
tourney_signals.match_comment_created.send(None, comment_id=comment.id)
return url_for('.view', comment_id=comment.id)
@blueprint.patch('/match_comments/<uuid:comment_id>')
@api_token_required
@respond_no_content
def update(comment_id):
"""Update a comment on a match."""
comment = _get_comment_or_404(comment_id)
req = _parse_request(UpdateMatchCommentRequest)
editor = user_service.find_active_user(req['editor_id'])
if not editor:
abort(400, 'Editor ID does not reference an active user.')
body = req['body'].strip()
comment_service.update_comment(comment.id, editor.id, body)
@blueprint.post('/match_comments/<uuid:comment_id>/flags/hidden')
@api_token_required
@respond_no_content
def hide(comment_id):
"""Hide the match comment."""
comment = _get_comment_or_404(comment_id)
req = _parse_request(ModerateMatchCommentRequest)
initiator = user_service.find_active_user(req['initiator_id'])
if not initiator:
abort(400, 'Initiator ID does not reference an active user.')
comment_service.hide_comment(comment.id, initiator.id)
@blueprint.delete('/match_comments/<uuid:comment_id>/flags/hidden')
@api_token_required
@respond_no_content
def unhide(comment_id):
"""Un-hide the match comment."""
comment = _get_comment_or_404(comment_id)
req = _parse_request(ModerateMatchCommentRequest)
initiator = user_service.find_active_user(req['initiator_id'])
if not initiator:
abort(400, 'Initiator ID does not reference an active user.')
comment_service.unhide_comment(comment.id, initiator.id)
def _get_match_or_404(match_id: MatchID) -> Match:
match = match_service.find_match(match_id)
if match is None:
abort(404)
return match
def _get_comment_or_404(comment_id: MatchCommentID) -> MatchComment:
comment = comment_service.find_comment(comment_id)
if comment is None:
abort(404)
return comment
def _parse_request(schema_class: SchemaMeta) -> dict[str, Any]:
schema = schema_class()
request_data = request.get_json()
try:
req = schema.load(request_data)
except ValidationError as e:
abort(400, str(e.normalized_messages()))
return req
| homeworkprod/byceps | byceps/blueprints/api/v1/tourney/match/comments/views.py | Python | bsd-3-clause | 7,187 |
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_access_control.models import PrivilegeCodes
from django_irods.icommands import SessionException
class Command(BaseCommand):
help = "Add quotaUserName AVU to all resources in iRODS as needed"
def handle(self, *args, **options):
resources = BaseResource.objects.all()
for res in resources:
try:
if not res.raccess.get_quota_holder():
# if quota_holder is not set for the resource, set it to resource's creator
                    # for some resources, for some reason, the creator of the resource is not the
                    # owner, hence not allowed to be set as quota holder. This is an artifact that
                    # needs to be patched since the person who initially uploaded the resource
                    # should be the owner of the resource. Hence, add the resource creator to the
                    # owner list if the creator is not already in the owner list
if not res.creator.uaccess.owns_resource(res):
first_owner = res.raccess.owners.first()
if first_owner:
first_owner.uaccess.share_resource_with_user(res, res.creator,
PrivilegeCodes.OWNER)
else:
# this resource has no owner, which should never be allowed and never
# happen
print res.short_id + ' does not have an owner'
continue
res.raccess.set_quota_holder(res.creator, res.creator)
except SessionException:
# this is needed for migration testing where some resources copied from www
# for testing do not exist in the iRODS backend, hence need to skip these
                # test artifacts
continue
except AttributeError:
# when federation is not set up correctly, istorage does not have a session
# attribute, hence raise AttributeError - ignore for testing and it should not
# happen in production where federation is set up properly
continue
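# Usage sketch (assumes this file is installed as a Django management command,
# e.g. under theme/management/commands/):
#
#     python manage.py add_quota_avu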
| RENCI/xDCIShare | theme/management/commands/add_quota_avu.py | Python | bsd-3-clause | 2,389 |
# coding=utf-8
'''
Created on 2013-8-15
Workflow coordination
@author: gudh
'''
import traceback,os
import bookcrawl,bookshot,bookorm,bookconfig,bookimg,bookupload
def get_book_ids(urls=["http://e.jd.com/ebook.html"]):
    '''Get all book ids from the given pages'''
idss = []
for url in urls:
content = bookcrawl.get_url_content(url)
ids = bookcrawl.regex_all('''href="http://e.jd.com/(\d{5,10}).html"''', content, 1)
print ids
idss.extend(ids)
return set(idss)
def shot_cates(args):
    '''Crawl all books within the given page range of a category'''
urls = []
    # Collect all book ids
for i in range(args[1], args[2]):
urls.append(args[0] % i)
book_ids = get_book_ids(urls)
err_num = 0
    # Loop over the books and shoot each one
for book_id in book_ids:
if not shot_one_book(book_id):
err_num += 1
if err_num >= 5:
print "连续失败书过多,结束拍书"
return
def shot_one_book(book_id):
    '''Crawl one book; return whether it succeeded'''
try:
print "=" * 50
        # Skip if the book already exists
if bookorm.exist_book(book_id):
print "%s has exist, continue" % book_id
return True
        # Start crawling
print "begin crawl : %s" % book_id
book = bookcrawl.crawl_book(book_id)
if book != None:
print book.bookName
if bookcrawl.add_book_to_lebook(book_id):
print "add book to lebook ok: " + book_id
if bookorm.insert_book_chapter(book):
print "insert book ok: %s" % book_id
                    d_t = book.bookSize / 50 # estimate download time from file size, 50 KB per second
if d_t < 15:
d_t = 15
if not bookshot.shot_first_book(book, down_time=d_t):
return False
else:
print "insert book fail: %s" % book_id
else:
print "add book to lebook fail: " + book_id
else:
print "crawl book fail: " + book_id
except:
traceback.print_stack()
return False
return True
def shot_no_success(id_seq_file):
    '''Shoot books that were already added but did not succeed'''
lines = open(id_seq_file, "r").read().split("\n")
infos = [line.split("\t") for line in lines if line]
for info in infos:
try:
mode = bookorm.get_book(info[0])
loc = int(info[1])
print "=" * 50
print mode.bookName
print "shot point book nid: " + mode.nid + " loc:" + str(loc)
bookshot.shot_point_book(mode, loc)
except Exception, e:
traceback.print_exc()
print e
def complete(id_seq_file):
    '''Backfill data deleted from the database; temporary use'''
lines = open(id_seq_file, "r").read().split("\n")
infos = [line.split("\t") for line in lines if line]
for info in infos:
book_id = info[3][:-1]
book = bookcrawl.crawl_book(book_id)
print book.bookName
book.createTime = "2013-08-10 00:00:00"
nid = book.nid
path = bookconfig.rootpath + "20130810/content/l/" + nid[0:2] + "/" + nid[2:4] + "/" + nid[4:] + "/1/"
ll = [p for p in os.listdir(path) if p.endswith(".jpg")]
print "get imgCount", len(ll)
book.imgCount = int(len(ll))
book.upTime()
bookorm.insert_book_chapter(book)
def move_zip_book(book, zip_path):
    '''Publish the book'''
print "=" * 50
print u"%s %s" % (book.nid, book.bookName)
print u"1、开始分章移动更新"
if bookimg.move_update_book(book):
print u"分章移动更新更新成功"
zip_file = "%s/nid_%s.zip" % (zip_path, book.nid)
print u"2、开始打包zip: %s" % zip_file
if bookimg.zip_book(book, zip_file):
print u"打包书籍成功"
name = os.path.basename(zip_file)
line = "/ftp/ebook_zip/%s\t%s\t%d\n" % (name, book.createTime[0:10].replace("-", ""), (2 * book.imgCount + 1))
file = open(bookconfig.uploadfile, "a")
file.write(line)
file.close()
return True
else:
print u"打包书籍失败"
else:
print "分章移动更新失败"
return False
def upload_ftp_book(book):
    '''Upload to FTP'''
zip_file = bookconfig.rootpath + book.createTime[0:10].replace("-", "") + ("nid_%s.zip" % book.nid)
ftp_url = "/ebook_zip/%s" % zip_file
print u"3、开始上传到ftp: %s" % ftp_url
if bookupload.upload_update_book(book, zip_file, ftp_url):
print u"上传ftp成功"
print u"3、开始发送书籍信息: %s" % ftp_url
if bookupload.push_update_book(book):
print u"发送书籍信息成功"
return True
else:
print u"发送数据信息失败"
else:
print u"上传ftp失败"
    return False
| Yhzhtk/bookcatch | bookrun.py | Python | gpl-2.0 | 4,970 |
""" Modified version of build_scripts that handles building scripts from functions.
"""
from __future__ import division, absolute_import, print_function
from distutils.command.build_scripts import build_scripts as old_build_scripts
from numpy.distutils import log
from numpy.distutils.misc_util import is_string
class build_scripts(old_build_scripts):
def generate_scripts(self, scripts):
new_scripts = []
func_scripts = []
for script in scripts:
if is_string(script):
new_scripts.append(script)
else:
func_scripts.append(script)
if not func_scripts:
return new_scripts
build_dir = self.build_dir
self.mkpath(build_dir)
for func in func_scripts:
script = func(build_dir)
if not script:
continue
if is_string(script):
log.info(" adding '%s' to scripts" % (script,))
new_scripts.append(script)
else:
[log.info(" adding '%s' to scripts" % (s,)) for s in script]
new_scripts.extend(list(script))
return new_scripts
def run(self):
if not self.scripts:
return
self.scripts = self.generate_scripts(self.scripts)
# Now make sure that the distribution object has this list of scripts.
# setuptools' develop command requires that this be a list of filenames,
# not functions.
self.distribution.scripts = self.scripts
return old_build_scripts.run(self)
def get_source_files(self):
from numpy.distutils.misc_util import get_script_files
return get_script_files(self.scripts)
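# Hypothetical sketch of a function-valued ``scripts`` entry that
# ``generate_scripts`` above accepts: the function is called with the build
# directory and returns the path(s) of the script(s) it generated there.
#
#     def make_hello_script(build_dir):
#         import os
#         path = os.path.join(build_dir, 'hello')
#         with open(path, 'w') as f:
#             f.write('#!/usr/bin/env python\nprint("hello")\n')
#         return path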
| DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/distutils/command/build_scripts.py | Python | mit | 1,731 |
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.vyos.vyos import vyos_provider_spec
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
elif self._play_context.connection == 'local':
provider = load_provider(vyos_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'vyos'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
pc.timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
else:
return {'failed': True, 'msg': 'Connection type %s is not valid for this module' % self._play_context.connection}
# make sure we are in the right cli context which should be
        # enable mode and not config mode
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('abort')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| wrouesnel/ansible | lib/ansible/plugins/action/vyos.py | Python | gpl-3.0 | 3,846 |
import string,math
import db_base as base
######################
#initial
def read_raw_time_data(file_name,sepper=','):
fin=open(file_name,'r');
length=fin.readline().count(',')+1;
fin.seek(0);
    print 'number of items per line:',length
res=[];
for line in fin:
items=line.split(sepper);
for i in range(length):
if items[i]!='' and items[i]!='\n':
res.append((i+1,float(items[i])))
fin.close()
return res;
def trans_raw_time_to_lists(data):
dict={}
for nid,time in data:
dict.setdefault(nid,[]).append(time)
for line in dict.values():
line.sort()
return [dict[k] for k in sorted(dict.keys())]
def read_raw_cue_data(name,sepper=','):
fin=open(name,'r');
length=fin.readline().count(',')+1;
fin.seek(0);
print length,'each line'
res=[];
last_time=0.0;
last_state=0;
for line in fin:
items=line[:-1].split(sepper)
time=float(items[0]);
state=int(items[1]);
if state==3:
res.append([last_state,last_time,time,time-last_time]);
last_time=time;
last_state=state;
fin.close();
for i in range(1,len(res)):
res[i-1].append(res[i][1]-res[i-1][2])
res[-1].append(0)
return res;
######################
#neuron: IO
#read list format neuron
def read_lists_from_db_time(table_name,block_size=5000,conn=None):
#read data from db
if conn==None:
con=get_db_con();
else:
con=conn
cursor=con.cursor();
cursor.execute("select count(*) from "+table_name+"_neuron")
n=cursor.fetchone()[0];
res=[[] for i in range(n)]
cursor.execute("select * from "+table_name+" order by time")
count=0;
block=cursor.fetchmany(block_size)
while block:
for (nid,time) in block:
res[nid-1].append(time)
count+=block_size;
if count%50000==0:
print count,'pieces processed'
block=cursor.fetchmany(block_size)
if conn==None:
con.close()
print count,'pieces processed'
return res;
#read list format neuron
def read_lists_from_list_file(file_name):
f=open(file_name,'r')
res=[]
for line in f:
l=line.split(' ')
for i in range(len(l)):
l[i]=float(l[i])
res.append(l)
f.close()
return res
def write_to_one_file(res,filename):
#write 2D data to file
fout=open(filename,'w')
for line in res:
l=len(line)
if l!=0:
fout.write(str(line[0]))
for i in range(1,l):
fout.write(' ')
fout.write(str(line[i]))
fout.write('\n')
fout.close();
def write_to_n_files(res,filename_prefix,ext):
#write 2D data to n files (each line for a file)
if ext[0]!='.':
ext='.'+ext
count=0;
for line in res:
count+=1
fout=open(filename_prefix+str(count)+ext,'w')
l=len(line)
if l!=0:
fout.write(str(line[0]))
for i in range(1,l):
fout.write(' ')
fout.write(str(line[i]))
fout.write('\n')
fout.close();
def export_to_file(data,all_name,sep_name_prefix=None,sep_name_suffix=None):
print 'writing to one file'
write_to_one_file(data,all_name)
    if sep_name_prefix!=None and sep_name_suffix!=None:
print 'writing to n files'
write_to_n_files(data,sep_name_prefix,sep_name_suffix)
#####################
#update neuron
def cal_neuron_dif(data,mode_unit_size=1e-3):
res=[]
nid=0;
for line in data:
nid+=1
n=len(line)-1
dif_min=line[1]-line[0]
dif_max=line[1]-line[0]
s1=0; s2=0; s3=0; s4=0;
occur={}
zero_count=0
for i in range(n):
t=line[i+1]-line[i]
v=int(t/mode_unit_size)
if v in occur:
occur[v]+=1
else:
occur[v]=1
if t==0:
zero_count+=1
elif t<dif_min:
dif_min=t
elif t>dif_max:
dif_max=t
s1+=t; s2+=t**2; s3+=t**3; s4+=t**4;
s1/=float(n); s2/=float(n); s3/=float(n); s4/=float(n);
        #moment related: central moments from raw moments
        #std=sqrt(E[X^2]-E[X]^2), cm3=E[X^3]-3E[X^2]E[X]+2E[X]^3,
        #cm4=E[X^4]-4E[X^3]E[X]+6E[X^2]E[X]^2-3E[X]^4
mean=s1
std=math.sqrt(s2-s1**2)
cm3=s3-3*s2*s1+2*s1**3
cm4=s4-4*s3*s1+6*s2*s1**2-3*s1**4;
#mode(most often)
(mode,freq)=max(occur.iteritems(),key=lambda x:x[1])
mode=(mode+0.5)*mode_unit_size
# print mode,freq
t={'zero_count':zero_count,'min':dif_min,'max':dif_max,'mode':mode,'mode_count':freq,
'mean':mean,'std':std,'cm3':cm3,'cm4':cm4}
res.append(t)
return res
#####################
#dif
'''Get the unsorted list of differences from line1 to line2 (both line1 and line2 are non-decreasing).
(A 0 difference is NOT returned in the list, but its number of occurrences is returned separately.)
One difference value: for each i, find the smallest line2[j] > line1[i]; the difference is line2[j]-line1[i].
noJump=True: the value (line2[j]-line1[i]) is considered only when line1[i] is also the largest value in line1 smaller than line2[j],
i.e. no other k satisfies line1[i]<line1[k]<line2[j]; every k with line1[k]!=line1[i] satisfies line1[k]<line1[i] or line1[k]>line2[j].
'''
def cal_dif_list(line1,line2,noJump=False,epsilon=1e-6):
j=0;
length1=len(line1)
length2=len(line2)
res=[]
equal_count=0;
n_dig=int(math.ceil(-math.log10(epsilon)))
for i in range(length1):
v=line1[i]
while j<length2 and line2[j]<=v+epsilon:
if abs(line2[j]-v)<=epsilon:
equal_count+=1
j+=1
if j==length2:
break
if noJump and i+1<length1 and line1[i+1]<line2[j] and v<line1[i+1]:
continue
res.append(round(line2[j]-v,n_dig))
return (equal_count,res)
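#A minimal worked example (illustrative only, not part of the original
#pipeline; the helper name _example_cal_dif_list is an assumption) showing
#how the noJump flag described above suppresses jumped-over pairs.
def _example_cal_dif_list():
    line1=[1.0,2.0]
    line2=[3.0]
    #noJump=False pairs both 1.0 and 2.0 with 3.0, returning (0,[2.0,1.0])
    print cal_dif_list(line1,line2,noJump=False)
    #noJump=True skips 1.0 because 2.0 also lies below 3.0, returning (0,[1.0])
    print cal_dif_list(line1,line2,noJump=True)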
'''Calculate the statistical information of the given UNSORTED dif list.
(dif is sorted in place by this function.)
'''
def cal_dif_list_info(zero_count,dif,quantile_points,mode_unit_size):
dif.sort()
n=len(dif)
s1=0; s2=0; s3=0; s4=0;
s_min=dif[0]; s_max=dif[-1];
occur=[]
last=-1
c=0
    for t in dif:
        s1+=t; s2+=t**2; s3+=t**3; s4+=t**4;
        if int(t/mode_unit_size)==last:
            c+=1
        else:
            if c>0:
                occur.append((last,c))
            c=1
            last=int(t/mode_unit_size)
    occur.append((last,c)) #close the final run so the mode can come from the last bucket
    s1/=float(n); s2/=float(n); s3/=float(n); s4/=float(n);
#moment
mean=s1
std=math.sqrt(s2-s1**2)
cm3=s3-3*s2*s1+2*s1**3
cm4=s4-4*s3*s1+6*s2*s1**2-3*s1**4
#quantile
quantile=[dif[int(round(n*v))] for v in quantile_points]
#mode
(mode,freq)=max(occur,key=lambda x:x[1])
mode=(mode+0.5)*mode_unit_size
return {'count':n,'zero_count':zero_count,'min':s_min,'max':s_max,
'mode':mode,'mode_count':freq,
'mean':mean,'std':std,'cm3':cm3,'cm4':cm4,'quantile':quantile}
def cal_col_dif(data,quantile_points,noJump=False,mode_unit_size=1e-3,epsilon=1e-6,col_file_prefix=None):
length=len(data)
res=[[() for i in range(length)] for i in range(length)];
for i in range(length):
print 'Processing correlation from',(i+1)
l=[]
for j in range(length):
(zero_count,dif)=cal_dif_list(data[i],data[j],noJump,epsilon)
if col_file_prefix!=None:
l.append(dif)
res[i][j]=cal_dif_list_info(zero_count,dif,quantile_points,mode_unit_size)
if col_file_prefix!=None:
write_to_one_file(l,col_file_prefix+str(i+1)+'.txt')
return res
#####################
#final
def init_db(basic_table_name,quantile_points,mode_unit_size=1e-3,epsilon=1e-6,col_file_prefix=None,con=None):
print 'creating tables:'
base.create_tables(basic_table_name,quantile_points,con)
print 'importing data:'
#data=base.import_to_db('R108-122911-spike.csv',read_raw_time_data,basic_table_name)
data=read_raw_time_data('R108-122911-spike.csv')
base.insert_template(data,basic_table_name,con)
#base.import_to_db('R108-122911-beh.csv',read_raw_cue_data,basic_table_name+'_beh(type,begin,end,duration,rest)')
base.insert_template(read_raw_cue_data('R108-122911-beh.csv'),
basic_table_name+'_beh(type,begin,end,duration,rest)',con)
print 'initializing neuron info:'
base.init_neuron(basic_table_name,con)
data=trans_raw_time_to_lists(data)
neu_dif_data=cal_neuron_dif(data,mode_unit_size)
base.update_neuron_dif(neu_dif_data,basic_table_name,con)
#del neu_dif_data
print 'writing difference matrix(jump):'
diff_j=cal_col_dif(data,quantile_points,False,mode_unit_size,epsilon,
col_file_prefix+'j_' if col_file_prefix else None)
base.insert_dif(diff_j,basic_table_name,False,con)
#del diff_j
print 'writing difference matrix(no-jump):'
diff_nj=cal_col_dif(data,quantile_points,True,mode_unit_size,epsilon,
col_file_prefix+'nj_' if col_file_prefix else None)
base.insert_dif(diff_nj,basic_table_name,True,con)
#del diff_nj
if __name__=='__main__':
basic_table_name='r108_122911'
quantile_points=[0.05*i for i in range(1,19)]
#init_db(basic_table_name,quantile_points)
#print 'reading data lists from db'
#data=read_db_time_data(basic_table_name)
#print 'reading data lists from file'
#data=read_file_time_data('./all.txt')
#data=trans_raw_time_to_lists(read_raw_time_data('R108-122911-spike.csv'))
#export_to_file(data,'all.txt','','.txt')
#base.update_neuron_dif(data,basic_table_name)
cal_dif(data,basic_table_name)
| yanxiangtianji/Neuron | dataInit/db.py | Python | gpl-2.0 | 8,453 |
from flask import abort, jsonify
from sqlalchemy.orm import with_polymorphic
from .api_blueprint import api_blueprint
from app.models.quiz import Quiz
from app.models.quiz_section import QuizSection
from app.models.question \
import Question, TextQuestion, SingleChoiceQuestion, QuestionChoice
from app.views.auth import auth_blueprint
from app.db import db
@api_blueprint.route('/quiz/<int:id>', methods=['GET'])
def quiz(id):
    quiz = Quiz.query.filter_by(id=id).first()
    if quiz is None:
        abort(404)
quiz_dict = {
'title': quiz.title,
'description': quiz.description,
'quizSections': []
}
quiz_sections = QuizSection.query.filter_by(quiz_id=quiz.id)
for quiz_section in quiz_sections:
quiz_section_dict = {
'title': quiz_section.title,
'description': quiz_section.description,
'questions': []
}
        # Query Question rows together with their TextQuestion /
        # SingleChoiceQuestion subclass columns in one polymorphic query.
        question_polymorphic = with_polymorphic(
            Question, [TextQuestion, SingleChoiceQuestion])
questions = (db.session.query(question_polymorphic)
.filter_by(section_id=quiz_section.id).all())
for question in questions:
question_dict = {
'type': question.type,
'question': question.question,
'explanation': question.explanation,
'complement': question.complement,
'answer': question.answer
}
if question.type == 'single_choice_question':
question_dict['choices'] = []
choices = (QuestionChoice.query
.filter_by(question_id=question.id).all())
for choice in choices:
question_dict['choices'].append(choice.choice)
quiz_section_dict['questions'].append(question_dict)
quiz_dict['quizSections'].append(quiz_section_dict)
return jsonify(quiz_dict)
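# Illustrative shape of the JSON returned by the endpoint above (values are
# placeholders; the keys mirror the dictionaries built in the view):
# {
#   "title": "...",
#   "description": "...",
#   "quizSections": [
#     {"title": "...", "description": "...",
#      "questions": [
#        {"type": "single_choice_question", "question": "...",
#         "explanation": "...", "complement": "...", "answer": "...",
#         "choices": ["...", "..."]}]}]
# }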
| Frederick-S/quiz | app/api_v1/quiz.py | Python | mit | 1,949 |
from django.contrib.auth.models import User
from loginurl.models import Key
class LoginUrlBackend:
"""
Authentication backend that checks the given ``key`` to a record in the
``Key`` model. If the record is found, then ``is_valid()`` method is called
to check if the key is still valid.
"""
def authenticate(self, key):
"""
Check if the key is valid.
"""
data = Key.objects.filter(key=key)
if len(data) == 0:
return None
data = data[0]
if not data.is_valid():
return None
return data.user
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
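# A minimal sketch of wiring this backend into a Django project (illustrative
# only; the dotted path assumes this module is importable as loginurl.backends):
#
#   settings.py:
#       AUTHENTICATION_BACKENDS = (
#           'loginurl.backends.LoginUrlBackend',
#           'django.contrib.auth.backends.ModelBackend',
#       )
#
#   view code:
#       from django.contrib.auth import authenticate, login
#       user = authenticate(key=key_from_url)  # None for unknown or invalid keys
#       if user is not None:
#           login(request, user)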
| vanschelven/cmsplugin-journal | loginurl/backends.py | Python | bsd-3-clause | 767 |
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the graph for sampling from Coconet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from magenta.models.coconet import lib_graph
from magenta.models.coconet import lib_hparams
import numpy as np
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
class CoconetSampleGraph(object):
"""Graph for Gibbs sampling from Coconet."""
def __init__(self, chkpt_path, placeholders=None):
"""Initializes inputs for the Coconet sampling graph.
Does not build or restore the graph. That happens lazily if you call run(),
or explicitly using instantiate_sess_and_restore_checkpoint.
Args:
chkpt_path: Checkpoint directory for loading the model.
Uses the latest checkpoint.
placeholders: Optional placeholders.
"""
self.chkpt_path = chkpt_path
self.hparams = lib_hparams.load_hparams(chkpt_path)
if placeholders is None:
self.placeholders = self.get_placeholders()
else:
self.placeholders = placeholders
self.samples = None
self.sess = None
def get_placeholders(self):
hparams = self.hparams
return dict(
pianorolls=tf.placeholder(
tf.bool,
[None, None, hparams.num_pitches, hparams.num_instruments],
"pianorolls"),
# The default value is only used for checking if completion masker
          # should be invoked. It can't be used directly as the batch size
          # and length of pianorolls are unknown at graph-construction time.
outer_masks=tf.placeholder_with_default(
np.zeros(
(1, 1, hparams.num_pitches, hparams.num_instruments),
dtype=np.float32),
[None, None, hparams.num_pitches, hparams.num_instruments],
"outer_masks"),
sample_steps=tf.placeholder_with_default(0, (), "sample_steps"),
total_gibbs_steps=tf.placeholder_with_default(
0, (), "total_gibbs_steps"),
current_step=tf.placeholder_with_default(0, (), "current_step"),
temperature=tf.placeholder_with_default(0.99, (), "temperature"))
@property
def inputs(self):
return self.placeholders
def make_outer_masks(self, outer_masks, input_pianorolls):
"""Returns outer masks, if all zeros created by completion masking."""
outer_masks = tf.to_float(outer_masks)
# If outer_masks come in as all zeros, it means there's no masking,
# which also means nothing will be generated. In this case, use
# completion mask to make new outer masks.
outer_masks = tf.cond(
tf.reduce_all(tf.equal(outer_masks, 0)),
lambda: make_completion_masks(input_pianorolls),
lambda: outer_masks)
return outer_masks
def build_sample_graph(self, input_pianorolls=None, outer_masks=None,
total_gibbs_steps=None):
"""Builds the tf.while_loop based sampling graph.
Args:
input_pianorolls: Optional input pianorolls override. If None, uses the
pianorolls placeholder.
outer_masks: Optional input outer_masks override. If None, uses the
outer_masks placeholder.
total_gibbs_steps: Optional input total_gibbs_steps override. If None,
uses the total_gibbs_steps placeholder.
Returns:
The output op of the graph.
"""
if input_pianorolls is None:
input_pianorolls = self.inputs["pianorolls"]
if outer_masks is None:
outer_masks = self.inputs["outer_masks"]
tt = tf.shape(input_pianorolls)[1]
sample_steps = tf.to_float(self.inputs["sample_steps"])
if total_gibbs_steps is None:
total_gibbs_steps = self.inputs["total_gibbs_steps"]
temperature = self.inputs["temperature"]
input_pianorolls = tf.to_float(input_pianorolls)
outer_masks = self.make_outer_masks(outer_masks, input_pianorolls)
    # Calculate total_gibbs_steps as time steps * num_instruments if not given.
total_gibbs_steps = tf.cond(
tf.equal(total_gibbs_steps, 0),
lambda: tf.to_float(tt * self.hparams.num_instruments),
lambda: tf.to_float(total_gibbs_steps))
# sample_steps is set to total_gibbs_steps if not given.
sample_steps = tf.cond(
tf.equal(sample_steps, 0),
lambda: total_gibbs_steps,
lambda: tf.to_float(sample_steps))
def infer_step(pianorolls, step_count):
"""Called by tf.while_loop, takes a Gibbs step."""
mask_prob = compute_mask_prob_from_yao_schedule(step_count,
total_gibbs_steps)
# 1 indicates mask out, 0 is not mask.
masks = make_bernoulli_masks(tf.shape(pianorolls), mask_prob,
outer_masks)
logits = self.predict(pianorolls, masks)
samples = sample_with_temperature(logits, temperature=temperature)
outputs = pianorolls * (1 - masks) + samples * masks
check_completion_op = tf.assert_equal(
tf.where(tf.equal(tf.reduce_max(masks, axis=2), 1.),
tf.reduce_max(outputs, axis=2),
tf.reduce_max(pianorolls, axis=2)),
1.)
with tf.control_dependencies([check_completion_op]):
outputs = tf.identity(outputs)
step_count += 1
return outputs, step_count
current_step = tf.to_float(self.inputs["current_step"])
# Initializes pianorolls by evaluating the model once to fill in all gaps.
logits = self.predict(tf.to_float(input_pianorolls), outer_masks)
samples = sample_with_temperature(logits, temperature=temperature)
tf.get_variable_scope().reuse_variables()
self.samples, current_step = tf.while_loop(
lambda samples, current_step: current_step < sample_steps,
infer_step, [samples, current_step],
shape_invariants=[
tf.TensorShape([None, None, None, None]),
tf.TensorShape(None),
],
back_prop=False,
parallel_iterations=1,
name="coco_while")
self.samples.set_shape(input_pianorolls.shape)
return self.samples
def predict(self, pianorolls, masks):
"""Evalutes the model once and returns predictions."""
direct_inputs = dict(
pianorolls=pianorolls, masks=masks,
lengths=tf.to_float([tf.shape(pianorolls)[1]]))
model = lib_graph.build_graph(
is_training=False,
hparams=self.hparams,
direct_inputs=direct_inputs,
use_placeholders=False)
self.logits = model.logits
return self.logits
def instantiate_sess_and_restore_checkpoint(self):
"""Instantiates session and restores from self.chkpt_path."""
if self.samples is None:
self.build_sample_graph()
sess = tf.Session()
saver = tf.train.Saver()
chkpt_fpath = tf.train.latest_checkpoint(self.chkpt_path)
tf.logging.info("loading checkpoint %s", chkpt_fpath)
saver.restore(sess, chkpt_fpath)
tf.get_variable_scope().reuse_variables()
self.sess = sess
return self.sess
def run(self,
pianorolls,
masks=None,
sample_steps=0,
current_step=0,
total_gibbs_steps=0,
temperature=0.99,
timeout_ms=0):
"""Given input pianorolls, runs Gibbs sampling to fill in the rest.
    When total_gibbs_steps is 0, total_gibbs_steps is set to
    time * instruments. If faster sampling is desired at the expense of sample
    quality, total_gibbs_steps can be explicitly set to a lower number,
    possibly to the value of sample_steps if you do not plan on stopping
    sampling early to obtain intermediate results.
This function can be used to return intermediate results by setting the
sample_steps to when results should be returned and leaving
total_gibbs_steps to be 0.
To continue sampling from intermediate results, set current_step to the
number of steps taken, and feed in the intermediate pianorolls. Again
leaving total_gibbs_steps as 0.
Builds the graph and restores checkpoint if necessary.
Args:
pianorolls: a 4D numpy array of shape (batch, time, pitch, instrument)
      masks: a 4D numpy array of the same shape as pianorolls, with 1s
          indicating mask out. If None, the masks will have 1s where there are
          no notes, indicating to the model that those positions should be
          filled in.
sample_steps: an integer indicating the number of steps to sample in this
call. If set to 0, then it defaults to total_gibbs_steps.
      current_step: an integer indicating how many steps have already been
          sampled before this call.
total_gibbs_steps: an integer indicating the total number of steps that
a complete sampling procedure would take.
temperature: a float indicating the temperature for sampling from softmax.
timeout_ms: Timeout for session.Run. Set to zero for no timeout.
Returns:
A dictionary, consisting of "pianorolls" which is a 4D numpy array of
the sampled results and "time_taken" which is the time taken in sampling.
"""
if self.sess is None:
# Build graph and restore checkpoint.
self.instantiate_sess_and_restore_checkpoint()
if masks is None:
masks = np.zeros_like(pianorolls)
start_time = time.time()
run_options = None
if timeout_ms:
run_options = tf.RunOptions(timeout_in_ms=timeout_ms)
new_piece = self.sess.run(
self.samples,
feed_dict={
self.placeholders["pianorolls"]: pianorolls,
self.placeholders["outer_masks"]: masks,
self.placeholders["sample_steps"]: sample_steps,
self.placeholders["total_gibbs_steps"]: total_gibbs_steps,
self.placeholders["current_step"]: current_step,
self.placeholders["temperature"]: temperature
}, options=run_options)
label = "independent blocked gibbs"
time_taken = (time.time() - start_time) / 60.0
tf.logging.info("exit %s (%.3fmin)" % (label, time_taken))
return dict(pianorolls=new_piece, time_taken=time_taken)
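# The helper below is an illustrative sketch (an assumption, not part of the
# original module) of the incremental-sampling pattern documented in run():
# sample part of the schedule, inspect the intermediate pianorolls, then resume.
def example_incremental_sampling(checkpoint_path, pianorolls, masks):
  """Runs the Gibbs schedule in two calls; the 50/50 split is arbitrary.
  Args:
    checkpoint_path: checkpoint directory for CoconetSampleGraph.
    pianorolls: a (batch, time, pitch, instrument) numpy array.
    masks: array of the same shape, with 1s marking positions to (re)generate.
  Returns:
    The fully sampled pianorolls as a numpy array.
  """
  sampler = CoconetSampleGraph(checkpoint_path)
  total_steps = pianorolls.shape[1] * sampler.hparams.num_instruments
  # First call: stop halfway through the default schedule to get an
  # intermediate result (total_gibbs_steps is left at its default of 0).
  first = sampler.run(pianorolls, masks=masks, sample_steps=total_steps // 2)
  # Second call: feed the intermediate pianorolls back in, report how many
  # steps were already taken, and run up to the end of the schedule. The same
  # masks are passed so the same region keeps being resampled.
  second = sampler.run(first["pianorolls"], masks=masks,
                       sample_steps=total_steps,
                       current_step=total_steps // 2)
  return second["pianorolls"]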
def make_completion_masks(pianorolls, outer_masks=1.):
pianorolls = tf.to_float(pianorolls)
masks = tf.reduce_all(tf.equal(pianorolls, 0), axis=2, keep_dims=True)
inner_masks = tf.to_float(masks) + 0 * pianorolls
return inner_masks * outer_masks
def make_bernoulli_masks(shape, pm, outer_masks=1.):
bb = shape[0]
tt = shape[1]
pp = shape[2]
ii = shape[3]
probs = tf.random_uniform([bb, tt, ii])
masks = tf.tile(tf.to_float(tf.less(probs, pm))[:, :, None, :], [1, 1, pp, 1])
return masks * outer_masks
def sample_with_temperature(logits, temperature):
"""Either argmax after softmax or random sample along the pitch axis.
Args:
logits: a Tensor of shape (batch, time, pitch, instrument).
temperature: a float 0.0=argmax 1.0=random
Returns:
a Tensor of the same shape, with one_hots on the pitch dimension.
"""
logits = tf.transpose(logits, [0, 1, 3, 2])
pitch_range = tf.shape(logits)[-1]
def sample_from_logits(logits):
with tf.control_dependencies([tf.assert_greater(temperature, 0.0)]):
logits = tf.identity(logits)
reshaped_logits = (
tf.reshape(logits, [-1, tf.shape(logits)[-1]]) / temperature)
choices = tf.multinomial(reshaped_logits, 1)
choices = tf.reshape(choices,
tf.shape(logits)[:logits.get_shape().ndims - 1])
return choices
choices = tf.cond(tf.equal(temperature, 0.0),
lambda: tf.argmax(tf.nn.softmax(logits), -1),
lambda: sample_from_logits(logits))
samples_onehot = tf.one_hot(choices, pitch_range)
return tf.transpose(samples_onehot, [0, 1, 3, 2])
def compute_mask_prob_from_yao_schedule(i, n, pmin=0.1, pmax=0.9, alpha=0.7):
  """Anneals masking probability as max(pmin, pmax - (pmax - pmin) * i / (n * alpha))."""
  wat = (pmax - pmin) * i / n
  return tf.maximum(pmin, pmax - wat / alpha)
def main(unused_argv):
checkpoint_path = FLAGS.checkpoint
sampler = CoconetSampleGraph(checkpoint_path)
batch_size = 1
decode_length = 4
target_shape = [batch_size, decode_length, 46, 4]
pianorolls = np.zeros(target_shape, dtype=np.float32)
generated_piece = sampler.run(pianorolls, sample_steps=16, temperature=0.99)
tf.logging.info("num of notes in piece %d", np.sum(generated_piece))
tf.logging.info("Done.")
if __name__ == "__main__":
tf.app.run()
| jesseengel/magenta | magenta/models/coconet/lib_tfsampling.py | Python | apache-2.0 | 12,908 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Test suite for the Chasy program.
'''
import unittest
import logic
import supseq
import baseclock
import copy
import clocks.verboserussian as verboserussian
__author__ = "Mac Ryan"
__copyright__ = "Copyright 2011, Mac Ryan"
__license__ = "GPL v3"
__maintainer__ = "Mac Ryan"
__email__ = "[email protected]"
__status__ = "Development"
class LogicPrivateUtils(unittest.TestCase):
'''
Test all the logic behind the program intelligence.
'''
logic = logic.Logic(None, None, None, True) #debug configuration
phrases = ["it is five past one",
"it is one to two",
"it is two to three",
"it is three to four",
"it is four to five",
"it is five to six",
"it is four past seven",
"it is three past eight",
"it is two past nine",
"it is one past ten",
"it is eleven o'clock",
"it is twelve o'clock",
"it is one o'clock",
"it is two o'clock",
"it is three o'clock"]
expected_families = [("it is one to two", "it is three to four",
"it is four to five", "it is five to six",
"it is two to three"),
("it is two o'clock", "it is one o'clock",
"it is eleven o'clock", "it is three o'clock",
"it is twelve o'clock"),
("it is one past ten", "it is five past one",
"it is two past nine", "it is four past seven",
"it is three past eight")]
def testIsomorphicFamilies(self):
'''Grouping similar phrases together.'''
families = self.logic._get_isomorphic_families(self.phrases)
self.assertEqual(len(self.expected_families), len(families))
families = set([tuple(sorted(fam)) for fam in families])
expected_families = set([tuple(sorted(fam)) for fam in
self.expected_families])
self.assertEqual(families, expected_families)
def testOrphanFinder(self):
'''Finding orphans in families.'''
phrases = self.phrases[:]
expected_orphans = ['it is nine to nine', 'it is five to seven']
phrases.insert(3, expected_orphans[1])
phrases.insert(7, expected_orphans[0])
orphans = self.logic._get_orphans(phrases, self.expected_families)
self.assertEqual(sorted(orphans), sorted(expected_orphans))
class LogicPublicAPI(unittest.TestCase):
'''
Test all the logic behind the program intelligence.
'''
logic = logic.Logic(None, None, None, True) #debug configuration
def testHeuristicBasic(self):
'''Sequence generation basic test (against known solution)'''
phrases = ['aaa bbb ccc',
'ddd eee fff',
'ccc ddd']
order = 'aaa bbb ccc ddd eee fff'
sequence = self.logic.get_sequence(phrases=phrases, force_rerun=True)
self.assertEqual(sequence.get_sequence_as_string(), order)
def testHeuristicTricky(self):
'''Sequence generation stress test (valid solution):
- compare identical duplicate words (ccc)
- create a duplicate word which is not explicitly used twice in the
same sentence (eee)
- sort alternate sequences (fff ggg fff ggg)
'''
phrases = ['eee aaa bbb ccc', 'ccc ddd eee', 'ccc ccc ccc',
'eee fff ggg', 'ggg fff', 'ggg fff ggg']
# Anecdote: the test was initially designed to run against the known
        # obvious solution, which would be:
# 'eee aaa bbb ccc ccc ccc ddd eee fff ggg fff ggg'
# well, during debugging it turned out that the heuristic can find
# a better, less obvious valid one, which is:
# 'eee aaa ggg bbb fff ccc ccc ddd ccc eee ggg'
# It has an element less! .....WOW! :)
seq = self.logic.get_sequence(phrases, force_rerun=True)
self.assertTrue(seq.sanity_check())
def testHeuristicNonIsomorphic(self):
'''Sequence generation with few isomoprhic sentences (valid solution)
Tests for a valid superset of unknown length.
'''
phrases = ['the red carpet is unrolled on the floor',
'a cat is walking on the carpet',
'a cat is walking on a red floor',
'the logic of sentence generation is a red herring',
'no floor is too red not to get a herring carpet',
'the cat of the castle made a sentence',
'the logic of the herring is red',
'a cat and a herring are on the floor',
'no cat is on the carpet',
'no logic can hold against ideology']
sequence = self.logic.get_sequence(phrases, force_rerun=True)
self.assertTrue(sequence.sanity_check())
def testCoarseRedundancyLoop(self):
'''Coarse redundancy filter loop.'''
phrases = ["I have many flowers at home",
"I do not have that many animals at the farm",
"I have very few cabbages in the fridge",
"If I have or have not it is none of your business"]
ok_seq = """If I do not have or have very not that many few flowers
animals cabbages at in the home farm fridge it is none
of your business"""
noisy_seq = """If I do not have or have have very not that many few
flowers inserted snippet here animals cabbages at in
the home land farm fridge fridge it is none of your
business have"""
ok_seq = ' '.join(ok_seq.split())
sequence = noisy_seq
while True:
new_sequence = self.logic.coarse_redundancy_filter(sequence,
phrases)
if len(new_sequence) < len(sequence):
sequence = new_sequence
else:
break
self.assertEqual(new_sequence, ok_seq)
class Element(unittest.TestCase):
'''
Test the supseq.Element methods.
'''
def testAutoSpacing(self):
'''Can tell if a space is needed between two words.'''
phrases = ["It is one o'clock", "It is two o'clock",
"It is three o'clock", "It is four o'clock"]
seq = "It is one two three four o'clock"
# Basic case
t = supseq.SuperSequence(seq, phrases)
self.assertTrue(t[0].test_contact())
self.assertFalse(t[3].test_contact())
self.assertTrue(t[5].test_contact())
# Tricky: if autospaces have been added to the words!
for i in range(len(t)):
t[i].word += ' '
self.assertTrue(t[0].test_contact())
self.assertFalse(t[3].test_contact())
self.assertTrue(t[5].test_contact())
class SuperSequence(unittest.TestCase):
'''
Test the supseq.SuperSequence methods.
'''
def testSanityCheckBase(self):
'''Sanity check base test'''
phrases = ["I have many flowers at home",
"I do not have that many animals at the farm",
"I have very few cabbages in the fridge",
"If I have or have not it is none of your business"]
ok_seq = """If I do not have or have very not that many few flowers
animals cabbages at in the home farm fridge it is none
of your business"""
# Test for success
seq = supseq.SuperSequence(ok_seq, phrases)
self.assertTrue(seq.sanity_check())
# Test for failure
ko_seq = ok_seq[0:ok_seq.index('few')] + \
ok_seq[ok_seq.index('few')+4:]
seq = supseq.SuperSequence(ko_seq, phrases)
self.assertFalse(seq.sanity_check())
def testSanityCheckSubstrings(self):
'''Identical substrings in other words are ignored'''
phrases = ["aaa bbb ccc ddd eee fff"]
ko_seq = """aaaxxx bbb ccc ddd eee fff"""
seq = supseq.SuperSequence(ko_seq, phrases)
self.assertFalse(seq.sanity_check())
def testShiftingWords(self):
'''Shifting words in a list only if possible'''
phrases = ['I have one dog', 'I have two cats']
seq = 'I have one two dog cats'
sequence = supseq.SuperSequence(seq, phrases)
# "one" to right
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element(2, 'right'))
# "cats" to left
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element(5, 'left'))
# "I" to right
t = copy.deepcopy(sequence)
self.assertFalse(t.shift_element(0, 'right'))
# "have" to left
t = copy.deepcopy(sequence)
self.assertFalse(t.shift_element(1, 'left'))
# "have" to left with force
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element(1, 'left', only_if_sane=False))
def testShiftingToPosition(self):
'''Shifting words to designated postion'''
phrases = ['I have one dog', 'I have two cats']
seq = 'I have one two banana carrot dog cats'
sequence = supseq.SuperSequence(seq, phrases)
# 'cats' before 'two'
t = copy.deepcopy(sequence)
self.assertFalse(t.shift_element_to_position(-1, 3))
new_seq = t.get_sequence_as_string()
self.assertEqual(new_seq, 'I have one two cats banana carrot dog')
# 'dog' before 'two'
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element_to_position(-2, 3))
new_seq = t.get_sequence_as_string()
self.assertEqual(new_seq, 'I have one dog two banana carrot cats')
# 'cats' before 'two' with forcing
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element_to_position(-1, 3, only_if_sane=False))
new_seq = t.get_sequence_as_string()
self.assertEqual(new_seq, 'I have one cats two banana carrot dog')
# Shifting to right direction: 'two' after 'carrot'
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element_to_position(3, 5))
new_seq = t.get_sequence_as_string()
self.assertEqual(new_seq, 'I have one banana carrot two dog cats')
# Shifting to right direction: 'I' after 'dog'
t = copy.deepcopy(sequence)
self.assertFalse(t.shift_element_to_position(0, 6))
new_seq = t.get_sequence_as_string()
self.assertEqual(new_seq, 'I have one two banana carrot dog cats')
# Shifting to right direction: 'I' after 'dog' with forcing
t = copy.deepcopy(sequence)
self.assertTrue(t.shift_element_to_position(0, 6, only_if_sane=False))
new_seq = t.get_sequence_as_string()
self.assertEqual(new_seq, 'have one two banana carrot dog I cats')
def testConvergence(self):
'''Make two elements converge'''
phrases = ['aaa bbb ccc', 'ccc ddd eee']
sequence = 'aaa bbb ccc ddd eee aaa'
# Possible convergence
s = supseq.SuperSequence(sequence, phrases)
first_aaa = s[0]
second_aaa = s[-1]
self.assertTrue(s.converge_elements(first_aaa, second_aaa))
# Impossible convergence
s = supseq.SuperSequence(sequence, phrases)
first_aaa = s[0]
eee = s[-2]
s.converge_elements(first_aaa, eee)
self.assertFalse(s.converge_elements(first_aaa, eee))
# Convergence of two adjacent words
s = supseq.SuperSequence(sequence, phrases)
bbb = s[1]
ccc = s[2]
self.assertTrue(s.converge_elements(bbb, ccc))
def testGetDuplicateItems(self):
'''Find duplicates in the sequence'''
phrases = []
seq = 'I cats have one two dog cats dog'
s = supseq.SuperSequence(seq, phrases)
words = sorted(s.get_duplicate_words())
self.assertEqual(words, sorted(['cats', 'dog']))
def testEliminateRedundancies(self):
'''Eliminating redundant items in sequence'''
phrases = ['I have one dog', 'I have two cats']
seq = 'I cats have one two dog cats dog'
s = supseq.SuperSequence(seq, phrases)
s.eliminate_redundancies()
self.assertEqual(len(s), 6) #only 6 elements...
self.assertEqual(len(set(s)), 6) #...which are different...
self.assertTrue(s.sanity_check()) #...and the sequence is still sane!
def testGetContainingPairs(self):
'''Get containing pairs'''
        # The trickery here is that two identical words (cats) should not be
        # returned as a pair.
phrases = []
seq = 'I have a one two bone dog cats cats'
expected = sorted([('have', 'a'), ('bone', 'one'),
('cats', 'a'), ('cats', 'a')])
s = supseq.SuperSequence(seq, phrases)
tmp = sorted([(a.word, b.word) for a, b in s.get_containing_pairs()])
self.assertEqual(tmp, expected)
def testMergeSubstrings(self):
'''Merge words if one contains the other'''
# small words to rigth of large ones
phrases = ['I have one dog', 'I have two cats', 'I have a bone dog']
valid = ['I have two cats bone dog'] #uses 'a' from 'cats'
seq = 'I have a one two bone dog cats'
s = supseq.SuperSequence(seq, phrases)
s.substring_merging_optimisation()
self.assertTrue(s.get_sequence_as_string() in valid)
# small words to left of large ones
seq = 'I have a bone two one dog cats'
s = supseq.SuperSequence(seq, phrases)
s.substring_merging_optimisation()
self.assertTrue(s.get_sequence_as_string() in valid)
def testGetBestFit(self):
'''Test bin filling heuristics'''
phrases = ['I have one dog', 'I have two cats']
# Note that there are two possible perfect fits in this sequence:
# "two one cats" and "one_ dog two". The second one has a compulsory
# space between "one" and "dog", but gets found first. It would
# be nice to find an EFFICIENT way to prefer non-spaced sequences
# first, but so far... no luck!
seq = 'I have one two dog cats'
# With spaces
t = supseq.SuperSequence(seq, phrases)
tmp = t.get_best_fit(10, 2, new_line=True)
best_str = ' '.join(el.word for el in tmp[1:])
self.assertTrue(tmp[0])
self.assertEqual('one dog two', best_str)
# # Without spaces
# t = supseq.SuperSequence(seq, phrases)
# tmp = t.get_best_fit(10, 2, new_line=True)
# best_str = ' '.join(el.word for el in tmp[1:])
# self.assertTrue(tmp[0])
# self.assertEqual('two one cats', best_str)
class BaseClock(unittest.TestCase):
'''
Test the base clock from which plugins are derived.
'''
base_clock = baseclock.Clock()
def testWordSelect(self):
'''Word selection via dictionary with multiple keys.'''
test_list = {(1, 2, 'a', 'b'):'first',
(3, 4, 'c', 'two'):'second',
(5,):'third'}
# Test retrieval by all keys
for keys in (test_list):
for key in keys:
word = self.base_clock._word_select(key, test_list)
self.assertEqual(word, test_list[keys])
# Test exception
self.assertRaises(Exception,
self.base_clock._word_select, 'xxx', test_list)
class Russian(unittest.TestCase):
'''
Test Russian phrases generation.
'''
clock = verboserussian.Clock()
def testRoundHour(self):
'''Generation of russian "o'clock" sentences.'''
known_values = {(0, 0):'Сейчас полночь',
(1, 0):'Сейчас ровно один час ночи',
(2, 0):'Сейчас ровно два часа ночи',
(3, 0):'Сейчас ровно три часа ночи',
(4, 0):'Сейчас ровно четыре часа утра',
(5, 0):'Сейчас ровно пять часов утра',
(6, 0):'Сейчас ровно шесть часов утра',
(7, 0):'Сейчас ровно семь часов утра',
(8, 0):'Сейчас ровно восемь часов утра',
(9, 0):'Сейчас ровно девять часов утра',
(10, 0):'Сейчас ровно десять часов утра',
(11, 0):'Сейчас ровно одиннадцать часов утра',
(12, 0):'Сейчас полдень',
(13, 0):'Сейчас ровно один час дня',
(14, 0):'Сейчас ровно два часа дня',
(15, 0):'Сейчас ровно три часа дня',
(16, 0):'Сейчас ровно четыре часа дня',
(17, 0):'Сейчас ровно пять часов дня',
(18, 0):'Сейчас ровно шесть часов вечера',
(19, 0):'Сейчас ровно семь часов вечера',
(20, 0):'Сейчас ровно восемь часов вечера',
(21, 0):'Сейчас ровно девять часов вечера',
(22, 0):'Сейчас ровно десять часов вечера',
(23, 0):'Сейчас ровно одиннадцать часов вечера'}
for k in known_values:
self.assertEqual(self.clock.get_time_phrase(*k), known_values[k])
def testToHour(self):
'''Generation of russian sentences for <30 mins TO the round hour.'''
known_values = {(1, 55):'Сейчас без пяти минут два ночи',
(9, 45):'Сейчас без четверти десять утра'}
for k in known_values:
self.assertEqual(self.clock.get_time_phrase(*k), known_values[k])
def testPastHour(self):
'''Generation of russian sentences for <30 mins FROM the round hour.'''
known_values = {(8, 22):'Сейчас двадцать две минуты девятого утра',
(9, 30):'Сейчас половина десятого утра',
(9, 15):'Сейчас четверть десятого утра'}
for k in known_values:
self.assertEqual(self.clock.get_time_phrase(*k), known_values[k])
def testOlga(self):
'''Generation of russian sentences against manually verified list.'''
known_values = {( 0, 0):'Сейчас полночь',
( 0, 1):'Сейчас одна минута первого ночи',
( 0, 2):'Сейчас две минуты первого ночи',
( 0, 3):'Сейчас три минуты первого ночи',
( 0, 4):'Сейчас четыре минуты первого ночи',
( 0, 5):'Сейчас пять минут первого ночи',
( 0, 6):'Сейчас шесть минут первого ночи',
( 0, 7):'Сейчас семь минут первого ночи',
( 0, 8):'Сейчас восемь минут первого ночи',
( 0, 9):'Сейчас девять минут первого ночи',
( 0, 10):'Сейчас десять минут первого ночи',
( 0, 11):'Сейчас одиннадцать минут первого ночи',
( 0, 12):'Сейчас двенадцать минут первого ночи',
( 0, 13):'Сейчас тринадцать минут первого ночи',
( 0, 14):'Сейчас четырнадцать минут первого ночи',
( 0, 15):'Сейчас четверть первого ночи',
( 0, 16):'Сейчас шестнадцать минут первого ночи',
( 0, 17):'Сейчас семнадцать минут первого ночи',
( 0, 18):'Сейчас восемнадцать минут первого ночи',
( 0, 19):'Сейчас девятнадцать минут первого ночи',
( 0, 20):'Сейчас двадцать минут первого ночи',
( 0, 21):'Сейчас двадцать одна минута первого ночи',
( 0, 22):'Сейчас двадцать две минуты первого ночи',
( 0, 23):'Сейчас двадцать три минуты первого ночи',
( 0, 24):'Сейчас двадцать четыре минуты первого ночи',
( 0, 25):'Сейчас двадцать пять минут первого ночи',
( 0, 26):'Сейчас двадцать шесть минут первого ночи',
( 0, 27):'Сейчас двадцать семь минут первого ночи',
( 0, 28):'Сейчас двадцать восемь минут первого ночи',
( 0, 29):'Сейчас двадцать девять минут первого ночи',
( 0, 30):'Сейчас половина первого ночи',
( 0, 31):'Сейчас без двадцати девяти минут час ночи',
( 0, 32):'Сейчас без двадцати восьми минут час ночи',
( 0, 33):'Сейчас без двадцати семи минут час ночи',
( 0, 34):'Сейчас без двадцати шести минут час ночи',
( 0, 35):'Сейчас без двадцати пяти минут час ночи',
( 0, 36):'Сейчас без двадцати четырёх минут час ночи',
( 0, 37):'Сейчас без двадцати трёх минут час ночи',
( 0, 38):'Сейчас без двадцати двух минут час ночи',
( 0, 39):'Сейчас без двадцати одной минуты час ночи',
( 0, 40):'Сейчас без двадцати минут час ночи',
( 0, 41):'Сейчас без девятнадцати минут час ночи',
( 0, 42):'Сейчас без восемнадцати минут час ночи',
( 0, 43):'Сейчас без семнадцати минут час ночи',
( 0, 44):'Сейчас без шестнадцати минут час ночи',
( 0, 45):'Сейчас без четверти час ночи',
( 0, 46):'Сейчас без четырнадцати минут час ночи',
( 0, 47):'Сейчас без тринадцати минут час ночи',
( 0, 48):'Сейчас без двенадцати минут час ночи',
( 0, 49):'Сейчас без одиннадцати минут час ночи',
( 0, 50):'Сейчас без десяти минут час ночи',
( 0, 51):'Сейчас без девяти минут час ночи',
( 0, 52):'Сейчас без восьми минут час ночи',
( 0, 53):'Сейчас без семи минут час ночи',
( 0, 54):'Сейчас без шести минут час ночи',
( 0, 55):'Сейчас без пяти минут час ночи',
( 0, 56):'Сейчас без четырёх минут час ночи',
( 0, 57):'Сейчас без трёх минут час ночи',
( 0, 58):'Сейчас без двух минут час ночи',
( 0, 59):'Сейчас без одной минуты час ночи',
( 1, 0):'Сейчас ровно один час ночи',
( 1, 1):'Сейчас одна минута второго ночи',
( 1, 2):'Сейчас две минуты второго ночи',
( 1, 3):'Сейчас три минуты второго ночи',
( 1, 4):'Сейчас четыре минуты второго ночи',
( 1, 5):'Сейчас пять минут второго ночи',
( 1, 6):'Сейчас шесть минут второго ночи',
( 1, 7):'Сейчас семь минут второго ночи',
( 1, 8):'Сейчас восемь минут второго ночи',
( 1, 9):'Сейчас девять минут второго ночи',
( 1, 10):'Сейчас десять минут второго ночи',
( 1, 11):'Сейчас одиннадцать минут второго ночи',
( 1, 12):'Сейчас двенадцать минут второго ночи',
( 1, 13):'Сейчас тринадцать минут второго ночи',
( 1, 14):'Сейчас четырнадцать минут второго ночи',
( 1, 15):'Сейчас четверть второго ночи',
( 1, 16):'Сейчас шестнадцать минут второго ночи',
( 1, 17):'Сейчас семнадцать минут второго ночи',
( 1, 18):'Сейчас восемнадцать минут второго ночи',
( 1, 19):'Сейчас девятнадцать минут второго ночи',
( 1, 20):'Сейчас двадцать минут второго ночи',
( 1, 21):'Сейчас двадцать одна минута второго ночи',
( 1, 22):'Сейчас двадцать две минуты второго ночи',
( 1, 23):'Сейчас двадцать три минуты второго ночи',
( 1, 24):'Сейчас двадцать четыре минуты второго ночи',
( 1, 25):'Сейчас двадцать пять минут второго ночи',
( 1, 26):'Сейчас двадцать шесть минут второго ночи',
( 1, 27):'Сейчас двадцать семь минут второго ночи',
( 1, 28):'Сейчас двадцать восемь минут второго ночи',
( 1, 29):'Сейчас двадцать девять минут второго ночи',
( 1, 30):'Сейчас половина второго ночи',
( 1, 31):'Сейчас без двадцати девяти минут два ночи',
( 1, 32):'Сейчас без двадцати восьми минут два ночи',
( 1, 33):'Сейчас без двадцати семи минут два ночи',
( 1, 34):'Сейчас без двадцати шести минут два ночи',
( 1, 35):'Сейчас без двадцати пяти минут два ночи',
( 1, 36):'Сейчас без двадцати четырёх минут два ночи',
( 1, 37):'Сейчас без двадцати трёх минут два ночи',
( 1, 38):'Сейчас без двадцати двух минут два ночи',
( 1, 39):'Сейчас без двадцати одной минуты два ночи',
( 1, 40):'Сейчас без двадцати минут два ночи',
( 1, 41):'Сейчас без девятнадцати минут два ночи',
( 1, 42):'Сейчас без восемнадцати минут два ночи',
( 1, 43):'Сейчас без семнадцати минут два ночи',
( 1, 44):'Сейчас без шестнадцати минут два ночи',
( 1, 45):'Сейчас без четверти два ночи',
( 1, 46):'Сейчас без четырнадцати минут два ночи',
( 1, 47):'Сейчас без тринадцати минут два ночи',
( 1, 48):'Сейчас без двенадцати минут два ночи',
( 1, 49):'Сейчас без одиннадцати минут два ночи',
( 1, 50):'Сейчас без десяти минут два ночи',
( 1, 51):'Сейчас без девяти минут два ночи',
( 1, 52):'Сейчас без восьми минут два ночи',
( 1, 53):'Сейчас без семи минут два ночи',
( 1, 54):'Сейчас без шести минут два ночи',
( 1, 55):'Сейчас без пяти минут два ночи',
( 1, 56):'Сейчас без четырёх минут два ночи',
( 1, 57):'Сейчас без трёх минут два ночи',
( 1, 58):'Сейчас без двух минут два ночи',
( 1, 59):'Сейчас без одной минуты два ночи',
( 2, 0):'Сейчас ровно два часа ночи',
( 2, 1):'Сейчас одна минута третьего ночи',
( 2, 2):'Сейчас две минуты третьего ночи',
( 2, 3):'Сейчас три минуты третьего ночи',
( 2, 4):'Сейчас четыре минуты третьего ночи',
( 2, 5):'Сейчас пять минут третьего ночи',
( 2, 6):'Сейчас шесть минут третьего ночи',
( 2, 7):'Сейчас семь минут третьего ночи',
( 2, 8):'Сейчас восемь минут третьего ночи',
( 2, 9):'Сейчас девять минут третьего ночи',
( 2, 10):'Сейчас десять минут третьего ночи',
( 2, 11):'Сейчас одиннадцать минут третьего ночи',
( 2, 12):'Сейчас двенадцать минут третьего ночи',
( 2, 13):'Сейчас тринадцать минут третьего ночи',
( 2, 14):'Сейчас четырнадцать минут третьего ночи',
( 2, 15):'Сейчас четверть третьего ночи',
( 2, 16):'Сейчас шестнадцать минут третьего ночи',
( 2, 17):'Сейчас семнадцать минут третьего ночи',
( 2, 18):'Сейчас восемнадцать минут третьего ночи',
( 2, 19):'Сейчас девятнадцать минут третьего ночи',
( 2, 20):'Сейчас двадцать минут третьего ночи',
( 2, 21):'Сейчас двадцать одна минута третьего ночи',
( 2, 22):'Сейчас двадцать две минуты третьего ночи',
( 2, 23):'Сейчас двадцать три минуты третьего ночи',
( 2, 24):'Сейчас двадцать четыре минуты третьего ночи',
( 2, 25):'Сейчас двадцать пять минут третьего ночи',
( 2, 26):'Сейчас двадцать шесть минут третьего ночи',
( 2, 27):'Сейчас двадцать семь минут третьего ночи',
( 2, 28):'Сейчас двадцать восемь минут третьего ночи',
( 2, 29):'Сейчас двадцать девять минут третьего ночи',
( 2, 30):'Сейчас половина третьего ночи',
( 2, 31):'Сейчас без двадцати девяти минут три часа ночи',
( 2, 32):'Сейчас без двадцати восьми минут три часа ночи',
( 2, 33):'Сейчас без двадцати семи минут три часа ночи',
( 2, 34):'Сейчас без двадцати шести минут три часа ночи',
( 2, 35):'Сейчас без двадцати пяти минут три часа ночи',
( 2, 36):'Сейчас без двадцати четырёх минут три часа ночи',
( 2, 37):'Сейчас без двадцати трёх минут три часа ночи',
( 2, 38):'Сейчас без двадцати двух минут три часа ночи',
( 2, 39):'Сейчас без двадцати одной минуты три часа ночи',
( 2, 40):'Сейчас без двадцати минут три часа ночи',
( 2, 41):'Сейчас без девятнадцати минут три часа ночи',
( 2, 42):'Сейчас без восемнадцати минут три часа ночи',
( 2, 43):'Сейчас без семнадцати минут три часа ночи',
( 2, 44):'Сейчас без шестнадцати минут три часа ночи',
( 2, 45):'Сейчас без четверти три часа ночи',
( 2, 46):'Сейчас без четырнадцати минут три часа ночи',
( 2, 47):'Сейчас без тринадцати минут три часа ночи',
( 2, 48):'Сейчас без двенадцати минут три часа ночи',
( 2, 49):'Сейчас без одиннадцати минут три часа ночи',
( 2, 50):'Сейчас без десяти минут три часа ночи',
( 2, 51):'Сейчас без девяти минут три часа ночи',
( 2, 52):'Сейчас без восьми минут три часа ночи',
( 2, 53):'Сейчас без семи минут три часа ночи',
( 2, 54):'Сейчас без шести минут три часа ночи',
( 2, 55):'Сейчас без пяти минут три часа ночи',
( 2, 56):'Сейчас без четырёх минут три часа ночи',
( 2, 57):'Сейчас без трёх минут три часа ночи',
( 2, 58):'Сейчас без двух минут три часа ночи',
( 2, 59):'Сейчас без одной минуты три часа ночи',
( 3, 0):'Сейчас ровно три часа ночи',
( 3, 1):'Сейчас одна минута четвёртого утра',
( 3, 2):'Сейчас две минуты четвёртого утра',
( 3, 3):'Сейчас три минуты четвёртого утра',
( 3, 4):'Сейчас четыре минуты четвёртого утра',
( 3, 5):'Сейчас пять минут четвёртого утра',
( 3, 6):'Сейчас шесть минут четвёртого утра',
( 3, 7):'Сейчас семь минут четвёртого утра',
( 3, 8):'Сейчас восемь минут четвёртого утра',
( 3, 9):'Сейчас девять минут четвёртого утра',
( 3, 10):'Сейчас десять минут четвёртого утра',
( 3, 11):'Сейчас одиннадцать минут четвёртого утра',
( 3, 12):'Сейчас двенадцать минут четвёртого утра',
( 3, 13):'Сейчас тринадцать минут четвёртого утра',
( 3, 14):'Сейчас четырнадцать минут четвёртого утра',
( 3, 15):'Сейчас четверть четвёртого утра',
( 3, 16):'Сейчас шестнадцать минут четвёртого утра',
( 3, 17):'Сейчас семнадцать минут четвёртого утра',
( 3, 18):'Сейчас восемнадцать минут четвёртого утра',
( 3, 19):'Сейчас девятнадцать минут четвёртого утра',
( 3, 20):'Сейчас двадцать минут четвёртого утра',
( 3, 21):'Сейчас двадцать одна минута четвёртого утра',
( 3, 22):'Сейчас двадцать две минуты четвёртого утра',
( 3, 23):'Сейчас двадцать три минуты четвёртого утра',
( 3, 24):'Сейчас двадцать четыре минуты четвёртого утра',
( 3, 25):'Сейчас двадцать пять минут четвёртого утра',
( 3, 26):'Сейчас двадцать шесть минут четвёртого утра',
( 3, 27):'Сейчас двадцать семь минут четвёртого утра',
( 3, 28):'Сейчас двадцать восемь минут четвёртого утра',
( 3, 29):'Сейчас двадцать девять минут четвёртого утра',
( 3, 30):'Сейчас половина четвёртого утра',
( 3, 31):'Сейчас без двадцати девяти минут четыре утра',
( 3, 32):'Сейчас без двадцати восьми минут четыре утра',
( 3, 33):'Сейчас без двадцати семи минут четыре утра',
( 3, 34):'Сейчас без двадцати шести минут четыре утра',
( 3, 35):'Сейчас без двадцати пяти минут четыре утра',
( 3, 36):'Сейчас без двадцати четырёх минут четыре утра',
( 3, 37):'Сейчас без двадцати трёх минут четыре утра',
( 3, 38):'Сейчас без двадцати двух минут четыре утра',
( 3, 39):'Сейчас без двадцати одной минуты четыре утра',
( 3, 40):'Сейчас без двадцати минут четыре утра',
( 3, 41):'Сейчас без девятнадцати минут четыре утра',
( 3, 42):'Сейчас без восемнадцати минут четыре утра',
( 3, 43):'Сейчас без семнадцати минут четыре утра',
( 3, 44):'Сейчас без шестнадцати минут четыре утра',
( 3, 45):'Сейчас без четверти четыре утра',
( 3, 46):'Сейчас без четырнадцати минут четыре утра',
( 3, 47):'Сейчас без тринадцати минут четыре утра',
( 3, 48):'Сейчас без двенадцати минут четыре утра',
( 3, 49):'Сейчас без одиннадцати минут четыре утра',
( 3, 50):'Сейчас без десяти минут четыре утра',
( 3, 51):'Сейчас без девяти минут четыре утра',
( 3, 52):'Сейчас без восьми минут четыре утра',
( 3, 53):'Сейчас без семи минут четыре утра',
( 3, 54):'Сейчас без шести минут четыре утра',
( 3, 55):'Сейчас без пяти минут четыре утра',
( 3, 56):'Сейчас без четырёх минут четыре утра',
( 3, 57):'Сейчас без трёх минут четыре утра',
( 3, 58):'Сейчас без двух минут четыре утра',
( 3, 59):'Сейчас без одной минуты четыре утра',
( 4, 0):'Сейчас ровно четыре часа утра',
( 4, 1):'Сейчас одна минута пятого утра',
( 4, 2):'Сейчас две минуты пятого утра',
( 4, 3):'Сейчас три минуты пятого утра',
( 4, 4):'Сейчас четыре минуты пятого утра',
( 4, 5):'Сейчас пять минут пятого утра',
( 4, 6):'Сейчас шесть минут пятого утра',
( 4, 7):'Сейчас семь минут пятого утра',
( 4, 8):'Сейчас восемь минут пятого утра',
( 4, 9):'Сейчас девять минут пятого утра',
( 4, 10):'Сейчас десять минут пятого утра',
( 4, 11):'Сейчас одиннадцать минут пятого утра',
( 4, 12):'Сейчас двенадцать минут пятого утра',
( 4, 13):'Сейчас тринадцать минут пятого утра',
( 4, 14):'Сейчас четырнадцать минут пятого утра',
( 4, 15):'Сейчас четверть пятого утра',
( 4, 16):'Сейчас шестнадцать минут пятого утра',
( 4, 17):'Сейчас семнадцать минут пятого утра',
( 4, 18):'Сейчас восемнадцать минут пятого утра',
( 4, 19):'Сейчас девятнадцать минут пятого утра',
( 4, 20):'Сейчас двадцать минут пятого утра',
( 4, 21):'Сейчас двадцать одна минута пятого утра',
( 4, 22):'Сейчас двадцать две минуты пятого утра',
( 4, 23):'Сейчас двадцать три минуты пятого утра',
( 4, 24):'Сейчас двадцать четыре минуты пятого утра',
( 4, 25):'Сейчас двадцать пять минут пятого утра',
( 4, 26):'Сейчас двадцать шесть минут пятого утра',
( 4, 27):'Сейчас двадцать семь минут пятого утра',
( 4, 28):'Сейчас двадцать восемь минут пятого утра',
( 4, 29):'Сейчас двадцать девять минут пятого утра',
( 4, 30):'Сейчас половина пятого утра',
( 4, 31):'Сейчас без двадцати девяти минут пять утра',
( 4, 32):'Сейчас без двадцати восьми минут пять утра',
( 4, 33):'Сейчас без двадцати семи минут пять утра',
( 4, 34):'Сейчас без двадцати шести минут пять утра',
( 4, 35):'Сейчас без двадцати пяти минут пять утра',
( 4, 36):'Сейчас без двадцати четырёх минут пять утра',
( 4, 37):'Сейчас без двадцати трёх минут пять утра',
( 4, 38):'Сейчас без двадцати двух минут пять утра',
( 4, 39):'Сейчас без двадцати одной минуты пять утра',
( 4, 40):'Сейчас без двадцати минут пять утра',
( 4, 41):'Сейчас без девятнадцати минут пять утра',
( 4, 42):'Сейчас без восемнадцати минут пять утра',
( 4, 43):'Сейчас без семнадцати минут пять утра',
( 4, 44):'Сейчас без шестнадцати минут пять утра',
( 4, 45):'Сейчас без четверти пять утра',
( 4, 46):'Сейчас без четырнадцати минут пять утра',
( 4, 47):'Сейчас без тринадцати минут пять утра',
( 4, 48):'Сейчас без двенадцати минут пять утра',
( 4, 49):'Сейчас без одиннадцати минут пять утра',
( 4, 50):'Сейчас без десяти минут пять утра',
( 4, 51):'Сейчас без девяти минут пять утра',
( 4, 52):'Сейчас без восьми минут пять утра',
( 4, 53):'Сейчас без семи минут пять утра',
( 4, 54):'Сейчас без шести минут пять утра',
( 4, 55):'Сейчас без пяти минут пять утра',
( 4, 56):'Сейчас без четырёх минут пять утра',
( 4, 57):'Сейчас без трёх минут пять утра',
( 4, 58):'Сейчас без двух минут пять утра',
( 4, 59):'Сейчас без одной минуты пять утра',
( 5, 0):'Сейчас ровно пять часов утра',
( 5, 1):'Сейчас одна минута шестого утра',
( 5, 2):'Сейчас две минуты шестого утра',
( 5, 3):'Сейчас три минуты шестого утра',
( 5, 4):'Сейчас четыре минуты шестого утра',
( 5, 5):'Сейчас пять минут шестого утра',
( 5, 6):'Сейчас шесть минут шестого утра',
( 5, 7):'Сейчас семь минут шестого утра',
( 5, 8):'Сейчас восемь минут шестого утра',
( 5, 9):'Сейчас девять минут шестого утра',
( 5, 10):'Сейчас десять минут шестого утра',
( 5, 11):'Сейчас одиннадцать минут шестого утра',
( 5, 12):'Сейчас двенадцать минут шестого утра',
( 5, 13):'Сейчас тринадцать минут шестого утра',
( 5, 14):'Сейчас четырнадцать минут шестого утра',
( 5, 15):'Сейчас четверть шестого утра',
( 5, 16):'Сейчас шестнадцать минут шестого утра',
( 5, 17):'Сейчас семнадцать минут шестого утра',
( 5, 18):'Сейчас восемнадцать минут шестого утра',
( 5, 19):'Сейчас девятнадцать минут шестого утра',
( 5, 20):'Сейчас двадцать минут шестого утра',
( 5, 21):'Сейчас двадцать одна минута шестого утра',
( 5, 22):'Сейчас двадцать две минуты шестого утра',
( 5, 23):'Сейчас двадцать три минуты шестого утра',
( 5, 24):'Сейчас двадцать четыре минуты шестого утра',
( 5, 25):'Сейчас двадцать пять минут шестого утра',
( 5, 26):'Сейчас двадцать шесть минут шестого утра',
( 5, 27):'Сейчас двадцать семь минут шестого утра',
( 5, 28):'Сейчас двадцать восемь минут шестого утра',
( 5, 29):'Сейчас двадцать девять минут шестого утра',
( 5, 30):'Сейчас половина шестого утра',
( 5, 31):'Сейчас без двадцати девяти минут шесть утра',
( 5, 32):'Сейчас без двадцати восьми минут шесть утра',
( 5, 33):'Сейчас без двадцати семи минут шесть утра',
( 5, 34):'Сейчас без двадцати шести минут шесть утра',
( 5, 35):'Сейчас без двадцати пяти минут шесть утра',
( 5, 36):'Сейчас без двадцати четырёх минут шесть утра',
( 5, 37):'Сейчас без двадцати трёх минут шесть утра',
( 5, 38):'Сейчас без двадцати двух минут шесть утра',
( 5, 39):'Сейчас без двадцати одной минуты шесть утра',
( 5, 40):'Сейчас без двадцати минут шесть утра',
( 5, 41):'Сейчас без девятнадцати минут шесть утра',
( 5, 42):'Сейчас без восемнадцати минут шесть утра',
( 5, 43):'Сейчас без семнадцати минут шесть утра',
( 5, 44):'Сейчас без шестнадцати минут шесть утра',
( 5, 45):'Сейчас без четверти шесть утра',
( 5, 46):'Сейчас без четырнадцати минут шесть утра',
( 5, 47):'Сейчас без тринадцати минут шесть утра',
( 5, 48):'Сейчас без двенадцати минут шесть утра',
( 5, 49):'Сейчас без одиннадцати минут шесть утра',
( 5, 50):'Сейчас без десяти минут шесть утра',
( 5, 51):'Сейчас без девяти минут шесть утра',
( 5, 52):'Сейчас без восьми минут шесть утра',
( 5, 53):'Сейчас без семи минут шесть утра',
( 5, 54):'Сейчас без шести минут шесть утра',
( 5, 55):'Сейчас без пяти минут шесть утра',
( 5, 56):'Сейчас без четырёх минут шесть утра',
( 5, 57):'Сейчас без трёх минут шесть утра',
( 5, 58):'Сейчас без двух минут шесть утра',
( 5, 59):'Сейчас без одной минуты шесть утра',
( 6, 0):'Сейчас ровно шесть часов утра',
( 6, 1):'Сейчас одна минута седьмого утра',
( 6, 2):'Сейчас две минуты седьмого утра',
( 6, 3):'Сейчас три минуты седьмого утра',
( 6, 4):'Сейчас четыре минуты седьмого утра',
( 6, 5):'Сейчас пять минут седьмого утра',
( 6, 6):'Сейчас шесть минут седьмого утра',
( 6, 7):'Сейчас семь минут седьмого утра',
( 6, 8):'Сейчас восемь минут седьмого утра',
( 6, 9):'Сейчас девять минут седьмого утра',
( 6, 10):'Сейчас десять минут седьмого утра',
( 6, 11):'Сейчас одиннадцать минут седьмого утра',
( 6, 12):'Сейчас двенадцать минут седьмого утра',
( 6, 13):'Сейчас тринадцать минут седьмого утра',
( 6, 14):'Сейчас четырнадцать минут седьмого утра',
( 6, 15):'Сейчас четверть седьмого утра',
( 6, 16):'Сейчас шестнадцать минут седьмого утра',
( 6, 17):'Сейчас семнадцать минут седьмого утра',
( 6, 18):'Сейчас восемнадцать минут седьмого утра',
( 6, 19):'Сейчас девятнадцать минут седьмого утра',
( 6, 20):'Сейчас двадцать минут седьмого утра',
( 6, 21):'Сейчас двадцать одна минута седьмого утра',
( 6, 22):'Сейчас двадцать две минуты седьмого утра',
( 6, 23):'Сейчас двадцать три минуты седьмого утра',
( 6, 24):'Сейчас двадцать четыре минуты седьмого утра',
( 6, 25):'Сейчас двадцать пять минут седьмого утра',
( 6, 26):'Сейчас двадцать шесть минут седьмого утра',
( 6, 27):'Сейчас двадцать семь минут седьмого утра',
( 6, 28):'Сейчас двадцать восемь минут седьмого утра',
( 6, 29):'Сейчас двадцать девять минут седьмого утра',
( 6, 30):'Сейчас половина седьмого утра',
( 6, 31):'Сейчас без двадцати девяти минут семь утра',
( 6, 32):'Сейчас без двадцати восьми минут семь утра',
( 6, 33):'Сейчас без двадцати семи минут семь утра',
( 6, 34):'Сейчас без двадцати шести минут семь утра',
( 6, 35):'Сейчас без двадцати пяти минут семь утра',
( 6, 36):'Сейчас без двадцати четырёх минут семь утра',
( 6, 37):'Сейчас без двадцати трёх минут семь утра',
( 6, 38):'Сейчас без двадцати двух минут семь утра',
( 6, 39):'Сейчас без двадцати одной минуты семь утра',
( 6, 40):'Сейчас без двадцати минут семь утра',
( 6, 41):'Сейчас без девятнадцати минут семь утра',
( 6, 42):'Сейчас без восемнадцати минут семь утра',
( 6, 43):'Сейчас без семнадцати минут семь утра',
( 6, 44):'Сейчас без шестнадцати минут семь утра',
( 6, 45):'Сейчас без четверти семь утра',
( 6, 46):'Сейчас без четырнадцати минут семь утра',
( 6, 47):'Сейчас без тринадцати минут семь утра',
( 6, 48):'Сейчас без двенадцати минут семь утра',
( 6, 49):'Сейчас без одиннадцати минут семь утра',
( 6, 50):'Сейчас без десяти минут семь утра',
( 6, 51):'Сейчас без девяти минут семь утра',
( 6, 52):'Сейчас без восьми минут семь утра',
( 6, 53):'Сейчас без семи минут семь утра',
( 6, 54):'Сейчас без шести минут семь утра',
( 6, 55):'Сейчас без пяти минут семь утра',
( 6, 56):'Сейчас без четырёх минут семь утра',
( 6, 57):'Сейчас без трёх минут семь утра',
( 6, 58):'Сейчас без двух минут семь утра',
( 6, 59):'Сейчас без одной минуты семь утра',
( 7, 0):'Сейчас ровно семь часов утра',
( 7, 1):'Сейчас одна минута восьмого утра',
( 7, 2):'Сейчас две минуты восьмого утра',
( 7, 3):'Сейчас три минуты восьмого утра',
( 7, 4):'Сейчас четыре минуты восьмого утра',
( 7, 5):'Сейчас пять минут восьмого утра',
( 7, 6):'Сейчас шесть минут восьмого утра',
( 7, 7):'Сейчас семь минут восьмого утра',
( 7, 8):'Сейчас восемь минут восьмого утра',
( 7, 9):'Сейчас девять минут восьмого утра',
( 7, 10):'Сейчас десять минут восьмого утра',
( 7, 11):'Сейчас одиннадцать минут восьмого утра',
( 7, 12):'Сейчас двенадцать минут восьмого утра',
( 7, 13):'Сейчас тринадцать минут восьмого утра',
( 7, 14):'Сейчас четырнадцать минут восьмого утра',
( 7, 15):'Сейчас четверть восьмого утра',
( 7, 16):'Сейчас шестнадцать минут восьмого утра',
( 7, 17):'Сейчас семнадцать минут восьмого утра',
( 7, 18):'Сейчас восемнадцать минут восьмого утра',
( 7, 19):'Сейчас девятнадцать минут восьмого утра',
( 7, 20):'Сейчас двадцать минут восьмого утра',
( 7, 21):'Сейчас двадцать одна минута восьмого утра',
( 7, 22):'Сейчас двадцать две минуты восьмого утра',
( 7, 23):'Сейчас двадцать три минуты восьмого утра',
( 7, 24):'Сейчас двадцать четыре минуты восьмого утра',
( 7, 25):'Сейчас двадцать пять минут восьмого утра',
( 7, 26):'Сейчас двадцать шесть минут восьмого утра',
( 7, 27):'Сейчас двадцать семь минут восьмого утра',
( 7, 28):'Сейчас двадцать восемь минут восьмого утра',
( 7, 29):'Сейчас двадцать девять минут восьмого утра',
( 7, 30):'Сейчас половина восьмого утра',
( 7, 31):'Сейчас без двадцати девяти минут восемь утра',
( 7, 32):'Сейчас без двадцати восьми минут восемь утра',
( 7, 33):'Сейчас без двадцати семи минут восемь утра',
( 7, 34):'Сейчас без двадцати шести минут восемь утра',
( 7, 35):'Сейчас без двадцати пяти минут восемь утра',
( 7, 36):'Сейчас без двадцати четырёх минут восемь утра',
( 7, 37):'Сейчас без двадцати трёх минут восемь утра',
( 7, 38):'Сейчас без двадцати двух минут восемь утра',
( 7, 39):'Сейчас без двадцати одной минуты восемь утра',
( 7, 40):'Сейчас без двадцати минут восемь утра',
( 7, 41):'Сейчас без девятнадцати минут восемь утра',
( 7, 42):'Сейчас без восемнадцати минут восемь утра',
( 7, 43):'Сейчас без семнадцати минут восемь утра',
( 7, 44):'Сейчас без шестнадцати минут восемь утра',
( 7, 45):'Сейчас без четверти восемь утра',
( 7, 46):'Сейчас без четырнадцати минут восемь утра',
( 7, 47):'Сейчас без тринадцати минут восемь утра',
( 7, 48):'Сейчас без двенадцати минут восемь утра',
( 7, 49):'Сейчас без одиннадцати минут восемь утра',
( 7, 50):'Сейчас без десяти минут восемь утра',
( 7, 51):'Сейчас без девяти минут восемь утра',
( 7, 52):'Сейчас без восьми минут восемь утра',
( 7, 53):'Сейчас без семи минут восемь утра',
( 7, 54):'Сейчас без шести минут восемь утра',
( 7, 55):'Сейчас без пяти минут восемь утра',
( 7, 56):'Сейчас без четырёх минут восемь утра',
( 7, 57):'Сейчас без трёх минут восемь утра',
( 7, 58):'Сейчас без двух минут восемь утра',
( 7, 59):'Сейчас без одной минуты восемь утра',
( 8, 0):'Сейчас ровно восемь часов утра',
( 8, 1):'Сейчас одна минута девятого утра',
( 8, 2):'Сейчас две минуты девятого утра',
( 8, 3):'Сейчас три минуты девятого утра',
( 8, 4):'Сейчас четыре минуты девятого утра',
( 8, 5):'Сейчас пять минут девятого утра',
( 8, 6):'Сейчас шесть минут девятого утра',
( 8, 7):'Сейчас семь минут девятого утра',
( 8, 8):'Сейчас восемь минут девятого утра',
( 8, 9):'Сейчас девять минут девятого утра',
( 8, 10):'Сейчас десять минут девятого утра',
( 8, 11):'Сейчас одиннадцать минут девятого утра',
( 8, 12):'Сейчас двенадцать минут девятого утра',
( 8, 13):'Сейчас тринадцать минут девятого утра',
( 8, 14):'Сейчас четырнадцать минут девятого утра',
( 8, 15):'Сейчас четверть девятого утра',
( 8, 16):'Сейчас шестнадцать минут девятого утра',
( 8, 17):'Сейчас семнадцать минут девятого утра',
( 8, 18):'Сейчас восемнадцать минут девятого утра',
( 8, 19):'Сейчас девятнадцать минут девятого утра',
( 8, 20):'Сейчас двадцать минут девятого утра',
( 8, 21):'Сейчас двадцать одна минута девятого утра',
( 8, 22):'Сейчас двадцать две минуты девятого утра',
( 8, 23):'Сейчас двадцать три минуты девятого утра',
( 8, 24):'Сейчас двадцать четыре минуты девятого утра',
( 8, 25):'Сейчас двадцать пять минут девятого утра',
( 8, 26):'Сейчас двадцать шесть минут девятого утра',
( 8, 27):'Сейчас двадцать семь минут девятого утра',
( 8, 28):'Сейчас двадцать восемь минут девятого утра',
( 8, 29):'Сейчас двадцать девять минут девятого утра',
( 8, 30):'Сейчас половина девятого утра',
( 8, 31):'Сейчас без двадцати девяти минут девять утра',
( 8, 32):'Сейчас без двадцати восьми минут девять утра',
( 8, 33):'Сейчас без двадцати семи минут девять утра',
( 8, 34):'Сейчас без двадцати шести минут девять утра',
( 8, 35):'Сейчас без двадцати пяти минут девять утра',
( 8, 36):'Сейчас без двадцати четырёх минут девять утра',
( 8, 37):'Сейчас без двадцати трёх минут девять утра',
( 8, 38):'Сейчас без двадцати двух минут девять утра',
( 8, 39):'Сейчас без двадцати одной минуты девять утра',
( 8, 40):'Сейчас без двадцати минут девять утра',
( 8, 41):'Сейчас без девятнадцати минут девять утра',
( 8, 42):'Сейчас без восемнадцати минут девять утра',
( 8, 43):'Сейчас без семнадцати минут девять утра',
( 8, 44):'Сейчас без шестнадцати минут девять утра',
( 8, 45):'Сейчас без четверти девять утра',
( 8, 46):'Сейчас без четырнадцати минут девять утра',
( 8, 47):'Сейчас без тринадцати минут девять утра',
( 8, 48):'Сейчас без двенадцати минут девять утра',
( 8, 49):'Сейчас без одиннадцати минут девять утра',
( 8, 50):'Сейчас без десяти минут девять утра',
( 8, 51):'Сейчас без девяти минут девять утра',
( 8, 52):'Сейчас без восьми минут девять утра',
( 8, 53):'Сейчас без семи минут девять утра',
( 8, 54):'Сейчас без шести минут девять утра',
( 8, 55):'Сейчас без пяти минут девять утра',
( 8, 56):'Сейчас без четырёх минут девять утра',
( 8, 57):'Сейчас без трёх минут девять утра',
( 8, 58):'Сейчас без двух минут девять утра',
( 8, 59):'Сейчас без одной минуты девять утра',
( 9, 0):'Сейчас ровно девять часов утра',
( 9, 1):'Сейчас одна минута десятого утра',
( 9, 2):'Сейчас две минуты десятого утра',
( 9, 3):'Сейчас три минуты десятого утра',
( 9, 4):'Сейчас четыре минуты десятого утра',
( 9, 5):'Сейчас пять минут десятого утра',
( 9, 6):'Сейчас шесть минут десятого утра',
( 9, 7):'Сейчас семь минут десятого утра',
( 9, 8):'Сейчас восемь минут десятого утра',
( 9, 9):'Сейчас девять минут десятого утра',
( 9, 10):'Сейчас десять минут десятого утра',
( 9, 11):'Сейчас одиннадцать минут десятого утра',
( 9, 12):'Сейчас двенадцать минут десятого утра',
( 9, 13):'Сейчас тринадцать минут десятого утра',
( 9, 14):'Сейчас четырнадцать минут десятого утра',
( 9, 15):'Сейчас четверть десятого утра',
( 9, 16):'Сейчас шестнадцать минут десятого утра',
( 9, 17):'Сейчас семнадцать минут десятого утра',
( 9, 18):'Сейчас восемнадцать минут десятого утра',
( 9, 19):'Сейчас девятнадцать минут десятого утра',
( 9, 20):'Сейчас двадцать минут десятого утра',
( 9, 21):'Сейчас двадцать одна минута десятого утра',
( 9, 22):'Сейчас двадцать две минуты десятого утра',
( 9, 23):'Сейчас двадцать три минуты десятого утра',
( 9, 24):'Сейчас двадцать четыре минуты десятого утра',
( 9, 25):'Сейчас двадцать пять минут десятого утра',
( 9, 26):'Сейчас двадцать шесть минут десятого утра',
( 9, 27):'Сейчас двадцать семь минут десятого утра',
( 9, 28):'Сейчас двадцать восемь минут десятого утра',
( 9, 29):'Сейчас двадцать девять минут десятого утра',
( 9, 30):'Сейчас половина десятого утра',
( 9, 31):'Сейчас без двадцати девяти минут десять утра',
( 9, 32):'Сейчас без двадцати восьми минут десять утра',
( 9, 33):'Сейчас без двадцати семи минут десять утра',
( 9, 34):'Сейчас без двадцати шести минут десять утра',
( 9, 35):'Сейчас без двадцати пяти минут десять утра',
( 9, 36):'Сейчас без двадцати четырёх минут десять утра',
( 9, 37):'Сейчас без двадцати трёх минут десять утра',
( 9, 38):'Сейчас без двадцати двух минут десять утра',
( 9, 39):'Сейчас без двадцати одной минуты десять утра',
( 9, 40):'Сейчас без двадцати минут десять утра',
( 9, 41):'Сейчас без девятнадцати минут десять утра',
( 9, 42):'Сейчас без восемнадцати минут десять утра',
( 9, 43):'Сейчас без семнадцати минут десять утра',
( 9, 44):'Сейчас без шестнадцати минут десять утра',
( 9, 45):'Сейчас без четверти десять утра',
( 9, 46):'Сейчас без четырнадцати минут десять утра',
( 9, 47):'Сейчас без тринадцати минут десять утра',
( 9, 48):'Сейчас без двенадцати минут десять утра',
( 9, 49):'Сейчас без одиннадцати минут десять утра',
( 9, 50):'Сейчас без десяти минут десять утра',
( 9, 51):'Сейчас без девяти минут десять утра',
( 9, 52):'Сейчас без восьми минут десять утра',
( 9, 53):'Сейчас без семи минут десять утра',
( 9, 54):'Сейчас без шести минут десять утра',
( 9, 55):'Сейчас без пяти минут десять утра',
( 9, 56):'Сейчас без четырёх минут десять утра',
( 9, 57):'Сейчас без трёх минут десять утра',
( 9, 58):'Сейчас без двух минут десять утра',
( 9, 59):'Сейчас без одной минуты десять утра',
(10, 0):'Сейчас ровно десять часов утра',
(10, 1):'Сейчас одна минута одиннадцатого утра',
(10, 2):'Сейчас две минуты одиннадцатого утра',
(10, 3):'Сейчас три минуты одиннадцатого утра',
(10, 4):'Сейчас четыре минуты одиннадцатого утра',
(10, 5):'Сейчас пять минут одиннадцатого утра',
(10, 6):'Сейчас шесть минут одиннадцатого утра',
(10, 7):'Сейчас семь минут одиннадцатого утра',
(10, 8):'Сейчас восемь минут одиннадцатого утра',
(10, 9):'Сейчас девять минут одиннадцатого утра',
(10, 10):'Сейчас десять минут одиннадцатого утра',
(10, 11):'Сейчас одиннадцать минут одиннадцатого утра',
(10, 12):'Сейчас двенадцать минут одиннадцатого утра',
(10, 13):'Сейчас тринадцать минут одиннадцатого утра',
(10, 14):'Сейчас четырнадцать минут одиннадцатого утра',
(10, 15):'Сейчас четверть одиннадцатого утра',
(10, 16):'Сейчас шестнадцать минут одиннадцатого утра',
(10, 17):'Сейчас семнадцать минут одиннадцатого утра',
(10, 18):'Сейчас восемнадцать минут одиннадцатого утра',
(10, 19):'Сейчас девятнадцать минут одиннадцатого утра',
(10, 20):'Сейчас двадцать минут одиннадцатого утра',
(10, 21):'Сейчас двадцать одна минута одиннадцатого утра',
(10, 22):'Сейчас двадцать две минуты одиннадцатого утра',
(10, 23):'Сейчас двадцать три минуты одиннадцатого утра',
(10, 24):'Сейчас двадцать четыре минуты одиннадцатого утра',
(10, 25):'Сейчас двадцать пять минут одиннадцатого утра',
(10, 26):'Сейчас двадцать шесть минут одиннадцатого утра',
(10, 27):'Сейчас двадцать семь минут одиннадцатого утра',
(10, 28):'Сейчас двадцать восемь минут одиннадцатого утра',
(10, 29):'Сейчас двадцать девять минут одиннадцатого утра',
(10, 30):'Сейчас половина одиннадцатого утра',
(10, 31):'Сейчас без двадцати девяти минут одиннадцать утра',
(10, 32):'Сейчас без двадцати восьми минут одиннадцать утра',
(10, 33):'Сейчас без двадцати семи минут одиннадцать утра',
(10, 34):'Сейчас без двадцати шести минут одиннадцать утра',
(10, 35):'Сейчас без двадцати пяти минут одиннадцать утра',
(10, 36):'Сейчас без двадцати четырёх минут одиннадцать утра',
(10, 37):'Сейчас без двадцати трёх минут одиннадцать утра',
(10, 38):'Сейчас без двадцати двух минут одиннадцать утра',
(10, 39):'Сейчас без двадцати одной минуты одиннадцать утра',
(10, 40):'Сейчас без двадцати минут одиннадцать утра',
(10, 41):'Сейчас без девятнадцати минут одиннадцать утра',
(10, 42):'Сейчас без восемнадцати минут одиннадцать утра',
(10, 43):'Сейчас без семнадцати минут одиннадцать утра',
(10, 44):'Сейчас без шестнадцати минут одиннадцать утра',
(10, 45):'Сейчас без четверти одиннадцать утра',
(10, 46):'Сейчас без четырнадцати минут одиннадцать утра',
(10, 47):'Сейчас без тринадцати минут одиннадцать утра',
(10, 48):'Сейчас без двенадцати минут одиннадцать утра',
(10, 49):'Сейчас без одиннадцати минут одиннадцать утра',
(10, 50):'Сейчас без десяти минут одиннадцать утра',
(10, 51):'Сейчас без девяти минут одиннадцать утра',
(10, 52):'Сейчас без восьми минут одиннадцать утра',
(10, 53):'Сейчас без семи минут одиннадцать утра',
(10, 54):'Сейчас без шести минут одиннадцать утра',
(10, 55):'Сейчас без пяти минут одиннадцать утра',
(10, 56):'Сейчас без четырёх минут одиннадцать утра',
(10, 57):'Сейчас без трёх минут одиннадцать утра',
(10, 58):'Сейчас без двух минут одиннадцать утра',
(10, 59):'Сейчас без одной минуты одиннадцать утра',
(11, 0):'Сейчас ровно одиннадцать часов утра',
(11, 1):'Сейчас одна минута двенадцатого утра',
(11, 2):'Сейчас две минуты двенадцатого утра',
(11, 3):'Сейчас три минуты двенадцатого утра',
(11, 4):'Сейчас четыре минуты двенадцатого утра',
(11, 5):'Сейчас пять минут двенадцатого утра',
(11, 6):'Сейчас шесть минут двенадцатого утра',
(11, 7):'Сейчас семь минут двенадцатого утра',
(11, 8):'Сейчас восемь минут двенадцатого утра',
(11, 9):'Сейчас девять минут двенадцатого утра',
(11, 10):'Сейчас десять минут двенадцатого утра',
(11, 11):'Сейчас одиннадцать минут двенадцатого утра',
(11, 12):'Сейчас двенадцать минут двенадцатого утра',
(11, 13):'Сейчас тринадцать минут двенадцатого утра',
(11, 14):'Сейчас четырнадцать минут двенадцатого утра',
(11, 15):'Сейчас четверть двенадцатого утра',
(11, 16):'Сейчас шестнадцать минут двенадцатого утра',
(11, 17):'Сейчас семнадцать минут двенадцатого утра',
(11, 18):'Сейчас восемнадцать минут двенадцатого утра',
(11, 19):'Сейчас девятнадцать минут двенадцатого утра',
(11, 20):'Сейчас двадцать минут двенадцатого утра',
(11, 21):'Сейчас двадцать одна минута двенадцатого утра',
(11, 22):'Сейчас двадцать две минуты двенадцатого утра',
(11, 23):'Сейчас двадцать три минуты двенадцатого утра',
(11, 24):'Сейчас двадцать четыре минуты двенадцатого утра',
(11, 25):'Сейчас двадцать пять минут двенадцатого утра',
(11, 26):'Сейчас двадцать шесть минут двенадцатого утра',
(11, 27):'Сейчас двадцать семь минут двенадцатого утра',
(11, 28):'Сейчас двадцать восемь минут двенадцатого утра',
(11, 29):'Сейчас двадцать девять минут двенадцатого утра',
(11, 30):'Сейчас половина двенадцатого дня',
(11, 31):'Сейчас без двадцати девяти минут двенадцать дня',
(11, 32):'Сейчас без двадцати восьми минут двенадцать дня',
(11, 33):'Сейчас без двадцати семи минут двенадцать дня',
(11, 34):'Сейчас без двадцати шести минут двенадцать дня',
(11, 35):'Сейчас без двадцати пяти минут двенадцать дня',
(11, 36):'Сейчас без двадцати четырёх минут двенадцать дня',
(11, 37):'Сейчас без двадцати трёх минут двенадцать дня',
(11, 38):'Сейчас без двадцати двух минут двенадцать дня',
(11, 39):'Сейчас без двадцати одной минуты двенадцать дня',
(11, 40):'Сейчас без двадцати минут двенадцать дня',
(11, 41):'Сейчас без девятнадцати минут двенадцать дня',
(11, 42):'Сейчас без восемнадцати минут двенадцать дня',
(11, 43):'Сейчас без семнадцати минут двенадцать дня',
(11, 44):'Сейчас без шестнадцати минут двенадцать дня',
(11, 45):'Сейчас без четверти двенадцать дня',
(11, 46):'Сейчас без четырнадцати минут двенадцать дня',
(11, 47):'Сейчас без тринадцати минут двенадцать дня',
(11, 48):'Сейчас без двенадцати минут двенадцать дня',
(11, 49):'Сейчас без одиннадцати минут двенадцать дня',
(11, 50):'Сейчас без десяти минут двенадцать дня',
(11, 51):'Сейчас без девяти минут двенадцать дня',
(11, 52):'Сейчас без восьми минут двенадцать дня',
(11, 53):'Сейчас без семи минут двенадцать дня',
(11, 54):'Сейчас без шести минут двенадцать дня',
(11, 55):'Сейчас без пяти минут двенадцать дня',
(11, 56):'Сейчас без четырёх минут двенадцать дня',
(11, 57):'Сейчас без трёх минут двенадцать дня',
(11, 58):'Сейчас без двух минут двенадцать дня',
(11, 59):'Сейчас без одной минуты двенадцать дня',
(12, 0):'Сейчас полдень',
(12, 1):'Сейчас одна минута первого дня',
(12, 2):'Сейчас две минуты первого дня',
(12, 3):'Сейчас три минуты первого дня',
(12, 4):'Сейчас четыре минуты первого дня',
(12, 5):'Сейчас пять минут первого дня',
(12, 6):'Сейчас шесть минут первого дня',
(12, 7):'Сейчас семь минут первого дня',
(12, 8):'Сейчас восемь минут первого дня',
(12, 9):'Сейчас девять минут первого дня',
(12, 10):'Сейчас десять минут первого дня',
(12, 11):'Сейчас одиннадцать минут первого дня',
(12, 12):'Сейчас двенадцать минут первого дня',
(12, 13):'Сейчас тринадцать минут первого дня',
(12, 14):'Сейчас четырнадцать минут первого дня',
(12, 15):'Сейчас четверть первого дня',
(12, 16):'Сейчас шестнадцать минут первого дня',
(12, 17):'Сейчас семнадцать минут первого дня',
(12, 18):'Сейчас восемнадцать минут первого дня',
(12, 19):'Сейчас девятнадцать минут первого дня',
(12, 20):'Сейчас двадцать минут первого дня',
(12, 21):'Сейчас двадцать одна минута первого дня',
(12, 22):'Сейчас двадцать две минуты первого дня',
(12, 23):'Сейчас двадцать три минуты первого дня',
(12, 24):'Сейчас двадцать четыре минуты первого дня',
(12, 25):'Сейчас двадцать пять минут первого дня',
(12, 26):'Сейчас двадцать шесть минут первого дня',
(12, 27):'Сейчас двадцать семь минут первого дня',
(12, 28):'Сейчас двадцать восемь минут первого дня',
(12, 29):'Сейчас двадцать девять минут первого дня',
(12, 30):'Сейчас половина первого дня',
(12, 31):'Сейчас без двадцати девяти минут час дня',
(12, 32):'Сейчас без двадцати восьми минут час дня',
(12, 33):'Сейчас без двадцати семи минут час дня',
(12, 34):'Сейчас без двадцати шести минут час дня',
(12, 35):'Сейчас без двадцати пяти минут час дня',
(12, 36):'Сейчас без двадцати четырёх минут час дня',
(12, 37):'Сейчас без двадцати трёх минут час дня',
(12, 38):'Сейчас без двадцати двух минут час дня',
(12, 39):'Сейчас без двадцати одной минуты час дня',
(12, 40):'Сейчас без двадцати минут час дня',
(12, 41):'Сейчас без девятнадцати минут час дня',
(12, 42):'Сейчас без восемнадцати минут час дня',
(12, 43):'Сейчас без семнадцати минут час дня',
(12, 44):'Сейчас без шестнадцати минут час дня',
(12, 45):'Сейчас без четверти час дня',
(12, 46):'Сейчас без четырнадцати минут час дня',
(12, 47):'Сейчас без тринадцати минут час дня',
(12, 48):'Сейчас без двенадцати минут час дня',
(12, 49):'Сейчас без одиннадцати минут час дня',
(12, 50):'Сейчас без десяти минут час дня',
(12, 51):'Сейчас без девяти минут час дня',
(12, 52):'Сейчас без восьми минут час дня',
(12, 53):'Сейчас без семи минут час дня',
(12, 54):'Сейчас без шести минут час дня',
(12, 55):'Сейчас без пяти минут час дня',
(12, 56):'Сейчас без четырёх минут час дня',
(12, 57):'Сейчас без трёх минут час дня',
(12, 58):'Сейчас без двух минут час дня',
(12, 59):'Сейчас без одной минуты час дня',
(13, 0):'Сейчас ровно один час дня',
(13, 1):'Сейчас одна минута второго дня',
(13, 2):'Сейчас две минуты второго дня',
(13, 3):'Сейчас три минуты второго дня',
(13, 4):'Сейчас четыре минуты второго дня',
(13, 5):'Сейчас пять минут второго дня',
(13, 6):'Сейчас шесть минут второго дня',
(13, 7):'Сейчас семь минут второго дня',
(13, 8):'Сейчас восемь минут второго дня',
(13, 9):'Сейчас девять минут второго дня',
(13, 10):'Сейчас десять минут второго дня',
(13, 11):'Сейчас одиннадцать минут второго дня',
(13, 12):'Сейчас двенадцать минут второго дня',
(13, 13):'Сейчас тринадцать минут второго дня',
(13, 14):'Сейчас четырнадцать минут второго дня',
(13, 15):'Сейчас четверть второго дня',
(13, 16):'Сейчас шестнадцать минут второго дня',
(13, 17):'Сейчас семнадцать минут второго дня',
(13, 18):'Сейчас восемнадцать минут второго дня',
(13, 19):'Сейчас девятнадцать минут второго дня',
(13, 20):'Сейчас двадцать минут второго дня',
(13, 21):'Сейчас двадцать одна минута второго дня',
(13, 22):'Сейчас двадцать две минуты второго дня',
(13, 23):'Сейчас двадцать три минуты второго дня',
(13, 24):'Сейчас двадцать четыре минуты второго дня',
(13, 25):'Сейчас двадцать пять минут второго дня',
(13, 26):'Сейчас двадцать шесть минут второго дня',
(13, 27):'Сейчас двадцать семь минут второго дня',
(13, 28):'Сейчас двадцать восемь минут второго дня',
(13, 29):'Сейчас двадцать девять минут второго дня',
(13, 30):'Сейчас половина второго дня',
(13, 31):'Сейчас без двадцати девяти минут два часа дня',
(13, 32):'Сейчас без двадцати восьми минут два часа дня',
(13, 33):'Сейчас без двадцати семи минут два часа дня',
(13, 34):'Сейчас без двадцати шести минут два часа дня',
(13, 35):'Сейчас без двадцати пяти минут два часа дня',
(13, 36):'Сейчас без двадцати четырёх минут два часа дня',
(13, 37):'Сейчас без двадцати трёх минут два часа дня',
(13, 38):'Сейчас без двадцати двух минут два часа дня',
(13, 39):'Сейчас без двадцати одной минуты два часа дня',
(13, 40):'Сейчас без двадцати минут два часа дня',
(13, 41):'Сейчас без девятнадцати минут два часа дня',
(13, 42):'Сейчас без восемнадцати минут два часа дня',
(13, 43):'Сейчас без семнадцати минут два часа дня',
(13, 44):'Сейчас без шестнадцати минут два часа дня',
(13, 45):'Сейчас без четверти два часа дня',
(13, 46):'Сейчас без четырнадцати минут два часа дня',
(13, 47):'Сейчас без тринадцати минут два часа дня',
(13, 48):'Сейчас без двенадцати минут два часа дня',
(13, 49):'Сейчас без одиннадцати минут два часа дня',
(13, 50):'Сейчас без десяти минут два часа дня',
(13, 51):'Сейчас без девяти минут два часа дня',
(13, 52):'Сейчас без восьми минут два часа дня',
(13, 53):'Сейчас без семи минут два часа дня',
(13, 54):'Сейчас без шести минут два часа дня',
(13, 55):'Сейчас без пяти минут два часа дня',
(13, 56):'Сейчас без четырёх минут два часа дня',
(13, 57):'Сейчас без трёх минут два часа дня',
(13, 58):'Сейчас без двух минут два часа дня',
(13, 59):'Сейчас без одной минуты два часа дня',
(14, 0):'Сейчас ровно два часа дня',
(14, 1):'Сейчас одна минута третьего дня',
(14, 2):'Сейчас две минуты третьего дня',
(14, 3):'Сейчас три минуты третьего дня',
(14, 4):'Сейчас четыре минуты третьего дня',
(14, 5):'Сейчас пять минут третьего дня',
(14, 6):'Сейчас шесть минут третьего дня',
(14, 7):'Сейчас семь минут третьего дня',
(14, 8):'Сейчас восемь минут третьего дня',
(14, 9):'Сейчас девять минут третьего дня',
(14, 10):'Сейчас десять минут третьего дня',
(14, 11):'Сейчас одиннадцать минут третьего дня',
(14, 12):'Сейчас двенадцать минут третьего дня',
(14, 13):'Сейчас тринадцать минут третьего дня',
(14, 14):'Сейчас четырнадцать минут третьего дня',
(14, 15):'Сейчас четверть третьего дня',
(14, 16):'Сейчас шестнадцать минут третьего дня',
(14, 17):'Сейчас семнадцать минут третьего дня',
(14, 18):'Сейчас восемнадцать минут третьего дня',
(14, 19):'Сейчас девятнадцать минут третьего дня',
(14, 20):'Сейчас двадцать минут третьего дня',
(14, 21):'Сейчас двадцать одна минута третьего дня',
(14, 22):'Сейчас двадцать две минуты третьего дня',
(14, 23):'Сейчас двадцать три минуты третьего дня',
(14, 24):'Сейчас двадцать четыре минуты третьего дня',
(14, 25):'Сейчас двадцать пять минут третьего дня',
(14, 26):'Сейчас двадцать шесть минут третьего дня',
(14, 27):'Сейчас двадцать семь минут третьего дня',
(14, 28):'Сейчас двадцать восемь минут третьего дня',
(14, 29):'Сейчас двадцать девять минут третьего дня',
(14, 30):'Сейчас половина третьего дня',
(14, 31):'Сейчас без двадцати девяти минут три часа дня',
(14, 32):'Сейчас без двадцати восьми минут три часа дня',
(14, 33):'Сейчас без двадцати семи минут три часа дня',
(14, 34):'Сейчас без двадцати шести минут три часа дня',
(14, 35):'Сейчас без двадцати пяти минут три часа дня',
(14, 36):'Сейчас без двадцати четырёх минут три часа дня',
(14, 37):'Сейчас без двадцати трёх минут три часа дня',
(14, 38):'Сейчас без двадцати двух минут три часа дня',
(14, 39):'Сейчас без двадцати одной минуты три часа дня',
(14, 40):'Сейчас без двадцати минут три часа дня',
(14, 41):'Сейчас без девятнадцати минут три часа дня',
(14, 42):'Сейчас без восемнадцати минут три часа дня',
(14, 43):'Сейчас без семнадцати минут три часа дня',
(14, 44):'Сейчас без шестнадцати минут три часа дня',
(14, 45):'Сейчас без четверти три часа дня',
(14, 46):'Сейчас без четырнадцати минут три часа дня',
(14, 47):'Сейчас без тринадцати минут три часа дня',
(14, 48):'Сейчас без двенадцати минут три часа дня',
(14, 49):'Сейчас без одиннадцати минут три часа дня',
(14, 50):'Сейчас без десяти минут три часа дня',
(14, 51):'Сейчас без девяти минут три часа дня',
(14, 52):'Сейчас без восьми минут три часа дня',
(14, 53):'Сейчас без семи минут три часа дня',
(14, 54):'Сейчас без шести минут три часа дня',
(14, 55):'Сейчас без пяти минут три часа дня',
(14, 56):'Сейчас без четырёх минут три часа дня',
(14, 57):'Сейчас без трёх минут три часа дня',
(14, 58):'Сейчас без двух минут три часа дня',
(14, 59):'Сейчас без одной минуты три часа дня',
(15, 0):'Сейчас ровно три часа дня',
(15, 1):'Сейчас одна минута четвёртого дня',
(15, 2):'Сейчас две минуты четвёртого дня',
(15, 3):'Сейчас три минуты четвёртого дня',
(15, 4):'Сейчас четыре минуты четвёртого дня',
(15, 5):'Сейчас пять минут четвёртого дня',
(15, 6):'Сейчас шесть минут четвёртого дня',
(15, 7):'Сейчас семь минут четвёртого дня',
(15, 8):'Сейчас восемь минут четвёртого дня',
(15, 9):'Сейчас девять минут четвёртого дня',
(15, 10):'Сейчас десять минут четвёртого дня',
(15, 11):'Сейчас одиннадцать минут четвёртого дня',
(15, 12):'Сейчас двенадцать минут четвёртого дня',
(15, 13):'Сейчас тринадцать минут четвёртого дня',
(15, 14):'Сейчас четырнадцать минут четвёртого дня',
(15, 15):'Сейчас четверть четвёртого дня',
(15, 16):'Сейчас шестнадцать минут четвёртого дня',
(15, 17):'Сейчас семнадцать минут четвёртого дня',
(15, 18):'Сейчас восемнадцать минут четвёртого дня',
(15, 19):'Сейчас девятнадцать минут четвёртого дня',
(15, 20):'Сейчас двадцать минут четвёртого дня',
(15, 21):'Сейчас двадцать одна минута четвёртого дня',
(15, 22):'Сейчас двадцать две минуты четвёртого дня',
(15, 23):'Сейчас двадцать три минуты четвёртого дня',
(15, 24):'Сейчас двадцать четыре минуты четвёртого дня',
(15, 25):'Сейчас двадцать пять минут четвёртого дня',
(15, 26):'Сейчас двадцать шесть минут четвёртого дня',
(15, 27):'Сейчас двадцать семь минут четвёртого дня',
(15, 28):'Сейчас двадцать восемь минут четвёртого дня',
(15, 29):'Сейчас двадцать девять минут четвёртого дня',
(15, 30):'Сейчас половина четвёртого дня',
(15, 31):'Сейчас без двадцати девяти минут четыре часа дня',
(15, 32):'Сейчас без двадцати восьми минут четыре часа дня',
(15, 33):'Сейчас без двадцати семи минут четыре часа дня',
(15, 34):'Сейчас без двадцати шести минут четыре часа дня',
(15, 35):'Сейчас без двадцати пяти минут четыре часа дня',
(15, 36):'Сейчас без двадцати четырёх минут четыре часа дня',
(15, 37):'Сейчас без двадцати трёх минут четыре часа дня',
(15, 38):'Сейчас без двадцати двух минут четыре часа дня',
(15, 39):'Сейчас без двадцати одной минуты четыре часа дня',
(15, 40):'Сейчас без двадцати минут четыре часа дня',
(15, 41):'Сейчас без девятнадцати минут четыре часа дня',
(15, 42):'Сейчас без восемнадцати минут четыре часа дня',
(15, 43):'Сейчас без семнадцати минут четыре часа дня',
(15, 44):'Сейчас без шестнадцати минут четыре часа дня',
(15, 45):'Сейчас без четверти четыре часа дня',
(15, 46):'Сейчас без четырнадцати минут четыре часа дня',
(15, 47):'Сейчас без тринадцати минут четыре часа дня',
(15, 48):'Сейчас без двенадцати минут четыре часа дня',
(15, 49):'Сейчас без одиннадцати минут четыре часа дня',
(15, 50):'Сейчас без десяти минут четыре часа дня',
(15, 51):'Сейчас без девяти минут четыре часа дня',
(15, 52):'Сейчас без восьми минут четыре часа дня',
(15, 53):'Сейчас без семи минут четыре часа дня',
(15, 54):'Сейчас без шести минут четыре часа дня',
(15, 55):'Сейчас без пяти минут четыре часа дня',
(15, 56):'Сейчас без четырёх минут четыре часа дня',
(15, 57):'Сейчас без трёх минут четыре часа дня',
(15, 58):'Сейчас без двух минут четыре часа дня',
(15, 59):'Сейчас без одной минуты четыре часа дня',
(16, 0):'Сейчас ровно четыре часа дня',
(16, 1):'Сейчас одна минута пятого дня',
(16, 2):'Сейчас две минуты пятого дня',
(16, 3):'Сейчас три минуты пятого дня',
(16, 4):'Сейчас четыре минуты пятого дня',
(16, 5):'Сейчас пять минут пятого дня',
(16, 6):'Сейчас шесть минут пятого дня',
(16, 7):'Сейчас семь минут пятого дня',
(16, 8):'Сейчас восемь минут пятого дня',
(16, 9):'Сейчас девять минут пятого дня',
(16, 10):'Сейчас десять минут пятого дня',
(16, 11):'Сейчас одиннадцать минут пятого дня',
(16, 12):'Сейчас двенадцать минут пятого дня',
(16, 13):'Сейчас тринадцать минут пятого дня',
(16, 14):'Сейчас четырнадцать минут пятого дня',
(16, 15):'Сейчас четверть пятого дня',
(16, 16):'Сейчас шестнадцать минут пятого дня',
(16, 17):'Сейчас семнадцать минут пятого дня',
(16, 18):'Сейчас восемнадцать минут пятого дня',
(16, 19):'Сейчас девятнадцать минут пятого дня',
(16, 20):'Сейчас двадцать минут пятого дня',
(16, 21):'Сейчас двадцать одна минута пятого дня',
(16, 22):'Сейчас двадцать две минуты пятого дня',
(16, 23):'Сейчас двадцать три минуты пятого дня',
(16, 24):'Сейчас двадцать четыре минуты пятого дня',
(16, 25):'Сейчас двадцать пять минут пятого дня',
(16, 26):'Сейчас двадцать шесть минут пятого дня',
(16, 27):'Сейчас двадцать семь минут пятого дня',
(16, 28):'Сейчас двадцать восемь минут пятого дня',
(16, 29):'Сейчас двадцать девять минут пятого дня',
(16, 30):'Сейчас половина пятого дня',
(16, 31):'Сейчас без двадцати девяти минут пять дня',
(16, 32):'Сейчас без двадцати восьми минут пять дня',
(16, 33):'Сейчас без двадцати семи минут пять дня',
(16, 34):'Сейчас без двадцати шести минут пять дня',
(16, 35):'Сейчас без двадцати пяти минут пять дня',
(16, 36):'Сейчас без двадцати четырёх минут пять дня',
(16, 37):'Сейчас без двадцати трёх минут пять дня',
(16, 38):'Сейчас без двадцати двух минут пять дня',
(16, 39):'Сейчас без двадцати одной минуты пять дня',
(16, 40):'Сейчас без двадцати минут пять дня',
(16, 41):'Сейчас без девятнадцати минут пять дня',
(16, 42):'Сейчас без восемнадцати минут пять дня',
(16, 43):'Сейчас без семнадцати минут пять дня',
(16, 44):'Сейчас без шестнадцати минут пять дня',
(16, 45):'Сейчас без четверти пять дня',
(16, 46):'Сейчас без четырнадцати минут пять дня',
(16, 47):'Сейчас без тринадцати минут пять дня',
(16, 48):'Сейчас без двенадцати минут пять дня',
(16, 49):'Сейчас без одиннадцати минут пять дня',
(16, 50):'Сейчас без десяти минут пять дня',
(16, 51):'Сейчас без девяти минут пять дня',
(16, 52):'Сейчас без восьми минут пять дня',
(16, 53):'Сейчас без семи минут пять дня',
(16, 54):'Сейчас без шести минут пять дня',
(16, 55):'Сейчас без пяти минут пять дня',
(16, 56):'Сейчас без четырёх минут пять дня',
(16, 57):'Сейчас без трёх минут пять дня',
(16, 58):'Сейчас без двух минут пять дня',
(16, 59):'Сейчас без одной минуты пять дня',
(17, 0):'Сейчас ровно пять часов дня',
(17, 1):'Сейчас одна минута шестого вечера',
(17, 2):'Сейчас две минуты шестого вечера',
(17, 3):'Сейчас три минуты шестого вечера',
(17, 4):'Сейчас четыре минуты шестого вечера',
(17, 5):'Сейчас пять минут шестого вечера',
(17, 6):'Сейчас шесть минут шестого вечера',
(17, 7):'Сейчас семь минут шестого вечера',
(17, 8):'Сейчас восемь минут шестого вечера',
(17, 9):'Сейчас девять минут шестого вечера',
(17, 10):'Сейчас десять минут шестого вечера',
(17, 11):'Сейчас одиннадцать минут шестого вечера',
(17, 12):'Сейчас двенадцать минут шестого вечера',
(17, 13):'Сейчас тринадцать минут шестого вечера',
(17, 14):'Сейчас четырнадцать минут шестого вечера',
(17, 15):'Сейчас четверть шестого вечера',
(17, 16):'Сейчас шестнадцать минут шестого вечера',
(17, 17):'Сейчас семнадцать минут шестого вечера',
(17, 18):'Сейчас восемнадцать минут шестого вечера',
(17, 19):'Сейчас девятнадцать минут шестого вечера',
(17, 20):'Сейчас двадцать минут шестого вечера',
(17, 21):'Сейчас двадцать одна минута шестого вечера',
(17, 22):'Сейчас двадцать две минуты шестого вечера',
(17, 23):'Сейчас двадцать три минуты шестого вечера',
(17, 24):'Сейчас двадцать четыре минуты шестого вечера',
(17, 25):'Сейчас двадцать пять минут шестого вечера',
(17, 26):'Сейчас двадцать шесть минут шестого вечера',
(17, 27):'Сейчас двадцать семь минут шестого вечера',
(17, 28):'Сейчас двадцать восемь минут шестого вечера',
(17, 29):'Сейчас двадцать девять минут шестого вечера',
(17, 30):'Сейчас половина шестого вечера',
(17, 31):'Сейчас без двадцати девяти минут шесть вечера',
(17, 32):'Сейчас без двадцати восьми минут шесть вечера',
(17, 33):'Сейчас без двадцати семи минут шесть вечера',
(17, 34):'Сейчас без двадцати шести минут шесть вечера',
(17, 35):'Сейчас без двадцати пяти минут шесть вечера',
(17, 36):'Сейчас без двадцати четырёх минут шесть вечера',
(17, 37):'Сейчас без двадцати трёх минут шесть вечера',
(17, 38):'Сейчас без двадцати двух минут шесть вечера',
(17, 39):'Сейчас без двадцати одной минуты шесть вечера',
(17, 40):'Сейчас без двадцати минут шесть вечера',
(17, 41):'Сейчас без девятнадцати минут шесть вечера',
(17, 42):'Сейчас без восемнадцати минут шесть вечера',
(17, 43):'Сейчас без семнадцати минут шесть вечера',
(17, 44):'Сейчас без шестнадцати минут шесть вечера',
(17, 45):'Сейчас без четверти шесть вечера',
(17, 46):'Сейчас без четырнадцати минут шесть вечера',
(17, 47):'Сейчас без тринадцати минут шесть вечера',
(17, 48):'Сейчас без двенадцати минут шесть вечера',
(17, 49):'Сейчас без одиннадцати минут шесть вечера',
(17, 50):'Сейчас без десяти минут шесть вечера',
(17, 51):'Сейчас без девяти минут шесть вечера',
(17, 52):'Сейчас без восьми минут шесть вечера',
(17, 53):'Сейчас без семи минут шесть вечера',
(17, 54):'Сейчас без шести минут шесть вечера',
(17, 55):'Сейчас без пяти минут шесть вечера',
(17, 56):'Сейчас без четырёх минут шесть вечера',
(17, 57):'Сейчас без трёх минут шесть вечера',
(17, 58):'Сейчас без двух минут шесть вечера',
(17, 59):'Сейчас без одной минуты шесть вечера',
(18, 0):'Сейчас ровно шесть часов вечера',
(18, 1):'Сейчас одна минута седьмого вечера',
(18, 2):'Сейчас две минуты седьмого вечера',
(18, 3):'Сейчас три минуты седьмого вечера',
(18, 4):'Сейчас четыре минуты седьмого вечера',
(18, 5):'Сейчас пять минут седьмого вечера',
(18, 6):'Сейчас шесть минут седьмого вечера',
(18, 7):'Сейчас семь минут седьмого вечера',
(18, 8):'Сейчас восемь минут седьмого вечера',
(18, 9):'Сейчас девять минут седьмого вечера',
(18, 10):'Сейчас десять минут седьмого вечера',
(18, 11):'Сейчас одиннадцать минут седьмого вечера',
(18, 12):'Сейчас двенадцать минут седьмого вечера',
(18, 13):'Сейчас тринадцать минут седьмого вечера',
(18, 14):'Сейчас четырнадцать минут седьмого вечера',
(18, 15):'Сейчас четверть седьмого вечера',
(18, 16):'Сейчас шестнадцать минут седьмого вечера',
(18, 17):'Сейчас семнадцать минут седьмого вечера',
(18, 18):'Сейчас восемнадцать минут седьмого вечера',
(18, 19):'Сейчас девятнадцать минут седьмого вечера',
(18, 20):'Сейчас двадцать минут седьмого вечера',
(18, 21):'Сейчас двадцать одна минута седьмого вечера',
(18, 22):'Сейчас двадцать две минуты седьмого вечера',
(18, 23):'Сейчас двадцать три минуты седьмого вечера',
(18, 24):'Сейчас двадцать четыре минуты седьмого вечера',
(18, 25):'Сейчас двадцать пять минут седьмого вечера',
(18, 26):'Сейчас двадцать шесть минут седьмого вечера',
(18, 27):'Сейчас двадцать семь минут седьмого вечера',
(18, 28):'Сейчас двадцать восемь минут седьмого вечера',
(18, 29):'Сейчас двадцать девять минут седьмого вечера',
(18, 30):'Сейчас половина седьмого вечера',
(18, 31):'Сейчас без двадцати девяти минут семь вечера',
(18, 32):'Сейчас без двадцати восьми минут семь вечера',
(18, 33):'Сейчас без двадцати семи минут семь вечера',
(18, 34):'Сейчас без двадцати шести минут семь вечера',
(18, 35):'Сейчас без двадцати пяти минут семь вечера',
(18, 36):'Сейчас без двадцати четырёх минут семь вечера',
(18, 37):'Сейчас без двадцати трёх минут семь вечера',
(18, 38):'Сейчас без двадцати двух минут семь вечера',
(18, 39):'Сейчас без двадцати одной минуты семь вечера',
(18, 40):'Сейчас без двадцати минут семь вечера',
(18, 41):'Сейчас без девятнадцати минут семь вечера',
(18, 42):'Сейчас без восемнадцати минут семь вечера',
(18, 43):'Сейчас без семнадцати минут семь вечера',
(18, 44):'Сейчас без шестнадцати минут семь вечера',
(18, 45):'Сейчас без четверти семь вечера',
(18, 46):'Сейчас без четырнадцати минут семь вечера',
(18, 47):'Сейчас без тринадцати минут семь вечера',
(18, 48):'Сейчас без двенадцати минут семь вечера',
(18, 49):'Сейчас без одиннадцати минут семь вечера',
(18, 50):'Сейчас без десяти минут семь вечера',
(18, 51):'Сейчас без девяти минут семь вечера',
(18, 52):'Сейчас без восьми минут семь вечера',
(18, 53):'Сейчас без семи минут семь вечера',
(18, 54):'Сейчас без шести минут семь вечера',
(18, 55):'Сейчас без пяти минут семь вечера',
(18, 56):'Сейчас без четырёх минут семь вечера',
(18, 57):'Сейчас без трёх минут семь вечера',
(18, 58):'Сейчас без двух минут семь вечера',
(18, 59):'Сейчас без одной минуты семь вечера',
(19, 0):'Сейчас ровно семь часов вечера',
(19, 1):'Сейчас одна минута восьмого вечера',
(19, 2):'Сейчас две минуты восьмого вечера',
(19, 3):'Сейчас три минуты восьмого вечера',
(19, 4):'Сейчас четыре минуты восьмого вечера',
(19, 5):'Сейчас пять минут восьмого вечера',
(19, 6):'Сейчас шесть минут восьмого вечера',
(19, 7):'Сейчас семь минут восьмого вечера',
(19, 8):'Сейчас восемь минут восьмого вечера',
(19, 9):'Сейчас девять минут восьмого вечера',
(19, 10):'Сейчас десять минут восьмого вечера',
(19, 11):'Сейчас одиннадцать минут восьмого вечера',
(19, 12):'Сейчас двенадцать минут восьмого вечера',
(19, 13):'Сейчас тринадцать минут восьмого вечера',
(19, 14):'Сейчас четырнадцать минут восьмого вечера',
(19, 15):'Сейчас четверть восьмого вечера',
(19, 16):'Сейчас шестнадцать минут восьмого вечера',
(19, 17):'Сейчас семнадцать минут восьмого вечера',
(19, 18):'Сейчас восемнадцать минут восьмого вечера',
(19, 19):'Сейчас девятнадцать минут восьмого вечера',
(19, 20):'Сейчас двадцать минут восьмого вечера',
(19, 21):'Сейчас двадцать одна минута восьмого вечера',
(19, 22):'Сейчас двадцать две минуты восьмого вечера',
(19, 23):'Сейчас двадцать три минуты восьмого вечера',
(19, 24):'Сейчас двадцать четыре минуты восьмого вечера',
(19, 25):'Сейчас двадцать пять минут восьмого вечера',
(19, 26):'Сейчас двадцать шесть минут восьмого вечера',
(19, 27):'Сейчас двадцать семь минут восьмого вечера',
(19, 28):'Сейчас двадцать восемь минут восьмого вечера',
(19, 29):'Сейчас двадцать девять минут восьмого вечера',
(19, 30):'Сейчас половина восьмого вечера',
(19, 31):'Сейчас без двадцати девяти минут восемь вечера',
(19, 32):'Сейчас без двадцати восьми минут восемь вечера',
(19, 33):'Сейчас без двадцати семи минут восемь вечера',
(19, 34):'Сейчас без двадцати шести минут восемь вечера',
(19, 35):'Сейчас без двадцати пяти минут восемь вечера',
(19, 36):'Сейчас без двадцати четырёх минут восемь вечера',
(19, 37):'Сейчас без двадцати трёх минут восемь вечера',
(19, 38):'Сейчас без двадцати двух минут восемь вечера',
(19, 39):'Сейчас без двадцати одной минуты восемь вечера',
(19, 40):'Сейчас без двадцати минут восемь вечера',
(19, 41):'Сейчас без девятнадцати минут восемь вечера',
(19, 42):'Сейчас без восемнадцати минут восемь вечера',
(19, 43):'Сейчас без семнадцати минут восемь вечера',
(19, 44):'Сейчас без шестнадцати минут восемь вечера',
(19, 45):'Сейчас без четверти восемь вечера',
(19, 46):'Сейчас без четырнадцати минут восемь вечера',
(19, 47):'Сейчас без тринадцати минут восемь вечера',
(19, 48):'Сейчас без двенадцати минут восемь вечера',
(19, 49):'Сейчас без одиннадцати минут восемь вечера',
(19, 50):'Сейчас без десяти минут восемь вечера',
(19, 51):'Сейчас без девяти минут восемь вечера',
(19, 52):'Сейчас без восьми минут восемь вечера',
(19, 53):'Сейчас без семи минут восемь вечера',
(19, 54):'Сейчас без шести минут восемь вечера',
(19, 55):'Сейчас без пяти минут восемь вечера',
(19, 56):'Сейчас без четырёх минут восемь вечера',
(19, 57):'Сейчас без трёх минут восемь вечера',
(19, 58):'Сейчас без двух минут восемь вечера',
(19, 59):'Сейчас без одной минуты восемь вечера',
(20, 0):'Сейчас ровно восемь часов вечера',
(20, 1):'Сейчас одна минута девятого вечера',
(20, 2):'Сейчас две минуты девятого вечера',
(20, 3):'Сейчас три минуты девятого вечера',
(20, 4):'Сейчас четыре минуты девятого вечера',
(20, 5):'Сейчас пять минут девятого вечера',
(20, 6):'Сейчас шесть минут девятого вечера',
(20, 7):'Сейчас семь минут девятого вечера',
(20, 8):'Сейчас восемь минут девятого вечера',
(20, 9):'Сейчас девять минут девятого вечера',
(20, 10):'Сейчас десять минут девятого вечера',
(20, 11):'Сейчас одиннадцать минут девятого вечера',
(20, 12):'Сейчас двенадцать минут девятого вечера',
(20, 13):'Сейчас тринадцать минут девятого вечера',
(20, 14):'Сейчас четырнадцать минут девятого вечера',
(20, 15):'Сейчас четверть девятого вечера',
(20, 16):'Сейчас шестнадцать минут девятого вечера',
(20, 17):'Сейчас семнадцать минут девятого вечера',
(20, 18):'Сейчас восемнадцать минут девятого вечера',
(20, 19):'Сейчас девятнадцать минут девятого вечера',
(20, 20):'Сейчас двадцать минут девятого вечера',
(20, 21):'Сейчас двадцать одна минута девятого вечера',
(20, 22):'Сейчас двадцать две минуты девятого вечера',
(20, 23):'Сейчас двадцать три минуты девятого вечера',
(20, 24):'Сейчас двадцать четыре минуты девятого вечера',
(20, 25):'Сейчас двадцать пять минут девятого вечера',
(20, 26):'Сейчас двадцать шесть минут девятого вечера',
(20, 27):'Сейчас двадцать семь минут девятого вечера',
(20, 28):'Сейчас двадцать восемь минут девятого вечера',
(20, 29):'Сейчас двадцать девять минут девятого вечера',
(20, 30):'Сейчас половина девятого вечера',
(20, 31):'Сейчас без двадцати девяти минут девять вечера',
(20, 32):'Сейчас без двадцати восьми минут девять вечера',
(20, 33):'Сейчас без двадцати семи минут девять вечера',
(20, 34):'Сейчас без двадцати шести минут девять вечера',
(20, 35):'Сейчас без двадцати пяти минут девять вечера',
(20, 36):'Сейчас без двадцати четырёх минут девять вечера',
(20, 37):'Сейчас без двадцати трёх минут девять вечера',
(20, 38):'Сейчас без двадцати двух минут девять вечера',
(20, 39):'Сейчас без двадцати одной минуты девять вечера',
(20, 40):'Сейчас без двадцати минут девять вечера',
(20, 41):'Сейчас без девятнадцати минут девять вечера',
(20, 42):'Сейчас без восемнадцати минут девять вечера',
(20, 43):'Сейчас без семнадцати минут девять вечера',
(20, 44):'Сейчас без шестнадцати минут девять вечера',
(20, 45):'Сейчас без четверти девять вечера',
(20, 46):'Сейчас без четырнадцати минут девять вечера',
(20, 47):'Сейчас без тринадцати минут девять вечера',
(20, 48):'Сейчас без двенадцати минут девять вечера',
(20, 49):'Сейчас без одиннадцати минут девять вечера',
(20, 50):'Сейчас без десяти минут девять вечера',
(20, 51):'Сейчас без девяти минут девять вечера',
(20, 52):'Сейчас без восьми минут девять вечера',
(20, 53):'Сейчас без семи минут девять вечера',
(20, 54):'Сейчас без шести минут девять вечера',
(20, 55):'Сейчас без пяти минут девять вечера',
(20, 56):'Сейчас без четырёх минут девять вечера',
(20, 57):'Сейчас без трёх минут девять вечера',
(20, 58):'Сейчас без двух минут девять вечера',
(20, 59):'Сейчас без одной минуты девять вечера',
(21, 0):'Сейчас ровно девять часов вечера',
(21, 1):'Сейчас одна минута десятого вечера',
(21, 2):'Сейчас две минуты десятого вечера',
(21, 3):'Сейчас три минуты десятого вечера',
(21, 4):'Сейчас четыре минуты десятого вечера',
(21, 5):'Сейчас пять минут десятого вечера',
(21, 6):'Сейчас шесть минут десятого вечера',
(21, 7):'Сейчас семь минут десятого вечера',
(21, 8):'Сейчас восемь минут десятого вечера',
(21, 9):'Сейчас девять минут десятого вечера',
(21, 10):'Сейчас десять минут десятого вечера',
(21, 11):'Сейчас одиннадцать минут десятого вечера',
(21, 12):'Сейчас двенадцать минут десятого вечера',
(21, 13):'Сейчас тринадцать минут десятого вечера',
(21, 14):'Сейчас четырнадцать минут десятого вечера',
(21, 15):'Сейчас четверть десятого вечера',
(21, 16):'Сейчас шестнадцать минут десятого вечера',
(21, 17):'Сейчас семнадцать минут десятого вечера',
(21, 18):'Сейчас восемнадцать минут десятого вечера',
(21, 19):'Сейчас девятнадцать минут десятого вечера',
(21, 20):'Сейчас двадцать минут десятого вечера',
(21, 21):'Сейчас двадцать одна минута десятого вечера',
(21, 22):'Сейчас двадцать две минуты десятого вечера',
(21, 23):'Сейчас двадцать три минуты десятого вечера',
(21, 24):'Сейчас двадцать четыре минуты десятого вечера',
(21, 25):'Сейчас двадцать пять минут десятого вечера',
(21, 26):'Сейчас двадцать шесть минут десятого вечера',
(21, 27):'Сейчас двадцать семь минут десятого вечера',
(21, 28):'Сейчас двадцать восемь минут десятого вечера',
(21, 29):'Сейчас двадцать девять минут десятого вечера',
(21, 30):'Сейчас половина десятого вечера',
(21, 31):'Сейчас без двадцати девяти минут десять вечера',
(21, 32):'Сейчас без двадцати восьми минут десять вечера',
(21, 33):'Сейчас без двадцати семи минут десять вечера',
(21, 34):'Сейчас без двадцати шести минут десять вечера',
(21, 35):'Сейчас без двадцати пяти минут десять вечера',
(21, 36):'Сейчас без двадцати четырёх минут десять вечера',
(21, 37):'Сейчас без двадцати трёх минут десять вечера',
(21, 38):'Сейчас без двадцати двух минут десять вечера',
(21, 39):'Сейчас без двадцати одной минуты десять вечера',
(21, 40):'Сейчас без двадцати минут десять вечера',
(21, 41):'Сейчас без девятнадцати минут десять вечера',
(21, 42):'Сейчас без восемнадцати минут десять вечера',
(21, 43):'Сейчас без семнадцати минут десять вечера',
(21, 44):'Сейчас без шестнадцати минут десять вечера',
(21, 45):'Сейчас без четверти десять вечера',
(21, 46):'Сейчас без четырнадцати минут десять вечера',
(21, 47):'Сейчас без тринадцати минут десять вечера',
(21, 48):'Сейчас без двенадцати минут десять вечера',
(21, 49):'Сейчас без одиннадцати минут десять вечера',
(21, 50):'Сейчас без десяти минут десять вечера',
(21, 51):'Сейчас без девяти минут десять вечера',
(21, 52):'Сейчас без восьми минут десять вечера',
(21, 53):'Сейчас без семи минут десять вечера',
(21, 54):'Сейчас без шести минут десять вечера',
(21, 55):'Сейчас без пяти минут десять вечера',
(21, 56):'Сейчас без четырёх минут десять вечера',
(21, 57):'Сейчас без трёх минут десять вечера',
(21, 58):'Сейчас без двух минут десять вечера',
(21, 59):'Сейчас без одной минуты десять вечера',
(22, 0):'Сейчас ровно десять часов вечера',
(22, 1):'Сейчас одна минута одиннадцатого вечера',
(22, 2):'Сейчас две минуты одиннадцатого вечера',
(22, 3):'Сейчас три минуты одиннадцатого вечера',
(22, 4):'Сейчас четыре минуты одиннадцатого вечера',
(22, 5):'Сейчас пять минут одиннадцатого вечера',
(22, 6):'Сейчас шесть минут одиннадцатого вечера',
(22, 7):'Сейчас семь минут одиннадцатого вечера',
(22, 8):'Сейчас восемь минут одиннадцатого вечера',
(22, 9):'Сейчас девять минут одиннадцатого вечера',
(22, 10):'Сейчас десять минут одиннадцатого вечера',
(22, 11):'Сейчас одиннадцать минут одиннадцатого вечера',
(22, 12):'Сейчас двенадцать минут одиннадцатого вечера',
(22, 13):'Сейчас тринадцать минут одиннадцатого вечера',
(22, 14):'Сейчас четырнадцать минут одиннадцатого вечера',
(22, 15):'Сейчас четверть одиннадцатого вечера',
(22, 16):'Сейчас шестнадцать минут одиннадцатого вечера',
(22, 17):'Сейчас семнадцать минут одиннадцатого вечера',
(22, 18):'Сейчас восемнадцать минут одиннадцатого вечера',
(22, 19):'Сейчас девятнадцать минут одиннадцатого вечера',
(22, 20):'Сейчас двадцать минут одиннадцатого вечера',
(22, 21):'Сейчас двадцать одна минута одиннадцатого вечера',
(22, 22):'Сейчас двадцать две минуты одиннадцатого вечера',
(22, 23):'Сейчас двадцать три минуты одиннадцатого вечера',
(22, 24):'Сейчас двадцать четыре минуты одиннадцатого вечера',
(22, 25):'Сейчас двадцать пять минут одиннадцатого вечера',
(22, 26):'Сейчас двадцать шесть минут одиннадцатого вечера',
(22, 27):'Сейчас двадцать семь минут одиннадцатого вечера',
(22, 28):'Сейчас двадцать восемь минут одиннадцатого вечера',
(22, 29):'Сейчас двадцать девять минут одиннадцатого вечера',
(22, 30):'Сейчас половина одиннадцатого вечера',
(22, 31):'Сейчас без двадцати девяти минут одиннадцать вечера',
(22, 32):'Сейчас без двадцати восьми минут одиннадцать вечера',
(22, 33):'Сейчас без двадцати семи минут одиннадцать вечера',
(22, 34):'Сейчас без двадцати шести минут одиннадцать вечера',
(22, 35):'Сейчас без двадцати пяти минут одиннадцать вечера',
(22, 36):'Сейчас без двадцати четырёх минут одиннадцать вечера',
(22, 37):'Сейчас без двадцати трёх минут одиннадцать вечера',
(22, 38):'Сейчас без двадцати двух минут одиннадцать вечера',
(22, 39):'Сейчас без двадцати одной минуты одиннадцать вечера',
(22, 40):'Сейчас без двадцати минут одиннадцать вечера',
(22, 41):'Сейчас без девятнадцати минут одиннадцать вечера',
(22, 42):'Сейчас без восемнадцати минут одиннадцать вечера',
(22, 43):'Сейчас без семнадцати минут одиннадцать вечера',
(22, 44):'Сейчас без шестнадцати минут одиннадцать вечера',
(22, 45):'Сейчас без четверти одиннадцать вечера',
(22, 46):'Сейчас без четырнадцати минут одиннадцать вечера',
(22, 47):'Сейчас без тринадцати минут одиннадцать вечера',
(22, 48):'Сейчас без двенадцати минут одиннадцать вечера',
(22, 49):'Сейчас без одиннадцати минут одиннадцать вечера',
(22, 50):'Сейчас без десяти минут одиннадцать вечера',
(22, 51):'Сейчас без девяти минут одиннадцать вечера',
(22, 52):'Сейчас без восьми минут одиннадцать вечера',
(22, 53):'Сейчас без семи минут одиннадцать вечера',
(22, 54):'Сейчас без шести минут одиннадцать вечера',
(22, 55):'Сейчас без пяти минут одиннадцать вечера',
(22, 56):'Сейчас без четырёх минут одиннадцать вечера',
(22, 57):'Сейчас без трёх минут одиннадцать вечера',
(22, 58):'Сейчас без двух минут одиннадцать вечера',
(22, 59):'Сейчас без одной минуты одиннадцать вечера',
(23, 0):'Сейчас ровно одиннадцать часов вечера',
(23, 1):'Сейчас одна минута двенадцатого ночи',
(23, 2):'Сейчас две минуты двенадцатого ночи',
(23, 3):'Сейчас три минуты двенадцатого ночи',
(23, 4):'Сейчас четыре минуты двенадцатого ночи',
(23, 5):'Сейчас пять минут двенадцатого ночи',
(23, 6):'Сейчас шесть минут двенадцатого ночи',
(23, 7):'Сейчас семь минут двенадцатого ночи',
(23, 8):'Сейчас восемь минут двенадцатого ночи',
(23, 9):'Сейчас девять минут двенадцатого ночи',
(23, 10):'Сейчас десять минут двенадцатого ночи',
(23, 11):'Сейчас одиннадцать минут двенадцатого ночи',
(23, 12):'Сейчас двенадцать минут двенадцатого ночи',
(23, 13):'Сейчас тринадцать минут двенадцатого ночи',
(23, 14):'Сейчас четырнадцать минут двенадцатого ночи',
(23, 15):'Сейчас четверть двенадцатого ночи',
(23, 16):'Сейчас шестнадцать минут двенадцатого ночи',
(23, 17):'Сейчас семнадцать минут двенадцатого ночи',
(23, 18):'Сейчас восемнадцать минут двенадцатого ночи',
(23, 19):'Сейчас девятнадцать минут двенадцатого ночи',
(23, 20):'Сейчас двадцать минут двенадцатого ночи',
(23, 21):'Сейчас двадцать одна минута двенадцатого ночи',
(23, 22):'Сейчас двадцать две минуты двенадцатого ночи',
(23, 23):'Сейчас двадцать три минуты двенадцатого ночи',
(23, 24):'Сейчас двадцать четыре минуты двенадцатого ночи',
(23, 25):'Сейчас двадцать пять минут двенадцатого ночи',
(23, 26):'Сейчас двадцать шесть минут двенадцатого ночи',
(23, 27):'Сейчас двадцать семь минут двенадцатого ночи',
(23, 28):'Сейчас двадцать восемь минут двенадцатого ночи',
(23, 29):'Сейчас двадцать девять минут двенадцатого ночи',
(23, 30):'Сейчас половина двенадцатого ночи',
(23, 31):'Сейчас без двадцати девяти минут двенадцать ночи',
(23, 32):'Сейчас без двадцати восьми минут двенадцать ночи',
(23, 33):'Сейчас без двадцати семи минут двенадцать ночи',
(23, 34):'Сейчас без двадцати шести минут двенадцать ночи',
(23, 35):'Сейчас без двадцати пяти минут двенадцать ночи',
(23, 36):'Сейчас без двадцати четырёх минут двенадцать ночи',
(23, 37):'Сейчас без двадцати трёх минут двенадцать ночи',
(23, 38):'Сейчас без двадцати двух минут двенадцать ночи',
(23, 39):'Сейчас без двадцати одной минуты двенадцать ночи',
(23, 40):'Сейчас без двадцати минут двенадцать ночи',
(23, 41):'Сейчас без девятнадцати минут двенадцать ночи',
(23, 42):'Сейчас без восемнадцати минут двенадцать ночи',
(23, 43):'Сейчас без семнадцати минут двенадцать ночи',
(23, 44):'Сейчас без шестнадцати минут двенадцать ночи',
(23, 45):'Сейчас без четверти двенадцать ночи',
(23, 46):'Сейчас без четырнадцати минут двенадцать ночи',
(23, 47):'Сейчас без тринадцати минут двенадцать ночи',
(23, 48):'Сейчас без двенадцати минут двенадцать ночи',
(23, 49):'Сейчас без одиннадцати минут двенадцать ночи',
(23, 50):'Сейчас без десяти минут двенадцать ночи',
(23, 51):'Сейчас без девяти минут двенадцать ночи',
(23, 52):'Сейчас без восьми минут двенадцать ночи',
(23, 53):'Сейчас без семи минут двенадцать ночи',
(23, 54):'Сейчас без шести минут двенадцать ночи',
(23, 55):'Сейчас без пяти минут двенадцать ночи',
(23, 56):'Сейчас без четырёх минут двенадцать ночи',
(23, 57):'Сейчас без трёх минут двенадцать ночи',
(23, 58):'Сейчас без двух минут двенадцать ночи',
(23, 59):'Сейчас без одной минуты двенадцать ночи',}
for k in known_values:
self.assertEqual(self.clock.get_time_phrase(*k), known_values[k])
if __name__ == "__main__":
unittest.main()
| quasipedia/Chasy | src/testsuite.py | Python | gpl-3.0 | 174,862 |
"""
Implements Autodock Vina's pose-generation in tensorflow.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import warnings
import numpy as np
import tensorflow as tf
from deepchem.models import Model
from deepchem.nn import model_ops
import deepchem.utils.rdkit_util as rdkit_util
def compute_neighbor_list(coords, nbr_cutoff, N, M, n_cells, ndim=3, k=5):
"""Computes a neighbor list from atom coordinates.
Parameters
----------
coords: tf.Tensor
    Shape (N, ndim)
  nbr_cutoff: float
    Neighbor cutoff distance, also used as the grid spacing for the cells.
  N: int
    Max number of atoms
  M: int
    Max number of neighbors
  n_cells: int
    Total number of grid cells.
ndim: int
Dimensionality of space.
k: int
Number of nearest neighbors to pull down.
Returns
-------
nbr_list: tf.Tensor
Shape (N, M) of atom indices
"""
start = tf.cast(tf.reduce_min(coords), tf.int32)
stop = tf.cast(tf.reduce_max(coords), tf.int32)
cells = get_cells(start, stop, nbr_cutoff, ndim=ndim)
# Associate each atom with cell it belongs to. O(N*n_cells)
# Shape (n_cells, k)
atoms_in_cells, _ = put_atoms_in_cells(coords, cells, N, n_cells, ndim, k)
# Shape (N, 1)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wrapround. O(constant)
# Shape (n_cells, 26)
neighbor_cells = compute_neighbor_cells(cells, ndim, n_cells)
# Shape (N, 26)
neighbor_cells = tf.squeeze(tf.gather(neighbor_cells, cells_for_atoms))
# coords of shape (N, ndim)
# Shape (N, 26, k, ndim)
tiled_coords = tf.tile(tf.reshape(coords, (N, 1, 1, ndim)), (1, 26, k, 1))
# Shape (N, 26, k)
nbr_inds = tf.gather(atoms_in_cells, neighbor_cells)
# Shape (N, 26, k)
atoms_in_nbr_cells = tf.gather(atoms_in_cells, neighbor_cells)
# Shape (N, 26, k, ndim)
nbr_coords = tf.gather(coords, atoms_in_nbr_cells)
# For smaller systems especially, the periodic boundary conditions can
# result in neighboring cells being seen multiple times. Maybe use tf.unique to
# make sure duplicate neighbors are ignored?
# TODO(rbharath): How does distance need to be modified here to
# account for periodic boundary conditions?
# Shape (N, 26, k)
dists = tf.reduce_sum((tiled_coords - nbr_coords)**2, axis=3)
# Shape (N, 26*k)
dists = tf.reshape(dists, [N, -1])
# TODO(rbharath): This will cause an issue with duplicates!
# Shape (N, M)
  closest_nbr_locs = tf.nn.top_k(-dists, k=M)[1]  # negate so top_k selects the M smallest distances
# N elts of size (M,) each
split_closest_nbr_locs = [
tf.squeeze(locs) for locs in tf.split(closest_nbr_locs, N)
]
# Shape (N, 26*k)
nbr_inds = tf.reshape(nbr_inds, [N, -1])
# N elts of size (26*k,) each
split_nbr_inds = [tf.squeeze(split) for split in tf.split(nbr_inds, N)]
# N elts of size (M,) each
neighbor_list = [
tf.gather(nbr_inds, closest_nbr_locs)
for (nbr_inds,
closest_nbr_locs) in zip(split_nbr_inds, split_closest_nbr_locs)
]
# Shape (N, M)
neighbor_list = tf.stack(neighbor_list)
return neighbor_list
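# Illustrative usage sketch, not part of the original module: it wires the pieces
# above together for a tiny synthetic system under TF 1.x graph/session semantics.
# The sizes below are arbitrary assumptions, chosen so that the coordinate bounds
# imply a 3x3x3 grid and therefore n_cells = 27.
def _demo_compute_neighbor_list():
  N, M, ndim, k = 10, 4, 3, 5
  nbr_cutoff = 1  # integer spacing keeps tf.range's dtype consistent with the int32 start/stop
  # Deterministic coordinates spanning [0.0, 3.5]; the int32 casts give start=0, stop=3.
  coords = tf.reshape(tf.linspace(0.0, 3.5, N * ndim), (N, ndim))
  n_cells = 27  # 3**ndim cells for the grid implied above
  nbr_list = compute_neighbor_list(coords, nbr_cutoff, N, M, n_cells, ndim=ndim, k=k)
  with tf.Session() as sess:
    return sess.run(nbr_list)  # shape (N, M) of atom indices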
def get_cells_for_atoms(coords, cells, N, n_cells, ndim=3):
"""Compute the cells each atom belongs to.
Parameters
----------
coords: tf.Tensor
Shape (N, ndim)
cells: tf.Tensor
(box_size**ndim, ndim) shape.
Returns
-------
cells_for_atoms: tf.Tensor
Shape (N, 1)
"""
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (n_cells*N, ndim)
tiled_cells = tf.tile(cells, (N, 1))
# N tensors of shape (n_cells, 1)
tiled_cells = tf.split(tiled_cells, N)
# Shape (N*n_cells, 1) after tile
tiled_coords = tf.reshape(tf.tile(coords, (1, n_cells)), (n_cells * N, ndim))
# List of N tensors of shape (n_cells, 1)
tiled_coords = tf.split(tiled_coords, N)
# Lists of length N
coords_rel = [
tf.cast(coords, tf.float32) - tf.cast(cells, tf.float32)
for (coords, cells) in zip(tiled_coords, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
# Get indices of k atoms closest to each cell point
closest_inds = [tf.nn.top_k(-norm, k=1)[1] for norm in coords_norm]
# TODO(rbharath): tf.stack for tf 1.0
return tf.stack(closest_inds)
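# Illustrative sketch, not part of the original source: assigning a handful of atoms
# to cells of a 2x2x2 grid and returning one cell index per atom. Note that it calls
# get_cells, which is defined further down in this module, so it should only be
# invoked after the whole file has been imported.
def _demo_get_cells_for_atoms():
  N, ndim = 4, 3
  cells = get_cells(tf.constant(0), tf.constant(2), 1, ndim=ndim)  # 2**3 = 8 cells
  coords = tf.reshape(tf.linspace(0.0, 1.9, N * ndim), (N, ndim))
  assignments = get_cells_for_atoms(coords, cells, N, 8, ndim=ndim)
  with tf.Session() as sess:
    return sess.run(assignments)  # shape (N, 1), one cell index per atom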
def compute_closest_neighbors(coords,
cells,
atoms_in_cells,
neighbor_cells,
N,
n_cells,
ndim=3,
k=5):
"""Computes nearest neighbors from neighboring cells.
TODO(rbharath): Make this pass test
Parameters
  ----------
  atoms_in_cells: list
    Of length n_cells. Each entry is a tensor of shape (k, ndim).
  neighbor_cells: tf.Tensor
    Of shape (n_cells, 26).
  N: int
    Number of atoms
"""
n_cells = int(n_cells)
# Tensor of shape (n_cells, k, ndim)
#atoms_in_cells = tf.stack(atoms_in_cells)
cells_for_atoms = get_cells_for_atoms(coords, cells, N, n_cells, ndim)
all_closest = []
for atom in range(N):
atom_vec = coords[atom]
cell = cells_for_atoms[atom]
nbr_inds = tf.gather(neighbor_cells, tf.cast(cell, tf.int32))
# Tensor of shape (26, k, ndim)
nbr_atoms = tf.gather(atoms_in_cells, nbr_inds)
# Reshape to (26*k, ndim)
nbr_atoms = tf.reshape(nbr_atoms, (-1, 3))
# Subtract out atom vector. Still of shape (26*k, ndim) due to broadcast.
nbr_atoms = nbr_atoms - atom_vec
# Dists of shape (26*k, 1)
nbr_dists = tf.reduce_sum(nbr_atoms**2, axis=1)
# Of shape (k, ndim)
    closest_inds = tf.nn.top_k(-nbr_dists, k=k)[1]  # negate so top_k returns the nearest atoms
all_closest.append(closest_inds)
return all_closest
def get_cells(start, stop, nbr_cutoff, ndim=3):
"""Returns the locations of all grid points in box.
Suppose start is -10 Angstrom, stop is 10 Angstrom, nbr_cutoff is 1.
Then would return a list of length 20^3 whose entries would be
[(-10, -10, -10), (-10, -10, -9), ..., (9, 9, 9)]
Returns
-------
cells: tf.Tensor
(box_size**ndim, ndim) shape.
"""
ranges = [tf.range(start, stop, nbr_cutoff) for _ in range(ndim)]
return tf.reshape(tf.transpose(tf.stack(tf.meshgrid(*ranges))), (-1, ndim))
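# Illustrative sketch, not part of the original source: reproducing the docstring
# example above. A [-10, 10) box with unit spacing in three dimensions yields
# 20**3 = 8000 grid corners.
def _demo_get_cells():
  cells = get_cells(tf.constant(-10), tf.constant(10), 1, ndim=3)
  with tf.Session() as sess:
    return sess.run(tf.shape(cells))  # expected: [8000, 3]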
def put_atoms_in_cells(coords, cells, N, n_cells, ndim, k=5):
"""Place each atom into cells. O(N) runtime.
Let N be the number of atoms.
Parameters
----------
coords: tf.Tensor
(N, 3) shape.
cells: tf.Tensor
(n_cells, ndim) shape.
N: int
Number atoms
ndim: int
Dimensionality of input space
k: int
Number of nearest neighbors.
Returns
-------
closest_atoms: tf.Tensor
Of shape (n_cells, k, ndim)
"""
n_cells = int(n_cells)
# Tile both cells and coords to form arrays of size (n_cells*N, ndim)
tiled_cells = tf.reshape(tf.tile(cells, (1, N)), (n_cells * N, ndim))
# TODO(rbharath): Change this for tf 1.0
# n_cells tensors of shape (N, 1)
tiled_cells = tf.split(tiled_cells, n_cells)
# Shape (N*n_cells, 1) after tile
tiled_coords = tf.tile(coords, (n_cells, 1))
# List of n_cells tensors of shape (N, 1)
tiled_coords = tf.split(tiled_coords, n_cells)
# Lists of length n_cells
coords_rel = [
tf.cast(coords, tf.float32) - tf.cast(cells, tf.float32)
for (coords, cells) in zip(tiled_coords, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
  # Get indices of k atoms closest to each cell point (negate the squared
  # distances so top_k returns the smallest ones, as in get_cells_for_atoms)
  closest_inds = [tf.nn.top_k(-norm, k=k)[1] for norm in coords_norm]
# n_cells tensors of shape (k, ndim)
closest_atoms = tf.stack([tf.gather(coords, inds) for inds in closest_inds])
# Tensor of shape (n_cells, k)
closest_inds = tf.stack(closest_inds)
return closest_inds, closest_atoms
# TODO(rbharath):
# - Need to find neighbors of the cells (+/- 1 in every dimension).
# - Need to group closest atoms amongst cell neighbors
# - Need to do another top_k to find indices of closest neighbors.
# - Return N lists corresponding to neighbors for every atom.
def compute_neighbor_cells(cells, ndim, n_cells):
"""Compute neighbors of cells in grid.
# TODO(rbharath): Do we need to handle periodic boundary conditions
properly here?
# TODO(rbharath): This doesn't handle boundaries well. We hard-code
# looking for 26 neighbors, which isn't right for boundary cells in
# the cube.
Note n_cells is box_size**ndim. 26 is the number of neighbors of a cube in
a grid (including diagonals).
Parameters
----------
cells: tf.Tensor
(n_cells, 26) shape.
"""
n_cells = int(n_cells)
if ndim != 3:
raise ValueError("Not defined for dimensions besides 3")
# Number of neighbors of central cube in 3-space is
# 3^2 (top-face) + 3^2 (bottom-face) + (3^2-1) (middle-band)
# TODO(rbharath)
  k = 9 + 9 + 8  # 26 neighboring cells, like the 26 outer cubies surrounding the center of a Rubik's cube
#n_cells = int(cells.get_shape()[0])
# Tile cells to form arrays of size (n_cells*n_cells, ndim)
# Two tilings (a, b, c, a, b, c, ...) vs. (a, a, a, b, b, b, etc.)
# Tile (a, a, a, b, b, b, etc.)
tiled_centers = tf.reshape(
tf.tile(cells, (1, n_cells)), (n_cells * n_cells, ndim))
# Tile (a, b, c, a, b, c, ...)
tiled_cells = tf.tile(cells, (n_cells, 1))
# Lists of n_cells tensors of shape (N, 1)
tiled_centers = tf.split(tiled_centers, n_cells)
tiled_cells = tf.split(tiled_cells, n_cells)
# Lists of length n_cells
coords_rel = [
tf.cast(cells, tf.float32) - tf.cast(centers, tf.float32)
for (cells, centers) in zip(tiled_centers, tiled_cells)
]
coords_norm = [tf.reduce_sum(rel**2, axis=1) for rel in coords_rel]
# Lists of length n_cells
  # Get indices of the k cells closest to each cell (negate the squared
  # distances so top_k returns the smallest ones; note the cell itself is
  # still counted among its own neighbors)
  # n_cells tensors of shape (26,)
  closest_inds = tf.stack([tf.nn.top_k(-norm, k=k)[1] for norm in coords_norm])
return closest_inds
def cutoff(d, x):
"""Truncates interactions that are too far away."""
return tf.where(d < 8, x, tf.zeros_like(x))
def gauss_1(d):
"""Computes first Gaussian interaction term.
Note that d must be in Angstrom
"""
return tf.exp(-(d / 0.5)**2)
def gauss_2(d):
"""Computes second Gaussian interaction term.
Note that d must be in Angstrom.
"""
return tf.exp(-((d - 3) / 2)**2)
def repulsion(d):
"""Computes repulsion interaction term."""
return tf.where(d < 0, d**2, tf.zeros_like(d))
def hydrophobic(d):
"""Compute hydrophobic interaction term."""
where = tf.where(d < 1.5, 1.5 - d, tf.zeros_like(d))
return tf.where(d < 0.5, tf.ones_like(d), where)
def hbond(d):
"""Computes hydrogen bond term."""
where = tf.where(d < 0, (1.0 / 0.7) * (0 - d), tf.zeros_like(d))
return tf.where(d < -0.7, tf.ones_like(d), where)
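# Spot-check values for the piecewise terms above (hand-computed, offered only
# as a sanity aid, not taken from the original file):
#   gauss_1(0.5)     = exp(-(0.5/0.5)**2) = exp(-1) ~= 0.368
#   gauss_2(3.0)     = exp(-((3-3)/2)**2) = 1.0
#   repulsion(-0.5)  = (-0.5)**2 = 0.25, while repulsion(0.3) = 0
#   hydrophobic(1.0) = 1.5 - 1.0 = 0.5, hydrophobic(0.2) = 1, hydrophobic(2.0) = 0
#   hbond(-0.35)     = (1/0.7) * 0.35 = 0.5, hbond(-1.0) = 1, hbond(0.1) = 0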
def g(c, Nrot):
"""Nonlinear function mapping interactions to free energy."""
w = tf.Variable(tf.random_normal([
1,
], stddev=.3))
return c / (1 + w * Nrot)
def h(d):
  r"""Sum of energy terms used in Autodock Vina.
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
"""
w_1 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_2 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_3 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_4 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
w_5 = tf.Variable(tf.random_normal([
1,
], stddev=.3))
return w_1 * gauss_1(d) + w_2 * gauss_2(d) + w_3 * repulsion(
d) + w_4 * hydrophobic(d) + w_5 * hbond(d)
class VinaModel(Model):
  def __init__(self, logdir=None, batch_size=50):
    r"""Vina models.
.. math:: c = \sum_{i < j} f_{t_i,t_j}(r_{ij})
Over all pairs of atoms that can move relative to one-another. :math:`t_i` is the
atomtype of atom :math:`i`.
Can view as
.. math:: c = c_\textrm{inter} + c_\textrm{intra}
depending on whether atoms can move relative to one another. Free energy is
predicted only from :math:`c_\textrm{inter}`. Let :math:`R_t` be the Van der Waal's radius of
atom of type t. Then define surface distance
.. math:: d_{ij} = r_{ij} - R_{t_i} - R_{t_j}
Then the energy term is
.. math:: f_{t_i,t_j}(r_{ij}) = \textrm{cutoff}(d_{ij}, h_{t_i,t_j}(d_{ij}))
where
.. math:: \textrm{cutoff}(d, x) = \begin{cases} x & d < 8 \textrm{ Angstrom} \\ 0 & \textrm{otherwise} \end{cases}
The inner function can be further broken down into a sum of terms
.. math:: h_{t_i,t_j}(d) = w_1\textrm{gauss}_1(d) + w_2\textrm{gauss}_2(d) + w_3\textrm{repulsion}(d) + w_4\textrm{hydrophobic}(d) + w_5\textrm{hbond}(d)
these terms are defined as follows (all constants are in Angstroms):
.. math::
\textrm{gauss}_1(d) = \exp(-(d/(0.5))^2)
\textrm{gauss}_2(d) = \exp(-((d-3)/(2))^2)
\textrm{repulsion}(d) = \begin{cases} d^2 & d < 0 \\ 0 & d \geq 0 \end{cases}
\textrm{hydrophobic}(d) = \begin{cases} 1 & d < 0.5 \\ 1.5 - d & \textrm{otherwise} \\ 0 & d > 1.5 \end{cases}
\textrm{hbond}(d) = \begin{cases} 1 & d < -0.7 \\ (1.0/.7)(0 - d) & \textrm{otherwise} \\ 0 & d > 0 \end{cases}
The free energy of binding is computed as a function of the intermolecular interactions
..math:: s = g(c_\textrm{inter})
This function is defined as
..math:: g(c) = \frac{c}{1 + wN_\textrm{rot}}
Where :math:`w` is a weight parameter and :math:`N_\textrm{rot}` is the number of
rotatable bonds between heavy atoms in the ligand.
Gradients are taken backwards through the binding-free energy function with
respect to the position of the ligand and with respect to the torsions of
rotatable bonds and flexible ligands.
TODO(rbharath): It's not clear to me how the effect of the torsions on the :math:`d_{ij}` is
computed. Is there a way to get distances from torsions?
The idea is that mutations are applied to the ligand, and then gradient descent is
used to optimize starting from the initial structure. The code to compute the mutations
is specified
https://github.com/mwojcikowski/smina/blob/master/src/lib/mutate.cpp
Seems to do random quaternion rotations of the ligand. It's not clear to me yet
how the flexible and rotatable bonds are handled for the system.
Need to know an initial search space for the compound. Typically a cubic
binding box.
References
----------
Autodock Vina Paper:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3041641/
Smina Paper:
http://pubs.acs.org/doi/pdf/10.1021/ci300604z
Omega Paper (ligand conformation generation):
http://www.sciencedirect.com/science/article/pii/S1093326302002048
QuickVina:
http://www.cil.ntu.edu.sg/Courses/papers/journal/QuickVina.pdf
"""
pass
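  # Note: the __init__ defined below redefines the one above, so only the
  # second definition is used at runtime; the first mainly carries the docstring.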
def __init__(self, max_local_steps=10, max_mutations=10):
warnings.warn(
"VinaModel is deprecated. "
"Will be removed in DeepChem 1.4.", DeprecationWarning)
self.max_local_steps = max_local_steps
self.max_mutations = max_mutations
self.graph, self.input_placeholders, self.output_placeholder = self.construct_graph(
)
self.sess = tf.Session(graph=self.graph)
def construct_graph(self,
N_protein=1000,
N_ligand=100,
M=50,
ndim=3,
k=5,
nbr_cutoff=6):
"""Builds the computational graph for Vina."""
graph = tf.Graph()
with graph.as_default():
n_cells = 64
# TODO(rbharath): Make this handle minibatches
protein_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_protein, 3))
ligand_coords_placeholder = tf.placeholder(
tf.float32, shape=(N_ligand, 3))
protein_Z_placeholder = tf.placeholder(tf.int32, shape=(N_protein,))
ligand_Z_placeholder = tf.placeholder(tf.int32, shape=(N_ligand,))
label_placeholder = tf.placeholder(tf.float32, shape=(1,))
# Shape (N_protein+N_ligand, 3)
coords = tf.concat(
[protein_coords_placeholder, ligand_coords_placeholder], axis=0)
# Shape (N_protein+N_ligand,)
Z = tf.concat([protein_Z_placeholder, ligand_Z_placeholder], axis=0)
# Shape (N_protein+N_ligand, M)
nbr_list = compute_neighbor_list(
coords, nbr_cutoff, N_protein + N_ligand, M, n_cells, ndim=ndim, k=k)
all_interactions = []
# Shape (N_protein+N_ligand,)
all_atoms = tf.range(N_protein + N_ligand)
# Shape (N_protein+N_ligand, 3)
atom_coords = tf.gather(coords, all_atoms)
# Shape (N_protein+N_ligand,)
atom_Z = tf.gather(Z, all_atoms)
# Shape (N_protein+N_ligand, M)
nbrs = tf.squeeze(tf.gather(nbr_list, all_atoms))
# Shape (N_protein+N_ligand, M, 3)
nbr_coords = tf.gather(coords, nbrs)
# Shape (N_protein+N_ligand, M)
nbr_Z = tf.gather(Z, nbrs)
# Shape (N_protein+N_ligand, M, 3)
tiled_atom_coords = tf.tile(
tf.reshape(atom_coords, (N_protein + N_ligand, 1, 3)), (1, M, 1))
# Shape (N_protein+N_ligand, M)
dists = tf.reduce_sum((tiled_atom_coords - nbr_coords)**2, axis=2)
# TODO(rbharath): Need to subtract out Van-der-Waals radii from dists
# Shape (N_protein+N_ligand, M)
atom_interactions = h(dists)
# Shape (N_protein+N_ligand, M)
cutoff_interactions = cutoff(dists, atom_interactions)
# TODO(rbharath): Use RDKit to compute number of rotatable bonds in ligand.
Nrot = 1
# TODO(rbharath): Autodock Vina only uses protein-ligand interactions in
# computing free-energy. This implementation currently uses all interaction
# terms. Not sure if this makes a difference.
# Shape (N_protein+N_ligand, M)
free_energy = g(cutoff_interactions, Nrot)
# Shape () -- scalar
energy = tf.reduce_sum(atom_interactions)
loss = 0.5 * (energy - label_placeholder)**2
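      # Note: free_energy is computed above, but the energy/loss here is built
      # from the raw atom_interactions sum, so g() and cutoff() do not feed the
      # objective as written.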
return (graph, (protein_coords_placeholder, protein_Z_placeholder,
ligand_coords_placeholder, ligand_Z_placeholder),
label_placeholder)
def fit(self, X_protein, Z_protein, X_ligand, Z_ligand, y):
"""Fit to actual data."""
return
def mutate_conformer(protein, ligand):
"""Performs a mutation on the ligand position."""
return
def generate_conformation(self, protein, ligand, max_steps=10):
"""Performs the global search for conformations."""
best_conf = None
best_score = np.inf
conf = self.sample_random_conformation()
for i in range(max_steps):
mut_conf = self.mutate_conformer(conf)
loc_conf = self.gradient_minimize(mut_conf)
if best_conf is None:
best_conf = loc_conf
else:
loc_score = self.score(loc_conf)
if loc_score < best_score:
best_conf = loc_conf
return best_conf
| ktaneishi/deepchem | contrib/vina_model/vina_model.py | Python | mit | 18,997 |
from cheetax.adapters.adapter_type import AdapterType
class Query(object):
"""
Query builder entry functions
"""
@classmethod
def _builder(cls):
return QueryBuilder()
@classmethod
def import_(cls, source_type):
return cls._builder().import_(source_type)
@classmethod
def select_(cls, select='*'):
return cls._builder().select_(select)
@classmethod
def insert_(cls, insert):
return cls._builder().insert_(insert)
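# Hypothetical usage sketch (table and column names are made up, not from this repo):
#   sql = Query.select_('name').from_('users').where_('age > 21').get_sql()
#   # -> roughly "SELECT name FROM users WHERE age > 21"
#   # (modulo extra whitespace left by the format templates below)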
class QueryBuilder(object):
"""
Query Builder is the main class that generates sql
"""
def __init__(self, quote_char='"'):
self._select = None
self._insert = None
self._subquery = None
self._into = None
self._values = None
self._from = None
self._import = None
self._login = None
self._uri = None
self._statement = None
self._where = []
self._order = None
self._desc = None
self.quote_char = quote_char
def import_(self, import_type):
if not isinstance(import_type, AdapterType):
            raise KeyError('Import type is not an AdapterType object')
        if import_type == AdapterType.XML:
            raise KeyError('Importing XML is not supported')
if import_type == AdapterType.CSV:
self._import = 'CSV'
else:
self._import = "JDBC DRIVER ='{adapter}'".format(
adapter=import_type.value
)
return self
def select_(self, select):
self._select = select
return self
def insert_(self, insert):
self._insert = insert
return self
def into_(self, schema, table, quote_char='"'):
self._into = '{schema}.{quote}{table}{quote}'.format(schema=schema, table=table, quote=quote_char)
return self
def where_(self, where):
if where is not None:
self._where.append(where)
return self
def order_(self, order):
self._order = order
return self
def uri_(self, uri):
self._uri = uri
return self
def from_(self, from_):
self._from = 'FROM {from_}'.format(from_=from_)
return self
def values_(self, values):
self._values = 'VALUES ({values})'.format(values=values)
return self
def subquery_(self, subquery):
self._subquery = '{subquery}'.format(subquery=subquery)
return self
def login_(self, user, passwd):
self._login = "USER '{user}' IDENTIFIED BY '{passwd}'".format(
user=user,
passwd=passwd
)
return self
def statement_(self, uri):
self._statement = "statement '{uri}'".format(uri=uri)
return self
def __str__(self):
return self.get_sql()
def get_sql(self):
if not (self._import or self._select or self._insert):
return ''
if self._import:
querystring = self._import_sql()
if self._select:
querystring = self._select_sql()
if self._insert:
querystring = self._insert_sql()
return querystring
def _import_sql(self):
return 'IMPORT {into} FROM {driver} {uri} {login} {statement}'.format(
into='INTO TABLE {table}'.format(table=self._into) if self._into else '',
uri="AT '{uri}'".format(uri=self._uri) if self._uri else '',
driver=self._import,
login=self._login if self._login else '',
statement=self._statement if self._statement else ''
)
def _select_sql(self):
return 'SELECT {select} {subquery} {from_} {where} {order}'.format(
select=self._select,
subquery='FROM ({})'.format(self._subquery) if self._subquery else '',
from_=self._from if self._from else '',
where='WHERE {where}'.format(where=' AND '.join(self._where)) if len(self._where) > 0 else '',
order='ORDER BY {order}'.format(order=self._order) if self._order else ''
)
def _insert_sql(self):
return 'INSERT {into} {insert} {subquery} {values}'.format(
insert='({insert})'.format(insert=self._insert),
into='INTO {table}'.format(table=self._into) if self._into else '',
subquery=self._subquery if self._subquery else '',
values=self._values if self._values else ''
) | Fredehagelund92/Cheetax | cheetax/sql/query.py | Python | apache-2.0 | 4,477 |
name0_0_0_0_0_2_0 = None
name0_0_0_0_0_2_1 = None
name0_0_0_0_0_2_2 = None
name0_0_0_0_0_2_3 = None
name0_0_0_0_0_2_4 = None | goodwinnk/intellij-community | python/testData/completion/heavyStarPropagation/lib/_pkg0/_pkg0_0/_pkg0_0_0/_pkg0_0_0_0/_pkg0_0_0_0_0/_mod0_0_0_0_0_2.py | Python | apache-2.0 | 128 |
import os
import pytest
import pandas as pd
from astropy import units as u
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from tardis.io.config_reader import Configuration
from tardis.model import Radial1DModel
from tardis.io.decay import IsotopeAbundances
def data_path(filename):
return os.path.abspath(os.path.join('tardis/io/tests/data/', filename))
class TestModelFromPaper1Config:
def setup(self):
filename = 'paper1_tardis_configv1.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.model = Radial1DModel.from_config(self.config)
def test_abundances(self):
oxygen_abundance = self.config.model.abundances.O
assert_array_almost_equal(oxygen_abundance,
self.model.abundance.loc[8].values)
def test_velocities(self):
velocity = self.config.model.structure.velocity
assert_almost_equal(velocity.start.cgs.value,
self.model.v_inner[0].cgs.value)
assert_almost_equal(velocity.stop.cgs.value,
self.model.v_outer[-1].cgs.value)
assert len(self.model.v_outer) == velocity.num
def test_densities(self):
assert_almost_equal(self.model.density[0].cgs.value,
(7.542803599143591e-14 * u.Unit('g/cm^3')).value)
assert_almost_equal(self.model.density[-1].cgs.value,
(1.432259798833509e-15 * u.Unit('g/cm^3')).value)
def test_time_explosion(self):
assert_almost_equal(self.model.time_explosion.to(u.day).value, 13.0)
class TestModelFromASCIIDensity:
def setup(self):
filename = 'tardis_configv1_ascii_density.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.model = Radial1DModel.from_config(self.config)
def test_velocities(self):
assert self.model.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.model.v_inner[0].value, 1e4 * 1e5)
def test_abundances(self):
oxygen_abundance = self.config.model.abundances.O
assert_array_almost_equal(oxygen_abundance,
self.model.abundance.loc[8].values)
class TestModelFromArtisDensity:
def setup(self):
filename = 'tardis_configv1_artis_density.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.model = Radial1DModel.from_config(self.config)
def test_velocities(self):
assert self.model.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.model.v_inner[0].value, 1.259375e+03 * 1e5)
def test_abundances(self):
oxygen_abundance = self.config.model.abundances.O
assert_array_almost_equal(oxygen_abundance,
self.model.abundance.loc[8].values)
class TestModelFromArtisDensityAbundances:
def setup(self):
filename = 'tardis_configv1_artis_density.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.config.model.abundances.type = 'file'
self.config.model.abundances.filename = 'artis_abundances.dat'
self.config.model.abundances.filetype = 'artis'
self.model = Radial1DModel.from_config(self.config)
def test_velocities(self):
assert self.model.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.model.v_inner[0].value, 1.259375e+03 * 1e5)
def test_abundances(self):
assert_almost_equal(self.model.abundance.loc[14, 54],
0.21864420000000001)
class TestModelFromArtisDensityAbundancesVSlice:
def setup(self):
filename = 'tardis_configv1_artis_density_v_slice.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.config.model.abundances.type = 'file'
self.config.model.abundances.filename = 'artis_abundances.dat'
self.config.model.abundances.filetype = 'artis'
self.model = Radial1DModel.from_config(self.config)
def test_velocities(self):
assert self.model.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.model.v_inner[0].to(u.km / u.s).value, 9000)
def test_abundances(self):
assert_almost_equal(self.model.abundance.loc[14, 31], 2.156751e-01)
class TestModelFromUniformDensity:
def setup(self):
filename = 'tardis_configv1_uniform_density.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.model = Radial1DModel.from_config(self.config)
def test_density(self):
assert_array_almost_equal(self.model.density.to(u.Unit('g / cm3')).value,
1.e-14)
class TestModelFromInitialTinner:
def setup(self):
filename = 'tardis_configv1_uniform_density.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.config.plasma.initial_t_inner = 2508 * u.K
self.model = Radial1DModel.from_config(self.config)
def test_initial_temperature(self):
assert_almost_equal(self.model.t_inner.value, 2508)
class TestModelFromArtisDensityAbundancesAllAscii:
def setup(self):
filename = 'tardis_configv1_ascii_density_abund.yml'
self.config = Configuration.from_yaml(data_path(filename))
self.config.model.structure.filename = 'density.dat'
self.config.model.abundances.filename = 'abund.dat'
self.model = Radial1DModel.from_config(self.config)
def test_velocities(self):
assert self.model.v_inner.unit == u.Unit('cm/s')
assert_almost_equal(self.model.v_inner[0].to(u.km / u.s).value, 11000)
def test_abundances(self):
assert_almost_equal(self.model.abundance.loc[14, 0], 0.1)
assert_almost_equal(self.model.abundance.loc[14, 1], 0.2)
assert_almost_equal(self.model.abundance.loc[14, 2], 0.2)
assert_almost_equal(self.model.abundance.loc[14, 3], 0.2)
assert_almost_equal(self.model.abundance.loc[14, 4], 0.2)
assert_almost_equal(self.model.abundance.loc[14, 5], 0.2)
assert_almost_equal(self.model.abundance.loc[14, 6], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 0], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 1], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 2], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 3], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 4], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 5], 0.0)
assert_almost_equal(self.model.abundance.loc[6, 6], 0.5)
def test_densities(self):
assert_almost_equal(self.model.density[0].to(u.Unit('g/cm3')).value, 9.7656229e-11 / 13.0**3)
assert_almost_equal(self.model.density[1].to(u.Unit('g/cm3')).value, 4.8170911e-11 / 13.0**3)
assert_almost_equal(self.model.density[2].to(u.Unit('g/cm3')).value, 2.5600000e-11 / 13.0**3)
assert_almost_equal(self.model.density[3].to(u.Unit('g/cm3')).value, 1.4450533e-11 / 13.0**3)
assert_almost_equal(self.model.density[4].to(u.Unit('g/cm3')).value, 8.5733893e-11 / 13.0**3)
assert_almost_equal(self.model.density[5].to(u.Unit('g/cm3')).value, 5.3037103e-11 / 13.0**3)
assert_almost_equal(self.model.density[6].to(u.Unit('g/cm3')).value, 3.3999447e-11 / 13.0**3)
def test_ascii_reader_power_law():
filename = 'tardis_configv1_density_power_law_test.yml'
config = Configuration.from_yaml(data_path(filename))
model = Radial1DModel.from_config(config)
expected_densites = [3.29072513e-14, 2.70357804e-14, 2.23776573e-14,
1.86501954e-14, 1.56435277e-14, 1.32001689e-14,
1.12007560e-14, 9.55397475e-15, 8.18935779e-15,
7.05208050e-15, 6.09916083e-15, 5.29665772e-15,
4.61758699e-15, 4.04035750e-15, 3.54758837e-15,
3.12520752e-15, 2.76175961e-15, 2.44787115e-15,
2.17583442e-15, 1.93928168e-15]
assert model.no_of_shells == 20
for i, mdens in enumerate(expected_densites):
assert_almost_equal(model.density[i].to(u.Unit('g / (cm3)')).value,
mdens)
def test_ascii_reader_exponential_law():
filename = 'tardis_configv1_density_exponential_test.yml'
config = Configuration.from_yaml(data_path(filename))
model = Radial1DModel.from_config(config)
expected_densites = [5.18114795e-14, 4.45945537e-14, 3.83828881e-14,
3.30364579e-14, 2.84347428e-14, 2.44740100e-14,
2.10649756e-14, 1.81307925e-14, 1.56053177e-14,
1.34316215e-14, 1.15607037e-14, 9.95038990e-15,
8.56437996e-15, 7.37143014e-15, 6.34464872e-15,
5.46088976e-15, 4.70023138e-15, 4.04552664e-15,
3.48201705e-15, 2.99699985e-15]
expected_unit = 'g / (cm3)'
assert model.no_of_shells == 20
for i, mdens in enumerate(expected_densites):
assert_almost_equal(model.density[i].value, mdens)
assert model.density[i].unit == u.Unit(expected_unit)
@pytest.fixture
def simple_isotope_abundance():
index = pd.MultiIndex.from_tuples([(6, 14), (12, 28)],
names=['atomic_number', 'mass_number'])
abundance = [[0.2] * 20] * 2
return IsotopeAbundances(abundance, index=index)
def test_model_decay(simple_isotope_abundance):
filename = 'tardis_configv1_verysimple.yml'
config = Configuration.from_yaml(data_path(filename))
model = Radial1DModel.from_config(config)
model.raw_isotope_abundance = simple_isotope_abundance
decayed = simple_isotope_abundance.decay(
model.time_explosion).as_atoms()
norm_factor = 1.4
assert_almost_equal(
model.abundance.loc[8][0], model.raw_abundance.loc[8][0] / norm_factor, decimal=4)
assert_almost_equal(model.abundance.loc[14][0], (
model.raw_abundance.loc[14][0] + decayed.loc[14][0]) / norm_factor, decimal=4)
assert_almost_equal(model._abundance.loc[12][5], (
model.raw_abundance.loc[12][5] + decayed.loc[12][5]) / norm_factor, decimal=4)
assert_almost_equal(
model.abundance.loc[6][12], (decayed.loc[6][12]) / norm_factor, decimal=4)
###
# Save and Load
###
@pytest.fixture(scope="module", autouse=True)
def to_hdf_buffer(hdf_file_path, simulation_verysimple):
simulation_verysimple.model.to_hdf(hdf_file_path)
model_scalar_attrs = ['t_inner']
@pytest.mark.parametrize("attr", model_scalar_attrs)
def test_hdf_model_scalars(hdf_file_path, simulation_verysimple, attr):
path = os.path.join('model', 'scalars')
expected = pd.read_hdf(hdf_file_path, path)[attr]
actual = getattr(simulation_verysimple.model, attr)
if hasattr(actual, 'cgs'):
actual = actual.cgs.value
assert_almost_equal(actual, expected)
model_nparray_attrs = ['w', 'v_inner', 'v_outer']
@pytest.mark.parametrize("attr", model_nparray_attrs)
def test_hdf_model_nparray(hdf_file_path, simulation_verysimple, attr):
path = os.path.join('model', attr)
expected = pd.read_hdf(hdf_file_path, path)
actual = getattr(simulation_verysimple.model, attr)
if hasattr(actual, 'cgs'):
actual = actual.cgs.value
assert_almost_equal(actual, expected.values)
| kaushik94/tardis | tardis/model/tests/test_base.py | Python | bsd-3-clause | 11,368 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# check dir for files older 60s so we are sure to have complete transfered files
# then create a tmp dir for them
# put a screenshot
# use bento4 to create different qualities
# hls
# dash
# webm
# when finished remove file and move tmp directory
try:
from os import scandir
except ImportError:
from scandir import scandir
import logging.handlers
import os
import sys
import urllib.request
from time import time
from lib.filetype import BasicFile
from lib.singleton import SingleInstance
from zenlog import log, logging
def main(root_dir: str, callback: str = ""):
return_code = 0
log.info("Checking %s", root_dir)
dirs = 0
for e in scandir(root_dir):
if e.is_dir():
dirs += 1
log.debug("checking %s", e.name)
changed = False
dirstart = time()
is_watch_dir = False
for entry in scandir(e.path):
fh = BasicFile.get_instance(entry)
if fh:
is_watch_dir = True
log.debug("Found a file %s with type %s", entry.name, repr(fh))
start = time()
res = fh.process()
changed = False
for i in res:
if res[i] == fh.UPDATED:
changed = True
log.info("updated {} - {}".format(entry.name, i))
if res[i] == fh.ERROR:
log.error(
"ERROR: {} could not be processed - {}\n".format(
entry.name, i
)
)
return_code = -1
if time() - start > 1:
log.info("For %s took: %d", entry.name, int(time() - start))
if is_watch_dir:
if BasicFile.foldername_string(e.name) != e.name:
log.warning("The Foldername is not nice for urls - fixing it")
BasicFile.move_directory(
e.path,
os.path.join(
os.path.dirname(e.path), BasicFile.foldername_string(e.name)
),
)
if time() - dirstart > 1:
log.info("For dir %s took: %d", e.path, int(time() - dirstart))
if changed:
if len(callback) > 0:
if callback.startswith("http"):
log.info("Opening url %s" % callback)
try:
urllib.request.urlopen(callback).read()
except:
log.error("Error opening this url")
else:
log.error("callback must start with http: %s", callback)
log.info("Looked at %d dirs", dirs)
sys.exit(return_code)
def configure_logging(root_dir: str):
logger = logging.getLogger()
hdlr = logging.handlers.RotatingFileHandler(
os.path.join(root_dir, ".web_video.log"),
maxBytes=1024 * 1024 * 10,
backupCount=5,
)
formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
try:
from logging.handlers import SysLogHandler
syslog = SysLogHandler()
syslog.setFormatter(formatter)
logger.addHandler(syslog)
except Exception as e:
log.warning("No syslog handler: %s", e)
lock = None
def run_it():
global lock
configure_logging(sys.argv[1])
lock = SingleInstance("videotransformer")
# if not BasicFile.test_requirements():
# log.error("Some program requirements not met")
# sys.exit(-1)
# first argument is folder which we will check
# second argument a callback-url
main(*sys.argv[1:])
if __name__ == "__main__":
run_it()
| balrok/web_video | web_video/run.py | Python | mit | 4,090 |
"""
Django settings for locallibrary project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
'''
##
from unipath import Path
PROJECT_DIR = Path(__file__).parent
STATIC_ROOT = PROJECT_DIR.parent.child('staticfiles')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
PROJECT_DIR.child('static'),
)
##
'''
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
#SECRET_KEY = '70x=f-90^x9n1&4zgvqn)3kb_y+_loktvb#@ez10w5h63tgvf1'
import os
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'cg#p$g+j9tax!#a3cup@1$8obt2_+&k3q+pmu)5%asj6yjpkag')
# SECURITY WARNING: don't run with debug turned on in production!
#DEBUG = True
DEBUG = bool( os.environ.get('DJANGO_DEBUG', True) )
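# Note: bool() of any non-empty string is truthy, so e.g. DJANGO_DEBUG='False'
# still enables debug; export DJANGO_DEBUG='' (an empty string) to turn it off.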
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'catalog.apps.CatalogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'locallibrary.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['./templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'locallibrary.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'locallibrary',
'USER': 'omer',
'PASSWORD': 'mypass',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC' #'JST'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
# Heroku: Update database configuration from $DATABASE_URL.
import dj_database_url
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
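# dj_database_url reads the DATABASE_URL environment variable, e.g. (made-up
# credentials): postgres://user:pass@host:5432/dbname, and overrides the local
# database settings above when it is present.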
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
# The absolute path to the directory where collectstatic will collect static files for deployment.
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# The URL to use when referring to static files (where they will be served from)
STATIC_URL = '/static/'
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' | Konosh93/locallibrary | locallibrary/settings.py | Python | mit | 4,331 |
"""
Simple environment with known optimal policy and value function.
Action 0 then 0 yields randomly -1 or 1 reward and terminates the session.
Action 0 then 1 yields randomly 0, 0, or 9 reward and terminates the session.
Action 1 then 0 yields randomly 0 or 2 reward and terminates the session.
Action 1 then 1 yields randomly 2 or 3 reward and terminates the session.
Optimal policy: action 0 then 1.
Optimal value function v(observation): (this is a fully observable MDP so observation==state)
v(0)= 3 (you get observation 0 after taking action 0)
v(1)= 2.5 (you get observation 1 after taking action 1)
v(2)= 3 (you get observation 2 in the starting state)
"""
import gym
import random
from gym import spaces
from gym.utils import seeding
class TwoRoundNondeterministicRewardEnv(gym.Env):
def __init__(self):
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Discrete(3)
self._reset()
def _step(self, action):
rewards = [
[
[-1, 1], #expected value 0
[0, 0, 9] #expected value 3. This is the best path.
],
[
[0, 2], #expected value 1
[2, 3] #expected value 2.5
]
]
assert self.action_space.contains(action)
if self.firstAction is None:
self.firstAction = action
reward = 0
done = False
else:
reward = random.choice(rewards[self.firstAction][action])
done = True
return self._get_obs(), reward, done, {}
def _get_obs(self):
if self.firstAction is None:
return 2
else:
return self.firstAction
def _reset(self):
self.firstAction = None
return self._get_obs()
def _seed(self, seed=None):
seed = seed if seed else seeding.hash_seed(seed) % 2**32
random.seed(seed)
return [seed]
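# Hypothetical usage sketch (assumes the gym release this was written against,
# where Env.step/reset delegate to _step/_reset):
#   env = TwoRoundNondeterministicRewardEnv()
#   obs = env.reset()                # obs == 2 in the starting state
#   obs, r, done, _ = env.step(0)    # first action: reward 0, done is False
#   obs, r, done, _ = env.step(1)    # second action: reward drawn from {0, 0, 9}, done is True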
| d1hotpep/openai_gym | gym/envs/debugging/two_round_nondeterministic_reward.py | Python | mit | 1,959 |
#-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
# OpenCV is used to read the video file
import cv
# PyGame is used to control the display
import pygame
from pygame.locals import *
from libopensesame import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from libqtopensesame.widgets import pool_widget
from libopensesame.exceptions import osexception
class video_player(item.item):
description = u'An OpenCV-based image-only video player'
def __init__(self, name, experiment, script=None):
"""
Constructor.
Arguments:
name -- The item name.
experiment -- The experiment object.
Keyword arguments:
script -- A definition script. (default=None)
"""
self.duration = u"keypress"
self.fullscreen = u"yes"
self.frame_dur = 50
self.video_src = u""
item.item.__init__(self, name, experiment, script)
def prepare(self):
"""Opens the video file for playback."""
if self.experiment.get(u'canvas_backend') != u'legacy':
raise osexception( \
u'The video_player plug-in requires the legacy back-end!')
item.item.prepare(self)
path = self.experiment.get_file(self.get(u'video_src'))
# Open the video file
self.video = cv.CreateFileCapture(path)
# Convert the string to a boolean, for slightly faster evaluations in
# the run phase
self._fullscreen = self.get(u'fullscreen') == u"yes"
# The dimensions of the video
self._w = int(cv.GetCaptureProperty(self.video, cv.CV_CAP_PROP_FRAME_WIDTH))
self._h = int(cv.GetCaptureProperty(self.video, cv.CV_CAP_PROP_FRAME_HEIGHT))
if self._fullscreen:
# In fullscreen mode, the video is always shown in the top-left and the
# temporary images need to be fullscreen size
self._x = 0
self._y = 0
self.src_tmp = cv.CreateMat(self.experiment.height, \
self.experiment.width, cv.CV_8UC3)
self.src_rgb = cv.CreateMat(self.experiment.height, \
self.experiment.width, cv.CV_8UC3)
else:
# Otherwise the location of the video depends on its dimensions and the
# temporary image is the same size as the video
self._x = max(0, (self.experiment.width - self._w) / 2)
self._y = max(0, (self.experiment.height - self._h) / 2)
self.src_rgb = cv.CreateMat(self._h, self._w, cv.CV_8UC3)
def run(self):
"""Handles the actual video playback."""
# Log the onset time of the item
self.set_item_onset()
t = pygame.time.get_ticks()
start_t = t
# Loop until a key is pressed
go = True
while go:
# Get the frame
self.src = cv.QueryFrame(self.video)
# Check for the end of the video
if self.src == None:
break
# Resize if requested and convert the resulting image to
# RGB format, which is compatible with PyGame
if self._fullscreen:
cv.Resize(self.src, self.src_tmp)
cv.CvtColor(self.src_tmp, self.src_rgb, cv.CV_BGR2RGB)
else:
cv.CvtColor(self.src, self.src_rgb, cv.CV_BGR2RGB)
# Convert the image to PyGame format
pg_img = pygame.image.frombuffer(self.src_rgb.tostring(), \
cv.GetSize(self.src_rgb), u"RGB")
# Show the video frame!
self.experiment.surface.blit(pg_img, (self._x, self._y))
pygame.display.flip()
# Pause before jumping to the next frame
pygame.time.wait(
self.get(u'frame_dur') - pygame.time.get_ticks() + t)
t = pygame.time.get_ticks()
if type(self.get(u'duration')) == int:
# Wait for a specified duration
if t - start_t >= self.get(u'duration'):
go = False
# Catch escape presses
for event in pygame.event.get():
if event.type == KEYDOWN:
if event.key == pygame.K_ESCAPE:
raise osexception(u"The escape key was pressed.")
if self.get(u'duration') == u"keypress":
go = False
if event.type == MOUSEBUTTONDOWN and self.get(u'duration') == \
u"mouseclick":
go = False
# Release the camera
# Note: This function appears to be missing. Perhaps it's ok
# and Python will release it automatically?
# cv.ReleaseCapture(self.video)
class qtvideo_player(video_player, qtautoplugin):
def __init__(self, name, experiment, script=None):
video_player.__init__(self, name, experiment, script)
qtautoplugin.__init__(self, __file__)
| SCgeeker/OpenSesame | plugins/video_player/video_player.py | Python | gpl-3.0 | 4,816 |
#!/usr/bin/env python3
import argparse
import os
def process(filename):
cmd = ["./cvc4_2019_05_14",
"--lang=smtlib2.0",
"--output-lang=smtlib2.6.1",
"--dump=raw-benchmark",
"--preprocess-only",
"--no-simplification",
"--no-ite-simp",
"--no-ext-rew-prep",
"--dump-to=processed.smt2"]
cmd.append(filename)
print("Translating %s" % filename)
os.system(" ".join(cmd))
print("Post-processing %s" % filename)
with open("processed.smt2", "r") as fd:
tmp = fd.readlines()
os.unlink("processed.smt2")
with open(filename, "w") as fd:
fd.write(";; This file has been translated by CVC4 from an earlier\n")
fd.write(";; version of SMTLIB to 2.6 using the script\n")
fd.write(";; translate_benchmarks.py in repo smtlib_schanda.\n")
fd.write("\n")
for line in tmp:
if "set-option :incremental" in line:
pass
elif "meta-info :smt-lib-version 2.500000" in line:
fd.write("(set-info :smt-lib-version 2.6)\n")
elif "meta-info :" in line:
fd.write(line.replace("meta-info", "set-info"))
else:
fd.write(line)
def main():
ap = argparse.ArgumentParser()
ap.add_argument("benchmark_dir")
options = ap.parse_args()
if os.path.isfile(options.benchmark_dir):
process(options.benchmark_dir)
return
if not os.path.isdir(options.benchmark_dir):
ap.error("%s is not a directory" % options.benchmark_dir)
for path, dirs, files in os.walk(options.benchmark_dir):
for f in files:
if f.endswith(".smt2"):
process(os.path.join(path, f))
if __name__ == "__main__":
main()
| florianschanda/smtlib_schanda | translate_benchmarks.py | Python | gpl-3.0 | 1,812 |
from platform import system
import sys
import os
from os import mkdir, path, access, R_OK # W_OK for write permission.
from shutil import copy
from platform import system
version = "1.0"
description = "This templates allows you to create simple documents in PDF format"
# pwd = .../templates/pdf-simple
local_path = path.dirname(path.realpath(__file__))
in_images_path = local_path + "/images"
def usage_message() :
    if system() == "Windows":
print "To compile : make-pdf-simple.bat"
print "To clean : make-clean.bat"
else:
print "To compile : make pdf-simple"
print "To clean : make clean"
def generate(out_path) :
#print "local path is : " + local_path
out_common_path = out_path + "/common"
out_bin_path = out_path + "/bin"
out_images_path = out_path + "/images"
mkdir(out_common_path)
copy(local_path + "/document.md", out_path)
print "-- Example document.md copied"
copy(local_path + "/generate_files", out_bin_path)
print "-- Copying generate_files script"
copy(local_path + "/configuration.json", out_path)
print "-- Example configuration.json copied"
copy(local_path + "/Makefile",out_path)
print "-- Makefile copied"
copy(local_path + "/make-clean.bat",out_path)
print "-- make-clean.bat copied"
copy(local_path + "/make-pdf-simple.bat",out_path)
print "-- make-pdf-simple.bat copied"
copy(local_path + "/enumitem.sty", out_path + "/common")
print "-- Adding last v 3.5.2 of enumitem.sty"
copy(local_path + "/pdf-template-simple.tex", out_common_path )
print "-- Simple pdf template (pdf-template-simple.tex) copied"
copy(local_path + "/tplformd-simple.sty", out_common_path )
print "-- Simple tpl4md stylesheet (tplformd-simple.sty) copied"
copy(local_path + "/doc/README_pdf-simple.md", out_path + "/doc/")
print "-- Letter template documentation (README_pdf-simple.md) copied"
copy(local_path + "/doc/README_pdf-simple.pdf", out_path + "/doc/")
print "-- Letter template documentation (README_pdf-simple.pdf) copied"
print "- Copying image files"
mkdir(out_images_path)
print "-- image directory created"
copy(in_images_path + "/hw.jpg", out_images_path)
print "-- hello world image copied" | dloureiro/tpl4md | share/templates/pdf-simple/generator.py | Python | agpl-3.0 | 2,150 |
### Open Provenance March 2016 - https://myveryown.org
### Bitcoin Blockchain Information using python-bitcoinlib
### Count of Transactions in first 10,001 Blocks
### Donate to Open Provenance: 1opDUZQ9nsL1LJALBdV1dvqSMtcvNj9EC
## In this script we search the blockchain and count the number of transactions in the first 10,001 blocks
# Hint 10,092
## Import the modules required and setup a connection to bitcoin
import bitcoin
## Create a proxy object and connect to the bitcoin.rpc
import bitcoin.rpc
myproxy = bitcoin.rpc.Proxy()
## Declare some variables used by our search
starting_block = 0
ending_block = 10000
total_txs = 0
print "Counting Transactions ..."
## Now search block by block until we find what we are looking for
for blockno in range(starting_block, ending_block + 1):  # inclusive, so blocks 0-10000 = 10,001 blocks
block_info = myproxy.getblock(myproxy.getblockhash(blockno))
vtx = block_info.vtx
tx_count = len(block_info.vtx)
total_txs = total_txs + tx_count
print " "
print "Total Transactions: ", total_txs | OpenProvenance/python-bitcoinlib-scripting | 10-TxCount.py | Python | mit | 994 |
#!/usr/bin/python
#
# Copyright (c) 2017 Bruno Medina Bolanos Cacho <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_managed_disk
version_added: "2.4"
short_description: Manage Azure Manage Disks
description:
- Create, update and delete an Azure Managed Disk
options:
resource_group:
description:
- "Name of a resource group where the managed disk exists or will be created."
required: true
name:
description:
- Name of the managed disk
required: true
state:
description:
- Assert the state of the managed disk. Use 'present' to create or update a managed disk and
'absent' to delete a managed disk.
default: present
choices:
- absent
- present
required: false
location:
description:
- Valid Azure location. Defaults to location of the resource group.
default: resource_group location
required: false
storage_account_type:
description:
- "Type of storage for the managed disk: 'Standard_LRS' or 'Premium_LRS'. If not specified the disk is created 'Standard_LRS'"
choices:
- Standard_LRS
- Premium_LRS
required: false
create_option:
        description:
            - "Allowed values: empty, import, copy. 'import' from a VHD file in 'source_uri' and 'copy' from a previous managed disk 'source_resource_uri'."
choices:
- empty
- import
- copy
required: false
source_uri:
description:
- URI to a valid VHD file to be used when 'create_option' is 'import'.
required: false
source_resource_uri:
description:
- The resource ID of the managed disk to copy when 'create_option' is 'copy'.
required: false
os_type:
description:
- "Type of Operating System: 'linux' or 'windows'. Used when 'create_option' is either 'copy' or 'import' and the source is an OS disk."
choices:
- linux
- windows
required: false
disk_size_gb:
description:
            - Size in GB of the managed disk to be created. If 'create_option' is 'copy' then the value must be greater than or equal to the source's size.
required: true
tags:
description:
- Tags to assign to the managed disk.
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Bruno Medina (@brusMX)"
'''
EXAMPLES = '''
- name: Create managed disk
azure_rm_managed_disk:
name: mymanageddisk
location: eastus
resource_group: Testing
disk_size_gb: 4
- name: Delete managed disk
azure_rm_manage_disk:
name: mymanageddisk
location: eastus
resource_group: Testing
state: absent
'''
RETURN = '''
id:
description: The managed disk resource ID.
returned: always
type: dict
state:
description: Current state of the managed disk
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
import re
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute.models import DiskCreateOption
from azure.mgmt.compute.models import DiskSku
except ImportError:
# This is handled in azure_rm_common
pass
def managed_disk_to_dict(managed_disk):
os_type = None
if managed_disk.os_type:
os_type = managed_disk.os_type.name
return dict(
id=managed_disk.id,
name=managed_disk.name,
location=managed_disk.location,
tags=managed_disk.tags,
disk_size_gb=managed_disk.disk_size_gb,
os_type=os_type,
storage_account_type='Premium_LRS' if managed_disk.sku.tier == 'Premium' else 'Standard_LRS'
)
class AzureRMManagedDisk(AzureRMModuleBase):
"""Configuration class for an Azure RM Managed Disk resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
required=False,
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
required=False
),
storage_account_type=dict(
type='str',
required=False,
choices=['Standard_LRS', 'Premium_LRS']
),
create_option=dict(
type='str',
required=False,
choices=['empty', 'import', 'copy']
),
source_uri=dict(
type='str',
required=False
),
source_resource_uri=dict(
type='str',
required=False
),
os_type=dict(
type='str',
required=False,
choices=['linux', 'windows']
),
disk_size_gb=dict(
type='int',
required=False
)
)
required_if = [
('create_option', 'import', ['source_uri']),
('create_option', 'copy', ['source_resource_uri']),
('state', 'present', ['disk_size_gb'])
]
self.results = dict(
changed=False,
state=dict())
self.resource_group = None
self.name = None
self.location = None
self.storage_account_type = None
self.create_option = None
self.source_uri = None
self.source_resource_uri = None
self.os_type = None
self.disk_size_gb = None
self.tags = None
super(AzureRMManagedDisk, self).__init__(
derived_arg_spec=self.module_arg_spec,
required_if=required_if,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()) + ['tags']:
setattr(self, key, kwargs[key])
results = dict()
resource_group = None
response = None
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
self.location = resource_group.location
if self.state == 'present':
self.results['state'] = self.create_or_update_managed_disk()
elif self.state == 'absent':
self.delete_managed_disk()
return self.results
def create_or_update_managed_disk(self):
# Scaffolding empty managed disk
disk_params = {}
creation_data = {}
disk_params['location'] = self.location
disk_params['tags'] = self.tags
if self.storage_account_type:
storage_account_type = DiskSku(self.storage_account_type)
disk_params['sku'] = storage_account_type
disk_params['disk_size_gb'] = self.disk_size_gb
# TODO: Add support for EncryptionSettings
creation_data['create_option'] = DiskCreateOption.empty
if self.create_option == 'import':
creation_data['create_option'] = DiskCreateOption.import_enum
creation_data['source_uri'] = self.source_uri
elif self.create_option == 'copy':
creation_data['create_option'] = DiskCreateOption.copy
creation_data['source_resource_id'] = self.source_resource_uri
try:
# CreationData cannot be changed after creation
disk_params['creation_data'] = creation_data
found_prev_disk = self.get_managed_disk()
if found_prev_disk:
if not self.is_different(found_prev_disk, disk_params):
return found_prev_disk
if not self.check_mode:
poller = self.compute_client.disks.create_or_update(
self.resource_group,
self.name,
disk_params)
aux = self.get_poller_result(poller)
result = managed_disk_to_dict(aux)
else:
result = True
self.results['changed'] = True
except CloudError as e:
self.fail("Error creating the managed disk: {0}".format(str(e)))
return result
# This method accounts for the difference in structure between the
# Azure retrieved disk and the parameters for the new disk to be created.
def is_different(self, found_disk, new_disk):
resp = False
if new_disk.get('disk_size_gb'):
if not found_disk['disk_size_gb'] == new_disk['disk_size_gb']:
resp = True
if new_disk.get('sku'):
if not found_disk['storage_account_type'] == new_disk['sku'].name:
resp = True
# Check how to implement tags
if new_disk.get('tags') is not None:
if not found_disk['tags'] == new_disk['tags']:
resp = True
return resp
def delete_managed_disk(self):
try:
if not self.check_mode:
poller = self.compute_client.disks.delete(
self.resource_group,
self.name)
result = self.get_poller_result(poller)
else:
result = True
self.results['changed'] = True
except CloudError as e:
self.fail("Error deleting the managed disk: {0}".format(str(e)))
return result
def get_managed_disk(self):
resp = False
try:
resp = self.compute_client.disks.get(
self.resource_group,
self.name)
except CloudError as e:
self.log('Did not find managed disk')
if resp:
resp = managed_disk_to_dict(
resp)
return resp
def main():
"""Main execution"""
AzureRMManagedDisk()
if __name__ == '__main__':
main()
| Azulinho/ansible | lib/ansible/modules/cloud/azure/azure_rm_managed_disk.py | Python | gpl-3.0 | 10,719 |
# -*- coding: utf-8 -*-
import allauth.account.forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django import forms
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from settings import models
INLIST_DELETE_CONFIRM_LABEL = _('Ask for confirmation when deleting ' + \
'inlist item')
ACTION_DELETE_CONFIRM_LABEL = _('Ask for confirmation when deleting actions')
class SettingsForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(SettingsForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('confirm', _('Save')))
class Meta:
model = models.Settings
exclude = ('user',)
labels = {
'language': _('Language'),
'inlist_delete_confirm': INLIST_DELETE_CONFIRM_LABEL,
'action_delete_confirm': ACTION_DELETE_CONFIRM_LABEL,
}
class ChangePasswordForm(allauth.account.forms.ChangePasswordForm):
def __init__(self, *args, **kwargs):
super(ChangePasswordForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_action = reverse('settings:change_password')
self.helper.add_input(Submit('change_password', _('Change password')))
| XeryusTC/projman | settings/forms.py | Python | mit | 1,338 |
# Copyright 2018 Inap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pyeapi
from hamcrest import assert_that, is_
from pyeapi.api.vlans import Vlans
from pyeapi.eapilib import CommandError
from tests.util.global_reactor import TEST_SWITCHES
class TestAristaRestApi(unittest.TestCase):
switch_name = "arista"
def setUp(self):
conf = TEST_SWITCHES[self.switch_name]
self.node = pyeapi.connect(transport="http", host="127.0.0.1", port=conf["http"],
username="root", password="root", return_node=True)
self.connection = self.node.connection
def test_get_vlan(self):
result = self.node.api('vlans').get(1)
assert_that(result, is_({
"name": "default",
"state": "active",
"trunk_groups": [],
"vlan_id": 1
}))
def test_execute_show_vlan(self):
result = self.connection.execute("show vlan")
assert_that(result, is_({
"id": AnyId(),
"jsonrpc": "2.0",
"result": [
{
"sourceDetail": "",
"vlans": {
"1": {
"dynamic": False,
"interfaces": {},
"name": "default",
"status": "active"
}
}
}
]
}))
def test_execute_show_vlan_unknown_vlan(self):
with self.assertRaises(CommandError) as expect:
self.connection.execute("show vlan 999")
assert_that(str(expect.exception), is_(
"Error [1000]: CLI command 1 of 1 'show vlan 999' failed: could not run command "
"[VLAN 999 not found in current VLAN database]"
))
assert_that(expect.exception.output, is_([
{
'vlans': {},
'sourceDetail': '',
'errors': ['VLAN 999 not found in current VLAN database']
}
]))
def test_execute_show_vlan_invalid_input(self):
with self.assertRaises(CommandError) as expect:
self.connection.execute("show vlan shizzle")
assert_that(str(expect.exception), is_(
"Error [1002]: CLI command 1 of 1 'show vlan shizzle' failed: invalid command "
"[Invalid input]"
))
def test_add_and_remove_vlan(self):
result = Vlans(self.node).configure_vlan("737", ["name wwaaat!"])
assert_that(result, is_(True))
result = Vlans(self.node).delete("737")
assert_that(result, is_(True))
class AnyId(object):
def __eq__(self, o):
try:
int(o)
except ValueError:
return False
return True
| internap/fake-switches | tests/arista/test_arista_rest_api.py | Python | apache-2.0 | 3,331 |
#! /usr/bin/python
# coding: utf-8
import sys
import io
import tempfile
import os
import time
import json
import Queue
import urllib
import urllib2
import contextlib
nodes = []
server_nodes = []
switch_nodes = []
server = {}
server_priority = []
switches = {}
switches_dpid = {}
adjacent = []
queue_property = []
existing_rules = {}
max_column = 0
#indicate max bandwidth on each link
speed = []
max_bandwidth = 10
#linkbandwidth = 10.0
traffic_file_name = "/home/mininet/floodlight-qos-beta-master/traffic.txt"
traffic_tmp_file_name = "/home/mininet/floodlight-qos-beta-master/traffic.txt.tmp"
traffic_bak_file_name = "/home/mininet/floodlight-qos-beta-master/traffic.txt.bak"
switchnum = 0
traffic_data = {}
name_index = {}
bandwidthout = [[]]
f_ptr = 0
flow_map = {}
#dummy data for test
#poll_map = [
#[3000000, 3000000, 3000000, 3000000, 3000000],
#[3000000, 3000000, 3000000, 3000000, 3000000],
#[3000000, 3000000, 3000000, 3000000, 3000000],
#[3000000, 3000000, 3000000, 3000000, 3000000],
#[3000000, 3000000, 3000000, 3000000]]
def build_port_name():
global name_index
#page = urllib.urlopen('http://localhost:8080/wm/core/controller/switches/json')
with contextlib.closing(urllib2.urlopen('http://localhost:8080/wm/core/controller/switches/json')) as page:
line = page.read().decode("utf-8")
#line = page.read().decode("utf-8")
collections = json.loads(line)
switchnum = len(collections)
for sw in collections:
dpid = sw["dpid"]
port_detail = {}
ports = sw["ports"]
for each_port in ports:
port_detail[each_port["portNumber"]] = each_port["name"]
name_index[dpid] = port_detail
#print name_index
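# Illustrative shape of name_index (hypothetical DPIDs and port names, not taken
# from a real controller):
#   {"00:00:00:00:00:00:00:01": {1: "s1-eth1", 2: "s1-eth2"},
#    "00:00:00:00:00:00:00:02": {1: "s2-eth1", 2: "s2-eth2"}}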
def measure_bandwidth():
global traffic_data
global traffic_file_name
global name_index
global f_ptr
global bandwidthout
global switchnum
#page = urllib.urlopen('http://localhost:8080/wm/core/switch/all/flow/json')
with contextlib.closing(urllib2.urlopen('http://localhost:8080/wm/core/switch/all/flow/json')) as page:
line = page.read().decode("utf-8")
#line = page.read().decode("utf-8")
new_traffic_data = {}
#create tmp count flow (has row equal to number of switch)
tmp_count_flow = [[] for i in range( len(adjacent))]
for i in range( len(tmp_count_flow)):
#each switch has column equal to number of its port
tmp_count_flow[i] = [[] for j in range( len(adjacent[i]) )]
#each port has number of counter equal to number of queue (number of server)
for j in range ( len( tmp_count_flow[i]) ):
tmp_count_flow[i][j] = [0 for k in range( len(server_nodes) )]
rule_max_bw = {}
existing_rules = [[] for i in range( len(switches) )]
switch_dicts = json.loads(line)
for switch_id in switch_dicts:
if switch_id in name_index:
switch_index = nodes.index(switches[switch_id])
rule_update = [[] for i in range(len(adjacent[switch_index]))]
for i in range( len(rule_update) ):
rule_update[i] = [[] for j in range( len(server_nodes) )]
for flow in switch_dicts[switch_id]:
match = flow["match"]
key_match = match["networkDestination"]+match["networkSource"]
actions = flow["actions"]
for action in actions:
port_int = int(action["port"])
                    #in case it wants to reach the controller, using NAT to connect outside the topology
if port_int > len(bandwidthout[switch_index]):
continue
if action["type"] == "OUTPUT" or action["type"] == "OPAQUE_ENQUEUE":
total_duration = 0
total_byte = 0
found = False
buildkey = (switch_id,port_int)
if buildkey in traffic_data:
temp_traffic = traffic_data[buildkey]
if key_match in temp_traffic:
temp_flow = temp_traffic[key_match]
old_duration = temp_flow["duration"]
old_bytecount = temp_flow["byteCount"]
total_duration = (flow["durationSeconds"]+flow["durationNanoseconds"]/1000000000.0)-old_duration
if total_duration >= 0:
found = True
total_byte = flow["byteCount"]-old_bytecount
if not found:
total_duration = (flow["durationSeconds"]+flow["durationNanoseconds"]/1000000000.0)
total_byte = flow["byteCount"]
if buildkey not in new_traffic_data:
new_traffic_data[buildkey] = {}
                        #instead of using the whole match, using only src,dst is fine for this testing
new_traffic_data[buildkey][key_match] = {}
new_traffic_data[buildkey][key_match]["duration"] = (flow["durationSeconds"]+flow["durationNanoseconds"]/1000000000.0)
new_traffic_data[buildkey][key_match]["byteCount"] = flow["byteCount"]
bw = 0.0
                    #is there something wrong here?
if total_duration > 0:
bw = ((total_byte*8.0/1000000.0)/(total_duration))
else:
bw = 0.0
#print "raw sw : " + switch_id
#print "sw id : " + str(switch_index)
#print "raw port : " + str(action["port"])
#print "port : " + str(int(action["port"])-1)
'''
print "****************************"
print "bytecount"
print flow["byteCount"]
print "duration"
print flow["durationSeconds"]
print "total_byte"
print total_byte
print "total_duration"
print total_duration
print "bw"
print bw
print "****************************"
'''
bandwidthout[switch_index][port_int-1][0] = bandwidthout[switch_index][port_int-1][0] - bw
destination = match["networkDestination"]
source = match["networkSource"]
#print "destination : " + destination
if destination in server:
#server_name = server[destination]['name']
#server_index = server_nodes.index(server_name)+1
server_index = server[destination]['id']
#add 1 to server index because [0] is reserved for available
bandwidthout[switch_index][port_int-1][server_index+1] = bandwidthout[switch_index][port_int-1][server_index+1] + bw
#print tmp_count_flow[switch_index]
if action["type"] == "OPAQUE_ENQUEUE":
tmp_count_flow[switch_index][port_int-1][server_index] = tmp_count_flow[switch_index][port_int-1][server_index] + 1
#print adjacent
#check if it is the src node
server_name = server[destination]['name']
server_nodes_index = nodes.index(server_name)
#generate rules name to mark for update in its port after finish all flow in a switch
rule_name = source+"-"+destination
rule_update[port_int-1][server_index].append(rule_name)
elif source in server:
server_index = server[source]['id']
bandwidthout[switch_index][port_int-1][server_index+1] = bandwidthout[switch_index][port_int-1][server_index+1] + bw
#print tmp_count_flow[switch_index]
if action["type"] == "OPAQUE_ENQUEUE":
tmp_count_flow[switch_index][port_int-1][server_index] = tmp_count_flow[switch_index][port_int-1][server_index] + 1
server_name = server[source]['name']
server_nodes_index = nodes.index(server_name)
#print "port : " + str(port_int)
#generate rules name to mark for update in its port after finish all flow in a switch
rule_name = source+"-"+destination
#print "rule name : " + rule_name
rule_update[port_int-1][server_index].append(rule_name)
#endif action = output / opaque queue
#endloop each action
#endloop each flow
#print rule_update
#begin count and show associate rule with this port
#print "sw : " + str(switch_index+1)
#loop i for all port in switch
for i in range( len(adjacent[switch_index]) ):
#print "port : " + str( i+1 )
tmp_cal = [0 for j in range( len(tmp_count_flow[switch_index][i]) )]
total_momentum = 0
#loop j for all queue (#server) in a port
for j in range( len(tmp_count_flow[switch_index][i]) ):
flow = tmp_count_flow[switch_index][i][j]
#print "count : " + str(flow)
momentum = flow*server_priority[j]
tmp_cal[j] = momentum
#print "momentum : " + str(momentum)
total_momentum = total_momentum + momentum
#print "total momentum : " + str(total_momentum)
#loop j for all queue in a port
for j in range( len(tmp_cal) ):
if total_momentum > 0:
# 0 is an index for min bandwidth
queue_property[switch_index][i][j][0] = (tmp_cal[j] * speed[switch_index][i]) / total_momentum
#try set for max
#queue_property[switch_index][i][j][1] = (tmp_cal[j] * speed[switch_index][i]) / total_momentum
                #give a minimum of 5% when there is no momentum or no route, so a flow without a queue can still use this route via action["OUTPUT"]
if queue_property[switch_index][i][j][1] < speed[switch_index][i]/20.0 :
queue_property[switch_index][i][j][1] = speed[switch_index][i]/20.0
else:
#queue_property[switch_index][i][j][0] = 0.0
queue_property[switch_index][i][j][1] = speed[switch_index][i]/20.0
#try set for max
#queue_property[switch_index][i][j][1] = speed[switch_index][i]
queue_property[switch_index][i][j][1] = speed[switch_index][i]
#try set for min
#queue_property[switch_index][i][j][0] = queue_property[switch_index][i][j][1]
#try set both bound
#need to check for update the max bandwidth for the rule
for each_rule in rule_update[i][j]:
if each_rule in rule_max_bw:
if rule_max_bw[each_rule] < queue_property[switch_index][i][j][0]:
rule_max_bw[each_rule] = queue_property[switch_index][i][j][0]
else:
rule_max_bw[each_rule] = queue_property[switch_index][i][j][0]
# 1 is an index for max bandwidth
#queue_property[switch_index][i][j][1] = speed[switch_index][i]
#try set for both bound
#print "queue : " + str(j) + " min : " + str(queue_property[switch_index][i][j][0]) + " max : " + str(queue_property[switch_index][i][j][1])
#endloop each port in switch
#for rule_name in existing_rules:
#print tmp_count_flow
#print "after count"
existing_rules[switch_index] = rule_update
#endif known switch
#endloop each switch
#print queue_property
#need to determine max bandwidth by the max ratio found in the path
#for switch_row in range( len(queue_property) ):
# for port_row in range( len( queue_property[switch_row] ) ):
# for queue_row in range( len( queue_property[switch_row][port_row]) ):
for switch_row in range( len(existing_rules) ):
for port_row in range( len( existing_rules[switch_row] ) ):
for queue_row in range( len( existing_rules[switch_row][port_row]) ):
#get a list that contain every rule name on that queue( in the focusing port )
rule_port_dict = existing_rules[switch_row][port_row][queue_row]
#begin to find the max out of the existing rule in this queue
tmp_max = 0
for rule_focus_name in rule_port_dict:
if tmp_max < rule_max_bw[rule_focus_name]:
tmp_max = rule_max_bw[rule_focus_name]
#get the max ratio of all the path
if queue_property[switch_row][port_row][queue_row][1] > tmp_max:
queue_property[switch_row][port_row][queue_row][1] = tmp_max
#print rule_max_bw
#print "---------------------------------"
#print existing_rules
#dirname, basename = os.path.split(traffic_file_name)
#f_ptr = tempfile.NamedTemporaryFile(prefix=basename, dir=dirname)
#print f_ptr.name
f_ptr = io.open(traffic_tmp_file_name,'w',encoding='utf-8')
    #previously wrote the number of switches and assumed they all have at most maxport ports
f_ptr.write( unicode(str(len(switches)) + "\n"))
f_ptr.flush()
#need to change into a list of how many switches and how many port each switch has
for key in switches:
f_ptr.write(key + "\n" + unicode(str(nodes.index( switches[key] ))) +"\n")
f_ptr.flush()
#print bandwidthout
for sw in bandwidthout:
f_ptr.write( unicode(str(len(sw)) + " "))
for port in sw :
f_ptr.write(unicode( str( port[0] if port[0] >= 0 else 0) + " "))
f_ptr.write(u"\n")
f_ptr.flush()
#print("-----------------------------------------------------")
os.fsync(f_ptr.fileno())
    f_ptr.close()
os.rename(traffic_file_name, traffic_bak_file_name)
#print "successfully rename current file to tmp file"
os.rename(traffic_tmp_file_name , traffic_file_name)
#print "successfully rename tmp file to current file"
os.remove(traffic_bak_file_name)
#print "successfully remove bak file"
traffic_data = new_traffic_data
#get available bandwidth
def get_avai_bandwidth_on_link(switch,outport):
sw = bandwidthout[switch]
bandwidth = sw[outport][0]
return bandwidth
#switch ,outport, server are 0 based
def get_exists_bandwidth_on_link(switch,outport,server):
server_index = server+1
bw = bandwidthout[switch][outport][server_index]
return bw
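#Layout note (illustrative summary of how the structures above are filled):
#  bandwidthout[switch][port][0]   -> remaining available bandwidth on that link (Mbps)
#  bandwidthout[switch][port][k+1] -> bandwidth currently used towards server id k
#  queue_property[switch][port][q] -> [min_rate, max_rate] later handed to ovs-vsctl
#e.g. get_exists_bandwidth_on_link(0, 1, 2) reads bandwidthout[0][1][3].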
def allocate_queue():
#begin loop to look all switch
#print "allocate queue get called here"
for index in range(0, len(queue_property) ):
focus_dpid = switches_dpid[switch_nodes[index]]
#print switch_nodes[index] + " : " + focus_dpid
#begin loop to look all port
for port_num in range(0, len(queue_property[index]) ):
if focus_dpid in name_index:
#print "allocate queue get called here"
port = name_index[focus_dpid][port_num+1]
qosmax = speed[index][port_num]
queuecmd = "sudo ovs-vsctl -- set port %s qos=@qosport%s -- " % ( port , port )
queuecmd = queuecmd + "--id=@qosport%s create qos type=linux-htb other-config:max-rate=%d " % ( port , int(qosmax) )
queuecmd = queuecmd + "queues=0=@q0"
for i in range(1, len(queue_property[index][port_num])+1):
queuecmd = queuecmd + "," + str(i) + "=@q" + str(i)
queuecmd = queuecmd + " -- "
queuecmd = queuecmd + "--id=@q0 create queue other-config:max-rate=%d other-config:min-rate=%s " % ( int(qosmax) , "0" )
for i in range(1, len(queue_property[index][port_num])+1):
queuecmd = queuecmd + "-- --id=@q%s create queue other-config:max-rate=%s other-config:min-rate=%s " %( str(i), str(queue_property[index][port_num][i-1][1]), str(queue_property[index][port_num][i-1][0]) )
#queuecmd = queuecmd + "-- --id=@q%s create queue other-config:max-rate=%s other-config:min-rate=%s " %( str(i), int(queue_property[index][port_num][i-1][0]), int(queue_property[index][port_num][i-1][0]) )
print "result : \n\n "
print queuecmd
print os.popen(queuecmd).read()
#os.popen(queuecmd)
#print "end result : \n\n "
def allocate_bandwidthout():
global bandwidthout
global adjacent
    #initialize bandwidthout
bandwidthout = []
for adj_row in adjacent:
bw_row = [[] for i in range( len(adj_row)) ]
for i in range( len(bw_row) ):
bw_row[i] = [[] for j in range( len(server_nodes)+1 ) ]
bandwidthout.append(bw_row)
def reset_bandwidthout():
global bandwidthout
global speed
#initial bandwidthout
for i in range( len(bandwidthout) ):
bw_row = bandwidthout[i]
for j in range( len(bw_row) ):
bw_row[j][0] = speed[i][j]/1000000.0
for k in range ( 1, len(server_nodes)+1):
bw_row[j][k] = 0
#print bandwidthout
def display_bandwidthout():
global bandwidthout
    #initialize bandwidthout
for i in range( len(bandwidthout) ):
bw_row = bandwidthout[i]
for j in range( len(bw_row) ):
print str(bw_row[j]) + " "
print "\n"
if __name__ == '__main__':
#global nodes
#global server_nodes
#global switch_nodes
#global switches
#global servers
#global adjacent
#global speed
#global bandwidthout
#global max_bandwidth
build_port_name()
topo_detail = open('topology_detail.txt','r')
line = topo_detail.readline()
item = line.split()
#start mapping nodes
while len(item) == 0 or item[0] != 'node':
line = topo_detail.readline()
item = line.split()
line = topo_detail.readline()
item = line.split()
while len(item) > 0:
#found server
if "." in item[1]:
server[item[1]] = {}
server[item[1]]['name'] = item[0]
server[item[1]]['id'] = len(server_nodes)
server[item[1]]['queue'] = int(item[2])
server_priority.append( float(item[3]) )
#server[item[1]]['priority'] = int(item[2])
server_nodes.append(item[0])
#found switch
elif ":" in item[1]:
switch_nodes.append(item[0])
switches[item[1]] = item[0]
switches_dpid[item[0]] = item[1]
line = topo_detail.readline()
item = line.split()
nodes = switch_nodes + server_nodes
#print server
#start building adjacent matrix
    #adjacency information in topology_detail.txt contains only the switches' adjacency information
#row of adjacent equal to number of switch
line = topo_detail.readline()
item = line.split()
while len(item) == 0 or item[0] != 'adjacent':
line = topo_detail.readline()
item = line.split()
adjacent = [[] for i in range( len(switch_nodes) )]
speed = [[] for i in range( len(switch_nodes) )]
queue_property = [[] for i in range( len(switch_nodes) )]
line = topo_detail.readline()
item = line.split()
while len(item) > 0:
row = []
speed_row = []
item_info = []
for i in range(1,len(item)):
item_info = item[i].split(',')
if item_info[0] in nodes:
row.append( nodes.index(item_info[0]) )
else:
row.append( -1 )
speed_row.append(float(item_info[1])*1000000.0)
switch_index = nodes.index( switches[item[0]] )
adjacent[switch_index] = row
speed[switch_index] = speed_row
queue_property[switch_index] = [ [] for i in range( len(row) )]
for i in range( len( queue_property[switch_index]) ):
queue_property[switch_index][i] = [ [] for j in range( len(server) )]
for j in range( len(server) ):
queue_property[switch_index][i][j] = [0,0]
#if len(row) > max_column:
# max_column = len(row)
line = topo_detail.readline()
item = line.split()
topo_detail.close()
#print adjacent
# for speed_row in speed:
# for speed_detail in speed_row:
# print speed_detail + " "
# print "\n"
allocate_bandwidthout()
#begin the loop
while True :
#for t in range(1):
reset_bandwidthout()
time.sleep(3)
#call method to allocate queue on switch periodically
#it already has a map so it just has to get bandwidth information from polling
measure_bandwidth()
allocate_queue()
#end the loop
| mixisbad/qos | apps/qos/queue-allocator2.py | Python | apache-2.0 | 23,330 |
__author__ = 'dnovogrodsky'
import re
import pathlib
import os
from subprocess import call
#machineName = re.compile('(deflogix-pc.def-logix.local)')
machineName = re.compile('(andre_sp3.def-logix.local)')
inputSyslogMessages = open('messages','r')
outputSyslogMessages = open('syslogMessages.txt', 'w')
# read the input syslog line by line
for line in inputSyslogMessages:
# testing
print(line)
# check for matching the machine name in question
searchResults = re.search(machineName, line)
if searchResults:
# if there is a match, write line to new file
outputSyslogMessages.write(line)
# at end of all lines close file
inputSyslogMessages.close()
outputSyslogMessages.close()
# check that the file exists
input = pathlib.Path('syslogMessages.txt')
# run command line it ingest the file
if input.exists():
# ingest file into Hadoop
print("ready to ingest")
# run this command on the command line
## sudo -u hdfs hadoop fs -copyFromLocal ~/Desktop/CDRecords.txt /user/cloudera/vector/callRecords/
call("sudo -u hdfs hadoop fs -copyFromLocal syslogMessages.txt /user/cloudera/vector/callRecords/",
shell=True)
call("hadoop fs -copyFromLocal syslogMessages.txt /user/cloudera/vector/callRecords/",
shell=True)
os.system('sudo -u hdfs hadoop fs -copyFromLocal syslogMessages.txt /user/cloudera/vector/callRecords/')
else:
    # if there is no file, print an error message
    print('no file to ingest')
currentDirectory = os.getcwd()
print(currentDirectory) | DavidNovo/ExplorationsWithPython | importSyslogFiles.py | Python | mit | 1,528 |
# python imports
import json
# django imports
from django.contrib.auth.decorators import permission_required
from django.core.paginator import Paginator
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.http import require_POST
# lfs imports
import lfs.core.utils
from lfs.caching.utils import lfs_get_object_or_404
from lfs.core.utils import LazyEncoder
# review imports
from reviews.models import Review
# Views
@permission_required("core.manage_shop")
def review(request, review_id, template_name="manage/reviews/review.html"):
"""Displays review with provided review id.
"""
review = lfs_get_object_or_404(Review, pk=review_id)
return render(request, template_name, {
"review_inline": review_inline(request, review_id),
"review_filters_inline": review_filters_inline(request, review_id),
"selectable_reviews_inline": selectable_reviews_inline(request, review_id),
"review": review,
})
@permission_required("core.manage_shop")
def reviews(request, template_name="manage/reviews/reviews.html"):
"""Base view to display reviews overview.
"""
return render(request, template_name, {
"reviews_inline": reviews_inline(request),
"reviews_filters_inline": reviews_filters_inline(request),
})
# Parts
def review_inline(request, review_id, template_name="manage/reviews/review_inline.html"):
"""Displays review with provided review id.
"""
review_filters = request.session.get("review-filters", {})
review = lfs_get_object_or_404(Review, pk=review_id)
return render_to_string(template_name, request=request, context={
"review": review,
"name": review_filters.get("name", ""),
"active": review_filters.get("active", ""),
})
def reviews_inline(request, template_name="manage/reviews/reviews_inline.html"):
"""Renders the reviews section of the reviews overview view.
"""
review_filters = request.session.get("review-filters", {})
reviews = _get_filtered_reviews(request, review_filters)
paginator = Paginator(reviews, 30)
page = (request.POST if request.method == 'POST' else request.GET).get("page", 1)
page = paginator.page(page)
return render_to_string(template_name, request=request, context={
"reviews": reviews,
"page": page,
"paginator": paginator,
"start": review_filters.get("start", ""),
"end": review_filters.get("end", ""),
"active": review_filters.get("active", ""),
"name": review_filters.get("name", ""),
"ordering": request.session.get("review-ordering", "id"),
})
def review_filters_inline(request, review_id, template_name="manage/reviews/review_filters_inline.html"):
"""Renders the filter section of the review view.
"""
review_filters = request.session.get("review-filters", {})
review = lfs_get_object_or_404(Review, pk=review_id)
return render_to_string(template_name, request=request, context={
"review": review,
"name": review_filters.get("name", ""),
"active": review_filters.get("active", ""),
})
def reviews_filters_inline(request, template_name="manage/reviews/reviews_filters_inline.html"):
"""Renders the reviews filters section of the reviews overview view.
"""
review_filters = request.session.get("review-filters", {})
reviews = _get_filtered_reviews(request, review_filters)
paginator = Paginator(reviews, 30)
page = (request.POST if request.method == 'POST' else request.GET).get("page", 1)
page = paginator.page(page)
return render_to_string(template_name, request=request, context={
"page": page,
"paginator": paginator,
"start": review_filters.get("start", ""),
"end": review_filters.get("end", ""),
"active": review_filters.get("active", ""),
"name": review_filters.get("name", ""),
})
def selectable_reviews_inline(request, review_id, template_name="manage/reviews/selectable_reviews_inline.html"):
"""Display selectable reviews.
"""
review_filters = request.session.get("review-filters", {})
reviews = _get_filtered_reviews(request, review_filters)
paginator = Paginator(reviews, 30)
try:
page = int((request.POST if request.method == 'POST' else request.GET).get("page", 1))
except TypeError:
page = 1
page = paginator.page(page)
return render_to_string(template_name, request=request, context={
"paginator": paginator,
"page": page,
"review_id": int(review_id),
})
# Actions
@permission_required("core.manage_shop")
def set_reviews_page(request):
"""Sets the page for the reviews overview view.
"""
result = json.dumps({
"html": (
("#reviews-inline", reviews_inline(request)),
("#reviews-filters-inline", reviews_filters_inline(request)),
),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def set_selectable_reviews_page(request):
"""Sets the page of selectable reviews.
"""
review_id = request.GET.get("review-id", 1)
html = (
("#selectable-reviews", selectable_reviews_inline(request, review_id)),
("#selectable-reviews-inline", selectable_reviews_inline(request, review_id)),
)
result = json.dumps({
"html": html,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def set_ordering(request, ordering):
"""Sets review ordering given by passed request.
"""
req = request.POST if request.method == 'POST' else request.GET
request.session["review-ordering"] = ordering
if ordering == request.session.get("review-ordering"):
if request.session.get("review-ordering-order", "") == "":
request.session["review-ordering-order"] = "-"
else:
request.session["review-ordering-order"] = ""
else:
request.session["review-ordering-order"] = ""
if req.get("came-from") == "review":
review_id = req.get("review-id")
html = (
("#selectable-reviews-inline", selectable_reviews_inline(request, review_id)),
("#review-inline", review_inline(request, review_id)),
)
else:
html = (("#reviews-inline", reviews_inline(request)),)
result = json.dumps({
"html": html,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def set_review_filters(request):
"""Sets review filters given by passed request.
"""
req = request.POST if request.method == 'POST' else request.GET
review_filters = request.session.get("review-filters", {})
if request.POST.get("name", "") != "":
review_filters["name"] = request.POST.get("name")
else:
if review_filters.get("name"):
del review_filters["name"]
if request.POST.get("active", "") != "":
review_filters["active"] = request.POST.get("active")
else:
if review_filters.get("active"):
del review_filters["active"]
request.session["review-filters"] = review_filters
if req.get("came-from") == "review":
review_id = req.get("review-id")
html = (
("#selectable-reviews-inline", selectable_reviews_inline(request, review_id)),
("#review-inline", review_inline(request, review_id)),
)
else:
html = (
("#reviews-inline", reviews_inline(request)),
("#reviews-filters-inline", reviews_filters_inline(request)),
)
msg = _(u"Review filters have been set")
result = json.dumps({
"html": html,
"message": msg,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
def reset_review_filters(request):
"""Resets all review filters.
"""
req = request.POST if request.method == 'POST' else request.GET
if "review-filters" in request.session:
del request.session["review-filters"]
if req.get("came-from") == "review":
review_id = req.get("review-id")
html = (
("#selectable-reviews-inline", selectable_reviews_inline(request, review_id)),
("#review-inline", review_inline(request, review_id)),
("#review-filters-inline", review_filters_inline(request, review_id)),
)
else:
html = (
("#reviews-inline", reviews_inline(request)),
("#reviews-filters-inline", reviews_filters_inline(request)),
)
msg = _(u"Review filters have been reset")
result = json.dumps({
"html": html,
"message": msg,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
@permission_required("core.manage_shop")
@require_POST
def delete_review(request, review_id):
"""Deletes review with passed review id.
"""
try:
review = Review.objects.get(pk=review_id)
except Review.DoesNotExist:
pass
else:
review.delete()
try:
ordering = "%s%s" % (request.session.get("review-ordering-order", ""), request.session.get("review-ordering", "id"))
review = Review.objects.all().order_by(ordering)[0]
except IndexError:
url = reverse("lfs_manage_reviews")
else:
url = reverse("lfs_manage_review", kwargs={"review_id": review.id})
return lfs.core.utils.set_message_cookie(url, _(u"Review has been deleted."))
@permission_required("core.manage_shop")
def set_review_state(request, review_id):
"""Sets the state for given review.
"""
try:
review = Review.objects.get(pk=review_id)
except Review.DoesNotExist:
pass
else:
review.active = int(request.POST.get("active"))
review.save()
html = (
("#selectable-reviews-inline", selectable_reviews_inline(request, review_id)),
("#review-inline", review_inline(request, review_id)),
)
msg = _(u"Review state has been set")
result = json.dumps({
"html": html,
"message": msg,
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json')
# Private Methods
def _get_filtered_reviews(request, review_filters):
"""
"""
reviews = Review.objects.all()
review_ordering = request.session.get("review-ordering", "id")
review_ordering_order = request.session.get("review-ordering-order", "")
# Filter
name = review_filters.get("name", "")
if name != "":
reviews = reviews.filter(user_name__icontains=name)
active = review_filters.get("active", "")
if active != "":
reviews = reviews.filter(active=active)
# Ordering
if review_ordering == "product":
reviews = list(reviews)
if review_ordering_order == "-":
reviews.sort(key=lambda k: k.content.get_name(), reverse=True)
else:
reviews.sort(key=lambda k: k.content.get_name())
else:
reviews = reviews.order_by("%s%s" % (review_ordering_order, review_ordering))
return reviews
| diefenbach/django-lfs | lfs/manage/views/review.py | Python | bsd-3-clause | 11,457 |
#! /usr/bin/python
# -*- coding: utf8 -*-
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import set_keep
import numpy as np
import time
from PIL import Image
import os
import io
import json
"""
You will learn:
1. How to save time-series data (e.g. sentence) into TFRecord format file.
2. How to read time-series data from TFRecord format file.
3. How to create inputs, targets and mask.
Reference
----------
1. Google's im2txt - MSCOCO Image Captioning example
2. TFRecord in http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/
3. Batching and Padding data in http://www.wildml.com/2016/08/rnns-in-tensorflow-a-practical-guide-and-undocumented-features/
"""
def _int64_feature(value):
"""Wrapper for inserting an int64 Feature into a SequenceExample proto,
    e.g., an integer label.
"""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
"""Wrapper for inserting a bytes Feature into a SequenceExample proto,
    e.g., an image in bytes
"""
# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[str(value)]))
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature_list(values):
"""Wrapper for inserting an int64 FeatureList into a SequenceExample proto,
    e.g., a sentence as a list of ints
"""
return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
"""Wrapper for inserting a bytes FeatureList into a SequenceExample proto,
    e.g., a sentence as a list of bytes
"""
return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
## 1. Save data into TFRecord =====================================================
cwd = os.getcwd()
IMG_DIR = cwd + '/data/cat/'
SEQ_FIR = cwd + '/data/cat_caption.json'
VOC_FIR = cwd + '/vocab.txt'
# read image captions from JSON
with tf.gfile.FastGFile(SEQ_FIR, "r") as f:
caption_data = json.loads(str(f.read(), encoding = "utf-8"))
processed_capts, img_capts = [], []
for idx in range(len(caption_data['images'])):
img_capt = caption_data['images'][idx]['caption']
img_capts.append(img_capt)
processed_capts.append(tl.nlp.process_sentence(img_capt, start_word="<S>", end_word="</S>"))
print("Original Captions: %s" % img_capts)
print("Processed Captions: %s\n" % processed_capts)
# build vocab
_ = tl.nlp.create_vocab(processed_capts, word_counts_output_file=VOC_FIR, min_word_count=1)
vocab = tl.nlp.Vocabulary(VOC_FIR, start_word="<S>", end_word="</S>", unk_word="<UNK>")
# save
writer = tf.python_io.TFRecordWriter("train.cat_caption")
for idx in range(len(caption_data['images'])):
# get data
img_name = caption_data['images'][idx]['file_name']
img_capt = '<S> ' + caption_data['images'][idx]['caption'] + ' </S>'
img_capt_ids = [vocab.word_to_id(word) for word in img_capt.split(' ')]
print("%s : %s : %s" % (img_name, img_capt, img_capt_ids))
img = Image.open(IMG_DIR+img_name)
img = img.resize((299, 299))
# tl.visualize.frame(I=img, second=0.2, saveable=False, name=img_name, fig_idx=12234)
img_raw = img.tobytes()
img_capt_b = [v.encode() for v in img_capt.split(' ')]
context = tf.train.Features(feature={ # Non-serial data uses Feature
"image/img_raw": _bytes_feature(img_raw),
})
feature_lists = tf.train.FeatureLists(feature_list={ # Serial data uses FeatureLists
"image/caption": _bytes_feature_list(img_capt_b),
"image/caption_ids": _int64_feature_list(img_capt_ids)
})
sequence_example = tf.train.SequenceExample(
context=context, feature_lists=feature_lists)
writer.write(sequence_example.SerializeToString()) # Serialize To String
writer.close()
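# Optional sanity check (illustrative; tf_record_iterator lives in the same
# tf.python_io module as the TFRecordWriter used above):
# n = sum(1 for _ in tf.python_io.tf_record_iterator("train.cat_caption"))
# print("wrote %d SequenceExamples" % n)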
## 2. Simple read one image =======================================================
filename_queue = tf.train.string_input_producer(["train.cat_caption"])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)  # returns (key, serialized record) from the queue
# features, sequence_features = tf.parse_single_example(serialized_example, # see parse_single_sequence_example for sequence example
features, sequence_features = tf.parse_single_sequence_example(serialized_example,
context_features={
'image/img_raw' : tf.FixedLenFeature([], tf.string),
},
sequence_features={
"image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string),
"image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64),
}
)
c = tf.contrib.learn.run_n(features, n=1, feed_dict=None)
from PIL import Image
im = Image.frombytes('RGB', (299, 299), c[0]['image/img_raw'])
tl.visualize.frame(np.asarray(im), second=1, saveable=False, name='frame', fig_idx=1236)
c = tf.contrib.learn.run_n(sequence_features, n=1, feed_dict=None)
print(c[0])
## 3. Prefetch serialized SequenceExample protos ==================================
def distort_image(image, thread_id):
"""Perform random distortions on an image.
Args:
image: A float32 Tensor of shape [height, width, 3] with values in [0, 1).
thread_id: Preprocessing thread id used to select the ordering of color
distortions. There should be a multiple of 2 preprocessing threads.
  Returns:
distorted_image: A float32 Tensor of shape [height, width, 3] with values in
[0, 1].
"""
# Randomly flip horizontally.
with tf.name_scope("flip_horizontal"):#, values=[image]): # DH MOdify
# with tf.name_scope("flip_horizontal", values=[image]):
image = tf.image.random_flip_left_right(image)
# Randomly distort the colors based on thread id.
color_ordering = thread_id % 2
with tf.name_scope("distort_color"):#, values=[image]): # DH MOdify
# with tf.name_scope("distort_color", values=[image]): # DH MOdify
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.032)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
# def process_image(encoded_image,
# is_training,
# height,
# width,
# resize_height=346,
# resize_width=346,
# thread_id=0,
# image_format="jpeg"):
# """Decode an image, resize and apply random distortions.
# In training, images are distorted slightly differently depending on thread_id.
# Args:
# encoded_image: String Tensor containing the image.
# is_training: Boolean; whether preprocessing for training or eval.
# height: Height of the output image.
# width: Width of the output image.
# resize_height: If > 0, resize height before crop to final dimensions.
# resize_width: If > 0, resize width before crop to final dimensions.
# thread_id: Preprocessing thread id used to select the ordering of color
# distortions. There should be a multiple of 2 preprocessing threads.
# image_format: "jpeg" or "png".
# Returns:
# A float32 Tensor of shape [height, width, 3] with values in [-1, 1].
# Raises:
# ValueError: If image_format is invalid.
# """
# # Helper function to log an image summary to the visualizer. Summaries are
# # only logged in thread 0.
# def image_summary(name, image):
# if not thread_id:
# tf.image_summary(name, tf.expand_dims(image, 0))
#
# # Decode image into a float32 Tensor of shape [?, ?, 3] with values in [0, 1).
# with tf.name_scope("decode"):#, values=[encoded_image]): # DH modify
# # with tf.name_scope("decode", values=[encoded_image]): # DH modify
# if image_format == "jpeg":
# image = tf.image.decode_jpeg(encoded_image, channels=3)
# elif image_format == "png":
# image = tf.image.decode_png(encoded_image, channels=3)
# else:
# raise ValueError("Invalid image format: %s" % image_format)
# image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# image_summary("original_image", image)
#
# # Resize image.
# assert (resize_height > 0) == (resize_width > 0)
# if resize_height:
# # image = tf.image.resize_images(image,
# # size=[resize_height, resize_width],
# # method=tf.image.ResizeMethod.BILINEAR)
#
# image = tf.image.resize_images(image, # DH Modify
# new_height=resize_height,
# new_width=resize_width,
# method=tf.image.ResizeMethod.BILINEAR)
#
# # Crop to final dimensions.
# if is_training:
# image = tf.random_crop(image, [height, width, 3])
# else:
# # Central crop, assuming resize_height > height, resize_width > width.
# image = tf.image.resize_image_with_crop_or_pad(image, height, width)
#
# image_summary("resized_image", image)
#
# # Randomly distort the image.
# if is_training:
# image = distort_image(image, thread_id)
#
# image_summary("final_image", image)
#
# # Rescale to [-1,1] instead of [0, 1]
# image = tf.sub(image, 0.5)
# image = tf.mul(image, 2.0)
# return image
def prefetch_input_data(reader,
file_pattern,
is_training,
batch_size,
values_per_shard,
input_queue_capacity_factor=16,
num_reader_threads=1,
shard_queue_name="filename_queue",
value_queue_name="input_queue"):
"""Prefetches string values from disk into an input queue.
In training the capacity of the queue is important because a larger queue
means better mixing of training examples between shards. The minimum number of
values kept in the queue is values_per_shard * input_queue_capacity_factor,
where input_queue_memory factor should be chosen to trade-off better mixing
with memory usage.
Args:
reader: Instance of tf.ReaderBase.
file_pattern: Comma-separated list of file patterns (e.g.
/tmp/train_data-?????-of-00100).
is_training: Boolean; whether prefetching for training or eval.
batch_size: Model batch size used to determine queue capacity.
values_per_shard: Approximate number of values per shard.
input_queue_capacity_factor: Minimum number of values to keep in the queue
in multiples of values_per_shard. See comments above.
num_reader_threads: Number of reader threads to fill the queue.
shard_queue_name: Name for the shards filename queue.
value_queue_name: Name for the values input queue.
Returns:
A Queue containing prefetched string values.
"""
data_files = []
for pattern in file_pattern.split(","):
data_files.extend(tf.gfile.Glob(pattern))
if not data_files:
tf.logging.fatal("Found no input files matching %s", file_pattern)
else:
tf.logging.info("Prefetching values from %d files matching %s",
len(data_files), file_pattern)
if is_training:
print(" is_training == True : RandomShuffleQueue")
filename_queue = tf.train.string_input_producer(
data_files, shuffle=True, capacity=16, name=shard_queue_name)
min_queue_examples = values_per_shard * input_queue_capacity_factor
capacity = min_queue_examples + 100 * batch_size
values_queue = tf.RandomShuffleQueue(
capacity=capacity,
min_after_dequeue=min_queue_examples,
dtypes=[tf.string],
name="random_" + value_queue_name)
else:
print(" is_training == False : FIFOQueue")
filename_queue = tf.train.string_input_producer(
data_files, shuffle=False, capacity=1, name=shard_queue_name)
capacity = values_per_shard + 3 * batch_size
values_queue = tf.FIFOQueue(
capacity=capacity, dtypes=[tf.string], name="fifo_" + value_queue_name)
enqueue_ops = []
for _ in range(num_reader_threads):
_, value = reader.read(filename_queue)
enqueue_ops.append(values_queue.enqueue([value]))
tf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(
values_queue, enqueue_ops))
tf.scalar_summary(
"queue/%s/fraction_of_%d_full" % (values_queue.name, capacity),
tf.cast(values_queue.size(), tf.float32) * (1. / capacity))
return values_queue
is_training = True
resize_height = resize_width = 346
height = width = 299
# start to read
reader = tf.TFRecordReader()
input_queue = prefetch_input_data(
reader,
file_pattern = "train.cat_caption", # sets train.???_caption to read many files
is_training = is_training, # if training, shuffle and random choice
batch_size = 4,
values_per_shard = 2300, # mixing between shards in training.
input_queue_capacity_factor = 2, # minimum number of shards to keep in the input queue.
num_reader_threads = 1 # number of threads for prefetching SequenceExample protos.
)
serialized_sequence_example = input_queue.dequeue()
# serialized_sequence_example = tf.train.string_input_producer(["train.cat_caption"]) # don't work
context, sequence = tf.parse_single_sequence_example(
serialized=serialized_sequence_example,
context_features={
"image/img_raw": tf.FixedLenFeature([], dtype=tf.string)
},
sequence_features={
"image/caption": tf.FixedLenSequenceFeature([], dtype=tf.string),
"image/caption_ids": tf.FixedLenSequenceFeature([], dtype=tf.int64),
}
)
img = tf.decode_raw(context["image/img_raw"], tf.uint8)
img = tf.reshape(img, [height, width, 3])
img = tf.image.convert_image_dtype(img, dtype=tf.float32)
try:
# for TensorFlow 0.11
img = tf.image.resize_images(img,
size=(resize_height, resize_width),
method=tf.image.ResizeMethod.BILINEAR)
except:
# for TensorFlow 0.10
img = tf.image.resize_images(img,
new_height=resize_height,
new_width=resize_width,
method=tf.image.ResizeMethod.BILINEAR)
# Crop to final dimensions.
if is_training:
img = tf.random_crop(img, [height, width, 3])
else:
# Central crop, assuming resize_height > height, resize_width > width.
img = tf.image.resize_image_with_crop_or_pad(img, height, width)
# Randomly distort the image.
if is_training:
img = distort_image(img, thread_id=0)
# Rescale to [-1, 1] instead of [0, 1]
img = tf.sub(img, 0.5)
img = tf.mul(img, 2.0)
img_cap = sequence["image/caption"]
img_cap_ids = sequence["image/caption_ids"]
img_batch, img_cap_batch, img_cap_ids_batch = tf.train.batch([img, img_cap, img_cap_ids], # Note: shuffle_batch doesn't support dynamic_pad
batch_size=4,
capacity=50000,
dynamic_pad=True, # string list pad with '', int list pad with 0
num_threads=4)
sess = tf.Session()
# sess.run(tf.initialize_all_variables())
tl.layers.initialize_global_variables(sess)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(3):
print("Step %s" % _)
# print(sess.run([img, img_cap, img_cap_ids])) # one example only
imgs, caps, caps_id = sess.run([img_batch, img_cap_batch, img_cap_ids_batch]) # batch of examples with dynamic_pad
print(caps)
print(caps_id)
tl.visualize.images2d((imgs+1)/2, second=1, saveable=False, name='batch', dtype=None, fig_idx=202025)
coord.request_stop()
coord.join(threads)
sess.close()
## 4. Prefetch serialized SequenceExample protos. Create MASK and TARGET =======
def batch_with_dynamic_pad(images_and_captions,
batch_size,
queue_capacity,
add_summaries=True):
"""Batches input images and captions.
This function splits the caption into an input sequence and a target sequence,
where the target sequence is the input sequence right-shifted by 1. Input and
target sequences are batched and padded up to the maximum length of sequences
in the batch. A mask is created to distinguish real words from padding words.
Example:
Actual captions in the batch ('-' denotes padded character):
[
[ 1 2 5 4 5 ],
[ 1 2 3 4 - ],
[ 1 2 3 - - ],
]
input_seqs:
[
[ 1 2 3 4 ],
[ 1 2 3 - ],
[ 1 2 - - ],
]
target_seqs:
[
[ 2 3 4 5 ],
[ 2 3 4 - ],
[ 2 3 - - ],
]
mask:
[
[ 1 1 1 1 ],
[ 1 1 1 0 ],
[ 1 1 0 0 ],
]
Args:
images_and_captions: A list of pairs [image, caption], where image is a
Tensor of shape [height, width, channels] and caption is a 1-D Tensor of
any length. Each pair will be processed and added to the queue in a
separate thread.
batch_size: Batch size.
queue_capacity: Queue capacity.
add_summaries: If true, add caption length summaries.
Returns:
images: A Tensor of shape [batch_size, height, width, channels].
input_seqs: An int32 Tensor of shape [batch_size, padded_length].
target_seqs: An int32 Tensor of shape [batch_size, padded_length].
mask: An int32 0/1 Tensor of shape [batch_size, padded_length].
"""
enqueue_list = []
for image, caption in images_and_captions:
caption_length = tf.shape(caption)[0]
input_length = tf.expand_dims(tf.sub(caption_length, 1), 0)
input_seq = tf.slice(caption, [0], input_length)
target_seq = tf.slice(caption, [1], input_length)
indicator = tf.ones(input_length, dtype=tf.int32)
enqueue_list.append([image, input_seq, target_seq, indicator])
images, input_seqs, target_seqs, mask = tf.train.batch_join(
enqueue_list,
batch_size=batch_size,
capacity=queue_capacity,
dynamic_pad=True,
name="batch_and_pad")
if add_summaries:
lengths = tf.add(tf.reduce_sum(mask, 1), 1)
tf.scalar_summary("caption_length/batch_min", tf.reduce_min(lengths))
tf.scalar_summary("caption_length/batch_max", tf.reduce_max(lengths))
tf.scalar_summary("caption_length/batch_mean", tf.reduce_mean(lengths))
return images, input_seqs, target_seqs, mask
images, input_seqs, target_seqs, input_mask = (
batch_with_dynamic_pad(images_and_captions=[[img, img_cap]],
batch_size=4,
queue_capacity=50000)
)
sess = tf.Session()
sess.run(tf.initialize_all_variables())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(3):
print("Step %s" % _)
imgs, inputs, targets, masks = sess.run([images, input_seqs, target_seqs, input_mask])
print(inputs)
print(targets)
print(masks)
tl.visualize.images2d((imgs+1)/2, second=1, saveable=False, name='batch', dtype=None, fig_idx=202025)
coord.request_stop()
coord.join(threads)
sess.close()
#
| trhongbinwang/data_science_journey | deep_learning/tensorlayer/tutorial_tfrecord3.py | Python | apache-2.0 | 19,856 |
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2013 [email protected] |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
from lib import *
import time
import config
def ajax_action():
try:
action = html.var("action")
if action == "reschedule":
action_reschedule()
else:
raise MKGeneralException("Invalid action '%s'" % action)
except Exception, e:
html.write("['ERROR', %r]\n" % str(e))
def action_reschedule():
if not config.may("action.reschedule"):
raise MKGeneralException("You are not allowed to reschedule checks.")
site = html.var("site")
host = html.var("host", "")
if not host:
raise MKGeneralException("Action reschedule: missing host name")
service = html.var_utf8("service", "")
wait_svc = html.var_utf8("wait_svc", "")
if service:
cmd = "SVC"
what = "service"
spec = "%s;%s" % (host, service.encode("utf-8"))
if wait_svc:
wait_spec = u'%s;%s' % (host, wait_svc)
add_filter = "Filter: service_description = %s\n" % wait_svc
else:
wait_spec = spec
add_filter = "Filter: service_description = %s\n" % service
else:
cmd = "HOST"
what = "host"
spec = host
wait_spec = spec
add_filter = ""
try:
now = int(time.time())
html.live.command("[%d] SCHEDULE_FORCED_%s_CHECK;%s;%d" % (now, cmd, spec, now), site)
html.live.set_only_sites([site])
query = u"GET %ss\n" \
"WaitObject: %s\n" \
"WaitCondition: last_check >= %d\n" \
"WaitTimeout: %d\n" \
"WaitTrigger: check\n" \
"Columns: last_check state plugin_output\n" \
"Filter: host_name = %s\n%s" \
% (what, wait_spec, now, config.reschedule_timeout * 1000, host, add_filter)
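        # For a service check the query renders roughly as (illustrative host/service):
        #   GET services
        #   WaitObject: srv01;CPU load
        #   WaitCondition: last_check >= <now>
        #   WaitTimeout: <reschedule_timeout in ms>
        #   WaitTrigger: check
        #   Columns: last_check state plugin_output
        #   Filter: host_name = srv01
        #   Filter: service_description = CPU load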
row = html.live.query_row(query)
html.live.set_only_sites()
last_check = row[0]
if last_check < now:
html.write("['TIMEOUT', 'Check not executed within %d seconds']\n" % (config.reschedule_timeout))
else:
if service == "Check_MK":
# Passive services triggered by Check_MK often are updated
# a few ms later. We introduce a small wait time in order
# to increase the chance for the passive services already
# updated also when we return.
time.sleep(0.7);
html.write("['OK', %d, %d, %r]\n" % (row[0], row[1], row[2].encode("utf-8")))
except Exception, e:
html.live.set_only_sites()
raise MKGeneralException(_("Cannot reschedule check: %s") % e)
| tomas-edwardsson/check_mk | web/htdocs/actions.py | Python | gpl-2.0 | 4,100 |
import os
import random
from scrapy.conf import settings
from fake_useragent import UserAgent
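# To activate this middleware, point DOWNLOADER_MIDDLEWARES at it in settings.py,
# e.g. (the priority value is a typical choice, not mandated by Scrapy):
# DOWNLOADER_MIDDLEWARES = {
#     'govbenefitsspider.middlewares.RandomUserAgentMiddleware': 400,
# }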
class RandomUserAgentMiddleware(object):
def __init__(self):
super(RandomUserAgentMiddleware, self).__init__()
self.ua = UserAgent()
def process_request(self, request, spider):
request.headers.setdefault('User-Agent', self.ua.random) | rdempsey/ddl-data-wrangling | part-two-web-scraping/govbenefitsspider/govbenefitsspider/middlewares.py | Python | mit | 361 |
from scrapy.exceptions import IgnoreRequest
import redis_const
class HttpStatusMiddleware(object):
def process_response(self, request, response, spider):
if response.status == 200:
pass
elif response.status == 404 or response.status == 410 or response.status == 400:
if "com/people" in response.url:
url_name = response.url.split("/")[-1]
spider.flog.warning("get invalid status:@"+response.url+" give up")
spider.rclient.smove(spider.seen_S, spider.fail_S, url_name)
raise IgnoreRequest()
elif response.status == 403:
            spider.send_mail("http status 403", "see if cookie is invalid: " + response.body)
raise IgnoreRequest()
return response
| jiady/htdb | crawler/crawler/HttpStatusMiddleware.py | Python | mit | 786 |
import datetime
import logging
import os
import socket
import time
from ...common.interfaces import AbstractInfoWidget
from ..Console import screen
logger = logging.getLogger(__name__)
class PhantomProgressBarWidget(AbstractInfoWidget):
"""
Widget that displays progressbar
"""
def get_index(self):
return 0
def __init__(self, sender):
AbstractInfoWidget.__init__(self)
self.krutilka = screen.krutilka()
self.owner = sender
self.ammo_progress = 0
self.eta_file = None
info = self.owner.get_info()
if info:
self.ammo_count = int(info.ammo_count)
self.test_duration = int(info.duration)
else:
self.ammo_count = 1
self.test_duration = 1
def render(self, screen):
res = ""
dur_seconds = int(time.time()) - int(self.owner.start_time)
eta_time = 'N/A'
eta_secs = -1
progress = 0
color_bg = screen.markup.BG_CYAN
color_fg = screen.markup.CYAN
if self.test_duration and self.test_duration >= dur_seconds:
color_bg = screen.markup.BG_GREEN
color_fg = screen.markup.GREEN
eta_secs = self.test_duration - dur_seconds
eta_time = datetime.timedelta(seconds=eta_secs)
progress = float(dur_seconds) / self.test_duration
elif self.ammo_progress:
left_part = self.ammo_count - self.ammo_progress
if left_part > 0:
eta_secs = int(
float(dur_seconds) / float(self.ammo_progress) * float(left_part))
else:
eta_secs = 0
eta_time = datetime.timedelta(seconds=eta_secs)
if self.ammo_progress < self.ammo_count:
progress = float(self.ammo_progress) / float(self.ammo_count)
else:
progress = 0.5
if self.eta_file:
handle = open(self.eta_file, 'w')
handle.write(str(eta_secs))
handle.close()
perc = float(int(1000 * progress)) / 10
str_perc = str(perc) + "%"
pb_width = screen.right_panel_width - 1 - len(str_perc)
progress_chars = '=' * (int(pb_width * progress) - 1)
progress_chars += next(self.krutilka)
res += color_bg + progress_chars + screen.markup.RESET + color_fg
res += '~' * (pb_width - int(pb_width * progress)
) + screen.markup.RESET + ' '
res += str_perc + "\n"
eta = 'ETA: %s' % eta_time
dur = 'Duration: %s' % str(datetime.timedelta(seconds=dur_seconds))
spaces = ' ' * (screen.right_panel_width - len(eta) - len(dur) - 1)
res += dur + ' ' + spaces + eta
return res
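    # Worked example of the ETA branch above (illustrative numbers): with
    # ammo_count=1000 and ammo_progress=250 after 60s, left_part=750 and
    # eta_secs = int(60/250.0 * 750) = 180, displayed as ETA 0:03:00.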
def on_aggregated_data(self, data, stats):
self.ammo_progress += data["overall"]["interval_real"]["len"]
class PhantomInfoWidget(AbstractInfoWidget):
"""
Widget with information about current run state
"""
def get_index(self):
return 2
def __init__(self, sender):
AbstractInfoWidget.__init__(self)
self.owner = sender
self.instances = 0
self.planned = 0
self.RPS = 0
self.selfload = 0
self.time_lag = 0
self.planned_rps_duration = 0
info = self.owner.get_info()
if info:
self.instances_limit = int(info.instances)
self.ammo_count = int(info.ammo_count)
else:
self.instances_limit = 1
self.ammo_count = 1
def render(self, screen):
res = ''
info = self.owner.get_info()
if self.owner.phantom:
template = "Hosts: %s => %s:%s\n Ammo: %s\nCount: %s\n Load: %s"
data = (
socket.gethostname(), info.address, info.port,
os.path.basename(info.ammo_file), self.ammo_count,
' '.join(info.rps_schedule))
res = template % data
res += "\n\n"
res += "Active instances: "
if float(self.instances) / self.instances_limit > 0.8:
res += screen.markup.RED + str(self.instances) + screen.markup.RESET
elif float(self.instances) / self.instances_limit > 0.5:
res += screen.markup.YELLOW + str(
self.instances) + screen.markup.RESET
else:
res += str(self.instances)
res += "\nPlanned requests: %s for %s\nActual responses: " % (
self.planned, datetime.timedelta(seconds=self.planned_rps_duration))
if not self.planned == self.RPS:
res += screen.markup.YELLOW + str(self.RPS) + screen.markup.RESET
else:
res += str(self.RPS)
res += "\n Accuracy: "
if self.selfload < 80:
res += screen.markup.RED + (
'%.2f' % self.selfload) + screen.markup.RESET
elif self.selfload < 95:
res += screen.markup.YELLOW + (
'%.2f' % self.selfload) + screen.markup.RESET
else:
res += ('%.2f' % self.selfload)
res += "%\n Time lag: "
if self.time_lag > self.owner.buffered_seconds * 5:
logger.debug("Time lag: %s", self.time_lag)
res += screen.markup.RED + str(
datetime.timedelta(seconds=self.time_lag)) + screen.markup.RESET
elif self.time_lag > self.owner.buffered_seconds:
res += screen.markup.YELLOW + str(
datetime.timedelta(seconds=self.time_lag)) + screen.markup.RESET
else:
res += str(datetime.timedelta(seconds=self.time_lag))
return res
def on_aggregated_data(self, data, stats):
self.RPS = data["overall"]["interval_real"]["len"]
self.planned = stats["metrics"]["reqps"]
self.instances = stats["metrics"]["instances"]
# TODO:
# self.selfload = second_aggregate_data.overall.selfload
# self.time_lag = int(time.time() - time.mktime(
# second_aggregate_data.time.timetuple()))
| yandex/yandex-tank | yandextank/plugins/Phantom/widget.py | Python | lgpl-2.1 | 6,082 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2004 Werner Mayer LGPL
import os,sys,string
#os.chdir("E:\\Develop\\FreeCADWin\\scripts")
file = open(sys.argv[1])
if(len(sys.argv) > 3):
    sys.stderr.write("Wrong Parameter\n Usage:\n PythonToCPP Infile.py [Outfile]\n")
    sys.exit(1)
if(len(sys.argv) > 2):
out = open(sys.argv[2],"w");
else:
out = sys.stdout
lines = file.readlines()
# We want to use this script for files in another directory, so we extract the actual file name
fn = os.path.basename(sys.argv[1])
out.write("const char " + fn[:-3] + "[] =")
for line in lines:
# remove new line
line2 = string.rstrip(line)
# replace special chars
line2 = string.replace(line2,'\\','\\\\')
line2 = string.replace(line2,'\"','\\\"')
line2 = string.replace(line2,"\'","\\\'")
# output
#out.write(line)
out.write( '\"' + line2 + '\\n\"\n')
out.write(";\n\n\n");
| YuanYouYuan/FreeCAD | src/Tools/PythonToCPP.py | Python | lgpl-2.1 | 949 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
salary_slips = get_salary_slips(filters)
columns, earning_types, ded_types = get_columns(salary_slips)
ss_earning_map = get_ss_earning_map(salary_slips)
ss_ded_map = get_ss_ded_map(salary_slips)
data = []
for ss in salary_slips:
row = [ss.employee, ss.employee_name, ss.branch, ss.department, ss.designation,
			ss.company, ss.month, ss.leave_without_pay, ss.payment_days]
for e in earning_types:
row.append(ss_earning_map.get(ss.name, {}).get(e))
row += [ss.arrear_amount, ss.leave_encashment_amount, ss.gross_pay]
for d in ded_types:
row.append(ss_ded_map.get(ss.name, {}).get(d))
row += [ss.total_deduction, ss.net_pay]
data.append(row)
return columns, data
def get_columns(salary_slips):
columns = [
_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch") + ":Link/Branch:120",
_("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
_("Company") + ":Link/Company:120", _("Month") + "::80", _("Leave Without Pay") + ":Float:130",
_("Payment Days") + ":Float:120"
]
earning_types = frappe.db.sql_list("""select distinct e_type from `tabSalary Slip Earning`
where e_modified_amount != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
ded_types = frappe.db.sql_list("""select distinct d_type from `tabSalary Slip Deduction`
where d_modified_amount != 0 and parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]))
columns = columns + [(e + ":Currency:120") for e in earning_types] + \
["Arrear Amount:Currency:120", "Leave Encashment Amount:Currency:150",
"Gross Pay:Currency:120"] + [(d + ":Currency:120") for d in ded_types] + \
["Total Deduction:Currency:120", "Net Pay:Currency:120"]
return columns, earning_types, ded_types
def get_salary_slips(filters):
conditions, filters = get_conditions(filters)
salary_slips = frappe.db.sql("""select * from `tabSalary Slip` where docstatus = 1 %s
order by employee, month""" % conditions, filters, as_dict=1)
if not salary_slips:
msgprint(_("No salary slip found for month: ") + cstr(filters.get("month")) +
_(" and year: ") + cstr(filters.get("fiscal_year")), raise_exception=1)
return salary_slips
def get_conditions(filters):
conditions = ""
if filters.get("month"):
month = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
filters["month"] = month
conditions += " and month = %(month)s"
if filters.get("fiscal_year"): conditions += " and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
def get_ss_earning_map(salary_slips):
ss_earnings = frappe.db.sql("""select parent, e_type, e_modified_amount
from `tabSalary Slip Earning` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_earning_map = {}
for d in ss_earnings:
ss_earning_map.setdefault(d.parent, frappe._dict()).setdefault(d.e_type, [])
ss_earning_map[d.parent][d.e_type] = flt(d.e_modified_amount)
return ss_earning_map
def get_ss_ded_map(salary_slips):
ss_deductions = frappe.db.sql("""select parent, d_type, d_modified_amount
from `tabSalary Slip Deduction` where parent in (%s)""" %
(', '.join(['%s']*len(salary_slips))), tuple([d.name for d in salary_slips]), as_dict=1)
ss_ded_map = {}
for d in ss_deductions:
ss_ded_map.setdefault(d.parent, frappe._dict()).setdefault(d.d_type, [])
ss_ded_map[d.parent][d.d_type] = flt(d.d_modified_amount)
return ss_ded_map | mahabuber/erpnext | erpnext/hr/report/monthly_salary_register/monthly_salary_register.py | Python | agpl-3.0 | 4,082 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for nova.compute.rpcapi
"""
from oslo.config import cfg
from nova.compute import rpcapi as compute_rpcapi
from nova import context
from nova import db
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova import test
CONF = cfg.CONF
class ComputeRpcAPITestCase(test.TestCase):
def setUp(self):
super(ComputeRpcAPITestCase, self).setUp()
self.context = context.get_admin_context()
inst = db.instance_create(self.context, {'host': 'fake_host',
'instance_type_id': 1})
self.fake_instance = jsonutils.to_primitive(inst)
def test_serialized_instance_has_name(self):
self.assertTrue('name' in self.fake_instance)
def _test_compute_api(self, method, rpc_method, **kwargs):
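        """Build the expected topic and message for `method`, stub out the
        module-level rpc.<rpc_method> function, invoke the rpcapi method with
        `kwargs`, and verify the stub received the expected arguments.
        """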
ctxt = context.RequestContext('fake_user', 'fake_project')
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
else:
rpcapi_class = compute_rpcapi.ComputeAPI
rpcapi = rpcapi_class()
expected_retval = 'foo' if method == 'call' else None
expected_version = kwargs.pop('version', rpcapi.BASE_RPC_API_VERSION)
expected_msg = rpcapi.make_msg(method, **kwargs)
if 'host_param' in expected_msg['args']:
host_param = expected_msg['args']['host_param']
del expected_msg['args']['host_param']
expected_msg['args']['host'] = host_param
elif 'host' in expected_msg['args']:
del expected_msg['args']['host']
if 'destination' in expected_msg['args']:
del expected_msg['args']['destination']
expected_msg['version'] = expected_version
cast_and_call = ['confirm_resize', 'stop_instance']
if rpc_method == 'call' and method in cast_and_call:
kwargs['cast'] = False
if 'host' in kwargs:
host = kwargs['host']
elif 'destination' in kwargs:
host = kwargs['destination']
else:
host = kwargs['instance']['host']
expected_topic = '%s.%s' % (CONF.compute_topic, host)
self.fake_args = None
self.fake_kwargs = None
def _fake_rpc_method(*args, **kwargs):
self.fake_args = args
self.fake_kwargs = kwargs
if expected_retval:
return expected_retval
self.stubs.Set(rpc, rpc_method, _fake_rpc_method)
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, expected_retval)
expected_args = [ctxt, expected_topic, expected_msg]
for arg, expected_arg in zip(self.fake_args, expected_args):
self.assertEqual(arg, expected_arg)
def test_add_aggregate_host(self):
self._test_compute_api('add_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={}, version='2.14')
def test_add_fixed_ip_to_instance(self):
self._test_compute_api('add_fixed_ip_to_instance', 'cast',
instance=self.fake_instance, network_id='id')
def test_attach_volume(self):
self._test_compute_api('attach_volume', 'cast',
instance=self.fake_instance, volume_id='id', mountpoint='mp')
def test_change_instance_metadata(self):
self._test_compute_api('change_instance_metadata', 'cast',
instance=self.fake_instance, diff={})
def test_check_can_live_migrate_destination(self):
self._test_compute_api('check_can_live_migrate_destination', 'call',
instance=self.fake_instance,
destination='dest', block_migration=True,
disk_over_commit=True)
def test_check_can_live_migrate_source(self):
self._test_compute_api('check_can_live_migrate_source', 'call',
instance=self.fake_instance,
dest_check_data={"test": "data"})
def test_check_instance_shared_storage(self):
self._test_compute_api('check_instance_shared_storage', 'call',
instance=self.fake_instance, data='foo', version='2.28')
def test_confirm_resize_cast(self):
self._test_compute_api('confirm_resize', 'cast',
instance=self.fake_instance, migration={'id': 'foo'},
host='host', reservations=list('fake_res'), version='2.7')
def test_confirm_resize_call(self):
self._test_compute_api('confirm_resize', 'call',
instance=self.fake_instance, migration={'id': 'foo'},
host='host', reservations=list('fake_res'), version='2.7')
def test_detach_volume(self):
self._test_compute_api('detach_volume', 'cast',
instance=self.fake_instance, volume_id='id')
def test_finish_resize(self):
self._test_compute_api('finish_resize', 'cast',
instance=self.fake_instance, migration={'id': 'foo'},
image='image', disk_info='disk_info', host='host',
reservations=list('fake_res'), version='2.8')
def test_finish_revert_resize(self):
self._test_compute_api('finish_revert_resize', 'cast',
instance=self.fake_instance, migration={'id': 'fake_id'},
host='host', reservations=list('fake_res'), version='2.13')
def test_get_console_output(self):
self._test_compute_api('get_console_output', 'call',
instance=self.fake_instance, tail_length='tl')
def test_get_console_pool_info(self):
self._test_compute_api('get_console_pool_info', 'call',
console_type='type', host='host')
def test_get_console_topic(self):
self._test_compute_api('get_console_topic', 'call', host='host')
def test_get_diagnostics(self):
self._test_compute_api('get_diagnostics', 'call',
instance=self.fake_instance)
def test_get_vnc_console(self):
self._test_compute_api('get_vnc_console', 'call',
instance=self.fake_instance, console_type='type')
def test_get_spice_console(self):
self._test_compute_api('get_spice_console', 'call',
instance=self.fake_instance, console_type='type',
version='2.24')
def test_validate_console_port(self):
self._test_compute_api('validate_console_port', 'call',
instance=self.fake_instance, port="5900",
console_type="novnc",
version="2.26")
def test_host_maintenance_mode(self):
self._test_compute_api('host_maintenance_mode', 'call',
host_param='param', mode='mode', host='host')
def test_host_power_action(self):
self._test_compute_api('host_power_action', 'call', action='action',
host='host')
def test_inject_file(self):
self._test_compute_api('inject_file', 'cast',
instance=self.fake_instance, path='path', file_contents='fc')
def test_inject_network_info(self):
self._test_compute_api('inject_network_info', 'cast',
instance=self.fake_instance)
def test_live_migration(self):
self._test_compute_api('live_migration', 'cast',
instance=self.fake_instance, dest='dest',
block_migration='blockity_block', host='tsoh',
migrate_data={})
def test_post_live_migration_at_destination(self):
self._test_compute_api('post_live_migration_at_destination', 'call',
instance=self.fake_instance, block_migration='block_migration',
host='host')
def test_pause_instance(self):
self._test_compute_api('pause_instance', 'cast',
instance=self.fake_instance)
def test_power_off_instance(self):
self._test_compute_api('power_off_instance', 'cast',
instance=self.fake_instance)
def test_power_on_instance(self):
self._test_compute_api('power_on_instance', 'cast',
instance=self.fake_instance)
def test_soft_delete_instance(self):
self._test_compute_api('soft_delete_instance', 'cast',
instance=self.fake_instance,
reservations=['uuid1', 'uuid2'],
version='2.27')
def test_restore_instance(self):
self._test_compute_api('restore_instance', 'cast',
instance=self.fake_instance)
def test_pre_live_migration(self):
self._test_compute_api('pre_live_migration', 'call',
instance=self.fake_instance, block_migration='block_migration',
disk='disk', host='host', migrate_data=None,
version='2.21')
def test_prep_resize(self):
self._test_compute_api('prep_resize', 'cast',
instance=self.fake_instance, instance_type='fake_type',
image='fake_image', host='host',
reservations=list('fake_res'),
request_spec='fake_spec',
filter_properties={'fakeprop': 'fakeval'},
node='node',
version='2.20')
def test_reboot_instance(self):
self.maxDiff = None
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance,
block_device_info={},
reboot_type='type',
version='2.23')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast',
instance=self.fake_instance, new_pass='pass',
injected_files='files', image_ref='ref',
orig_image_ref='orig_ref', bdms=[], recreate=False,
on_shared_storage=False, orig_sys_metadata='orig_sys_metadata',
version='2.22')
def test_rebuild_instance_with_shared(self):
self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
injected_files='None', image_ref='None', orig_image_ref='None',
bdms=[], instance=self.fake_instance, host='new_host',
orig_sys_metadata=None, recreate=True, on_shared_storage=True,
version='2.22')
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
instance=self.fake_instance, device='device', volume_id='id',
version='2.3')
    def test_refresh_provider_fw_rules(self):
self._test_compute_api('refresh_provider_fw_rules', 'cast',
host='host')
def test_refresh_security_group_rules(self):
self._test_compute_api('refresh_security_group_rules', 'cast',
rpcapi_class=compute_rpcapi.SecurityGroupAPI,
security_group_id='id', host='host')
def test_refresh_security_group_members(self):
self._test_compute_api('refresh_security_group_members', 'cast',
rpcapi_class=compute_rpcapi.SecurityGroupAPI,
security_group_id='id', host='host')
def test_remove_aggregate_host(self):
self._test_compute_api('remove_aggregate_host', 'cast',
aggregate={'id': 'fake_id'}, host_param='host', host='host',
slave_info={}, version='2.15')
def test_remove_fixed_ip_from_instance(self):
self._test_compute_api('remove_fixed_ip_from_instance', 'cast',
instance=self.fake_instance, address='addr')
def test_remove_volume_connection(self):
self._test_compute_api('remove_volume_connection', 'call',
instance=self.fake_instance, volume_id='id', host='host')
def test_rescue_instance(self):
self._test_compute_api('rescue_instance', 'cast',
instance=self.fake_instance, rescue_password='pw')
def test_reset_network(self):
self._test_compute_api('reset_network', 'cast',
instance=self.fake_instance)
def test_resize_instance(self):
self._test_compute_api('resize_instance', 'cast',
instance=self.fake_instance, migration={'id': 'fake_id'},
image='image', instance_type={'id': 1},
reservations=list('fake_res'), version='2.16')
def test_resume_instance(self):
self._test_compute_api('resume_instance', 'cast',
instance=self.fake_instance)
def test_revert_resize(self):
self._test_compute_api('revert_resize', 'cast',
instance=self.fake_instance, migration={'id': 'fake_id'},
host='host', reservations=list('fake_res'), version='2.12')
def test_rollback_live_migration_at_destination(self):
self._test_compute_api('rollback_live_migration_at_destination',
'cast', instance=self.fake_instance, host='host')
def test_run_instance(self):
self._test_compute_api('run_instance', 'cast',
instance=self.fake_instance, host='fake_host',
request_spec='fake_spec', filter_properties={},
requested_networks='networks', injected_files='files',
admin_password='pw', is_first_time=True, node='node',
version='2.19')
def test_set_admin_password(self):
self._test_compute_api('set_admin_password', 'call',
instance=self.fake_instance, new_pass='pw')
def test_set_host_enabled(self):
self._test_compute_api('set_host_enabled', 'call',
enabled='enabled', host='host')
def test_get_host_uptime(self):
self._test_compute_api('get_host_uptime', 'call', host='host')
def test_snapshot_instance(self):
self._test_compute_api('snapshot_instance', 'cast',
instance=self.fake_instance, image_id='id', image_type='type',
backup_type='type', rotation='rotation')
def test_live_snapshot_instance(self):
self._test_compute_api('live_snapshot_instance', 'cast',
instance=self.fake_instance, image_id='id', version='2.30')
def test_start_instance(self):
self._test_compute_api('start_instance', 'cast',
instance=self.fake_instance, version='2.29')
def test_stop_instance_cast(self):
self._test_compute_api('stop_instance', 'cast',
instance=self.fake_instance, version='2.29')
def test_stop_instance_call(self):
self._test_compute_api('stop_instance', 'call',
instance=self.fake_instance, version='2.29')
def test_suspend_instance(self):
self._test_compute_api('suspend_instance', 'cast',
instance=self.fake_instance)
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
instance=self.fake_instance, bdms=[],
reservations=['uuid1', 'uuid2'],
version='2.27')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
instance=self.fake_instance)
def test_unrescue_instance(self):
self._test_compute_api('unrescue_instance', 'cast',
instance=self.fake_instance)
| shootstar/novatest | nova/tests/compute/test_rpcapi.py | Python | apache-2.0 | 15,851 |
# swift_build_support/products/swiftdriver.py -------------------*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
from . import cmark
from . import foundation
from . import indexstoredb
from . import libcxx
from . import libdispatch
from . import libicu
from . import llbuild
from . import llvm
from . import product
from . import swift
from . import xctest
class SwiftDriver(product.Product):
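    """build-script product that builds, tests and installs swift-driver
    through the shared build-script-helper flow (the same helper wrapper
    used by the indexstoredb product imported above).
    """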
@classmethod
def product_source_name(cls):
return "swift-driver"
@classmethod
def is_build_script_impl_product(cls):
return False
def should_build(self, host_target):
return self.args.build_swift_driver
@classmethod
def get_dependencies(cls):
return [cmark.CMark,
llvm.LLVM,
libcxx.LibCXX,
libicu.LibICU,
swift.Swift,
libdispatch.LibDispatch,
foundation.Foundation,
xctest.XCTest,
llbuild.LLBuild]
def should_clean(self, host_target):
return self.args.clean_swift_driver
def clean(self, host_target):
indexstoredb.run_build_script_helper(
'clean', host_target, self, self.args)
def build(self, host_target):
indexstoredb.run_build_script_helper(
'build', host_target, self, self.args)
def should_test(self, host_target):
return self.args.test_swift_driver
def test(self, host_target):
indexstoredb.run_build_script_helper(
'test', host_target, self, self.args,
self.args.test_sourcekitlsp_sanitize_all)
def should_install(self, host_target):
return self.args.install_swift_driver
def install(self, host_target):
indexstoredb.run_build_script_helper(
'install', host_target, self, self.args)
| CodaFi/swift | utils/swift_build_support/swift_build_support/products/swiftdriver.py | Python | apache-2.0 | 2,222 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 21 14:37:53 2017
@author: christopherbridge
"""
import pickle
def materials_default():
"""
Return a dictionary containing default materials properties
              Name: yield_stress, Pa
ultimate_stress, Pa
possions_ratio, -
youngs_modulus, Pa
density, kg/m^3
thermal_expansion, -
notes
"""
return {
'default' :
{'yield_stress': 358.5e6,
'ultimate_stress': 394.3e6,
'possions_ratio': 0.3,
'youngs_modulus': 205.0e9,
'density': 7850.0,
'thermal_expansion': 1.2e-6,
'notes': 'Default is Steel x52'
},
'steel' :
{'yield_stress': 358.5e6,
'ultimate_stress': 394.3e6,
'possions_ratio': 0.3,
'youngs_modulus': 205.0e9,
'density': 7850.0,
'thermal_expansion': 1.20e-6
},
'steel x52' :
{'yield_stress': 358.5e6,
'ultimate_stress': 394.3e6,
'possions_ratio': 0.3,
'youngs_modulus': 205.0e9,
'density': 7850.0,
'thermal_expansion': 1.20e-6
},
'stainless steel' :
{'yield_stress': 290.0e6,
'ultimate_stress': 495.0e6,
'possions_ratio': 0.3,
'youngs_modulus': 193e9,
'density': 8030.0,
'thermal_expansion': 1.2e-6
},
}
def youngs_modulus_reference():
youngs_data = {}
youngs_data['rubber'] = 0.05e9
youngs_data['low-density polyethylene'] = 0.5e9
youngs_data['diatom frustules'] = 2e9
youngs_data['ptfe'] = 0.5e9
youngs_data['hdpe'] = 0.8e9
youngs_data['bacteriophage capsids'] = 2e9
youngs_data['polypropylene'] = 1.75e9
youngs_data['polyethylene terephthalate'] = 2.2e9
youngs_data['nylon'] = 3e9
youngs_data['polystyrene, solid'] = 3.25e9
youngs_data['polystyrene, foam'] = 0.005e9
youngs_data['medium-density fiberboard'] = 4e9
youngs_data['wood'] = 11e9
youngs_data['human cortical bone'] = 14e9
youngs_data['glass-reinforced polyester matrix'] = 17.2e9
youngs_data['aromatic peptide nanotubes'] = 24e9
youngs_data['high-strength concrete'] = 30e9
youngs_data['carbon fiber reinforced plastic'] = 40e9
youngs_data['hemp fiber'] = 35e9
youngs_data['magnesium metal'] = 45e9
youngs_data['glass'] = 70e9
youngs_data['flax fiber'] = 58e9
youngs_data['aluminum'] = 69e9
youngs_data['mother-of-pearl'] = 70e9
youngs_data['aramid'] = 100e9
youngs_data['tooth enamel'] = 83e9
youngs_data['stinging nettle fiber'] = 87e9
youngs_data['bronze'] = 110e9
youngs_data['brass'] = 110e9
youngs_data['titanium'] = 110.3e9
youngs_data['titanium alloys'] = 112e9
youngs_data['copper'] = 117e9
youngs_data['carbon fiber reinforced plastic'] = 181e9
youngs_data['silicon single crystal'] = 160e9
youngs_data['wrought iron'] = 200e9
youngs_data['steel'] = 209e9
youngs_data['polycrystalline yttrium iron garnet'] = 193e9
youngs_data['single-crystal yttrium iron garnet'] = 200e9
youngs_data['cobalt-chrome'] = 240e9
youngs_data['aromatic peptide nanospheres'] = 250e9
youngs_data['beryllium'] = 287e9
youngs_data['molybdenum'] = 330e9
youngs_data['tungsten'] = 405e9
youngs_data['silicon carbide'] = 450e9
youngs_data['tungsten carbide'] = 550e9
youngs_data['osmium'] = 550e9
youngs_data['single-walled carbon nanotube'] = 1000e9
youngs_data['graphene'] = 1050e9
youngs_data['diamond'] = 1100e9
youngs_data['carbyne'] = 32100e9
youngs_data['pvc'] = 2.9e9
return youngs_data
#
#
class Materials(object):
"""
Material object
"""
def __init__(self, material_name='default'):
#
# Set up the default variables
self._materials_data = materials_default()
self._name = material_name
self._yield = 0.0
self._ultimate = 0.0
self._possions = 0.0
self._youngs = 0.0
self._density = 0.0
self._thermal_expansion = 0.0
#
# Read from the materials data
self.get_material(material_name)
#
        self._database_filename = 'Materials.txt'
def name(self, material_name=None):
"""Return the material name"""
if material_name != None:
self._name = material_name
else:
return self._name
def yield_stress(self, stress=None):
"""Return the yield stress"""
if stress != None:
self._yield = stress
else:
return self._yield
def ultimate_stress(self, stress=None):
"""Return the ultimate stress"""
if stress != None:
self._ultimate = stress
else:
return self._ultimate
def possions(self, possions_ratio=None):
"""Return the Possions Ratio"""
if possions_ratio != None:
self._possions = possions_ratio
else:
return self._possions
def youngs(self, youngs_modulus=None):
"""Return the Youngs Modulus"""
if youngs_modulus != None:
self._youngs = youngs_modulus
else:
return self._youngs
def density(self, density=None):
"""Return the density of the material"""
if density != None:
self._density = density
else:
return self._density
def thermalexpansion(self, thermal_expansion=None):
"""Return the thermal expansion of the material"""
if thermal_expansion != None:
self._thermal_expansion = thermal_expansion
else:
return self._thermal_expansion
def addmat(self, name, yield_stress_Pa, ultimate_Pa, youngs_Pa, possions, density_kgm3, thermal):
"""Add a meterial to the database and set it as the current material"""
self.database_add(name, yield_stress_Pa, ultimate_Pa, youngs_Pa, possions, density_kgm3, thermal)
self.get_material(name)
# -------------------------------------------------------------------------
# Database functions
# -------------------------------------------------------------------------
def get_material(self, material_name='default'):
"""Get the meterial from the materials database"""
material_data = self._materials_data.get(material_name)
#
if not material_data:
return False
else:
self._name = material_name
self._yield = material_data.get('yield_stress')
self._ultimate = material_data.get('ultimate_stress')
self._possions = material_data.get('possions_ratio')
self._youngs = material_data.get('youngs_modulus')
self._density = material_data.get('density')
self._thermal_expansion = material_data.get('thermal_expansion')
return True
    def __repr__(self):
        return "Material {}".format(self._name) \
            + " Yield {:5.1f} MPa".format(1e-6 * self._yield)
#, Ultimate {:5.1f} MPa, Youngs Modulus {:5.1f} GPa, Possions Ratio {:5.3f}, Thermal Expansion {:5.2e}".format(self.MaterialName, 0.000001 * self.Data['yield'], 0.000001 * self.Data['ultimate'], 0.000000001 * self.Data['youngs'], self.Data['possions'], self.Data['thermalexpansion']))
def database_add(self, name, yield_stress_Pa, ultimate_Pa, youngs_Pa, possions, density_kgm3, thermal):
"""Add another mateiral to the database"""
self._materials_data[name] = {'yield_stress': yield_stress_Pa,
'ultimate_stress': ultimate_Pa,
'possions_ratio': possions,
'youngs_modulus': youngs_Pa,
'density': density_kgm3,
'thermal_expansion': thermal
}
def database_display(self):
"""Print out the database to the command line"""
print("Material Yield Ultimate Youngs Possions Thermal")
print("Name Stress Stress Modulus Ratio Expansion")
print(" (MPa) (MPa) (GPa) (-) (-)")
        for material_name in sorted(self._materials_data):
            material = self._materials_data[material_name]
            print("{:30} {:5.1f}   {:6.1f}    {:5.1f}  {:5.3f}  {:5.2e}".format(
                material_name,
                1e-6 * material['yield_stress'],
                1e-6 * material['ultimate_stress'],
                1e-9 * material['youngs_modulus'],
                material['possions_ratio'],
                material['thermal_expansion']))
def database_save(self):
"""Save the database using pickle"""
        with open(self._database_filename, 'wb') as save_file:
save_file.write(pickle.dumps(self.__dict__))
def database_load(self):
"""Load the database using pickle from an existing file"""
        with open(self._database_filename, 'rb') as load_file:
dict_data = load_file.read()
self.__dict__ = pickle.loads(dict_data)
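if __name__ == '__main__':
    # Illustrative usage sketch: the alloy name and property values registered
    # below are made-up examples, not reference data.
    mat = Materials('steel x52')
    print(mat.name(), mat.yield_stress(), mat.youngs(), mat.density())
    # Register a hypothetical alloy and make it the current material.
    mat.addmat('example alloy', 450.0e6, 520.0e6, 200.0e9, 0.3, 7800.0, 1.2e-6)
    print(mat.name(), mat.yield_stress())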
| PowerPlum/pipetoolbox | pipetoolbox/materials.py | Python | mit | 9,429 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# === setup.py ------------------------------------------------------------===
# Copyright © 2011-2012, RokuSigma Inc. and contributors. See AUTHORS for more
# details.
#
# Some rights reserved.
#
# Redistribution and use in source and binary forms of the software as well as
# documentation, with or without modification, are permitted provided that the
# following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The names of the copyright holders or contributors may not be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE AND DOCUMENTATION IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE AND
# DOCUMENTATION, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ===----------------------------------------------------------------------===
import os
from distutils.core import setup
VERSION = (0,0,8, 'alpha', 0)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s' % (version, VERSION[3])
if VERSION[4] != 0:
version = '%s%s' % (version, VERSION[4])
return version
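# With the VERSION tuple above, get_version() yields '0.0.8 pre-alpha'; the
# setup() call further down replaces the space with '-' for the download URL.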
# Compile the list of packages available, because distutils doesn't have an
# easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir:
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('haiku'):
# Ignore dirnames that start with '.'
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
if '__init__.py' in filenames:
pkg = dirpath.replace(os.path.sep, '.')
if os.path.altsep:
pkg = pkg.replace(os.path.altsep, '.')
packages.append(pkg)
elif filenames:
prefix = dirpath[6:] # Strip "haiku/" or "haiku\"
for f in filenames:
data_files.append(os.path.join(prefix, f))
version = get_version().replace(' ', '-')
setup(name='haiku-lang',
version=version,
description='An embedable LISP implemented on top of the Python interpreter.',
install_requires=[
'LEPL>=5.1.1',
'bitstring>=3.0.2',
'python-patterns>=0.0.1',
],
author='RokuSigma Inc.',
author_email='[email protected]',
url='http://www.github.com/monetizeio/haiku-lang/',
download_url='http://pypi.python.org/packages/source/h/haiku-lang/haiku-lang-%s.tar.gz' % version,
package_dir={'haiku': 'haiku'},
packages=packages,
package_data={'haiku': data_files},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
'Programming Language :: Lisp',
'Programming Language :: Python',
'Topic :: Software Development :: Interpreters',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
# ===----------------------------------------------------------------------===
# End of File
# ===----------------------------------------------------------------------===
| maaku/haiku-lang | setup.py | Python | bsd-3-clause | 4,196 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import copy
import os
import tempfile
import numpy as np
import six
from google.protobuf import message
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.meta_graph_transform import meta_graph_transform
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import metrics as metrics_lib
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary as core_summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import training_util
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if tensor_util.is_tensor(x) or y is not None and tensor_util.is_tensor(y):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
@deprecated(None, 'Please specify feature columns explicitly.')
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
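# For example, _model_fn_args(lambda features, labels, mode: None) returns
# ('features', 'labels', 'mode').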
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2', 'VarHandleOp'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas,
worker_device=worker_device,
merge_devices=True,
ps_ops=ps_ops,
cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
`metrics` contains a specification for how to run metrics. It is a dict
mapping friendly names to either `MetricSpec` objects, or directly to a metric
function (assuming that `predictions` and `labels` are single tensors), or to
`(pred_name, metric)` `tuple`, which passes `predictions[pred_name]` and
`labels` to `metric` (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
A dict mapping the friendly given in `metrics` to the result of calling the
given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError('Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics,
predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError('Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics,
labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
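# Illustrative call (the metric choice and prediction key are assumptions, not
# part of this module): _make_metrics_ops(
#     {'accuracy': metric_spec.MetricSpec(metric_fn=metrics_lib.accuracy,
#                                         prediction_key='classes')},
#     features, labels, predictions)
# returns {'accuracy': (value_op, update_op)} built from those inputs.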
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
results = []
for k, v in sorted(dictionary.items()):
if isinstance(v, float) or isinstance(v, np.float32) or isinstance(
v, int) or isinstance(v, np.int64) or isinstance(v, np.int32):
results.append('%s = %s' % (k, v))
else:
results.append('Type of %s = %s' % (k, type(v)))
return ', '.join(results)
def _write_dict_to_summary(output_dir, dictionary, current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = core_summary.FileWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
summary_proto.value.add(tag=key, simple_value=float(dictionary[key]))
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
summary_proto.value.add(tag=key, simple_value=int(dictionary[key]))
elif isinstance(dictionary[key], six.string_types):
try:
summ = summary_pb2.Summary.FromString(dictionary[key])
for i, _ in enumerate(summ.value):
summ.value[i].tag = key
summary_proto.value.extend(summ.value)
except message.DecodeError:
logging.warn('Skipping summary for %s, cannot parse string to Summary.',
key)
continue
elif isinstance(dictionary[key], np.ndarray):
value = summary_proto.value.add()
value.tag = key
value.node_name = key
tensor_proto = tensor_util.make_tensor_proto(dictionary[key])
value.tensor.CopyFrom(tensor_proto)
logging.info(
'Summary for np.ndarray is not visible in Tensorboard by default. '
'Consider using a Tensorboard plugin for visualization (see '
'https://github.com/tensorflow/tensorboard-plugin-example/blob/master/README.md'
' for more information).')
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, '
'np.int32 or int or np.ndarray or a serialized string of Summary.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
GraphRewriteSpec = collections.namedtuple('GraphRewriteSpec',
['tags', 'transforms'])
class BaseEstimator(sklearn.BaseEstimator, evaluable.Evaluable,
trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Users should not instantiate or subclass this class. Instead, use an
`Estimator`.
"""
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
@deprecated(None, 'Please replace uses of any Estimator from tf.contrib.learn'
' with an Estimator from tf.estimator.*')
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
model_dir: Directory to save model parameters, graph and etc. This can
also be used to load checkpoints from the directory into a estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
'model_dir are set both in constructor and RunConfig, but with '
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
# pylint: enable=g-doc-exception
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@property
def model_fn(self):
"""Returns the model_fn which is bound to self.params.
Returns:
The model_fn with the following signature:
`def model_fn(features, labels, mode, metrics)`
"""
def public_model_fn(features, labels, mode, config):
return self._call_model_fn(features, labels, mode, config=config)
return public_model_fn
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def fit(self,
x=None,
y=None,
input_fn=None,
steps=None,
batch_size=None,
monitors=None,
max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info('Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def partial_fit(self,
x=None,
y=None,
input_fn=None,
steps=1,
batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time. Or when model is taking long time
to converge, and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(
x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=monitors)
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('y', None), ('batch_size', None))
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
Or if `metrics` is not `None` or `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS,
('x', None), ('batch_size', None), ('as_iterable', True))
def predict(self,
x=None,
input_fn=None,
batch_size=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
iterate_batches: If True, yield the whole batch at once instead of
decomposing the batch into individual samples. Only relevant when
as_iterable is True.
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable,
iterate_batches=iterate_batches)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(
self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
key into the features dict returned by `input_fn` that corresponds to a
the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.', str(labels),
str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval'
if not name else 'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps == 0:
logging.warning('evaluation steps are 0. If `input_fn` does not raise '
'`OutOfRangeError`, the evaluation will never stop. '
'Use steps=None if intended.')
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
training_util.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions)
if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = training_util.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
training_util._get_or_create_global_step_read() # pylint: disable=protected-access
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
keep_checkpoint_every_n_hours=(
self._config.keep_checkpoint_every_n_hours),
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any(
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
)
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
max_wait_secs=self._config.session_creation_timeout_secs,
config=self._session_config) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
return loss
def latest_checkpoint(self):
"""Finds the filename of the latest saved checkpoint file in `model_dir`.
Returns:
The full path to the latest checkpoint or `None` if no checkpoint was
found.
"""
return checkpoint_management.latest_checkpoint(self.model_dir)
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
        * `mode`: Optional. Specifies if this is training, evaluation or
          prediction. See `ModeKeys`.
        * `params`: Optional `dict` of hyperparameters. Will receive what
          is passed to Estimator in `params` parameter. This allows
          configuring Estimators from hyperparameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
        * `model_dir`: Optional directory where model parameters, graph, etc.
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
      Also supports a legacy signature which returns a tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
      Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' % (model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
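  # A minimal sketch of the legacy signature described above; `build_predictions`,
  # `build_loss` and `build_train_op` are hypothetical helpers supplied by the
  # caller, and the hyperparameter name is likewise only an example:
  #
  #   def my_model_fn(features, labels, mode, params):
  #     predictions = build_predictions(features, params)
  #     loss = build_loss(predictions, labels)
  #     train_op = build_train_op(loss, params)
  #     return predictions, loss, train_op
  #
  #   estimator = Estimator(model_fn=my_model_fn, params={'learning_rate': 0.1})
  #
  # A runnable end-to-end sketch using `SKCompat` appears at the bottom of this
  # module.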
def _call_model_fn(self, features, labels, mode, metrics=None, config=None):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
metrics: Dict of metrics.
config: RunConfig.
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
if config:
kwargs['config'] = config
else:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
model_fn_ops = model_fn_results
else:
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
model_fn_ops = model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(
_make_metrics_ops(metrics, features, labels,
model_fn_ops.predictions))
return model_fn_ops
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(features, labels,
model_fn_lib.ModeKeys.EVAL, metrics)
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.mean(model_fn_ops.loss))
return model_fn_ops
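  # A minimal sketch of the `metrics` dict described above, assuming `MetricSpec`
  # from `../metric_spec.py` and the streaming metrics in `tf.contrib.metrics`;
  # the 'classes' prediction key is a hypothetical model output name:
  #
  #   metrics = {
  #       'accuracy': MetricSpec(
  #           metric_fn=tf.contrib.metrics.streaming_accuracy,
  #           prediction_key='classes'),
  #   }
  #
  # Each metric op yields a (value_op, update_op) pair; `_extract_metric_update_ops`
  # groups the update ops and keeps the value ops for the final evaluation results.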
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(self,
export_dir_base,
serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None,
graph_rewrite_specs=(GraphRewriteSpec(
(tag_constants.SERVING,), ()),),
strip_default_attrs=False):
# pylint: disable=line-too-long
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
graph_rewrite_specs: an iterable of `GraphRewriteSpec`. Each element will
produce a separate MetaGraphDef within the exported SavedModel, tagged
and rewritten as specified. Defaults to a single entry using the
default serving tag ("serve") and no rewriting.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs. For a detailed guide, see
[Stripping Default-Valued
Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
Returns:
The string path to the exported directory.
Raises:
ValueError: if an unrecognized export_type is requested.
"""
# pylint: enable=line-too-long
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = checkpoint_management.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError(
"Couldn't find trained model at %s." % self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
# We'll write the SavedModel to a temporary directory and then atomically
# rename it at the end. This helps to avoid corrupt / incomplete outputs,
# which could otherwise occur if the job is preempted or otherwise fails
# in the middle of SavedModel creation.
temp_export_dir = saved_model_export_utils.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Build the base graph
with ops.Graph().as_default() as g:
training_util.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
init_op = control_flow_ops.group(variables.local_variables_initializer(),
resources.initialize_resources(
resources.shared_resources()),
lookup_ops.tables_initializer())
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
# Export the first MetaGraphDef with variables, assets etc.
with tf_session.Session('') as session:
# pylint: disable=protected-access
saveables = variables._all_saveable_objects()
# pylint: enable=protected-access
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
elif saveables:
saver_for_restore = saver.Saver(saveables, sharded=True)
saver_for_restore.restore(session, checkpoint_path)
# Perform the export
if not graph_rewrite_specs or graph_rewrite_specs[0].transforms:
raise ValueError('The first element of graph_rewrite_specs '
'must specify no transforms.')
untransformed_tags = graph_rewrite_specs[0].tags
builder.add_meta_graph_and_variables(
session,
untransformed_tags,
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS),
main_op=init_op,
strip_default_attrs=strip_default_attrs)
# pylint: disable=protected-access
base_meta_graph_def = builder._saved_model.meta_graphs[0]
# pylint: enable=protected-access
if graph_rewrite_specs[1:]:
# Prepare the input_names and output_names needed for the
# meta_graph_transform call below.
input_names = [
tensor.name
for input_dict in input_alternatives.values()
for tensor in input_dict.values()
]
output_names = [
tensor.name
for output_alternative in output_alternatives.values()
for tensor in output_alternative[1].values()
]
# Write the additional MetaGraphDefs
for graph_rewrite_spec in graph_rewrite_specs[1:]:
# TODO(soergel) consider moving most of this to saved_model.builder_impl
# as e.g. builder.add_rewritten_meta_graph(rewritten_graph_def, tags)
transformed_meta_graph_def = meta_graph_transform.meta_graph_transform(
base_meta_graph_def, input_names, output_names,
graph_rewrite_spec.transforms, graph_rewrite_spec.tags)
# pylint: disable=protected-access
meta_graph_def = builder._saved_model.meta_graphs.add()
# pylint: enable=protected-access
meta_graph_def.CopyFrom(transformed_meta_graph_def)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(
compat.as_bytes(temp_export_dir), compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(
compat.as_bytes(assets_extra_path), compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
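# A hedged sketch of the export path documented in `export_savedmodel` above;
# `my_serving_input_fn` is a hypothetical function returning an `InputFnOps`
# (for example, one built with the helpers in `learn/utils/input_fn_utils.py`):
#
#   export_dir = estimator.export_savedmodel(
#       export_dir_base='/tmp/my_model_export',
#       serving_input_fn=my_serving_input_fn,
#       assets_extra={'my_asset_file.txt': '/path/to/my_asset_file.txt'})
#
# The returned path is a timestamped subdirectory of `export_dir_base`; the
# SavedModel is written to a temporary directory first and then atomically
# renamed, as implemented above.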
# For time of deprecation x,y from Estimator allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Please switch to the Estimator interface.')
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(
input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(
x,
y,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x,
None,
input_fn=None,
feed_fn=None,
batch_size=batch_size,
shuffle=False,
epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate([output[key] for output in results], axis=0)
for key in results[0]
}
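# A hypothetical, never-called helper added as a sketch: it shows the legacy
# (features, labels) -> (predictions, loss, train_op) `model_fn` signature
# documented in `Estimator.__init__` together with the numpy-in / numpy-out
# `SKCompat` wrapper. It assumes a TF 1.x runtime where `tf.layers`,
# `tf.losses` and `tf.train` are available; the model and data are toy examples.
def _example_skcompat_usage():
  import tensorflow as tf  # Local import; only needed if this sketch is run.
  def linear_model_fn(features, labels):
    # With plain numpy inputs, `features` and `labels` arrive as `Tensor`s.
    predictions = tf.layers.dense(tf.to_float(features), 1)
    loss = tf.losses.mean_squared_error(tf.to_float(labels), predictions)
    train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
        loss, global_step=tf.train.get_global_step())
    return predictions, loss, train_op
  regressor = SKCompat(Estimator(model_fn=linear_model_fn))
  x = np.random.rand(16, 3).astype(np.float32)
  y = np.random.rand(16, 1).astype(np.float32)
  regressor.fit(x, y, batch_size=8, steps=5)
  scores = regressor.score(x, y, batch_size=8, steps=1)
  predictions = regressor.predict(x, batch_size=8)
  return scores, predictions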
| chemelnucfin/tensorflow | tensorflow/contrib/learn/python/learn/estimators/estimator.py | Python | apache-2.0 | 63,282 |
# Sample script for use by test_gdb.py
def foo(a, b, c):
bar(a, b, c)
def bar(a, b, c):
baz(a, b, c)
def baz(*args):
id(42)
foo(1, 2, 3)
| firmlyjin/brython | www/tests/unittests/test/gdb_sample.py | Python | bsd-3-clause | 153 |
import os
import numpy as np
class ImageClass():
"Stores the paths to images for a given class"
def __init__(self, name, image_paths):
self.name = name
self.image_paths = image_paths
def __str__(self):
return self.name + ', ' + str(len(self.image_paths)) + ' images'
def __len__(self):
return len(self.image_paths)
def get_dataset(paths):
dataset = []
for path in paths.split(':'):
path_exp = os.path.expanduser(path)
classes = os.listdir(path_exp)
classes.sort()
nrof_classes = len(classes)
for i in range(nrof_classes):
class_name = classes[i]
facedir = os.path.join(path_exp, class_name)
if os.path.isdir(facedir):
images = os.listdir(facedir)
image_paths = [os.path.join(facedir,img) for img in images]
dataset.append(ImageClass(class_name, image_paths))
return dataset
def split_dataset(dataset, split_ratio, mode):
if mode=='SPLIT_CLASSES':
nrof_classes = len(dataset)
class_indices = np.arange(nrof_classes)
np.random.shuffle(class_indices)
split = int(round(nrof_classes*split_ratio))
train_set = [dataset[i] for i in class_indices[0:split]]
test_set = [dataset[i] for i in class_indices[split:-1]]
elif mode=='SPLIT_IMAGES':
train_set = []
test_set = []
min_nrof_images = 2
for cls in dataset:
paths = cls.image_paths
np.random.shuffle(paths)
split = int(round(len(paths)*split_ratio))
if split<min_nrof_images:
continue # Not enough images for test set. Skip class...
train_set.append(ImageClass(cls.name, paths[0:split]))
test_set.append(ImageClass(cls.name, paths[split:-1]))
else:
raise ValueError('Invalid train/test split mode "%s"' % mode)
return train_set, test_set
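# A hedged usage sketch (never called); it assumes an image directory laid out
# as <root>/<class_name>/<image files>, and the path below is a hypothetical
# example:
def _example_usage():
    dataset = get_dataset('~/datasets/faces')
    for image_class in dataset:
        print(image_class)  # e.g. "some_person, 12 images"
    train_set, test_set = split_dataset(dataset, 0.8, 'SPLIT_IMAGES')
    return train_set, test_set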
| liuzz1983/open_vision | openvision/datasets/utils.py | Python | mit | 1,945 |