prompt (large_string, lengths 72–9.34k) | completion (large_string, lengths 0–7.61k) |
---|---|
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def <|fim_middle|>():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def <|fim_middle|>():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def <|fim_middle|>():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def <|fim_middle|>():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def <|fim_middle|>():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def <|fim_middle|>():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def <|fim_middle|>():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def <|fim_middle|>():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def <|fim_middle|>():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | bmarks |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def <|fim_middle|>(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | send_css |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def <|fim_middle|>(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | send_js |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def <|fim_middle|>(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | send_img |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def <|fim_middle|>(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | send_font |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def <|fim_middle|>(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def handle500(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | handle404 |
<|file_name|>tastiapp.py<|end_file_name|><|fim▁begin|>from bottle import route, template, error, request, static_file, get, post
from index import get_index
from bmarks import get_bmarks
from tags import get_tags
from add import add_tags
from bmarklet import get_bmarklet
from account import get_account
from edit_tags import get_edit_tags
from importbm import get_import_bm
from edit import do_edit
from login import do_login
from register import do_register
@route('/')
def myroot():
return_data = get_index()
return return_data
@route('/account', method=['GET', 'POST'])
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/add', method=['GET', 'POST'])
def bmarks():
return_data = add_tags()
return return_data
@route('/bmarklet')
def bmarks():
return_data = get_bmarklet()
return return_data
@route('/bmarks')
def bmarks():
return_data = get_bmarks()
return return_data
@route('/edit', method=['GET', 'POST'])
def bmarks():
return_data = do_edit()
return return_data
@route('/edit_tags', method=['GET', 'POST'])
def bmarks():
return_data = get_edit_tags()
return return_data
@route('/import', method=['GET', 'POST'])
def bmarks():
return_data = get_import_bm()
return return_data
@route('/login', method=['GET', 'POST'])
def bmarks():
return_data = do_login()
return return_data
@route('/register', method=['GET', 'POST'])
def bmarks():
return_data = do_register()
return return_data
@route('/tags')
def bmarks():
return_data = get_tags()
return return_data
# serve css
@get('/<filename:re:.*\.css>')
def send_css(filename):
return static_file(filename, root='css')
# serve javascript
@get('/<filename:re:.*\.js>')
def send_js(filename):
return static_file(filename, root='js')
# serve images
@get('<filename:re:.*\.png>')
def send_img(filename):
return static_file(filename, root='images')
# serve fonts
@get('<filename:re:.*\.(woff|woff2)>')
def send_font(filename):
return static_file(filename, root='fonts')
@error(404)
def handle404(error):
return '<H1>Ooops, its not here<BR>'
@error(500)
def <|fim_middle|>(error):
return '<H1>Oops, its broken: {}<BR>'.format(error)
<|fim▁end|> | handle500 |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]<|fim▁hole|>
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()<|fim▁end|> | curr_states[i,j,:,:] = self.experiences[ i + j ][1] |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
<|fim_middle|>
<|fim▁end|> | def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque() |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
<|fim_middle|>
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | player.__init__(self)
self.experiences = deque() |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
<|fim_middle|>
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | return self.calculate(state) |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
<|fim_middle|>
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action) |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
<|fim_middle|>
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize() |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
<|fim_middle|>
<|fim▁end|> | self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque() |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
<|fim_middle|>
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | return self.create_random_action() |
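# A small numpy illustration of the sliding-window frame stacking that
# calculate() and train() above perform: N stored experiences yield
# N - NUM_FRAMES + 1 overlapping windows of NUM_FRAMES consecutive
# observations each.
import numpy as np

NUM_FRAMES = 4
frames = [np.full((2, 2), t) for t in range(6)]      # six dummy 2x2 observations
batchsize = len(frames) - NUM_FRAMES + 1             # -> 3 windows
windows = np.zeros((batchsize, NUM_FRAMES, 2, 2))
for i in range(batchsize):
    for j in range(NUM_FRAMES):
        windows[i, j] = frames[i + j]
print(windows[:, :, 0, 0])   # [[0. 1. 2. 3.] [1. 2. 3. 4.] [2. 3. 4. 5.]]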
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
<|fim_middle|>
<|fim▁end|> | batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque() |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
<|fim_middle|>
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | running_add = 0 |
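# Standalone sketch of the discounted-return loop whose first line is the
# completion of the sample above. Resetting running_add whenever a reward is
# non-zero treats each scored event as a sub-episode boundary (the samples
# mark this as pygame_catch specific).
import numpy as np

def discount(rewards, gamma):
    running_add = 0.0
    out = np.zeros_like(rewards, dtype=float)
    for t in reversed(range(len(rewards))):
        if rewards[t] != 0:
            running_add = 0.0                  # sub-episode boundary
        running_add = running_add * gamma + rewards[t]
        out[t] = running_add
    return out

print(discount(np.array([0.0, 0.0, 1.0, 0.0, -1.0]), 0.9))
# -> [ 0.81  0.9   1.   -0.9  -1.  ]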
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def <|fim_middle|>(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | __init__ |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def <|fim_middle|>(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | act |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def <|fim_middle|>(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | calculate |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def <|fim_middle|>(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def train(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | operations |
<|file_name|>player_reinforce_rnn_2.py<|end_file_name|><|fim▁begin|>from players.player import player
from auxiliar.aux_plot import *
import random
from collections import deque
import sys
sys.path.append('..')
import tensorblock as tb
import numpy as np
import tensorflow as tf
# PLAYER REINFORCE RNN
class player_reinforce_rnn_2(player):
# __INIT__
def __init__(self):
player.__init__(self)
self.experiences = deque()
# CHOOSE NEXT ACTION
def act(self, state):
return self.calculate(state)
# CALCULATE NETWORK
def calculate(self, state):
size = len( self.experiences )
if size < self.NUM_FRAMES:
return self.create_random_action()
states = np.zeros( (self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
for i , j in enumerate( range( size - self.NUM_FRAMES , size ) ):
states[i] = self.experiences[j][1]
states = np.expand_dims( states, 0 )
output = np.squeeze( self.brain.run('Output', [['Observation', states]]) )
action = np.random.choice( np.arange(len(output)), p=output )
return self.create_action(action)
# PREPARE NETWORK
def operations(self):
# Action Placeholders
self.brain.addInput( shape = [ None , self.num_actions ] , name = 'Actions' )
self.brain.addInput( shape = [ None ] , name = 'Target' )
# Operations
self.brain.addOperation( function = tb.ops.pgcost,
input = [ 'Output', 'Actions', 'Target' ],
name = 'Cost' )
# Optimizer
self.brain.addOperation( function = tb.optims.adam,
input = 'Cost',
learning_rate = self.LEARNING_RATE,
name = 'Optimizer' )
# TensorBoard
self.brain.addSummaryScalar( input = 'Cost' )
self.brain.addSummaryHistogram( input = 'Target' )
self.brain.addWriter( name = 'Writer' , dir = './' )
self.brain.addSummary( name = 'Summary' )
self.brain.initialize()
# TRAIN NETWORK
def <|fim_middle|>(self, prev_state, curr_state, actn, rewd, done, episode):
# Store New Experience Until Done
self.experiences.append((prev_state, curr_state, actn, rewd, done))
batchsize = len( self.experiences ) - self.NUM_FRAMES + 1
# Check for Train
if done:
# Select Batch
batch = self.experiences
# Separate Batch Data
prev_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
curr_states = np.zeros( ( batchsize , self.NUM_FRAMES , self.obsv_shape[0], self.obsv_shape[1] ) )
actions = np.zeros( ( batchsize , self.num_actions ) )
rewards = np.zeros( ( batchsize ) )
dones = np.zeros( ( batchsize ) )
# Select Batches
for i in range( 0 , batchsize ):
for j in range( 0 , self.NUM_FRAMES ):
prev_states[i,j,:,:] = self.experiences[ i + j ][0]
curr_states[i,j,:,:] = self.experiences[ i + j ][1]
actions[i] = self.experiences[ i + self.NUM_FRAMES - 1][2]
rewards[i] = self.experiences[ i + self.NUM_FRAMES - 1][3]
dones[i] = self.experiences[ i + self.NUM_FRAMES - 1][4]
# Calculate Discounted Reward
running_add = 0
discounted_r = np.zeros_like(rewards)
for t in reversed(range(0, len(rewards))):
if rewards[t] != 0: # pygame_catch specific
running_add = 0
running_add = running_add * self.REWARD_DISCOUNT + rewards[t]
discounted_r[t] = running_add
# Optimize Neural Network
_, summary = self.brain.run( ['Optimizer','Summary'], [ ['Observation', prev_states ],
['Actions', actions ],
['Target', discounted_r ] ] )
# TensorBoard
self.brain.write( summary = summary, iter = episode )
# Reset Batch
self.experiences = deque()
<|fim▁end|> | train |
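# Hypothetical driver loop showing how this player would be exercised; the
# env object and its reset/step API are illustrative assumptions, not part
# of the samples above.
#
#   player = player_reinforce_rnn_2()
#   for episode in range(num_episodes):
#       prev_state = env.reset()
#       done = False
#       while not done:
#           action = player.act(prev_state)
#           curr_state, reward, done = env.step(action)
#           player.train(prev_state, curr_state, action, reward, done, episode)
#           prev_state = curr_state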
<|file_name|>net.py<|end_file_name|><|fim▁begin|># Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""
<|fim▁hole|> """Get primary IP (the one with a default route) of local machine.
This works on both Linux and Windows platforms, and doesn't require working
internet connection.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except Exception:
return '127.0.0.1'
finally:
s.close()<|fim▁end|> | import socket
def get_ip(): |
<|file_name|>net.py<|end_file_name|><|fim▁begin|># Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""
import socket
def get_ip():
<|fim_middle|>
<|fim▁end|> | """Get primary IP (the one with a default route) of local machine.
This works on both Linux and Windows platforms, and doesn't require working
internet connection.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except Exception:
return '127.0.0.1'
finally:
s.close() |
<|file_name|>net.py<|end_file_name|><|fim▁begin|># Copyright 2018 Flight Lab authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for network related helpers."""
import socket
def <|fim_middle|>():
"""Get primary IP (the one with a default route) of local machine.
This works on both Linux and Windows platforms, and doesn't require working
internet connection.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
s.connect(('10.255.255.255', 1))
return s.getsockname()[0]
except Exception:
return '127.0.0.1'
finally:
s.close()
<|fim▁end|> | get_ip |
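# Usage sketch, assuming the module above is saved as net.py. Connecting a
# UDP socket sends no packets: it only asks the OS to choose the outgoing
# interface for that destination, which is why no working internet
# connection is required.
from net import get_ip
print('primary ip:', get_ip())   # e.g. '192.168.1.23'; '127.0.0.1' if lookup fails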
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.<|fim▁hole|>#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)<|fim▁end|> | # You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0 |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
<|fim_middle|>
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
<|fim▁end|> | project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid) |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
<|fim_middle|>
<|fim▁end|> | clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np) |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
<|fim_middle|>
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
<|fim▁end|> | report.add_skipped(None, 'no clusters found') |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
<|fim_middle|>
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
<|fim▁end|> | report.add_skipped(c, 'logging disabled') |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
<|fim_middle|>
<|fim▁end|> | for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np) |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
<|fim_middle|>
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
<|fim▁end|> | report.add_failed(np, f'service account disabled or deleted: {sa}') |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
<|fim_middle|>
else:
report.add_ok(np)
<|fim▁end|> | report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}') |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
<|fim_middle|>
<|fim▁end|> | report.add_ok(np) |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def <|fim_middle|>(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def run_rule(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
<|fim▁end|> | prefetch_rule |
<|file_name|>err_2021_001_logging_perm.py<|end_file_name|><|fim▁begin|># Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GKE nodes service account permissions for logging.
The service account used by GKE nodes should have the logging.logWriter
role, otherwise ingestion of logs won't work.
"""
from gcpdiag import lint, models
from gcpdiag.queries import gke, iam
ROLE = 'roles/logging.logWriter'
def prefetch_rule(context: models.Context):
# Make sure that we have the IAM policy in cache.
project_ids = {c.project_id for c in gke.get_clusters(context).values()}
for pid in project_ids:
iam.get_project_policy(pid)
def <|fim_middle|>(context: models.Context, report: lint.LintReportRuleInterface):
# Find all clusters with logging enabled.
clusters = gke.get_clusters(context)
iam_policy = iam.get_project_policy(context.project_id)
if not clusters:
report.add_skipped(None, 'no clusters found')
for _, c in sorted(clusters.items()):
if not c.has_logging_enabled():
report.add_skipped(c, 'logging disabled')
else:
# Verify service-account permissions for every nodepool.
for np in c.nodepools:
sa = np.service_account
if not iam.is_service_account_enabled(sa, context.project_id):
report.add_failed(np, f'service account disabled or deleted: {sa}')
elif not iam_policy.has_role_permissions(f'serviceAccount:{sa}', ROLE):
report.add_failed(np, f'service account: {sa}\nmissing role: {ROLE}')
else:
report.add_ok(np)
<|fim▁end|> | run_rule |
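# Illustrative sketch of the membership check behind has_role_permissions(),
# written against a plain IAM policy dict. gcpdiag's real implementation
# (which also expands roles to permissions) is not shown in these samples,
# so this is an approximation for illustration only.
def member_has_role(policy, member, role):
    for binding in policy.get('bindings', []):
        if binding.get('role') == role and member in binding.get('members', []):
            return True
    return False

policy = {'bindings': [{
    'role': 'roles/logging.logWriter',
    'members': ['serviceAccount:gke-nodes@example-project.iam.gserviceaccount.com'],
}]}
print(member_has_role(policy,
                      'serviceAccount:gke-nodes@example-project.iam.gserviceaccount.com',
                      'roles/logging.logWriter'))   # True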
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def test_realize_output_out_of_order(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
<|fim▁hole|>
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()<|fim▁end|> | expected = "word2 word1 and word3 " |
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
<|fim_middle|>
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | """
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def test_realize_output_out_of_order(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result) |
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
<|fim_middle|>
def test_realize_output_out_of_order(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | """
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result) |
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def test_realize_output_out_of_order(self):
<|fim_middle|>
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | """
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result) |
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def test_realize_output_out_of_order(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result)
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | unittest.main() |
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def <|fim_middle|>(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def test_realize_output_out_of_order(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_realize_output_in_order |
<|file_name|>test_realizer_arbitrary_reordering.py<|end_file_name|><|fim▁begin|>import unittest
import tagging
class TestRealizerArbitraryReordering(unittest.TestCase):
"""
Tests for the realizer with arbitrary reordering
enabled.
"""
def test_realize_output_in_order(self):
"""
Test for when source tokens occur
in the same relative order in the
target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|0', 'KEEP|1', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word1 word2 and word3 "
self.assertEqual(expected, result)
def <|fim_middle|>(self):
"""
Test for when the source tokens
do not occur in the same relative order
in the target string
"""
editing_task = tagging.EditingTask(["word1 word2 <::::> word3 "])
tags_str = ['KEEP|1', 'KEEP|0', 'KEEP|and', 'DELETE', 'KEEP|3']
tags = [tagging.Tag(tag) for tag in tags_str]
result = editing_task.realize_output([tags])
expected = "word2 word1 and word3 "
self.assertEqual(expected, result)
if __name__ == '__main__':
unittest.main()
<|fim▁end|> | test_realize_output_out_of_order |
<|file_name|>resources.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# String literals representing core resources.
ADDRESS_GROUP = 'address_group'
AGENT = 'agent'
FLOATING_IP = 'floatingip'
LOCAL_IP_ASSOCIATION = 'local_ip_association'
NETWORK = 'network'
NETWORKS = 'networks'
PORT = 'port'
PORTS = 'ports'
PORT_BINDING = 'port_binding'
PORT_DEVICE = 'port_device'
PROCESS = 'process'
RBAC_POLICY = 'rbac-policy'
ROUTER = 'router'<|fim▁hole|>ROUTER_GATEWAY = 'router_gateway'
ROUTER_INTERFACE = 'router_interface'
SECURITY_GROUP = 'security_group'
SECURITY_GROUP_RULE = 'security_group_rule'
SEGMENT = 'segment'
SEGMENT_HOST_MAPPING = 'segment_host_mapping'
SUBNET = 'subnet'
SUBNETS = 'subnets'
SUBNETPOOL_ADDRESS_SCOPE = 'subnetpool_address_scope'
SUBPORTS = 'subports'
TRUNK = 'trunk'
TRUNK_PLUGIN = 'trunk_plugin'<|fim▁end|> | ROUTER_CONTROLLER = 'router_controller' |
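These literals serve as resource keys for the callback machinery; a consumer typically subscribes a function to a (resource, event) pair. A sketch using neutron-lib's registry (the exact callback signature varies across releases, so treat it as an assumption):

```python
# Hypothetical consumer of the constants above, via neutron_lib's
# callback registry.
from neutron_lib.callbacks import events, registry, resources


def log_port_create(resource, event, trigger, payload=None):
    # Invoked after a port is created; resource == resources.PORT.
    print('callback fired for %s / %s' % (resource, event))


registry.subscribe(log_port_create, resources.PORT, events.AFTER_CREATE)
```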
<|file_name|>fetch.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# This is a virtual module that is entirely implemented as an action plugin and runs on the controller
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: fetch
short_description: Fetch files from remote nodes
description:
- This module works like M(copy), but in reverse.
- It is used for fetching files from remote machines and storing them locally in a file tree, organized by hostname.
- Files that already exist at I(dest) will be overwritten if they are different than the I(src).
- This module is also supported for Windows targets.
version_added: '0.2'
options:
src:
description:
- The file on the remote system to fetch.
- This I(must) be a file, not a directory.
- Recursive fetching may be supported in a later release.
required: yes
dest:
description:
- A directory to save the file into.
- For example, if the I(dest) directory is C(/backup) a I(src) file named C(/etc/profile) on host
C(host.example.com), would be saved into C(/backup/host.example.com/etc/profile).
The host name is based on the inventory name.
required: yes
fail_on_missing:
version_added: '1.1'
description:
- When set to C(yes), the task will fail if the remote file cannot be read for any reason.
- Prior to Ansible 2.5, setting this would only fail if the source file was missing.
- The default was changed to C(yes) in Ansible 2.5.
type: bool
default: yes
validate_checksum:
version_added: '1.4'
description:
- Verify that the source and destination checksums match after the files are fetched.
type: bool
default: yes
flat:
version_added: '1.2'
description:
- Allows you to override the default behavior of appending hostname/path/to/file to the destination.
- If C(dest) ends with '/', it will use the basename of the source file, similar to the copy module.
- This can be useful if working with a single host, or if retrieving files that are uniquely named per host.
- If using multiple hosts with the same filename, the file will be overwritten for each host.
type: bool
default: no
notes:
- When running fetch with C(become), the M(slurp) module will also be
used to fetch the contents of the file for determining the remote
checksum. This effectively doubles the transfer size, and
depending on the file size can consume all available memory on the
remote or local hosts causing a C(MemoryError). Due to this it is
advisable to run this module without C(become) whenever possible.
- Prior to Ansible 2.5 this module would not fail if reading the remote
file was impossible unless C(fail_on_missing) was set.
- In Ansible 2.5 or later, playbook authors are encouraged to use<|fim▁hole|> also explicitly set C(fail_on_missing) to C(no) to get the
non-failing behaviour.
- This module is also supported for Windows targets.
seealso:
- module: copy
- module: slurp
author:
- Ansible Core Team
- Michael DeHaan
'''
EXAMPLES = r'''
- name: Store file into /tmp/fetched/host.example.com/tmp/somefile
fetch:
src: /tmp/somefile
dest: /tmp/fetched
- name: Specifying a path directly
fetch:
src: /tmp/somefile
dest: /tmp/prefix-{{ inventory_hostname }}
flat: yes
- name: Specifying a destination path
fetch:
src: /tmp/uniquefile
dest: /tmp/special/
flat: yes
- name: Storing in a path relative to the playbook
fetch:
src: /tmp/uniquefile
dest: special/prefix-{{ inventory_hostname }}
flat: yes
'''<|fim▁end|> | C(fail_when) or C(ignore_errors) to get this ability. They may |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|>from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)<|fim▁end|> | from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
<|fim_middle|>
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number') |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
<|fim_middle|>
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong') |
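One point worth calling out in the completion above: `FormField(TelephoneForm)` nests the whole subform under a single parent field, so the subfields are reached through the parent and their HTML names get a `mobile_phone-` prefix. A rough driver (hypothetical; run inside a test request context so flask_wtf can set up CSRF):

```python
# Sketch: inspect the nested FormField structure without a browser.
app = create_app()
with app.test_request_context():
    form = ExampleForm()
    # Subform fields hang off the parent field object...
    print(form.mobile_phone.country_code.name)  # 'mobile_phone-country_code'
    # ...and .data on the parent yields a dict of the subform's values.
    print(form.office_phone.data)
```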
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
<|fim_middle|>
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | raise ValidationError('Always wrong') |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
<|fim_middle|>
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
<|fim_middle|>
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | create_app().run(debug=True) |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def <|fim_middle|>(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | validate_hidden_field |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def <|fim_middle|>(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def index():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | create_app |
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from flask import Flask, render_template, flash
from flask_material_lite import Material_Lite
from flask_appconfig import AppConfig
from flask_wtf import Form, RecaptchaField
from flask_wtf.file import FileField
from wtforms import TextField, HiddenField, ValidationError, RadioField,\
BooleanField, SubmitField, IntegerField, FormField, validators
from wtforms.validators import Required
# straight from the wtforms docs:
class TelephoneForm(Form):
country_code = IntegerField('Country Code', [validators.required()])
area_code = IntegerField('Area Code/Exchange', [validators.required()])
number = TextField('Number')
class ExampleForm(Form):
field1 = TextField('First Field', description='This is field one.')
field2 = TextField('Second Field', description='This is field two.',
validators=[Required()])
hidden_field = HiddenField('You cannot see this', description='Nope')
recaptcha = RecaptchaField('A sample recaptcha field')
radio_field = RadioField('This is a radio field', choices=[
('head_radio', 'Head radio'),
('radio_76fm', "Radio '76 FM"),
('lips_106', 'Lips 106'),
('wctr', 'WCTR'),
])
checkbox_field = BooleanField('This is a checkbox',
description='Checkboxes can be tricky.')
# subforms
mobile_phone = FormField(TelephoneForm)
# you can change the label as well
office_phone = FormField(TelephoneForm, label='Your office phone')
ff = FileField('Sample upload')
submit_button = SubmitField('Submit Form')
def validate_hidden_field(form, field):
raise ValidationError('Always wrong')
def create_app(configfile=None):
app = Flask(__name__)
AppConfig(app, configfile) # Flask-Appconfig is not necessary, but
# highly recommended =)
# https://github.com/mbr/flask-appconfig
Material_Lite(app)
# in a real app, these should be configured through Flask-Appconfig
app.config['SECRET_KEY'] = 'devkey'
app.config['RECAPTCHA_PUBLIC_KEY'] = \
'6Lfol9cSAAAAADAkodaYl9wvQCwBMr3qGR_PPHcw'
@app.route('/', methods=('GET', 'POST'))
def <|fim_middle|>():
form = ExampleForm()
form.validate_on_submit() # to get error messages to the browser
flash('critical message', 'critical')
flash('error message', 'error')
flash('warning message', 'warning')
flash('info message', 'info')
flash('debug message', 'debug')
flash('different message', 'different')
flash('uncategorized message')
return render_template('index.html', form=form)
return app
if __name__ == '__main__':
create_app().run(debug=True)
<|fim▁end|> | index |
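Because `create_app` is an app factory, the sample above can be smoke-tested without a running server. A minimal check with Flask's test client (assumes the package's `index.html` template and the Recaptcha config ship as shown):

```python
# Minimal smoke test for the app factory above.
app = create_app()
app.config['TESTING'] = True

with app.test_client() as client:
    response = client.get('/')
    # index() flashes several messages and renders index.html.
    assert response.status_code == 200
```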
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
<|fim▁hole|>
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
y : float | ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a high value then the second
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self<|fim▁end|> |
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver. |
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
<|fim_middle|>
<|fim▁end|> | """ Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self |
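A sketch of how this class is driven end to end: `fit` expects a sparse design matrix and an integer array of (winner, loser) row indices; the compiled `ffm` extension must be available, and `predict` is assumed to be inherited from `FactorizationMachine`, returning one ranking score per row (all data below is fabricated):

```python
import numpy as np
import scipy.sparse as sp

# Toy design matrix: 4 samples x 3 features (made-up values).
X = sp.csc_matrix(np.array([[1., 0., 1.],
                            [0., 1., 1.],
                            [1., 1., 0.],
                            [0., 0., 1.]]))
# Row i says: sample pairs[i, 0] should outrank sample pairs[i, 1].
pairs = np.array([[0, 1],
                  [2, 3]], dtype=np.int64)

fm = FMRecommender(n_iter=200, rank=2, step_size=0.1)
fm.fit(X, pairs)
scores = fm.predict(X)  # assumed base-class method; higher = ranked higher
```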
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
<|fim_middle|>
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self
<|fim▁end|> | super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking" |
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
<|fim_middle|>
<|fim▁end|> | """ Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self |
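Since `fit` consumes preferences rather than labels, explicit ratings have to be turned into index pairs first. A small helper consistent with the checks above (preferred row first; purely illustrative):

```python
import itertools

import numpy as np


def ratings_to_pairs(ratings):
    """Convert per-row ratings into (preferred, other) index pairs."""
    pairs = [(i, j) if ratings[i] > ratings[j] else (j, i)
             for i, j in itertools.combinations(range(len(ratings)), 2)
             if ratings[i] != ratings[j]]
    return np.array(pairs, dtype=np.int32)


# e.g. ratings [5, 3, 4] -> pairs [[0, 1], [0, 2], [2, 1]]
print(ratings_to_pairs([5, 3, 4]))
```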
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
<|fim_middle|>
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self
<|fim▁end|> | self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg |
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
<|fim_middle|>
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self
<|fim▁end|> | self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V |
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def <|fim_middle|>(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def fit(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self
<|fim▁end|> | __init__ |
<|file_name|>bpr.py<|end_file_name|><|fim▁begin|># Author: Immanuel Bayer
# License: BSD 3 clause
import ffm
import numpy as np
from .base import FactorizationMachine
from sklearn.utils.testing import assert_array_equal
from .validation import check_array, assert_all_finite
class FMRecommender(FactorizationMachine):
""" Factorization Machine Recommender with pairwise (BPR) loss solver.
Parameters
----------
n_iter : int, optional
The number of iterations over individual samples.
init_stdev: float, optional
Sets the stdev for the initialization of the parameters.
random_state: int, optional
The seed of the pseudo random number generator that
initializes the parameters and mcmc chain.
rank: int
The rank of the factorization used for the second order interactions.
l2_reg_w : float
L2 penalty weight for linear coefficients.
l2_reg_V : float
L2 penalty weight for pairwise coefficients.
l2_reg : float
L2 penalty weight for all coefficients (default=0).
step_size : float
Stepsize for the SGD solver; the solver uses a fixed step size and
may require tuning of the number of iterations `n_iter`.
Attributes
----------
w0_ : float
bias term
w_ : float | array, shape = (n_features)
Coefficients for linear combination.
V_ : float | array, shape = (rank_pair, n_features)
Coefficients of second order factor matrix.
"""
def __init__(self, n_iter=100, init_stdev=0.1, rank=8, random_state=123,
l2_reg_w=0.1, l2_reg_V=0.1, l2_reg=0, step_size=0.1):
super(FMRecommender, self).\
__init__(n_iter=n_iter, init_stdev=init_stdev, rank=rank,
random_state=random_state)
if (l2_reg != 0):
self.l2_reg_V = l2_reg
self.l2_reg_w = l2_reg
else:
self.l2_reg_w = l2_reg_w
self.l2_reg_V = l2_reg_V
self.step_size = step_size
self.task = "ranking"
def <|fim_middle|>(self, X, pairs):
""" Fit model with specified loss.
Parameters
----------
X : scipy.sparse.csc_matrix, (n_samples, n_features)
pairs : ndarray, shape = (n_compares, 2)
Each row `i` defines a pair of samples such that
the first returns a higher value than the second:
FM(X[i,0]) > FM(X[i, 1]).
"""
X = X.T
X = check_array(X, accept_sparse="csc", dtype=np.float64)
assert_all_finite(pairs)
pairs = pairs.astype(np.float64)
# check that pairs contain no real values
assert_array_equal(pairs, pairs.astype(np.int32))
assert pairs.max() <= X.shape[1]
assert pairs.min() >= 0
self.w0_, self.w_, self.V_ = ffm.ffm_fit_sgd_bpr(self, X, pairs)
return self
<|fim▁end|> | fit |
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Stixrude and Lithgow-Bertelloni data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else: <|fim▁hole|> print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''<|fim▁end|> | print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)' |
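The script is Python 2 throughout (bare `print` statements, `decode` on lines read in text mode). For reference, a Python 3 equivalent of just the dataset reader might look like this (behavioral equivalence with the original `.dat` file is assumed, not verified):

```python
def read_dataset(datafile):
    # Python 3: open in text mode with an explicit encoding;
    # no per-line decode step is needed.
    with open(datafile, encoding='utf-8') as f:
        return [line.split() for line in f]
```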
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Stixrude and Lithgow-Bertelloni data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
<|fim_middle|>
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
<|fim▁end|> | f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds |
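For orientation, the print block turns each data row into a Mineral subclass; one generated stanza looks roughly like the following (the key names come from the `.dat` header row, so every identifier and number here is a placeholder, not real data):

```python
class fo (Mineral):
    def __init__(self):
        formula='Mg2.0Si1.0O4.0'
        formula = dictionarize_formula(formula)
        self.params = {
            'name': 'fo',
            'formula': formula,
            'equation_of_state': 'hp_tmt',
            'H_0': -2172590.0,    # placeholder value
            'S_0': 95.1,          # placeholder value
            'V_0': 4.366e-05,     # placeholder value
            'Cp': [233.3, 0.001494, -603800.0, -1869.7],
            'a_0': 2.85e-05,
            'K_0': 128500000000.0,
            'Kprime_0': 3.84,
            'Kdprime_0': -3e-11,
            'n': sum(formula.values()),
            'molar_mass': formula_mass(formula, atomic_masses)}
        self.uncertainties = {
            'err_H_0': 1000.0}    # key taken from param_names[3]
        Mineral.__init__(self)
```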
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Stixrude and Lithgow-Bertelloni data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
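# The -1 placeholders mark the non-numeric name and formula columns; every
# other entry is the column-wise multiplier that converts that column of the
# input table to S.I. units.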
formula='0'
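# The first table row holds the parameter names; every later row is turned
# into a generated Mineral subclass printed to stdout.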
for idx, m in enumerate(ds):
if idx == 0:
<|fim_middle|>
else:
print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
<|fim▁end|> | param_names=m |
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Holland et al. (2013) data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
<|fim_middle|>
<|fim▁end|> | print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print '' |
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Holland et al. (2013) data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
<|fim_middle|>
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
<|fim▁end|> | print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ',' |
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Holland et al. (2013) data format into the standard burnman format (printed to stdout)
import sys
def read_dataset(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
<|fim_middle|>
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
<|fim▁end|> | print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ',' |
<|file_name|>HHPH2013data_to_burnman.py<|end_file_name|><|fim▁begin|># BurnMan - a lower mantle toolkit
# Copyright (C) 2012-2014, Myhill, R., Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
# This is a standalone program that converts a tabulated version of the Holland et al. (2013) data format into the standard burnman format (printed to stdout)
import sys
def <|fim_middle|>(datafile):
f=open(datafile,'r')
ds=[]
for line in f:
ds.append(line.decode('utf-8').split())
return ds
ds=read_dataset('HHPH2013_endmembers.dat')
print '# BurnMan - a lower mantle toolkit'
print '# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.'
print '# Released under GPL v2 or later.'
print ''
print '"""'
print 'HHPH_2013'
print 'Minerals from Holland et al 2013 and references therein'
print 'The values in this document are all in S.I. units,'
print 'unlike those in the original paper'
print 'File autogenerated using HHPH2013data_to_burnman.py'
print '"""'
print ''
print 'from burnman.mineral import Mineral'
print 'from burnman.solidsolution import SolidSolution'
print 'from burnman.solutionmodel import *'
print 'from burnman.processchemistry import read_masses, dictionarize_formula, formula_mass'
print ''
print 'atomic_masses=read_masses()'
print ''
print '"""'
print 'ENDMEMBERS'
print '"""'
print ''
param_scales = [ -1., -1., #not numbers, so we won't scale
1.e3, 1.e3, #kJ -> J
1.0, # J/K/mol
1.e-5, # kJ/kbar/mol -> m^3/mol
1.e3, 1.e-2, 1.e3, 1.e3, # kJ -> J and table conversion for b
1.e-5, # table conversion
1.e8, # kbar -> Pa
1.0, # no scale for K'0
1.e-8] #GPa -> Pa # no scale for eta_s
formula='0'
for idx, m in enumerate(ds):
if idx == 0:
param_names=m
else:
print 'class', m[0].lower(), '(Mineral):'
print ' def __init__(self):'
print ''.join([' formula=\'',m[1],'\''])
print ' formula = dictionarize_formula(formula)'
print ' self.params = {'
print ''.join([' \'name\': \'', m[0], '\','])
print ' \'formula\': formula,'
print ' \'equation_of_state\': \'hp_tmt\','
for pid, param in enumerate(m):
if pid > 1 and pid != 3 and pid<6:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'Cp\':', [round(float(m[i])*param_scales[i],10) for i in [6, 7, 8, 9]], ','
for pid, param in enumerate(m):
if pid > 9:
print ' \''+param_names[pid]+'\':', float(param)*param_scales[pid], ','
print ' \'n\': sum(formula.values()),'
print ' \'molar_mass\': formula_mass(formula, atomic_masses)}'
print ''
print ' self.uncertainties = {'
print ' \''+param_names[3]+'\':', float(m[3])*param_scales[3], '}'
print ' Mineral.__init__(self)'
print ''
<|fim▁end|> | read_dataset |
<|file_name|>re.4.py<|end_file_name|><|fim▁begin|>import re<|fim▁hole|>p = re.compile(r'(\w+) (\w+)(?P<sign>.*)', re.DOTALL)
print re.DOTALL
print "p.pattern:", p.pattern
print "p.flags:", p.flags
print "p.groups:", p.groups
print "p.groupindex:", p.groupindex<|fim▁end|> | |
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>"""redblue_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home<|fim▁hole|> 1. Add an import: from blog import urls as blog_urls
2. Import the include() function: from django.conf.urls import url, include
3. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^red/', include('apps.red_app.urls', namespace='red_namespace')),
url(r'^blue/', include('apps.blue_app.urls', namespace='blue_namespace')),
url(r'^admin/', admin.site.urls),
]<|fim▁end|> | 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
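        # Draw one Beta(beta, beta) mixing weight per example; max(mix, 1 - mix)
        # keeps the weight >= 0.5, so each mixed sample remains dominated by its
        # own image and label rather than by the reversed batch.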
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
<|fim▁hole|> post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
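        # The softmax predictions serve as pseudo-labels for the unlabeled
        # batch; stop_gradient keeps the target branch out of backpropagation.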
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
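        # Decoupled weight decay: shrink every conv kernel by (1 - wd) after
        # each step (wd was already multiplied by the learning rate above).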
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)<|fim▁end|> | x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x) |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
<|fim_middle|>
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False))) |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
<|fim_middle|>
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
<|fim_middle|>
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False))) |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
<|fim_middle|>
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | logits = classifier(x, training=True)
return logits |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
<|fim_middle|>
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10) |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
<|fim_middle|>
<|fim▁end|> | utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main) |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def <|fim_middle|>(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | augment |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def <|fim_middle|>(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | model |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def <|fim_middle|>(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def main(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | get_logits |
<|file_name|>mixup.py<|end_file_name|><|fim▁begin|># Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mixup: Beyond Empirical Risk Minimization.
Adaptation to SSL of MixUp: https://arxiv.org/abs/1710.09412
"""
import functools
import os
import tensorflow as tf
from absl import app
from absl import flags
from libml import data, utils, models
from libml.utils import EasyDict
FLAGS = flags.FLAGS
class Mixup(models.MultiModel):
def augment(self, x, l, beta, **kwargs):
del kwargs
mix = tf.distributions.Beta(beta, beta).sample([tf.shape(x)[0], 1, 1, 1])
mix = tf.maximum(mix, 1 - mix)
xmix = x * mix + x[::-1] * (1 - mix)
lmix = l * mix[:, :, 0, 0] + l[::-1] * (1 - mix[:, :, 0, 0])
return xmix, lmix
def model(self, batch, lr, wd, ema, **kwargs):
hwc = [self.dataset.height, self.dataset.width, self.dataset.colors]
xt_in = tf.placeholder(tf.float32, [batch] + hwc, 'xt') # For training
x_in = tf.placeholder(tf.float32, [None] + hwc, 'x')
y_in = tf.placeholder(tf.float32, [batch] + hwc, 'y')
l_in = tf.placeholder(tf.int32, [batch], 'labels')
wd *= lr
classifier = lambda x, **kw: self.classifier(x, **kw, **kwargs).logits
def get_logits(x):
logits = classifier(x, training=True)
return logits
x, labels_x = self.augment(xt_in, tf.one_hot(l_in, self.nclass), **kwargs)
logits_x = get_logits(x)
post_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
y, labels_y = self.augment(y_in, tf.nn.softmax(get_logits(y_in)), **kwargs)
labels_y = tf.stop_gradient(labels_y)
logits_y = get_logits(y)
loss_xe = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_x, logits=logits_x)
loss_xe = tf.reduce_mean(loss_xe)
loss_xeu = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_y, logits=logits_y)
loss_xeu = tf.reduce_mean(loss_xeu)
tf.summary.scalar('losses/xe', loss_xe)
tf.summary.scalar('losses/xeu', loss_xeu)
ema = tf.train.ExponentialMovingAverage(decay=ema)
ema_op = ema.apply(utils.model_vars())
ema_getter = functools.partial(utils.getter_ema, ema)
post_ops.append(ema_op)
post_ops.extend([tf.assign(v, v * (1 - wd)) for v in utils.model_vars('classify') if 'kernel' in v.name])
train_op = tf.train.AdamOptimizer(lr).minimize(loss_xe + loss_xeu, colocate_gradients_with_ops=True)
with tf.control_dependencies([train_op]):
train_op = tf.group(*post_ops)
return EasyDict(
xt=xt_in, x=x_in, y=y_in, label=l_in, train_op=train_op,
classify_raw=tf.nn.softmax(classifier(x_in, training=False)), # No EMA, for debugging.
classify_op=tf.nn.softmax(classifier(x_in, getter=ema_getter, training=False)))
def <|fim_middle|>(argv):
utils.setup_main()
del argv # Unused.
dataset = data.DATASETS()[FLAGS.dataset]()
log_width = utils.ilog2(dataset.width)
model = Mixup(
os.path.join(FLAGS.train_dir, dataset.name),
dataset,
lr=FLAGS.lr,
wd=FLAGS.wd,
arch=FLAGS.arch,
batch=FLAGS.batch,
nclass=dataset.nclass,
ema=FLAGS.ema,
beta=FLAGS.beta,
scales=FLAGS.scales or (log_width - 2),
filters=FLAGS.filters,
repeat=FLAGS.repeat)
model.train(FLAGS.train_kimg << 10, FLAGS.report_kimg << 10)
if __name__ == '__main__':
utils.setup_tf()
flags.DEFINE_float('wd', 0.02, 'Weight decay.')
flags.DEFINE_float('ema', 0.999, 'Exponential moving average of params.')
flags.DEFINE_float('beta', 0.5, 'Mixup beta distribution.')
flags.DEFINE_integer('scales', 0, 'Number of 2x2 downscalings in the classifier.')
flags.DEFINE_integer('filters', 32, 'Filter size of convolutions.')
flags.DEFINE_integer('repeat', 4, 'Number of residual layers per stage.')
FLAGS.set_default('dataset', 'cifar10.3@250-5000')
FLAGS.set_default('batch', 64)
FLAGS.set_default('lr', 0.002)
FLAGS.set_default('train_kimg', 1 << 16)
app.run(main)
<|fim▁end|> | main |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property<|fim▁hole|> @cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'<|fim▁end|> | def cn(self): return self._cn |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
<|fim_middle|>
<|fim▁end|> | def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>' |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
<|fim_middle|>
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = [] |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): <|fim_middle|>
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | return self._dn |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): <|fim_middle|>
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | return self._cn |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
<|fim_middle|>
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | self._cn.append(v) |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): <|fim_middle|>
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | return self._displayName |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
<|fim_middle|>
@property
def givenName(self): return self._givenName
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | self._displayName.append(v) |
<|file_name|>dn.py<|end_file_name|><|fim▁begin|>#
# Copyright 2015 Fasih
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class DN(object):
def __init__(self, dn):
self._dn = dn.replace(',dn', '')
self._cn = []
self._displayName = []
self._givenName = []
self._homePhone = []
self._homePostalAddress = []
self._mail = []
self._mobile = []
self._o = []
self._objectClass = []
self._sn = []
self._telephoneNumber = []
self._title = []
@property
def dn(self): return self._dn
@property
def cn(self): return self._cn
@cn.setter
def cn(self, v):
self._cn.append(v)
@property
def displayName(self): return self._displayName
@displayName.setter
def displayName(self, v):
self._displayName.append(v)
@property
def givenName(self): <|fim_middle|>
@givenName.setter
def givenName(self, v):
self._givenName.append(v)
@property
def homePhone(self): return self._homePhone
@homePhone.setter
def homePhone(self, v):
self._homePhone.append(v)
@property
def homePostalAddress(self): return self._homePostalAddress
@homePostalAddress.setter
def homePostalAddress(self, v):
self._homePostalAddress.append(v)
@property
def mail(self): return self._mail
@mail.setter
def mail(self, v):
self._mail.append(v)
@property
def mobile(self): return self._mobile
@mobile.setter
def mobile(self, v):
self._mobile.append(v)
@property
def o(self): return self._o
@o.setter
def o(self, v):
self._o.append(v)
@property
def objectClass(self): return self._objectClass
@objectClass.setter
def objectClass(self, v):
self._objectClass.append(v)
@property
def sn(self): return self._sn
@sn.setter
def sn(self, v):
self._sn.append(v)
@property
def telephoneNumber(self): return self._telephoneNumber
@telephoneNumber.setter
def telephoneNumber(self, v):
self._telephoneNumber.append(v)
@property
def title(self): return self._title
@title.setter
def title(self, v):
self._title.append(v)
def csv(self):
items = []
items.append(self.displayName)
items.append(self.givenName)
items.append(self.sn)
items.append(self.title)
items.append(['Home'])
items.append(self.homePhone)
items.append(['Mobile'])
items.append(self.mobile)
items.append(['Mobile'])
items.append(self.telephoneNumber)
items.append(['Home'])
items.append(self.homePostalAddress)
items.append(self.mail)
items.append(self.o)
return ','.join([' ::: '.join([x.replace(',', ' ') for x in i]) for i in items])
def __str__(self):
s = 'DN<dn=%s' % self._dn
if self.cn != []: s += ', cn=%s' % self.cn
if self.displayName != []: s += ', displayName=%s' % self.displayName
if self.givenName != []: s += ', givenName=%s' % self.givenName
if self.homePhone != []: s += ', homePhone=%s' % self.homePhone
if self.homePostalAddress != []: s += ', homePostalAddress=%s' % self.homePostalAddress
if self.mail != []: s += ', mail=%s' % self.mail
if self.mobile != []: s += ', mobile=%s' % self.mobile
if self.o != []: s += ', o=%s' % self.o
if self.objectClass != []: s += ', objectClass=%s' % self.objectClass
if self.sn != []: s += ', sn=%s' % self.sn
if self.telephoneNumber != []: s += ', telephoneNumber=%s' % self.telephoneNumber
if self.title != []: s += ', title=%s' % self.title
return s + '>'
<|fim▁end|> | return self._givenName |